Codebase list matrix-synapse / 483b488
New upstream version 0.99.3.2 Andrej Shadura 5 years ago
178 changed files with 8284 additions and 3806 deletions.
0 CI
1 BUILDKITE
2 BUILDKITE_BUILD_NUMBER
3 BUILDKITE_BRANCH
4 BUILDKITE_BUILD_NUMBER
5 BUILDKITE_JOB_ID
6 BUILDKITE_BUILD_URL
7 BUILDKITE_PROJECT_SLUG
8 BUILDKITE_COMMIT
9 BUILDKITE_PULL_REQUEST
10 BUILDKITE_TAG
11 CODECOV_TOKEN
12 TRIAL_FLAGS
0 version: '3.1'
1
2 services:
3
4 postgres:
5 image: postgres:9.4
6 environment:
7 POSTGRES_PASSWORD: postgres
8
9 testenv:
10 image: python:2.7
11 depends_on:
12 - postgres
13 env_file: .env
14 environment:
15 SYNAPSE_POSTGRES_HOST: postgres
16 SYNAPSE_POSTGRES_USER: postgres
17 SYNAPSE_POSTGRES_PASSWORD: postgres
18 working_dir: /app
19 volumes:
20 - ..:/app
0 version: '3.1'
1
2 services:
3
4 postgres:
5 image: postgres:9.5
6 environment:
7 POSTGRES_PASSWORD: postgres
8
9 testenv:
10 image: python:2.7
11 depends_on:
12 - postgres
13 env_file: .env
14 environment:
15 SYNAPSE_POSTGRES_HOST: postgres
16 SYNAPSE_POSTGRES_USER: postgres
17 SYNAPSE_POSTGRES_PASSWORD: postgres
18 working_dir: /app
19 volumes:
20 - ..:/app
0 version: '3.1'
1
2 services:
3
4 postgres:
5 image: postgres:9.4
6 environment:
7 POSTGRES_PASSWORD: postgres
8
9 testenv:
10 image: python:3.5
11 depends_on:
12 - postgres
13 env_file: .env
14 environment:
15 SYNAPSE_POSTGRES_HOST: postgres
16 SYNAPSE_POSTGRES_USER: postgres
17 SYNAPSE_POSTGRES_PASSWORD: postgres
18 working_dir: /app
19 volumes:
20 - ..:/app
0 version: '3.1'
1
2 services:
3
4 postgres:
5 image: postgres:9.5
6 environment:
7 POSTGRES_PASSWORD: postgres
8
9 testenv:
10 image: python:3.5
11 depends_on:
12 - postgres
13 env_file: .env
14 environment:
15 SYNAPSE_POSTGRES_HOST: postgres
16 SYNAPSE_POSTGRES_USER: postgres
17 SYNAPSE_POSTGRES_PASSWORD: postgres
18 working_dir: /app
19 volumes:
20 - ..:/app
0 version: '3.1'
1
2 services:
3
4 postgres:
5 image: postgres:11
6 environment:
7 POSTGRES_PASSWORD: postgres
8
9 testenv:
10 image: python:3.7
11 depends_on:
12 - postgres
13 env_file: .env
14 environment:
15 SYNAPSE_POSTGRES_HOST: postgres
16 SYNAPSE_POSTGRES_USER: postgres
17 SYNAPSE_POSTGRES_PASSWORD: postgres
18 working_dir: /app
19 volumes:
20 - ..:/app
0 version: '3.1'
1
2 services:
3
4 postgres:
5 image: postgres:9.5
6 environment:
7 POSTGRES_PASSWORD: postgres
8
9 testenv:
10 image: python:3.7
11 depends_on:
12 - postgres
13 env_file: .env
14 environment:
15 SYNAPSE_POSTGRES_HOST: postgres
16 SYNAPSE_POSTGRES_USER: postgres
17 SYNAPSE_POSTGRES_PASSWORD: postgres
18 working_dir: /app
19 volumes:
20 - ..:/app
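These compose files mirror the CI matrix in the pipeline below, and can also be used to reproduce a CI run locally. A sketch, assuming Docker Compose is installed and the command is run from the repository root (the codecov step is dropped, as it needs CI credentials):

```
docker-compose -f .buildkite/docker-compose.py37.pg95.yaml run testenv \
    bash -c 'python -m pip install tox && python -m tox -e py37-postgres'
```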
0 env:
1 CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
2
3 steps:
4 - command:
5 - "python -m pip install tox"
6 - "tox -e pep8"
7 label: "\U0001F9F9 PEP-8"
8 plugins:
9 - docker#v3.0.1:
10 image: "python:3.6"
11
12 - command:
13 - "python -m pip install tox"
14 - "tox -e packaging"
15 label: "\U0001F9F9 packaging"
16 plugins:
17 - docker#v3.0.1:
18 image: "python:3.6"
19
20 - command:
21 - "python -m pip install tox"
22 - "tox -e check_isort"
23 label: "\U0001F9F9 isort"
24 plugins:
25 - docker#v3.0.1:
26 image: "python:3.6"
27
28 - command:
29 - "python -m pip install tox"
30 - "scripts-dev/check-newsfragment"
31 label: ":newspaper: Newsfile"
32 branches: "!master !develop !release-*"
33 plugins:
34 - docker#v3.0.1:
35 image: "python:3.6"
36 propagate-environment: true
37
38 - wait
39
40 - command:
41 - "python -m pip install tox"
42 - "tox -e check-sampleconfig"
43 label: "\U0001F9F9 check-sample-config"
44 plugins:
45 - docker#v3.0.1:
46 image: "python:3.6"
47
48 - command:
49 - "python -m pip install tox"
50 - "tox -e py27,codecov"
51 label: ":python: 2.7 / SQLite"
52 env:
53 TRIAL_FLAGS: "-j 2"
54 plugins:
55 - docker#v3.0.1:
56 image: "python:2.7"
57 propagate-environment: true
58
59 - command:
60 - "python -m pip install tox"
61 - "tox -e py35,codecov"
62 label: ":python: 3.5 / SQLite"
63 env:
64 TRIAL_FLAGS: "-j 2"
65 plugins:
66 - docker#v3.0.1:
67 image: "python:3.5"
68 propagate-environment: true
69
70 - command:
71 - "python -m pip install tox"
72 - "tox -e py36,codecov"
73 label: ":python: 3.6 / SQLite"
74 env:
75 TRIAL_FLAGS: "-j 2"
76 plugins:
77 - docker#v3.0.1:
78 image: "python:3.6"
79 propagate-environment: true
80
81 - command:
82 - "python -m pip install tox"
83 - "tox -e py37,codecov"
84 label: ":python: 3.7 / SQLite"
85 env:
86 TRIAL_FLAGS: "-j 2"
87 plugins:
88 - docker#v3.0.1:
89 image: "python:3.7"
90 propagate-environment: true
91
92 - command:
93 - "python -m pip install tox"
94 - "tox -e py27-old,codecov"
95 label: ":python: 2.7 / SQLite / Old Deps"
96 env:
97 TRIAL_FLAGS: "-j 2"
98 plugins:
99 - docker#v3.0.1:
100 image: "python:2.7"
101 propagate-environment: true
102
103 - label: ":python: 2.7 / :postgres: 9.4"
104 env:
105 TRIAL_FLAGS: "-j 4"
106 command:
107 - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
108 plugins:
109 - docker-compose#v2.1.0:
110 run: testenv
111 config:
112 - .buildkite/docker-compose.py27.pg94.yaml
113
114 - label: ":python: 2.7 / :postgres: 9.5"
115 env:
116 TRIAL_FLAGS: "-j 4"
117 command:
118 - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
119 plugins:
120 - docker-compose#v2.1.0:
121 run: testenv
122 config:
123 - .buildkite/docker-compose.py27.pg95.yaml
124
125 - label: ":python: 3.5 / :postgres: 9.4"
126 env:
127 TRIAL_FLAGS: "-j 4"
128 command:
129 - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
130 plugins:
131 - docker-compose#v2.1.0:
132 run: testenv
133 config:
134 - .buildkite/docker-compose.py35.pg94.yaml
135
136 - label: ":python: 3.5 / :postgres: 9.5"
137 env:
138 TRIAL_FLAGS: "-j 4"
139 command:
140 - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
141 plugins:
142 - docker-compose#v2.1.0:
143 run: testenv
144 config:
145 - .buildkite/docker-compose.py35.pg95.yaml
146
147 - label: ":python: 3.7 / :postgres: 9.5"
148 env:
149 TRIAL_FLAGS: "-j 4"
150 command:
151 - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
152 plugins:
153 - docker-compose#v2.1.0:
154 run: testenv
155 config:
156 - .buildkite/docker-compose.py37.pg95.yaml
157
158 - label: ":python: 3.7 / :postgres: 11"
159 env:
160 TRIAL_FLAGS: "-j 4"
161 command:
162 - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
163 plugins:
164 - docker-compose#v2.1.0:
165 run: testenv
166 config:
167 - .buildkite/docker-compose.py37.pg11.yaml
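The same tox environments can also be run outside CI. A minimal sketch for the lint step (substitute any of the environment names above):

```
python -m pip install tox
tox -e pep8
```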
33
44 ---
55
6 <!--
77
8 **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
99 You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)
1010
1111
1616 You can also preview your report before submitting it. You may remove sections
1717 that aren't relevant to your particular case.
1818
19 Text between <!-- and --> marks will be invisible in the report.
2020
2121 -->
2222
3030 - that reproduce the bug
3131 - using hyphens as bullet points
3232
33 <!--
3434 Describe how what happens differs from what you expected.
3535
3636 If you can identify any relevant log snippets from _homeserver.log_, please include
4747
4848 If not matrix.org:
4949
50 <!--
51 What version of Synapse is running?
5252 You can find the Synapse version by inspecting the server headers (replace matrix.org with
5353 your own homeserver domain):
5454 $ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
1111 _trial_temp*/
1212
1313 # stuff that is likely to exist when you run a server locally
14 /*.db
15 /*.log
16 /*.log.config
17 /*.pid
1418 /*.signing.key
15 /*.tls.crt
16 /*.tls.key
19 /env/
20 /homeserver*.yaml
21 /media_store/
1722 /uploads
18 /media_store/
1923
2024 # IDEs
2125 /.idea/
.travis.yml: 0 additions, 97 deletions (file removed)
0 dist: xenial
1 language: python
2
3 cache:
4 directories:
5 # we only bother to cache the wheels; parts of the http cache get
6 # invalidated every build (because they get served with a max-age of 600
7 # seconds), which means that we end up re-uploading the whole cache for
8 # every build, which is time-consuming. In any case, it's not obvious that
9 # downloading the cache from S3 would be much faster than downloading the
10 # originals from pypi.
11 #
12 - $HOME/.cache/pip/wheels
13
14 # don't clone the whole repo history, one commit will do
15 git:
16 depth: 1
17
18 # only build branches we care about (PRs are built separately)
19 branches:
20 only:
21 - master
22 - develop
23 - /^release-v/
24 - rav/pg95
25
26 # When running the tox environments that call Twisted Trial, we can pass the -j
27 # flag to run the tests concurrently. We set this to 2 for CPU bound tests
28 # (SQLite) and 4 for I/O bound tests (PostgreSQL).
29 matrix:
30 fast_finish: true
31 include:
32 - name: "pep8"
33 python: 3.6
34 env: TOX_ENV="pep8,check_isort,packaging"
35
36 - name: "py2.7 / sqlite"
37 python: 2.7
38 env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"
39
40 - name: "py2.7 / sqlite / olddeps"
41 python: 2.7
42 env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
43
44 - name: "py2.7 / postgres9.5"
45 python: 2.7
46 addons:
47 postgresql: "9.5"
48 env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
49 services:
50 - postgresql
51
52 - name: "py3.5 / sqlite"
53 python: 3.5
54 env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"
55
56 - name: "py3.7 / sqlite"
57 python: 3.7
58 env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"
59
60 - name: "py3.7 / postgres9.4"
61 python: 3.7
62 addons:
63 postgresql: "9.4"
64 env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
65 services:
66 - postgresql
67
68 - name: "py3.7 / postgres9.5"
69 python: 3.7
70 addons:
71 postgresql: "9.5"
72 env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
73 services:
74 - postgresql
75
76 - # we only need to check for the newsfragment if it's a PR build
77 if: type = pull_request
78 name: "check-newsfragment"
79 python: 3.6
80 script: scripts-dev/check-newsfragment
81
82 install:
83 # this just logs the postgres version we will be testing against (if any)
84 - psql -At -U postgres -c 'select version();' || true
85
86 - pip install tox
87
88 # if we don't have python3.6 in this environment, travis unhelpfully gives us
89 # a `python3.6` on our path which does nothing but spit out a warning. Tox
90 # tries to run it (even if we're not running a py36 env), so the build logs
91 # then have warnings which look like errors. To reduce the noise, remove the
92 # non-functional python3.6.
93 - ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
94
95 script:
96 - tox -e $TOX_ENV
6868
6969 Jason Robinson <jasonr at matrix.org>
7070 * Minor fixes
71
72 Joseph Weston <joseph at weston.cloud>
73 * Add admin API for querying HS version
0 Synapse 0.99.3.2 (2019-05-03)
1 =============================
2
3 Internal Changes
4 ----------------
5
6 - Ensure that we have `urllib3` <1.25, to resolve incompatibility with `requests`. ([\#5135](https://github.com/matrix-org/synapse/issues/5135))
7
8
9 Synapse 0.99.3.1 (2019-05-03)
10 =============================
11
12 Security update
13 ---------------
14
15 This release includes two security fixes:
16
17 - Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue! ([\#5133](https://github.com/matrix-org/synapse/issues/5133))
18 - Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too! ([\#5134](https://github.com/matrix-org/synapse/issues/5134))
19
20 Synapse 0.99.3 (2019-04-01)
21 ===========================
22
23 No significant changes.
24
25
26 Synapse 0.99.3rc1 (2019-03-27)
27 ==============================
28
29 Features
30 --------
31
32 - The user directory has been rewritten to make it faster, with less chance of falling behind on a large server. ([\#4537](https://github.com/matrix-org/synapse/issues/4537), [\#4846](https://github.com/matrix-org/synapse/issues/4846), [\#4864](https://github.com/matrix-org/synapse/issues/4864), [\#4887](https://github.com/matrix-org/synapse/issues/4887), [\#4900](https://github.com/matrix-org/synapse/issues/4900), [\#4944](https://github.com/matrix-org/synapse/issues/4944))
33 - Add configurable rate limiting to the /register endpoint. ([\#4735](https://github.com/matrix-org/synapse/issues/4735), [\#4804](https://github.com/matrix-org/synapse/issues/4804))
34 - Move server key queries to federation reader. ([\#4757](https://github.com/matrix-org/synapse/issues/4757))
35 - Add support for /account/3pid REST endpoint to client_reader worker. ([\#4759](https://github.com/matrix-org/synapse/issues/4759))
36 - Add an endpoint to the admin API for querying the server version. Contributed by Joseph Weston. ([\#4772](https://github.com/matrix-org/synapse/issues/4772))
37 - Include a default configuration file in the 'docs' directory. ([\#4791](https://github.com/matrix-org/synapse/issues/4791), [\#4801](https://github.com/matrix-org/synapse/issues/4801))
38 - Synapse is now permissive about trailing slashes on some of its federation endpoints, allowing zero or more to be present. ([\#4793](https://github.com/matrix-org/synapse/issues/4793))
39 - Add support for /keys/query and /keys/changes REST endpoints to client_reader worker. ([\#4796](https://github.com/matrix-org/synapse/issues/4796))
40 - Add checks to incoming events over federation for events evading auth (aka "soft fail"). ([\#4814](https://github.com/matrix-org/synapse/issues/4814))
41 - Add configurable rate limiting to the /login endpoint. ([\#4821](https://github.com/matrix-org/synapse/issues/4821), [\#4865](https://github.com/matrix-org/synapse/issues/4865))
42 - Remove trailing slashes from certain outbound federation requests. Retry if receiving a 404. Context: #3622. ([\#4840](https://github.com/matrix-org/synapse/issues/4840))
43 - Allow passing --daemonize flags to workers in the same way as with master. ([\#4853](https://github.com/matrix-org/synapse/issues/4853))
44 - Batch up outgoing read-receipts to reduce federation traffic. ([\#4890](https://github.com/matrix-org/synapse/issues/4890), [\#4927](https://github.com/matrix-org/synapse/issues/4927))
45 - Add option to disable searching the user directory. ([\#4895](https://github.com/matrix-org/synapse/issues/4895))
46 - Add option to disable searching of local and remote public room lists. ([\#4896](https://github.com/matrix-org/synapse/issues/4896))
47 - Add ability for password providers to login/register a user via 3PID (email, phone). ([\#4931](https://github.com/matrix-org/synapse/issues/4931))
48
49
50 Bugfixes
51 --------
52
53 - Fix a bug where media with spaces in the name would get a corrupted name. ([\#2090](https://github.com/matrix-org/synapse/issues/2090))
54 - Fix attempting to paginate in rooms where server cannot see any events, to avoid unnecessarily pulling in lots of redacted events. ([\#4699](https://github.com/matrix-org/synapse/issues/4699))
55 - 'event_id' is now a required parameter in federated state requests, as per the matrix spec. ([\#4740](https://github.com/matrix-org/synapse/issues/4740))
56 - Fix tightloop over connecting to replication server. ([\#4749](https://github.com/matrix-org/synapse/issues/4749))
57 - Fix parsing of Content-Disposition headers on remote media requests and URL previews. ([\#4763](https://github.com/matrix-org/synapse/issues/4763))
58 - Fix incorrect log about not persisting duplicate state event. ([\#4776](https://github.com/matrix-org/synapse/issues/4776))
59 - Fix v4v6 option in HAProxy example config. Contributed by Flakebi. ([\#4790](https://github.com/matrix-org/synapse/issues/4790))
60 - Handle batch updates in worker replication protocol. ([\#4792](https://github.com/matrix-org/synapse/issues/4792))
61 - Fix bug where we didn't correctly throttle sending of USER_IP commands over replication. ([\#4818](https://github.com/matrix-org/synapse/issues/4818))
62 - Fix potential race in handling missing updates in device list updates. ([\#4829](https://github.com/matrix-org/synapse/issues/4829))
63 - Fix bug where synapse expected an un-specced `prev_state` field on state events. ([\#4837](https://github.com/matrix-org/synapse/issues/4837))
64 - Transfer a user's notification settings (push rules) on room upgrade. ([\#4838](https://github.com/matrix-org/synapse/issues/4838))
65 - fix test_auto_create_auto_join_where_no_consent. ([\#4886](https://github.com/matrix-org/synapse/issues/4886))
66 - Fix a bug where hs_disabled_message was sometimes not correctly enforced. ([\#4888](https://github.com/matrix-org/synapse/issues/4888))
67 - Fix bug in shutdown room admin API where it would fail if a user in the room hadn't consented to the privacy policy. ([\#4904](https://github.com/matrix-org/synapse/issues/4904))
68 - Fix bug where blocked world-readable rooms were still peekable. ([\#4908](https://github.com/matrix-org/synapse/issues/4908))
69
70
71 Internal Changes
72 ----------------
73
74 - Add a systemd setup that supports synapse workers. Contributed by Luca Corbatto. ([\#4662](https://github.com/matrix-org/synapse/issues/4662))
75 - Change from TravisCI to Buildkite for CI. ([\#4752](https://github.com/matrix-org/synapse/issues/4752))
76 - When presence is disabled don't send over replication. ([\#4757](https://github.com/matrix-org/synapse/issues/4757))
77 - Minor docstring fixes for MatrixFederationAgent. ([\#4765](https://github.com/matrix-org/synapse/issues/4765))
78 - Optimise EDU transmission for the federation_sender worker. ([\#4770](https://github.com/matrix-org/synapse/issues/4770))
79 - Update test_typing to use HomeserverTestCase. ([\#4771](https://github.com/matrix-org/synapse/issues/4771))
80 - Update URLs for riot.im icons and logos in the default notification templates. ([\#4779](https://github.com/matrix-org/synapse/issues/4779))
81 - Removed unnecessary $ from some federation endpoint path regexes. ([\#4794](https://github.com/matrix-org/synapse/issues/4794))
82 - Remove link to deleted title in README. ([\#4795](https://github.com/matrix-org/synapse/issues/4795))
83 - Clean up read-receipt handling. ([\#4797](https://github.com/matrix-org/synapse/issues/4797))
84 - Add some debug about processing read receipts. ([\#4798](https://github.com/matrix-org/synapse/issues/4798))
85 - Clean up some replication code. ([\#4799](https://github.com/matrix-org/synapse/issues/4799))
86 - Add some docstrings. ([\#4815](https://github.com/matrix-org/synapse/issues/4815))
87 - Add debug logger to try and track down #4422. ([\#4816](https://github.com/matrix-org/synapse/issues/4816))
88 - Make shutdown API send explanation message to room after users have been forced joined. ([\#4817](https://github.com/matrix-org/synapse/issues/4817))
89 - Update example_log_config.yaml. ([\#4820](https://github.com/matrix-org/synapse/issues/4820))
90 - Document the `generate` option for the docker image. ([\#4824](https://github.com/matrix-org/synapse/issues/4824))
91 - Fix check-newsfragment for debian-only changes. ([\#4825](https://github.com/matrix-org/synapse/issues/4825))
92 - Add some debug logging for device list updates to help with #4828. ([\#4828](https://github.com/matrix-org/synapse/issues/4828))
93 - Improve federation documentation, specifically .well-known support. Many thanks to @vaab. ([\#4832](https://github.com/matrix-org/synapse/issues/4832))
94 - Disable captcha registration by default in unit tests. ([\#4839](https://github.com/matrix-org/synapse/issues/4839))
95 - Add stuff back to the .gitignore. ([\#4843](https://github.com/matrix-org/synapse/issues/4843))
96 - Clarify what registration_shared_secret allows for. ([\#4844](https://github.com/matrix-org/synapse/issues/4844))
97 - Correctly log expected errors when fetching server keys. ([\#4847](https://github.com/matrix-org/synapse/issues/4847))
98 - Update install docs to explicitly state a full-chain (not just the top-level) TLS certificate must be provided to Synapse. This caused some people's Synapse ports to appear correct in a browser but still (rightfully so) upset the federation tester. ([\#4849](https://github.com/matrix-org/synapse/issues/4849))
99 - Move client read-receipt processing to federation sender worker. ([\#4852](https://github.com/matrix-org/synapse/issues/4852))
100 - Refactor federation TransactionQueue. ([\#4855](https://github.com/matrix-org/synapse/issues/4855))
101 - Comment out most options in the generated config. ([\#4863](https://github.com/matrix-org/synapse/issues/4863))
102 - Fix yaml library warnings by using safe_load. ([\#4869](https://github.com/matrix-org/synapse/issues/4869))
103 - Update Apache setup to remove location syntax. Thanks to @cwmke! ([\#4870](https://github.com/matrix-org/synapse/issues/4870))
104 - Reinstate test case that runs unit tests against oldest supported dependencies. ([\#4879](https://github.com/matrix-org/synapse/issues/4879))
105 - Update link to federation docs. ([\#4881](https://github.com/matrix-org/synapse/issues/4881))
106 - fix test_auto_create_auto_join_where_no_consent. ([\#4886](https://github.com/matrix-org/synapse/issues/4886))
107 - Use a regular HomeServerConfig object for unit tests rather than a Mock. ([\#4889](https://github.com/matrix-org/synapse/issues/4889))
108 - Add some notes about tuning postgres for larger deployments. ([\#4895](https://github.com/matrix-org/synapse/issues/4895))
109 - Add a config option for torture-testing worker replication. ([\#4902](https://github.com/matrix-org/synapse/issues/4902))
110 - Log requests which are simulated by the unit tests. ([\#4905](https://github.com/matrix-org/synapse/issues/4905))
111 - Allow newsfragments to end with exclamation marks. Exciting! ([\#4912](https://github.com/matrix-org/synapse/issues/4912))
112 - Refactor some more tests to use HomeserverTestCase. ([\#4913](https://github.com/matrix-org/synapse/issues/4913))
113 - Refactor out the state deltas portion of the user directory store and handler. ([\#4917](https://github.com/matrix-org/synapse/issues/4917))
114 - Fix nginx example in ACME doc. ([\#4923](https://github.com/matrix-org/synapse/issues/4923))
115 - Use an explicit dbname for postgres connections in the tests. ([\#4928](https://github.com/matrix-org/synapse/issues/4928))
116 - Fix `ClientReplicationStreamProtocol.__str__()`. ([\#4929](https://github.com/matrix-org/synapse/issues/4929))
117
118
0119 Synapse 0.99.2 (2019-03-01)
1120 ===========================
2121
7070 will probably want to specify your domain (`example.com`) rather than a
7171 matrix-specific hostname here (in the same way that your email address is
7272 probably `user@example.com` rather than `user@email.example.com`) - but
73 doing so may require more advanced setup. - see [Setting up Federation](README.rst#setting-up-federation). Beware that the server name cannot be changed later.
73 doing so may require more advanced setup: see [Setting up Federation](docs/federate.md).
74 Beware that the server name cannot be changed later.
7475
7576 This command will generate you a config file that you can then customise, but it will
7677 also generate a set of keys for you. These keys will allow your Home Server to
373374 * You will also need to uncomment the `tls_certificate_path` and
374375 `tls_private_key_path` lines under the `TLS` section. You can either
375376 point these settings at an existing certificate and key, or you can
377 enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
378 for having Synapse automatically provision and renew federation
379 certificates through ACME can be found at [ACME.md](docs/ACME.md). If you
380 are using your own certificate, be sure to use a `.pem` file that includes
381 the full certificate chain including any intermediate certificates (for
382 instance, if using certbot, use `fullchain.pem` as your certificate, not
383 `cert.pem`).
384
385 For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
386 please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
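
One way to check that the federation port is serving the full chain, intermediates included, is to inspect it with `openssl` (a sketch; replace the hostname and port with your own):

```
openssl s_client -showcerts -connect example.com:8448 -servername example.com </dev/null
```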
379387
380388 ## Registering a user
381389
401409 `homeserver.yaml`, which is shared between Synapse itself and the
402410 `register_new_matrix_user` script. It doesn't matter what it is (a random
403411 value is generated by `--generate-config`), but it should be kept secret, as
404 anyone with knowledge of it can register users on your server even if
405 `enable_registration` is `false`.
412 anyone with knowledge of it can register users, including admin accounts,
413 on your server even if `enable_registration` is `false`.
406414
407415 ## Setting up a TURN server
408416
3838 prune .coveragerc
3939 prune debian
4040 prune .codecov.yml
41 prune .buildkite
4142
4243 exclude jenkins*
4344 recursive-exclude jenkins *.sh
7979 Synapse Installation
8080 ====================
8181
82 For details on how to install synapse, see `<INSTALL.md>`_.
82 .. _federation:
83
84 * For details on how to install synapse, see `<INSTALL.md>`_.
85 * For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_
8386
8487
8588 Connecting to Synapse from a client
9295 general, you will need to enable TLS support before you can successfully
9396 connect from a client: see `<INSTALL.md#tls-certificates>`_.
9497
98 An easy way to get started is to login or register via Riot at
99 https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
97100 You will need to change the server you are logging into from ``matrix.org``
101 and instead specify a Homeserver URL of ``https://<server_name>:8448``
102 (or just ``https://<server_name>`` if you are using a reverse proxy).
103 (Leave the identity server as the default - see `Identity servers`_.)
104 If you prefer to use another client, refer to our
102105 `client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
103106
104107 If all goes well you should at least be able to log in, create a room, and
116119 Once ``enable_registration`` is set to ``true``, it is possible to register a
117120 user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
118121
119 Your new user name will be formed partly from the ``server_name`` (see
120 `Configuring synapse`_), and partly from a localpart you specify when you
121 create the account. Your name will take the form of::
122 Your new user name will be formed partly from the ``server_name``, and partly
123 from a localpart you specify when you create the account. Your name will take
124 the form of::
122125
123126 @localpart:my.domain.name
124127
150153 See https://github.com/vector-im/riot-web/issues/1977 and
151154 https://developer.github.com/changes/2014-04-25-user-content-security for more details.
152155
153 Troubleshooting
154 ===============
155
156 Running out of File Handles
157 ---------------------------
158
159 If synapse runs out of file handles, it typically fails badly - live-locking
160 at 100% CPU, and/or failing to accept new TCP connections (blocking the
161 connecting client). Matrix currently can legitimately use a lot of file handles,
162 thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
163 servers. The first time a server talks in a room it will try to connect
164 simultaneously to all participating servers, which could exhaust the available
165 file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
166 to respond. (We need to improve the routing algorithm used to be better than
167 full mesh, but as of June 2017 this hasn't happened yet).
168
169 If you hit this failure mode, we recommend increasing the maximum number of
170 open file handles to be at least 4096 (assuming a default of 1024 or 256).
171 This is typically done by editing ``/etc/security/limits.conf``.
172
173 Separately, Synapse may leak file handles if inbound HTTP requests get stuck
174 during processing - e.g. blocked behind a lock or talking to a remote server etc.
175 This is best diagnosed by matching up the 'Received request' and 'Processed request'
176 log lines and looking for any 'Processed request' lines which take more than
177 a few seconds to execute. Please let us know at #synapse:matrix.org if
178 you see this failure mode so we can help debug it, however.
179
180 Help!! Synapse eats all my RAM!
181 -------------------------------
182
183 Synapse's architecture is quite RAM hungry currently - we deliberately
184 cache a lot of recent room data and metadata in RAM in order to speed up
185 common requests. We'll improve this in future, but for now the easiest
186 way to reduce the RAM usage (at the risk of slowing things down)
187 is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
188 variable. The default is 0.5, which can be decreased to reduce RAM usage
189 in memory constrained environments, or increased if performance starts to
190 degrade.
191
192 Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
193 improvement in overall memory use, and especially in terms of giving back RAM
194 to the OS. To use it, the library must simply be put in the LD_PRELOAD
195 environment variable when launching Synapse. On Debian, this can be done
196 by installing the ``libjemalloc1`` package and adding this line to
197 ``/etc/default/matrix-synapse``::
198
199 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
200
201 This can make a significant difference on Python 2.7 - it's unclear how
202 much of an improvement it provides on Python 3.x.
203156
204157 Upgrading an existing Synapse
205158 =============================
210163
211164 .. _UPGRADE.rst: UPGRADE.rst
212165
213 .. _federation:
214
215 Setting up Federation
216 =====================
217
218 Federation is the process by which users on different servers can participate
219 in the same room. For this to work, those other servers must be able to contact
220 yours to send messages.
221
222 The ``server_name`` in your ``homeserver.yaml`` file determines the way that
223 other servers will reach yours. By default, they will treat it as a hostname
224 and try to connect to port 8448. This is easy to set up and will work with the
225 default configuration, provided you set the ``server_name`` to match your
226 machine's public DNS hostname, and give Synapse a TLS certificate which is
227 valid for your ``server_name``.
228
229 For a more flexible configuration, you can set up a DNS SRV record. This allows
230 you to run your server on a machine that might not have the same name as your
231 domain name. For example, you might want to run your server at
232 ``synapse.example.com``, but have your Matrix user-ids look like
233 ``@user:example.com``. (A SRV record also allows you to change the port from
234 the default 8448).
235
236 To use a SRV record, first create your SRV record and publish it in DNS. This
237 should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
238 <synapse.server.name>``. The DNS record should then look something like::
239
240 $ dig -t srv _matrix._tcp.example.com
241 _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
242
243 Note that the server hostname cannot be an alias (CNAME record): it has to point
244 directly to the server hosting the synapse instance.
245
246 You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
247 its user-ids, by setting ``server_name``::
248
249 python -m synapse.app.homeserver \
250 --server-name <yourdomain.com> \
251 --config-path homeserver.yaml \
252 --generate-config
253 python -m synapse.app.homeserver --config-path homeserver.yaml
254
255 If you've already generated the config file, you need to edit the ``server_name``
256 in your ``homeserver.yaml`` file. If you've already started Synapse and a
257 database has been created, you will have to recreate the database.
258
259 If all goes well, you should be able to `connect to your server with a client`__,
260 and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
261 step. "Matrix HQ"'s sheer size and activity level tends to make even the
262 largest boxes pause for thought.)
263
264 .. __: `Connecting to Synapse from a client`_
265
266 Troubleshooting
267 ---------------
268
269 You can use the `federation tester <https://matrix.org/federationtester>`_ to
270 check if your homeserver is all set.
271
272 The typical failure mode with federation is that when you try to join a room,
273 it is rejected with "401: Unauthorized". Generally this means that other
274 servers in the room couldn't access yours. (Joining a room over federation is a
275 complicated dance which requires connections in both directions).
276
277 So, things to check are:
278
279 * If you are not using a SRV record, check that your ``server_name`` (the part
280 of your user-id after the ``:``) matches your hostname, and that port 8448 on
281 that hostname is reachable from outside your network.
282 * If you *are* using a SRV record, check that it matches your ``server_name``
283 (it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
284 it specifies are reachable from outside your network.
285
286 Another common problem is that people on other servers can't join rooms that
287 you invite them to. This can be caused by an incorrectly-configured reverse
288 proxy: see `<docs/reverse_proxy.rst>`_ for instructions on how to correctly
289 configure a reverse proxy.
290
291 Running a Demo Federation of Synapses
292 -------------------------------------
293
294 If you want to get up and running quickly with a trio of homeservers in a
295 private federation, there is a script in the ``demo`` directory. This is mainly
296 useful just for development purposes. See `<demo/README>`_.
297
298166
299167 Using PostgreSQL
300168 ================
301169
302 As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
303 alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
304 traditionally used for convenience and simplicity.
305
306 The advantages of Postgres include:
170 Synapse offers two database engines:
171 * `SQLite <https://sqlite.org/>`_
172 * `PostgreSQL <https://www.postgresql.org>`_
173
174 By default Synapse uses SQLite, and in doing so trades performance for convenience.
175 SQLite is only recommended in Synapse for testing purposes or for servers with
176 light workloads.
177
178 Almost all installations should opt to use PostgreSQL. Advantages include:
307179
308180 * significant performance improvements due to the superior threading and
309181 caching model, smarter query optimiser
439311 Building internal API documentation::
440312
441313 python setup.py build_sphinx
314
315 Troubleshooting
316 ===============
317
318 Running out of File Handles
319 ---------------------------
320
321 If synapse runs out of file handles, it typically fails badly - live-locking
322 at 100% CPU, and/or failing to accept new TCP connections (blocking the
323 connecting client). Matrix currently can legitimately use a lot of file handles,
324 thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
325 servers. The first time a server talks in a room it will try to connect
326 simultaneously to all participating servers, which could exhaust the available
327 file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
328 to respond. (We need to improve the routing algorithm used to be better than
329 full mesh, but as of March 2019 this hasn't happened yet).
330
331 If you hit this failure mode, we recommend increasing the maximum number of
332 open file handles to be at least 4096 (assuming a default of 1024 or 256).
333 This is typically done by editing ``/etc/security/limits.conf``.
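
For example, by adding lines like the following (the user name and limits
shown are illustrative; adjust them for your setup)::

    matrix-synapse  soft  nofile  4096
    matrix-synapse  hard  nofile  8192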
334
335 Separately, Synapse may leak file handles if inbound HTTP requests get stuck
336 during processing - e.g. blocked behind a lock or talking to a remote server etc.
337 This is best diagnosed by matching up the 'Received request' and 'Processed request'
338 log lines and looking for any 'Processed request' lines which take more than
339 a few seconds to execute. Please let us know at #synapse:matrix.org if
340 you see this failure mode so we can help debug it, however.
341
342 Help!! Synapse eats all my RAM!
343 -------------------------------
344
345 Synapse's architecture is quite RAM hungry currently - we deliberately
346 cache a lot of recent room data and metadata in RAM in order to speed up
347 common requests. We'll improve this in the future, but for now the easiest
348 way to reduce the RAM usage (at the risk of slowing things down)
349 is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
350 variable. The default is 0.5, which can be decreased to reduce RAM usage
351 in memory constrained environments, or increased if performance starts to
352 degrade.
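
For example, for a manually-started Synapse, run from the synapse directory
(the value 0.25 is illustrative)::

    export SYNAPSE_CACHE_FACTOR=0.25
    ./synctl restart

On Debian, the variable can instead be added to ``/etc/default/matrix-synapse``,
as with the ``LD_PRELOAD`` setting described below.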
353
354 Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
355 improvement in overall memory use, and especially in terms of giving back RAM
356 to the OS. To use it, the library must simply be put in the LD_PRELOAD
357 environment variable when launching Synapse. On Debian, this can be done
358 by installing the ``libjemalloc1`` package and adding this line to
359 ``/etc/default/matrix-synapse``::
360
361 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
362
363 This can make a significant difference on Python 2.7 - it's unclear how
364 much of an improvement it provides on Python 3.x.
1818 # example output to console
1919 console:
2020 class: logging.StreamHandler
21 formatter: fmt
2122 filters: [context]
2223
2324 # example output to file - to enable, edit 'root' config below.
2829 maxBytes: 100000000
2930 backupCount: 3
3031 filters: [context]
31
32 encoding: utf8
3233
3334 root:
3435 level: INFO
0 # Setup Synapse with Workers and Systemd
1
2 This is a setup for managing synapse with systemd, including support for
3 managing workers. It provides a `matrix-synapse` service, as well as a
4 `matrix-synapse-worker@` service for any workers you require. Additionally, to
5 group the required services, it sets up a `matrix.target`. You can use this to
6 automatically start any bot or bridge services. More on this in
7 [Bots and Bridges](#bots-and-bridges).
8
9 See the folder [system](system) for any service and target files.
10
11 The folder [workers](workers) contains an example configuration for the
12 `federation_reader` worker. Pay special attention to the name of the
13 configuration file. In order to work with the `matrix-synapse-worker@.service`
14 service, it needs to have the exact same name as the worker app.
15
16 This setup expects neither the homeserver nor any workers to fork. Forking is
17 handled by systemd.
18
19 ## Setup
20
21 1. Adjust your matrix configs. Make sure that the worker config files have the
22 exact same name as the worker app; see `matrix-synapse-worker@.service` for
23 why this matters. You can find an example worker config in the [workers](workers)
24 folder. See below for relevant settings in the `homeserver.yaml`.
25 2. Copy the `*.service` and `*.target` files in [system](system) to
26 `/etc/systemd/system`.
27 3. `systemctl enable matrix-synapse.service`; this adds the homeserver
28 app to the `matrix.target`.
29 4. *Optional.* `systemctl enable
30 matrix-synapse-worker@federation_reader.service`; this adds the federation_reader
31 app to the `matrix-synapse.service`.
32 5. *Optional.* Repeat step 4 for any additional workers you require.
33 6. *Optional.* Add any bots or bridges by enabling them.
34 7. Start all matrix related services via `systemctl start matrix.target`
35 8. *Optional.* Enable autostart of all matrix related services on system boot
36 via `systemctl enable matrix.target`
37
38 ## Usage
39
40 After you have completed the setup, you can use the following commands to manage
41 your synapse installation:
42
43 ```
44 # Start matrix-synapse, all workers and any enabled bots or bridges.
45 systemctl start matrix.target
46
47 # Restart matrix-synapse and all workers (not necessarily restarting bots
48 # or bridges, see "Bots and Bridges")
49 systemctl restart matrix-synapse.service
50
51 # Stop matrix-synapse and all workers (not necessarily restarting bots
52 # or bridges, see "Bots and Bridges")
53 systemctl stop matrix-synapse.service
54
55 # Restart a specific worker (i. e. federation_reader), the homeserver is
56 # unaffected by this.
57 systemctl restart matrix-synapse-worker@federation_reader.service
58
59 # Add a new worker (assuming all configs are setup already)
60 systemctl enable matrix-synapse-worker@federation_writer.service
61 systemctl restart matrix-synapse.service
62 ```
63
64 ## The Configs
65
66 Make sure the `worker_app` is set in the `homeserver.yaml` and it does not fork.
67
68 ```
69 worker_app: synapse.app.homeserver
70 daemonize: false
71 ```
72
73 None of the workers should fork, as forking is handled by systemd. Hence make
74 sure this is present in all worker config files.
75
76 ```
77 worker_daemonize: false
78 ```
79
80 The config files of all workers are expected to be located in
81 `/etc/matrix-synapse/workers`. If you want to use a different location you have
82 to edit the provided `*.service` files accordingly.
83
84 ## Bots and Bridges
85
86 Most bots and bridges do not care if the homeserver goes down or is restarted.
87 Depending on the implementation this may crash them though. So look up the docs
88 or ask the community around the specific bridge or bot you want to run to make sure
89 you choose the correct setup.
90
91 Whichever configuration you choose, after the setup the following will enable
92 automatically starting (and potentially restarting) your bot/bridge with the
93 `matrix.target`.
94
95 ```
96 systemctl enable <yourBotOrBridgeName>.service
97 ```
98
99 **Note** that when everything is stopped, the bots/bridges will only be started
100 along with synapse if you start the `matrix.target`, not if you start the
101 `matrix-synapse.service`. This is on purpose. Think of `matrix-synapse.service`
102 as *just* synapse, and of `matrix.target` as anything matrix related, including
103 synapse and any and all enabled bots and bridges.
104
105 ### Start with synapse but ignore synapse going down
106
107 If the bridge can handle shutdowns of the homeserver you'll want to install the
108 service in the `matrix.target` and optionally add an
109 `After=matrix-synapse.service` dependency to have the bot/bridge start after
110 synapse on starting everything.
111
112 In this case the service file should look like this.
113
114 ```
115 [Unit]
116 # ...
117 # Optional, this will only ensure that if you start everything, synapse will
118 # be started before the bot/bridge will be started.
119 After=matrix-synapse.service
120
121 [Service]
122 # ...
123
124 [Install]
125 WantedBy=matrix.target
126 ```
127
128 ### Stop/restart when synapse stops/restarts
129
130 If the bridge can't handle shutdowns of the homeserver you'll still want to
131 install the service in the `matrix.target` but also have to specify the
132 `After=matrix-synapse.service` *and* `BindsTo=matrix-synapse.service`
133 dependencies to have the bot/bridge stop/restart with synapse.
134
135 In this case the service file should look like this.
136
137 ```
138 [Unit]
139 # ...
140 # Mandatory
141 After=matrix-synapse.service
142 BindsTo=matrix-synapse.service
143
144 [Service]
145 # ...
146
147 [Install]
148 WantedBy=matrix.target
149 ```
0 [Unit]
1 Description=Synapse Matrix Worker
2 After=matrix-synapse.service
3 BindsTo=matrix-synapse.service
4
5 [Service]
6 Type=simple
7 User=matrix-synapse
8 WorkingDirectory=/var/lib/matrix-synapse
9 EnvironmentFile=/etc/default/matrix-synapse
10 ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --config-path=/etc/matrix-synapse/workers/%i.yaml
11 ExecReload=/bin/kill -HUP $MAINPID
12 Restart=always
13 RestartSec=3
14
15 [Install]
16 WantedBy=matrix-synapse.service
0 [Unit]
1 Description=Synapse Matrix Homeserver
2
3 [Service]
4 Type=simple
5 User=matrix-synapse
6 WorkingDirectory=/var/lib/matrix-synapse
7 EnvironmentFile=/etc/default/matrix-synapse
8 ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
9 ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
10 ExecReload=/bin/kill -HUP $MAINPID
11 Restart=always
12 RestartSec=3
13
14 [Install]
15 WantedBy=matrix.target
0 [Unit]
1 Description=Contains matrix services like synapse, bridges and bots
2 After=network.target
3 AllowIsolate=no
4
5 [Install]
6 WantedBy=multi-user.target
0 worker_app: synapse.app.federation_reader
1
2 worker_replication_host: 127.0.0.1
3 worker_replication_port: 9092
4 worker_replication_http_port: 9093
5
6 worker_listeners:
7 - type: http
8 port: 8011
9 resources:
10 - names: [federation]
11
12 worker_daemonize: false
13 worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml
0 matrix-synapse-py3 (0.99.3.2) stable; urgency=medium
1
2 * New synapse release 0.99.3.2.
3
4 -- Synapse Packaging team <packages@matrix.org> Fri, 03 May 2019 18:56:20 +0100
5
6 matrix-synapse-py3 (0.99.3.1) stable; urgency=medium
7
8 * New synapse release 0.99.3.1.
9
10 -- Synapse Packaging team <packages@matrix.org> Fri, 03 May 2019 16:02:43 +0100
11
12 matrix-synapse-py3 (0.99.3) stable; urgency=medium
13
14 [ Richard van der Hoff ]
15 * Fix warning during preconfiguration. (Fixes: #4819)
16
17 [ Synapse Packaging team ]
18 * New synapse release 0.99.3.
19
20 -- Synapse Packaging team <packages@matrix.org> Mon, 01 Apr 2019 12:48:21 +0000
21
022 matrix-synapse-py3 (0.99.2) stable; urgency=medium
123
224 * Fix overwriting of config settings on upgrade.
44 . /usr/share/debconf/confmodule
55
66 # try to update the debconf db according to whatever is in the config files
7 /opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true
7 #
8 # note that we may get run during preconfiguration, in which case the script
9 # will not yet be installed.
10 [ -x /opt/venvs/matrix-synapse/lib/manage_debconf.pl ] && \
11 /opt/venvs/matrix-synapse/lib/manage_debconf.pl read
812
913 db_input high matrix-synapse/server-name || true
1014 db_input high matrix-synapse/report-stats || true
5454 python3-pip \
5555 python3-setuptools \
5656 python3-venv \
57 sqlite3
57 sqlite3 \
58 libpq-dev
5859
5960 COPY --from=builder /dh-virtualenv_1.1-1_all.deb /
6061
2727 docker run \
2828 -d \
2929 --name synapse \
30 -v ${DATA_PATH}:/data \
30 --mount type=volume,src=synapse-data,dst=/data \
3131 -e SYNAPSE_SERVER_NAME=my.matrix.host \
3232 -e SYNAPSE_REPORT_STATS=yes \
3333 matrixdotorg/synapse:latest
8686 * ``SYNAPSE_CONFIG_PATH``, path to a custom config file
8787
8888 If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
89 then customize it manually. No other environment variable is required.
89 then customize it manually: see [Generating a config
90 file](#generating-a-config-file).
9091
91 Otherwise, a dynamic configuration file will be used. The following environment
92 variables are available for configuration:
92 Otherwise, a dynamic configuration file will be used.
93
94 ### Environment variables used to build a dynamic configuration file
95
96 The following environment variables are used to build the configuration file
97 when ``SYNAPSE_CONFIG_PATH`` is not set.
9398
9499 * ``SYNAPSE_SERVER_NAME`` (mandatory), the server public hostname.
95100 * ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous
142147 any.
143148 * ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail
144149 server if any.
150
151 ### Generating a config file
152
153 It is possible to generate a basic configuration file for use with
154 `SYNAPSE_CONFIG_PATH` using the `generate` commandline option. You will need to
155 specify values for `SYNAPSE_CONFIG_PATH`, `SYNAPSE_SERVER_NAME` and
156 `SYNAPSE_REPORT_STATS`, and mount a docker volume to store the data on. For
157 example:
158
159 ```
160 docker run -it --rm \
161 --mount type=volume,src=synapse-data,dst=/data \
162 -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
163 -e SYNAPSE_SERVER_NAME=my.matrix.host \
164 -e SYNAPSE_REPORT_STATS=yes \
165 matrixdotorg/synapse:latest generate
166 ```
167
168 This will generate a `homeserver.yaml` in (typically)
169 `/var/lib/docker/volumes/synapse-data/_data`, which you can then customise and
170 use with:
171
172 ```
173 docker run -d --name synapse \
174 --mount type=volume,src=synapse-data,dst=/data \
175 -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
176 matrixdotorg/synapse:latest
177 ```
0 # The config is maintained as an up-to-date snapshot of the default
1 # homeserver.yaml configuration generated by Synapse.
2 #
3 # It is intended to act as a reference for the default configuration,
4 # helping admins keep track of new options and other changes, and compare
5 # their configs with the current default. As such, many of the actual
6 # config values shown are placeholders.
7 #
8 # It is *not* intended to be copied and used as the basis for a real
9 # homeserver.yaml. Instead, if you are starting from scratch, please generate
10 # a fresh config using Synapse by following the instructions in INSTALL.md.
11
6666
6767 ```
6868 location /.well-known/acme-challenge {
69 proxy_pass http://localhost:8009/;
69 proxy_pass http://localhost:8009;
7070 }
7171 ```
7272
0 Version API
1 ===========
2
3 This API returns the running Synapse version and the Python version
4 on which Synapse is being run. This is useful when a Synapse instance
5 is behind a proxy that does not forward the 'Server' header (which also
6 contains Synapse version information).
7
8 The API is::
9
10 GET /_matrix/client/r0/admin/server_version
11
12 including an ``access_token`` of a server admin.
13
14 It returns a JSON body like the following:
15
16 .. code:: json
17
18 {
19 "server_version": "0.99.2rc1 (b=develop, abcdef123)",
20 "python_version": "3.6.8"
21 }
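
For example, with ``curl`` (a sketch; substitute your homeserver's domain and
the access token of a server admin)::

    curl 'https://example.com/_matrix/client/r0/admin/server_version?access_token=<access_token>'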
0 Setting up Federation
1 =====================
2
3 Federation is the process by which users on different servers can participate
4 in the same room. For this to work, those other servers must be able to contact
5 yours to send messages.
6
7 The ``server_name`` configured in the Synapse configuration file (often
8 ``homeserver.yaml``) defines how resources (users, rooms, etc.) will be
9 identified (eg: ``@user:example.com``, ``#room:example.com``). By
10 default, it is also the domain that other servers will use to
11 try to reach your server (via port 8448). This is easy to set
12 up and will work provided you set the ``server_name`` to match your
13 machine's public DNS hostname, and provide Synapse with a TLS certificate
14 which is valid for your ``server_name``.
15
16 Once you have completed the steps necessary to federate, you should be able to
17 join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a
18 room for Synapse admins.)
19
20
21 ## Delegation
22
23 For a more flexible configuration, you can have ``server_name``
24 resources (eg: ``@user:example.com``) served by a different host and
25 port (eg: ``synapse.example.com:443``). There are two ways to do this:
26
27 - adding a ``/.well-known/matrix/server`` URL served on ``https://example.com``.
28 - adding a DNS ``SRV`` record in the DNS zone of domain
29 ``example.com``.
30
31 Without configuring delegation, other matrix servers will
32 expect to find your server via ``example.com:8448``. The following methods
33 allow you retain a `server_name` of `example.com` so that your user IDs, room
34 aliases, etc continue to look like `*:example.com`, whilst having your
35 federation traffic routed to a different server.
36
37 ### .well-known delegation
38
39 To use this method, you need to be able to alter the
40 ``server_name`` 's https server to serve the ``/.well-known/matrix/server``
41 URL. Having an active server (with a valid TLS certificate) serving your
42 ``server_name`` domain is out of the scope of this documentation.
43
44 The URL ``https://<server_name>/.well-known/matrix/server`` should
45 return a JSON structure containing the key ``m.server`` like so:
46
47 {
48 "m.server": "<synapse.server.name>[:<yourport>]"
49 }
50
51 In our example, this would mean that URL ``https://example.com/.well-known/matrix/server``
52 should return:
53
54 {
55 "m.server": "synapse.example.com:443"
56 }
57
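Once this is in place, you can sanity-check it from the command line (a
sketch, using the example hostnames above):

    $ curl https://example.com/.well-known/matrix/server
    {"m.server": "synapse.example.com:443"}
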
58 Note, specifying a port is optional. If a port is not specified an SRV lookup
59 is performed, as described below. If the target of the
60 delegation does not have an SRV record, then the port defaults to 8448.
61
62 Most installations will not need to configure .well-known. However, it can be
63 useful in cases where the admin is hosting on behalf of someone else and
64 therefore cannot gain access to the necessary certificate. With .well-known,
65 federation servers will check for a valid TLS certificate for the delegated
66 hostname (in our example: ``synapse.example.com``).
67
68 .well-known support first appeared in Synapse v0.99.0. To federate with older
69 servers you may need to additionally configure SRV delegation. Alternatively,
70 encourage the server admin in question to upgrade :).
71
72 ### DNS SRV delegation
73
74 To use this delegation method, you need to have write access to your
75 ``server_name`` 's domain zone DNS records (in our example it would be
76 ``example.com`` DNS zone).
77
78 This method requires the target server to provide a
79 valid TLS certificate for the original ``server_name``.
80
81 You need to add a SRV record in your ``server_name`` 's DNS zone with
82 this format:
83
84 _matrix._tcp.<yourdomain.com> <ttl> IN SRV <priority> <weight> <port> <synapse.server.name>
85
86 In our example, we would need to add this SRV record in the
87 ``example.com`` DNS zone:
88
89 _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
90
91 Once done and set up, you can check the DNS record with ``dig -t srv
92 _matrix._tcp.<server_name>``. In our example, we would expect this:
93
94 $ dig -t srv _matrix._tcp.example.com
95 _matrix._tcp.example.com. 3600 IN SRV 10 5 443 synapse.example.com.
96
97 Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
98 directly to the server hosting the synapse instance.
99
100 ## Troubleshooting
101
102 You can use the [federation tester](
103 <https://matrix.org/federationtester>) to check if your homeserver is
104 configured correctly. Alternatively try the [JSON API used by the federation tester](https://matrix.org/federationtester/api/report?server_name=DOMAIN).
105 Note that you'll have to modify this URL to replace ``DOMAIN`` with your
106 ``server_name``. Hitting the API directly provides extra detail.
107
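For example (a sketch; replace the domain with your own ``server_name``):

    $ curl 'https://matrix.org/federationtester/api/report?server_name=example.com'
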
108 The typical failure mode for federation is that when the server tries to join
109 a room, it is rejected with "401: Unauthorized". Generally this means that other
110 servers in the room could not access yours. (Joining a room over federation is
111 a complicated dance which requires connections in both directions).
112
113 Another common problem is that people on other servers can't join rooms that
114 you invite them to. This can be caused by an incorrectly-configured reverse
115 proxy: see [reverse_proxy.rst](<reverse_proxy.rst>) for instructions on how to correctly
116 configure a reverse proxy.
117
118 ## Running a Demo Federation of Synapses
119
120 If you want to get up and running quickly with a trio of homeservers in a
121 private federation, there is a script in the ``demo`` directory. This is mainly
122 useful just for development purposes. See [demo/README](<../demo/README>).
7474 result from the ``/login`` call (including ``access_token``, ``device_id``,
7575 etc.)
7676
77 ``someprovider.check_3pid_auth``\(*medium*, *address*, *password*)
78
79 This method, if implemented, is called when a user attempts to register or
80 log in with a third party identifier, such as email. It is passed the
81 medium (e.g. "email"), an address (e.g. "jdoe@example.com") and the user's
82 password.
83
84 The method should return a Twisted ``Deferred`` object, which resolves to
85 a ``str`` containing the user's (canonical) User ID if authentication was
86 successful, and ``None`` if not.
87
88 As with ``check_auth``, the ``Deferred`` may alternatively resolve to a
89 ``(user_id, callback)`` tuple.
90
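As a minimal sketch, a provider implementing this method might look as
follows (the class name and the hard-coded credentials are purely
illustrative; the ``(config, account_handler)`` constructor signature is
assumed from the usual password-provider convention)::

    from twisted.internet import defer

    class ExamplePasswordProvider(object):
        def __init__(self, config, account_handler):
            self.account_handler = account_handler

        def check_3pid_auth(self, medium, address, password):
            # Accept a single hard-coded email/password pair, purely for
            # illustration; a real provider would consult an external store.
            if medium == "email" and address == "jdoe@example.com":
                if password == "correct horse battery staple":
                    # Resolve to the user's canonical User ID on success...
                    return defer.succeed("@jdoe:example.com")
            # ...and to None if authentication failed.
            return defer.succeed(None)
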
7791 ``someprovider.check_password``\(*user_id*, *password*)
7892
7993 This method provides a simpler interface than ``get_supported_login_types``
4747 sudo yum install postgresql-devel libpqxx-devel.x86_64
4848 export PATH=/usr/pgsql-9.4/bin/:$PATH
4949 pip install psycopg2
50
51 Tuning Postgres
52 ===============
53
54 The default settings should be fine for most deployments. For larger-scale
55 deployments, tuning some of the settings is recommended; details can be
56 found at https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server.
57
58 In particular, we've found tuning the following values helpful for performance:
59
60 - ``shared_buffers``
61 - ``effective_cache_size``
62 - ``work_mem``
63 - ``maintenance_work_mem``
64 - ``autovacuum_work_mem``
65
66 Note that the appropriate values for those fields depend on the amount of free
67 memory the database host has available.
5068
5169 Synapse config
5270 ==============
128146 database configuration file ``homeserver-postgres.yaml``::
129147
130148 ./synctl stop
131 mv homeserver.yaml homeserver-old-sqlite.yaml
132 mv homeserver-postgres.yaml homeserver.yaml
149 mv homeserver.yaml homeserver-old-sqlite.yaml
150 mv homeserver-postgres.yaml homeserver.yaml
133151 ./synctl start
134152
135153 Synapse should now be running against PostgreSQL.
1717 name or port. Indeed, clients will use port 443 by default, whereas servers
1818 default to port 8448. Where these are different, we refer to the 'client port'
1919 and the 'federation port'. See `Setting up federation
20 <../README.rst#setting-up-federation>`_ for more details of the algorithm used for
20 <federate.md>`_ for more details of the algorithm used for
2121 federation connections.
2222
2323 Let's assume that we expect clients to connect to our server at
6868 SSLEngine on
6969 ServerName matrix.example.com;
7070
71 <Location /_matrix>
72 ProxyPass http://127.0.0.1:8008/_matrix nocanon
73 ProxyPassReverse http://127.0.0.1:8008/_matrix
74 </Location>
71 ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
72 ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
7573 </VirtualHost>
7674
7775 <VirtualHost *:8448>
7876 SSLEngine on
7977 ServerName example.com;
80
81 <Location /_matrix>
82 ProxyPass http://127.0.0.1:8008/_matrix nocanon
83 ProxyPassReverse http://127.0.0.1:8008/_matrix
84 </Location>
78
79 ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
80 ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
8581 </VirtualHost>
8682
8783 * HAProxy::
8884
8985 frontend https
90 bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
91 bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
92
86 bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
87
9388 # Matrix client traffic
9489 acl matrix hdr(host) -i matrix.example.com
9590 use_backend matrix if matrix
96
91
9792 frontend matrix-federation
98 bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
99 bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
93 bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
10094 default_backend matrix
101
95
10296 backend matrix
10397 server matrix 127.0.0.1:8008
10498
0 # The config is maintained as an up-to-date snapshot of the default
1 # homeserver.yaml configuration generated by Synapse.
2 #
3 # It is intended to act as a reference for the default configuration,
4 # helping admins keep track of new options and other changes, and compare
5 # their configs with the current default. As such, many of the actual
6 # config values shown are placeholders.
7 #
8 # It is *not* intended to be copied and used as the basis for a real
9 # homeserver.yaml. Instead, if you are starting from scratch, please generate
10 # a fresh config using Synapse by following the instructions in INSTALL.md.
11
12 ## Server ##
13
14 # The domain name of the server, with optional explicit port.
15 # This is used by remote servers to connect to this server,
16 # e.g. matrix.org, localhost:8080, etc.
17 # This is also the last part of your UserID.
18 #
19 server_name: "SERVERNAME"
20
21 # When running as a daemon, the file to store the pid in
22 #
23 pid_file: DATADIR/homeserver.pid
24
25 # CPU affinity mask. Setting this restricts the CPUs on which the
26 # process will be scheduled. It is represented as a bitmask, with the
27 # lowest order bit corresponding to the first logical CPU and the
28 # highest order bit corresponding to the last logical CPU. Not all CPUs
29 # may exist on a given system but a mask may specify more CPUs than are
30 # present.
31 #
32 # For example:
33 # 0x00000001 is processor #0,
34 # 0x00000003 is processors #0 and #1,
35 # 0xFFFFFFFF is all processors (#0 through #31).
36 #
37 # Pinning a Python process to a single CPU is desirable, because Python
38 # is inherently single-threaded due to the GIL, and can suffer a
39 # 30-40% slowdown due to cache blow-out and thread context switching
40 # if the scheduler happens to schedule the underlying threads across
41 # different cores. See
42 # https://www.mirantis.com/blog/improve-performance-python-programs-restricting-single-cpu/.
43 #
44 # This setting requires the affinity package to be installed!
45 #
46 #cpu_affinity: 0xFFFFFFFF
47
48 # The path to the web client which will be served at /_matrix/client/
49 # if 'webclient' is configured under the 'listeners' configuration.
50 #
51 #web_client_location: "/path/to/web/root"
52
53 # The public-facing base URL that clients use to access this HS
54 # (not including _matrix/...). This is the same URL a user would
55 # enter into the 'custom HS URL' field on their client. If you
56 # use synapse with a reverse proxy, this should be the URL to reach
57 # synapse via the proxy.
58 #
59 #public_baseurl: https://example.com/
60
61 # Set the soft limit on the number of file descriptors synapse can use
62 # Zero is used to indicate synapse should set the soft limit to the
63 # hard limit.
64 #
65 #soft_file_limit: 0
66
67 # Set to false to disable presence tracking on this homeserver.
68 #
69 #use_presence: false
70
71 # The GC threshold parameters to pass to `gc.set_threshold`, if defined
72 #
73 #gc_thresholds: [700, 10, 10]
74
75 # Set the limit on the returned events in the timeline in the get
76 # and sync operations. The default value is -1, which means no upper limit.
77 #
78 #filter_timeline_limit: 5000
79
80 # Whether room invites to users on this server should be blocked
81 # (except those sent by local server admins). The default is False.
82 #
83 #block_non_admin_invites: True
84
85 # Room searching
86 #
87 # If disabled, new messages will not be indexed for searching and users
88 # will receive errors when searching for messages. Defaults to enabled.
89 #
90 #enable_search: false
91
92 # Restrict federation to the following whitelist of domains.
93 # N.B. we recommend also firewalling your federation listener to limit
94 # inbound federation traffic as early as possible, rather than relying
95 # purely on this application-layer restriction. If not specified, the
96 # default is to whitelist everything.
97 #
98 #federation_domain_whitelist:
99 # - lon.example.com
100 # - nyc.example.com
101 # - syd.example.com
102
103 # List of ports that Synapse should listen on, their purpose and their
104 # configuration.
105 #
106 # Options for each listener include:
107 #
108 # port: the TCP port to bind to
109 #
110 # bind_addresses: a list of local addresses to listen on. The default is
111 # 'all local interfaces'.
112 #
113 # type: the type of listener. Normally 'http', but other valid options are:
114 # 'manhole' (see docs/manhole.md),
115 # 'metrics' (see docs/metrics-howto.rst),
116 # 'replication' (see docs/workers.rst).
117 #
118 # tls: set to true to enable TLS for this listener. Will use the TLS
119 # key/cert specified in tls_private_key_path / tls_certificate_path.
120 #
121 # x_forwarded: Only valid for an 'http' listener. Set to true to use the
122 # X-Forwarded-For header as the client IP. Useful when Synapse is
123 # behind a reverse-proxy.
124 #
125 # resources: Only valid for an 'http' listener. A list of resources to host
126 # on this port. Options for each resource are:
127 #
128 # names: a list of names of HTTP resources. See below for a list of
129 # valid resource names.
130 #
131 # compress: set to true to enable HTTP compression for this resource.
132 #
133 # additional_resources: Only valid for an 'http' listener. A map of
134 # additional endpoints which should be loaded via dynamic modules.
135 #
136 # Valid resource names are:
137 #
138 # client: the client-server API (/_matrix/client). Also implies 'media' and
139 # 'static'.
140 #
141 # consent: user consent forms (/_matrix/consent). See
142 # docs/consent_tracking.md.
143 #
144 # federation: the server-server API (/_matrix/federation). Also implies
145 # 'media', 'keys', 'openid'
146 #
147 # keys: the key discovery API (/_matrix/keys).
148 #
149 # media: the media API (/_matrix/media).
150 #
151 # metrics: the metrics interface. See docs/metrics-howto.rst.
152 #
153 # openid: OpenID authentication.
154 #
155 # replication: the HTTP replication API (/_synapse/replication). See
156 # docs/workers.rst.
157 #
158 # static: static resources under synapse/static (/_matrix/static). (Mostly
159 # useful for 'fallback authentication'.)
160 #
161 # webclient: A web client. Requires web_client_location to be set.
162 #
163 listeners:
164 # TLS-enabled listener: for when matrix traffic is sent directly to synapse.
165 #
166 # Disabled by default. To enable it, uncomment the following. (Note that you
167 # will also need to give Synapse a TLS key and certificate: see the TLS section
168 # below.)
169 #
170 #- port: 8448
171 # type: http
172 # tls: true
173 # resources:
174 # - names: [client, federation]
175
176 # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
177 # that unwraps TLS.
178 #
179 # If you plan to use a reverse proxy, please see
180 # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
181 #
182 - port: 8008
183 tls: false
184 bind_addresses: ['::1', '127.0.0.1']
185 type: http
186 x_forwarded: true
187
188 resources:
189 - names: [client, federation]
190 compress: false
191
192 # example additional_resources:
193 #
194 #additional_resources:
195 # "/_matrix/my/custom/endpoint":
196 # module: my_module.CustomRequestHandler
197 # config: {}
198
199 # Turn on the twisted ssh manhole service on localhost on the given
200 # port.
201 #
202 #- port: 9000
203 # bind_addresses: ['::1', '127.0.0.1']
204 # type: manhole
205
206
207 ## Homeserver blocking ##
208
209 # How to reach the server admin, used in ResourceLimitError
210 #
211 #admin_contact: 'mailto:admin@server.com'
212
213 # Global blocking
214 #
215 #hs_disabled: False
216 #hs_disabled_message: 'Human readable reason for why the HS is blocked'
217 #hs_disabled_limit_type: 'error code(str), to help clients decode reason'
218
219 # Monthly Active User Blocking
220 #
221 #limit_usage_by_mau: False
222 #max_mau_value: 50
223 #mau_trial_days: 2
224
225 # If enabled, the metrics for the number of monthly active users will
226 # be populated, however no one will be limited. If limit_usage_by_mau
227 # is true, this is implied to be true.
228 #
229 #mau_stats_only: False
230
231 # Sometimes the server admin will want to ensure certain accounts are
232 # never blocked by mau checking. These accounts are specified here.
233 #
234 #mau_limit_reserved_threepids:
235 # - medium: 'email'
236 # address: 'reserved_user@example.com'
237
238
239 ## TLS ##
240
241 # PEM-encoded X509 certificate for TLS.
242 # This certificate, as of Synapse 1.0, will need to be a valid and verifiable
243 # certificate, signed by a recognised Certificate Authority.
244 #
245 # See 'ACME support' below to enable auto-provisioning this certificate via
246 # Let's Encrypt.
247 #
248 # If supplying your own, be sure to use a `.pem` file that includes the
249 # full certificate chain including any intermediate certificates (for
250 # instance, if using certbot, use `fullchain.pem` as your certificate,
251 # not `cert.pem`).
252 #
253 #tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt"
254
255 # PEM-encoded private key for TLS
256 #
257 #tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"
258
259 # ACME support: This will configure Synapse to request a valid TLS certificate
260 # for your configured `server_name` via Let's Encrypt.
261 #
262 # Note that provisioning a certificate in this way requires port 80 to be
263 # routed to Synapse so that it can complete the http-01 ACME challenge.
264 # By default, if you enable ACME support, Synapse will attempt to listen on
265 # port 80 for incoming http-01 challenges - however, this will likely fail
266 # with 'Permission denied' or a similar error.
267 #
268 # There are a couple of potential solutions to this:
269 #
270 # * If you already have an Apache, Nginx, or similar listening on port 80,
271 # you can configure Synapse to use an alternate port, and have your web
272 # server forward the requests. For example, assuming you set 'port: 8009'
273 # below, on Apache, you would write:
274 #
275 # ProxyPass /.well-known/acme-challenge http://localhost:8009/.well-known/acme-challenge
276 #
277 # * Alternatively, you can use something like `authbind` to give Synapse
278 # permission to listen on port 80.
279 #
280 acme:
281 # ACME support is disabled by default. Uncomment the following line
282 # (and tls_certificate_path and tls_private_key_path above) to enable it.
283 #
284 #enabled: true
285
286 # Endpoint to use to request certificates. If you only want to test,
287 # use Let's Encrypt's staging url:
288 # https://acme-staging.api.letsencrypt.org/directory
289 #
290 #url: https://acme-v01.api.letsencrypt.org/directory
291
292 # Port number to listen on for the HTTP-01 challenge. Change this if
293 # you are forwarding connections through Apache/Nginx/etc.
294 #
295 #port: 80
296
297 # Local addresses to listen on for incoming connections.
298 # Again, you may want to change this if you are forwarding connections
299 # through Apache/Nginx/etc.
300 #
301 #bind_addresses: ['::', '0.0.0.0']
302
303 # How many days remaining on a certificate before it is renewed.
304 #
305 #reprovision_threshold: 30
306
307 # The domain that the certificate should be for. Normally this
308 # should be the same as your Matrix domain (i.e., 'server_name'), but,
309 # by putting a file at 'https://<server_name>/.well-known/matrix/server',
310 # you can delegate incoming traffic to another server. If you do that,
311 # you should give the target of the delegation here.
312 #
313 # For example: if your 'server_name' is 'example.com', but
314 # 'https://example.com/.well-known/matrix/server' delegates to
315 # 'matrix.example.com', you should put 'matrix.example.com' here.
316 #
317 # If not set, defaults to your 'server_name'.
318 #
319 #domain: matrix.example.com
320
321 # List of allowed TLS fingerprints for this server to publish along
322 # with the signing keys for this server. Other matrix servers that
323 # make HTTPS requests to this server will check that the TLS
324 # certificates returned by this server match one of the fingerprints.
325 #
326 # Synapse automatically adds the fingerprint of its own certificate
327 # to the list. So if federation traffic is handled directly by synapse
328 # then no modification to the list is required.
329 #
330 # If synapse is run behind a load balancer that handles the TLS then it
331 # will be necessary to add the fingerprints of the certificates used by
332 # the loadbalancers to this list if they are different to the one
333 # synapse is using.
334 #
335 # Homeservers are permitted to cache the list of TLS fingerprints
336 # returned in the key responses up to the "valid_until_ts" returned in
337 # the key response. It may be necessary to publish the fingerprints of a new
338 # certificate and wait until the "valid_until_ts" of the previous key
339 # responses have passed before deploying it.
340 #
341 # You can calculate a fingerprint from a given TLS listener via:
342 # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
343 # openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
344 # or by checking matrix.org/federationtester/api/report?server_name=$host
345 #
346 #tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
347
348
349
350 ## Database ##
351
352 database:
353 # The database engine name
354 name: "sqlite3"
355 # Arguments to pass to the engine
356 args:
357 # Path to the database
358 database: "DATADIR/homeserver.db"
359
360 # Number of events to cache in memory.
361 #
362 #event_cache_size: 10K
363
364
365 ## Logging ##
366
367 # A yaml python logging config file
368 #
369 log_config: "CONFDIR/SERVERNAME.log.config"
370
371
372 ## Ratelimiting ##
373
374 # Number of messages a client can send per second
375 #
376 #rc_messages_per_second: 0.2
377
378 # Number of messages a client can send before being throttled
379 #
380 #rc_message_burst_count: 10.0
381
382 # Ratelimiting settings for registration and login.
383 #
384 # Each ratelimiting configuration is made of two parameters:
385 # - per_second: number of requests a client can send per second.
386 # - burst_count: number of requests a client can send before being throttled.
387 #
388 # Synapse currently uses the following configurations:
389 # - one for registration that ratelimits registration requests based on the
390 # client's IP address.
391 # - one for login that ratelimits login requests based on the client's IP
392 # address.
393 # - one for login that ratelimits login requests based on the account the
394 # client is attempting to log into.
395 # - one for login that ratelimits login requests based on the account the
396 # client is attempting to log into, based on the amount of failed login
397 # attempts for this account.
398 #
399 # The defaults are as shown below.
400 #
401 #rc_registration:
402 # per_second: 0.17
403 # burst_count: 3
404 #
405 #rc_login:
406 # address:
407 # per_second: 0.17
408 # burst_count: 3
409 # account:
410 # per_second: 0.17
411 # burst_count: 3
412 # failed_attempts:
413 # per_second: 0.17
414 # burst_count: 3
415
416 # The federation window size in milliseconds
417 #
418 #federation_rc_window_size: 1000
419
420 # The number of federation requests from a single server in a window
421 # before the server will delay processing the request.
422 #
423 #federation_rc_sleep_limit: 10
424
425 # The duration in milliseconds to delay processing events from
426 # remote servers by if they go over the sleep limit.
427 #
428 #federation_rc_sleep_delay: 500
429
430 # The maximum number of concurrent federation requests allowed
431 # from a single server
432 #
433 #federation_rc_reject_limit: 50
434
435 # The number of federation requests to concurrently process from a
436 # single server
437 #
438 #federation_rc_concurrent: 3
439
440 # Target outgoing federation transaction frequency for sending read-receipts,
441 # per-room.
442 #
443 # If we end up trying to send out more read-receipts, they will get buffered up
444 # into fewer transactions.
445 #
446 #federation_rr_transactions_per_room_per_second: 50
447
448
449
450 # Directory where uploaded images and attachments are stored.
451 #
452 media_store_path: "DATADIR/media_store"
453
454 # Media storage providers allow media to be stored in different
455 # locations.
456 #
457 #media_storage_providers:
458 # - module: file_system
459 # # Whether to write new local files.
460 # store_local: false
461 # # Whether to write new remote media
462 # store_remote: false
463 # # Whether to block upload requests waiting for write to this
464 # # provider to complete
465 # store_synchronous: false
466 # config:
467 # directory: /mnt/some/other/directory
468
469 # Directory where in-progress uploads are stored.
470 #
471 uploads_path: "DATADIR/uploads"
472
473 # The largest allowed upload size in bytes
474 #
475 #max_upload_size: 10M
476
477 # Maximum number of pixels that will be thumbnailed
478 #
479 #max_image_pixels: 32M
480
481 # Whether to generate new thumbnails on the fly to precisely match
482 # the resolution requested by the client. If true then whenever
483 # a new resolution is requested by the client the server will
484 # generate a new thumbnail. If false the server will pick a thumbnail
485 # from a precalculated list.
486 #
487 #dynamic_thumbnails: false
488
489 # List of thumbnails to precalculate when an image is uploaded.
490 #
491 #thumbnail_sizes:
492 # - width: 32
493 # height: 32
494 # method: crop
495 # - width: 96
496 # height: 96
497 # method: crop
498 # - width: 320
499 # height: 240
500 # method: scale
501 # - width: 640
502 # height: 480
503 # method: scale
504 # - width: 800
505 # height: 600
506 # method: scale
507
508 # Is the preview URL API enabled?
509 #
510 # 'false' by default: uncomment the following to enable it (and specify a
511 # url_preview_ip_range_blacklist blacklist).
512 #
513 #url_preview_enabled: true
514
515 # List of IP address CIDR ranges that the URL preview spider is denied
516 # from accessing. There are no defaults: you must explicitly
517 # specify a list for URL previewing to work. You should specify any
518 # internal services in your network that you do not want synapse to try
519 # to connect to, otherwise anyone in any Matrix room could cause your
520 # synapse to issue arbitrary GET requests to your internal services,
521 # causing serious security issues.
522 #
523 # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
524 # listed here, since they correspond to unroutable addresses.)
525 #
526 # This must be specified if url_preview_enabled is set. It is recommended that
527 # you uncomment the following list as a starting point.
528 #
529 #url_preview_ip_range_blacklist:
530 # - '127.0.0.0/8'
531 # - '10.0.0.0/8'
532 # - '172.16.0.0/12'
533 # - '192.168.0.0/16'
534 # - '100.64.0.0/10'
535 # - '169.254.0.0/16'
536 # - '::1/128'
537 # - 'fe80::/64'
538 # - 'fc00::/7'
539
540 # List of IP address CIDR ranges that the URL preview spider is allowed
541 # to access even if they are specified in url_preview_ip_range_blacklist.
542 # This is useful for specifying exceptions to wide-ranging blacklisted
543 # target IP ranges - e.g. for enabling URL previews for a specific private
544 # website only visible in your network.
545 #
546 #url_preview_ip_range_whitelist:
547 # - '192.168.1.1'
548
549 # Optional list of URL matches that the URL preview spider is
550 # denied from accessing. You should use url_preview_ip_range_blacklist
551 # in preference to this, otherwise someone could define a public DNS
552 # entry that points to a private IP address and circumvent the blacklist.
553 # This is more useful if you know there is an entire shape of URL that
554 # you know you will never want synapse to try to spider.
555 #
556 # Each list entry is a dictionary of url component attributes as returned
557 # by urlparse.urlsplit as applied to the absolute form of the URL. See
558 # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
559 # The values of the dictionary are treated as a filename match pattern
560 # applied to that component of URLs, unless they start with a ^ in which
561 # case they are treated as a regular expression match. If all the
562 # specified component matches for a given list item succeed, the URL is
563 # blacklisted.
564 #
565 #url_preview_url_blacklist:
566 # # blacklist any URL with a username in its URI
567 # - username: '*'
568 #
569 # # blacklist all *.google.com URLs
570 # - netloc: 'google.com'
571 # - netloc: '*.google.com'
572 #
573 # # blacklist all plain HTTP URLs
574 # - scheme: 'http'
575 #
576 # # blacklist http(s)://www.acme.com/foo
577 # - netloc: 'www.acme.com'
578 # path: '/foo'
579 #
580 # # blacklist any URL with a literal IPv4 address
581 # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
582
583 # The largest allowed URL preview spidering size in bytes
584 #
585 #max_spider_size: 10M
586
587
588 ## Captcha ##
589 # See docs/CAPTCHA_SETUP for full details of configuring this.
590
591 # This Home Server's ReCAPTCHA public key.
592 #
593 #recaptcha_public_key: "YOUR_PUBLIC_KEY"
594
595 # This Home Server's ReCAPTCHA private key.
596 #
597 #recaptcha_private_key: "YOUR_PRIVATE_KEY"
598
599 # Enables ReCaptcha checks when registering, preventing signup
600 # unless a captcha is answered. Requires a valid ReCaptcha
601 # public/private key.
602 #
603 #enable_registration_captcha: false
604
605 # A secret key used to bypass the captcha test entirely.
606 #
607 #captcha_bypass_secret: "YOUR_SECRET_HERE"
608
609 # The API endpoint to use for verifying m.login.recaptcha responses.
610 #
611 #recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
612
613
614 ## TURN ##
615
616 # The public URIs of the TURN server to give to clients
617 #
618 #turn_uris: []
619
620 # The shared secret used to compute passwords for the TURN server
621 #
622 #turn_shared_secret: "YOUR_SHARED_SECRET"
623
624 # The username and password if the TURN server needs them and
625 # does not use a token
626 #
627 #turn_username: "TURNSERVER_USERNAME"
628 #turn_password: "TURNSERVER_PASSWORD"
629
630 # How long generated TURN credentials last
631 #
632 #turn_user_lifetime: 1h
633
634 # Whether guests should be allowed to use the TURN server.
635 # This defaults to True, otherwise VoIP will be unreliable for guests.
636 # However, it does introduce a slight security risk as it allows users to
637 # connect to arbitrary endpoints without having first signed up for a
638 # valid account (e.g. by passing a CAPTCHA).
639 #
640 #turn_allow_guests: True
641
642
643 ## Registration ##
644 #
645 # Registration can be rate-limited using the parameters in the "Ratelimiting"
646 # section of this file.
647
648 # Enable registration for new users.
649 #
650 #enable_registration: false
651
652 # The user must provide all of the below types of 3PID when registering.
653 #
654 #registrations_require_3pid:
655 # - email
656 # - msisdn
657
658 # Explicitly disable asking for MSISDNs from the registration
659 # flow (overrides registrations_require_3pid if MSISDNs are set as required)
660 #
661 #disable_msisdn_registration: true
662
663 # Mandate that users are only allowed to associate certain formats of
664 # 3PIDs with accounts on this server.
665 #
666 #allowed_local_3pids:
667 # - medium: email
668 # pattern: '.*@matrix\.org'
669 # - medium: email
670 # pattern: '.*@vector\.im'
671 # - medium: msisdn
672 # pattern: '\+44'
673
674 # If set, allows registration of standard or admin accounts by anyone who
675 # has the shared secret, even if registration is otherwise disabled.
676 #
677 # registration_shared_secret: <PRIVATE STRING>
678
679 # Set the number of bcrypt rounds used to generate password hash.
680 # Larger numbers increase the work factor needed to generate the hash.
681 # The default number is 12 (which equates to 2^12 rounds).
682 # N.B. that increasing this will exponentially increase the time required
683 # to register or log in - e.g. 24 => 2^24 rounds which will take >20 mins.
684 #
685 #bcrypt_rounds: 12
686
687 # Allows users to register as guests without a password/email/etc, and
688 # participate in rooms hosted on this server which have been made
689 # accessible to anonymous users.
690 #
691 #allow_guest_access: false
692
693 # The identity server which we suggest that clients should use when users log
694 # in on this server.
695 #
696 # (By default, no suggestion is made, so it is left up to the client.
697 # This setting is ignored unless public_baseurl is also set.)
698 #
699 #default_identity_server: https://matrix.org
700
701 # The list of identity servers trusted to verify third party
702 # identifiers by this server.
703 #
704 # Also defines the ID server which will be called when an account is
705 # deactivated (one will be picked arbitrarily).
706 #
707 #trusted_third_party_id_servers:
708 # - matrix.org
709 # - vector.im
710
711 # Users who register on this homeserver will automatically be joined
712 # to these rooms
713 #
714 #auto_join_rooms:
715 # - "#example:example.com"
716
717 # Where auto_join_rooms are specified, setting this flag ensures that
718 # the rooms exist by creating them when the first user on the
719 # homeserver registers.
720 # Setting to false means that if the rooms are not manually created,
721 # users cannot be auto-joined since they do not exist.
722 #
723 #autocreate_auto_join_rooms: true
724
725
726 ## Metrics ##
727
728 # Enable collection and rendering of performance metrics
729 #
730 #enable_metrics: False
731
732 # Enable sentry integration
733 # NOTE: While attempts are made to ensure that the logs don't contain
734 # any sensitive information, this cannot be guaranteed. By enabling
735 # this option the sentry server may therefore receive sensitive
736 # information, and it in turn may then disseminate sensitive information
737 # through insecure notification channels if so configured.
738 #
739 #sentry:
740 # dsn: "..."
741
742 # Whether or not to report anonymized homeserver usage statistics.
743 # report_stats: true|false
744
745
746 ## API Configuration ##
747
748 # A list of event types that will be included in the room_invite_state
749 #
750 #room_invite_state_types:
751 # - "m.room.join_rules"
752 # - "m.room.canonical_alias"
753 # - "m.room.avatar"
754 # - "m.room.encryption"
755 # - "m.room.name"
756
757
758 # A list of application service config files to use
759 #
760 #app_service_config_files:
761 # - app_service_1.yaml
762 # - app_service_2.yaml
763
764 # Uncomment to enable tracking of application service IP addresses. Implicitly
765 # enables MAU tracking for application service users.
766 #
767 #track_appservice_user_ips: True
768
769
770 # a secret which is used to sign access tokens. If none is specified,
771 # the registration_shared_secret is used, if one is given; otherwise,
772 # a secret key is derived from the signing key.
773 #
774 # macaroon_secret_key: <PRIVATE STRING>
775
776 # Used to enable access token expiration.
777 #
778 #expire_access_token: False
779
780 # a secret which is used to calculate HMACs for form values, to stop
781 # falsification of values. Must be specified for the User Consent
782 # forms to work.
783 #
784 # form_secret: <PRIVATE STRING>
785
786 ## Signing Keys ##
787
788 # Path to the signing key to sign messages with
789 #
790 signing_key_path: "CONFDIR/SERVERNAME.signing.key"
791
792 # The keys that the server used to sign messages with but won't use
793 # to sign new messages. E.g. it has lost its private key
794 #
795 #old_signing_keys:
796 # "ed25519:auto":
797 # # Base64 encoded public key
798 # key: "The public part of your old signing key."
799 # # Millisecond POSIX timestamp when the key expired.
800 # expired_ts: 123456789123
801
802 # How long a key response published by this server is valid for.
803 # Used to set the valid_until_ts in /key/v2 APIs.
804 # Determines how quickly servers will query to check which keys
805 # are still valid.
806 #
807 #key_refresh_interval: 1d
808
809 # The trusted servers to download signing keys from.
810 #
811 #perspectives:
812 # servers:
813 # "matrix.org":
814 # verify_keys:
815 # "ed25519:auto":
816 # key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
817
818
819 # Enable SAML2 for registration and login. Uses pysaml2.
820 #
821 # `sp_config` is the configuration for the pysaml2 Service Provider.
822 # See pysaml2 docs for format of config.
823 #
824 # Default values will be used for the 'entityid' and 'service' settings,
825 # so it is not normally necessary to specify them unless you need to
826 # override them.
827 #
828 #saml2_config:
829 # sp_config:
830 # # point this to the IdP's metadata. You can use either a local file or
831 # # (preferably) a URL.
832 # metadata:
833 # #local: ["saml2/idp.xml"]
834 # remote:
835 # - url: https://our_idp/metadata.xml
836 #
837 # # The rest of sp_config is just used to generate our metadata xml, and you
838 # # may well not need it, depending on your setup. Alternatively you
839 # # may need a whole lot more detail - see the pysaml2 docs!
840 #
841 # description: ["My awesome SP", "en"]
842 # name: ["Test SP", "en"]
843 #
844 # organization:
845 # name: Example com
846 # display_name:
847 # - ["Example co", "en"]
848 # url: "http://example.com"
849 #
850 # contact_person:
851 # - given_name: Bob
852 # sur_name: "the Sysadmin"
853 # email_address": ["admin@example.com"]
854 # contact_type": technical
855 #
856 # # Instead of putting the config inline as above, you can specify a
857 # # separate pysaml2 configuration file:
858 # #
859 # config_path: "CONFDIR/sp_conf.py"
860
861
862
863 # Enable CAS for registration and login.
864 #
865 #cas_config:
866 # enabled: true
867 # server_url: "https://cas-server.com"
868 # service_url: "https://homeserver.domain.com:8448"
869 # #required_attributes:
870 # # name: value
871
872
873 # The JWT needs to contain a globally unique "sub" (subject) claim.
874 #
875 #jwt_config:
876 # enabled: true
877 # secret: "a secret"
878 # algorithm: "HS256"
879
880
881 password_config:
882 # Uncomment to disable password login
883 #
884 #enabled: false
885
886 # Uncomment and change to a secret random string for extra security.
887 # DO NOT CHANGE THIS AFTER INITIAL SETUP!
888 #
889 #pepper: "EVEN_MORE_SECRET"
890
891
892
893 # Enable sending emails for notification events
894 # Defining a custom URL for Riot is only needed if email notifications
895 # should contain links to a self-hosted installation of Riot; when set,
896 # the "app_name" setting is ignored.
897 #
898 # If your SMTP server requires authentication, the optional smtp_user &
899 # smtp_pass variables should be used
900 #
901 #email:
902 # enable_notifs: false
903 # smtp_host: "localhost"
904 # smtp_port: 25
905 # smtp_user: "exampleusername"
906 # smtp_pass: "examplepassword"
907 # require_transport_security: False
908 # notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
909 # app_name: Matrix
910 # # if template_dir is unset, uses the example templates that are part of
911 # # the Synapse distribution.
912 # #template_dir: res/templates
913 # notif_template_html: notif_mail.html
914 # notif_template_text: notif_mail.txt
915 # notif_for_new_users: True
916 # riot_base_url: "http://localhost/riot"
917
918
919 #password_providers:
920 # - module: "ldap_auth_provider.LdapAuthProvider"
921 # config:
922 # enabled: true
923 # uri: "ldap://ldap.example.com:389"
924 # start_tls: true
925 # base: "ou=users,dc=example,dc=com"
926 # attributes:
927 # uid: "cn"
928 # mail: "email"
929 # name: "givenName"
930 # #bind_dn:
931 # #bind_password:
932 # #filter: "(objectClass=posixAccount)"
933
934
935
936 # Clients requesting push notifications can either have the body of
937 # the message sent in the notification poke along with other details
938 # like the sender, or just the event ID and room ID (`event_id_only`).
939 # If clients choose the former, this option controls whether the
940 # notification request includes the content of the event (other details
941 # like the sender are still included). For `event_id_only` push, it
942 # has no effect.
943 #
944 # For modern Android devices the notification content will still appear
945 # because it is loaded by the app. iPhone, however, will send a
946 # notification saying only that a message arrived and who it came from.
947 #
948 #push:
949 # include_content: true
950
951
952 #spam_checker:
953 # module: "my_custom_project.SuperSpamChecker"
954 # config:
955 # example_option: 'things'
956
957
958 # Uncomment to allow non-server-admin users to create groups on this server
959 #
960 #enable_group_creation: true
961
962 # If enabled, non server admins can only create groups with local parts
963 # starting with this prefix
964 #
965 #group_creation_prefix: "unofficial/"
966
967
968
969 # User Directory configuration
970 #
971 # 'enabled' defines whether users can search the user directory. If
972 # false then empty responses are returned to all queries. Defaults to
973 # true.
974 #
975 # 'search_all_users' defines whether to search all users visible to your HS
976 # when searching the user directory, rather than limiting to users visible
977 # in public rooms. Defaults to false. If you set it to true, you'll have to run
978 # UPDATE user_directory_stream_pos SET stream_id = NULL;
979 # on your database to tell it to rebuild the user_directory search indexes.
980 #
981 #user_directory:
982 # enabled: true
983 # search_all_users: false
984
985
986 # User Consent configuration
987 #
988 # for detailed instructions, see
989 # https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
990 #
991 # Parts of this section are required if enabling the 'consent' resource under
992 # 'listeners', in particular 'template_dir' and 'version'.
993 #
994 # 'template_dir' gives the location of the templates for the HTML forms.
995 # This directory should contain one subdirectory per language (eg, 'en', 'fr'),
996 # and each language directory should contain the policy document (named as
997 # '<version>.html') and a success page (success.html).
998 #
999 # 'version' specifies the 'current' version of the policy document. It defines
1000 # the version to be served by the consent resource if there is no 'v'
1001 # parameter.
1002 #
1003 # 'server_notice_content', if enabled, will send a user a "Server Notice"
1004 # asking them to consent to the privacy policy. The 'server_notices' section
1005 # must also be configured for this to work. Notices will *not* be sent to
1006 # guest users unless 'send_server_notice_to_guests' is set to true.
1007 #
1008 # 'block_events_error', if set, will block any attempts to send events
1009 # until the user consents to the privacy policy. The value of the setting is
1010 # used as the text of the error.
1011 #
1012 # 'require_at_registration', if enabled, will add a step to the registration
1013 # process, similar to how captcha works. Users will be required to accept the
1014 # policy before their account is created.
1015 #
1016 # 'policy_name' is the display name of the policy users will see when registering
1017 # for an account. Has no effect unless `require_at_registration` is enabled.
1018 # Defaults to "Privacy Policy".
1019 #
1020 #user_consent:
1021 # template_dir: res/templates/privacy
1022 # version: 1.0
1023 # server_notice_content:
1024 # msgtype: m.text
1025 # body: >-
1026 # To continue using this homeserver you must review and agree to the
1027 # terms and conditions at %(consent_uri)s
1028 # send_server_notice_to_guests: True
1029 # block_events_error: >-
1030 # To continue using this homeserver you must review and agree to the
1031 # terms and conditions at %(consent_uri)s
1032 # require_at_registration: False
1033 # policy_name: Privacy Policy
1034 #
1035
1036
1037 # Server Notices room configuration
1038 #
1039 # Uncomment this section to enable a room which can be used to send notices
1040 # from the server to users. It is a special room which cannot be left; notices
1041 # come from a special "notices" user id.
1042 #
1043 # If you uncomment this section, you *must* define the system_mxid_localpart
1044 # setting, which defines the id of the user which will be used to send the
1045 # notices.
1046 #
1047 # It's also possible to override the room name, the display name of the
1048 # "notices" user, and the avatar for the user.
1049 #
1050 #server_notices:
1051 # system_mxid_localpart: notices
1052 # system_mxid_display_name: "Server Notices"
1053 # system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
1054 # room_name: "Server Notices"
1055
1056
1057
1058 # Uncomment to disable searching the public room list. When disabled,
1059 # this blocks searching of local and remote room lists for local and
1060 # remote users by always returning an empty list for all queries.
1061 #
1062 #enable_room_list_search: false
1063
1064 # The `alias_creation` option controls who's allowed to create aliases
1065 # on this server.
1066 #
1067 # The format of this option is a list of rules that contain globs that
1068 # match against user_id, room_id and the new alias (fully qualified with
1069 # server name). The action in the first rule that matches is taken,
1070 # which can currently either be "allow" or "deny".
1071 #
1072 # Missing user_id/room_id/alias fields default to "*".
1073 #
1074 # If no rules match, the request is denied. An empty list means no one
1075 # can create aliases.
1076 #
1077 # Options for the rules include:
1078 #
1079 # user_id: Matches against the creator of the alias
1080 # alias: Matches against the alias being created
1081 # room_id: Matches against the room ID the alias is being pointed at
1082 # action: Whether to "allow" or "deny" the request if the rule matches
1083 #
1084 # The default is:
1085 #
1086 #alias_creation_rules:
1087 # - user_id: "*"
1088 # alias: "*"
1089 # room_id: "*"
1090 # action: allow
1091
1092 # The `room_list_publication_rules` option controls who can publish and
1093 # which rooms can be published in the public room list.
1094 #
1095 # The format of this option is the same as that for
1096 # `alias_creation_rules`.
1097 #
1098 # If the room has one or more aliases associated with it, only one of
1099 # the aliases needs to match the alias rule. If there are no aliases
1100 # then only rules with `alias: *` match.
1101 #
1102 # If no rules match, the request is denied. An empty list means no one
1103 # can publish rooms.
1104 #
1105 # Options for the rules include:
1106 #
1107 # user_id: Matches against the creator of the alias
1108 # room_id: Matches against the room ID being published
1109 # alias: Matches against any current local or canonical aliases
1110 # associated with the room
1111 # action: Whether to "allow" or "deny" the request if the rule matches
1112 #
1113 # The default is:
1114 #
1115 #room_list_publication_rules:
1116 # - user_id: "*"
1117 # alias: "*"
1118 # room_id: "*"
1119 # action: allow
187187 A single update in a stream
188188
189189 POSITION (S)
190 The position of the stream has been updated
190 The position of the stream has been updated. Sent to the client after all
191 missing updates for a stream have been sent, indicating that the client is
192 now up to date.
191193
192194 ERROR (S, C)
193195 There was an error
181181 ^/_matrix/federation/v1/event_auth/
182182 ^/_matrix/federation/v1/exchange_third_party_invite/
183183 ^/_matrix/federation/v1/send/
184 ^/_matrix/key/v2/query
184185
185186 The above endpoints should all be routed to the federation_reader worker by the
186187 reverse-proxy configuration.
222223 ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
223224 ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
224225 ^/_matrix/client/(api/v1|r0|unstable)/login$
226 ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
227 ^/_matrix/client/(api/v1|r0|unstable)/keys/query$
228 ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
225229
226230 Additionally, the following REST endpoints can be handled, but all requests must
227231 be routed to the same instance::
00 #!/usr/bin/env python
11
22 import argparse
3 import shutil
34 import sys
45
56 from synapse.config.homeserver import HomeServerConfig
4950 help="File to write the configuration to. Default: stdout",
5051 )
5152
53 parser.add_argument(
54 "--header-file",
55 type=argparse.FileType('r'),
56 help="File from which to read a header, which will be printed before the "
57 "generated config.",
58 )
59
5260 args = parser.parse_args()
5361
5462 report_stats = args.report_stats
6371 report_stats=report_stats,
6472 )
6573
74 if args.header_file:
75 shutil.copyfileobj(args.header_file, args.output_file)
76
6677 args.output_file.write(conf)
2323 "ubuntu:xenial",
2424 "ubuntu:bionic",
2525 "ubuntu:cosmic",
26 "ubuntu:disco",
2627 )
2728
2829 DESC = '''\
66
77 # make sure that origin/develop is up to date
88 git remote set-branches --add origin develop
9 git fetch --depth=1 origin develop
10
11 UPSTREAM=origin/develop
9 git fetch origin develop
1210
1311 # if there are changes in the debian directory, check that the debian changelog
1412 # has been updated
15 if ! git diff --quiet $UPSTREAM... -- debian; then
16 if git diff --quiet $UPSTREAM... -- debian/changelog; then
13 if ! git diff --quiet FETCH_HEAD... -- debian; then
14 if git diff --quiet FETCH_HEAD... -- debian/changelog; then
1715 echo "Updates to debian directory, but no update to the changelog." >&2
1816 exit 1
1917 fi
2119
2220 # if there are changes *outside* the debian directory, check that the
2321 # newsfragments have been updated.
24 if git diff --name-only $UPSTREAM... | grep -qv '^develop/'; then
22 if git diff --name-only FETCH_HEAD... | grep -qv '^debian/'; then
2523 tox -e check-newsfragment
2624 fi
2725
3028 echo
3129
3230 # check that any new newsfiles on this branch end with a full stop.
33 for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
31 for f in `git diff --name-only FETCH_HEAD... -- changelog.d`; do
3432 lastchar=`tr -d '\n' < $f | tail -c 1`
35 if [ $lastchar != '.' ]; then
36 echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
33 if [ $lastchar != '.' -a $lastchar != '!' ]; then
34 echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2
3735 exit 1
3836 fi
3937 done
7575
7676
7777 def main():
78 config = yaml.load(open(sys.argv[1]))
78 config = yaml.safe_load(open(sys.argv[1]))
7979 valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
8080
8181 server_name = config["server_name"]
0 #!/bin/bash
1 #
2 # Update/check the docs/sample_config.yaml
3
4 set -e
5
6 cd `dirname $0`/..
7
8 SAMPLE_CONFIG="docs/sample_config.yaml"
9
10 if [ "$1" == "--check" ]; then
11 diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || {
12 echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2
13 exit 1
14 }
15 else
16 ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG"
17 fi
2626 except ImportError:
2727 pass
2828
29 __version__ = "0.99.2"
29 __version__ = "0.99.3.2"
620620
621621 Returns:
622622 True if the the sender is allowed to redact the target event if the
623 target event was created by them.
623 target event was created by them.
624624 False if the sender is allowed to redact the target event with no
625 further checks.
625 further checks.
626626
627627 Raises:
628628 AuthError if the event sender is definitely not allowed to redact
629 the target event.
629 the target event.
630630 """
631631 return event_auth.check_redaction(room_version, event, auth_events)
632632
742742
743743 Returns:
744744 Deferred[tuple[str, str|None]]: Resolves to the current membership of
745 the user in the room and the membership event ID of the user. If
746 the user is not in the room and never has been, then
747 `(Membership.JOIN, None)` is returned.
745 the user in the room and the membership event ID of the user. If
746 the user is not in the room and never has been, then
747 `(Membership.JOIN, None)` is returned.
748748 """
749749
750750 try:
776776
777777 Args:
778778 user_id(str|None): If present, checks for presence against existing
779 MAU cohort
779 MAU cohort
780780
781781 threepid(dict|None): If present, checks for presence against configured
782 reserved threepid. Used in cases where the user is trying register
783 with a MAU blocked server, normally they would be rejected but their
784 threepid is on the reserved list. user_id and
785 threepid should never be set at the same time.
782 reserved threepid. Used in cases where the user is trying register
783 with a MAU blocked server, normally they would be rejected but their
784 threepid is on the reserved list. user_id and
785 threepid should never be set at the same time.
786786 """
787787
788788 # Never fail an auth check for the server notices users or support user
789789 # This can be a problem where event creation is prohibited due to blocking
790 is_support = yield self.store.is_support_user(user_id)
791 if user_id == self.hs.config.server_notices_mxid or is_support:
792 return
790 if user_id is not None:
791 if user_id == self.hs.config.server_notices_mxid:
792 return
793 if (yield self.store.is_support_user(user_id)):
794 return
793795
794796 if self.hs.config.hs_disabled:
795797 raise ResourceLimitError(
1313
1414 import collections
1515
16 from synapse.api.errors import LimitExceededError
17
1618
1719 class Ratelimiter(object):
1820 """
2224 def __init__(self):
2325 self.message_counts = collections.OrderedDict()
2426
25 def send_message(self, user_id, time_now_s, msg_rate_hz, burst_count, update=True):
26 """Can the user send a message?
27 def can_do_action(self, key, time_now_s, rate_hz, burst_count, update=True):
28 """Can the entity (e.g. user or IP address) perform the action?
2729 Args:
28 user_id: The user sending a message.
30 key: The key we should use when rate limiting. Can be a user ID
31 (when sending events), an IP address, etc.
2932 time_now_s: The time now.
30 msg_rate_hz: The long term number of messages a user can send in a
33 rate_hz: The long term number of messages a user can send in a
3134 second.
3235 burst_count: How many messages the user can send before being
3336 limited.
4043 """
4144 self.prune_message_counts(time_now_s)
4245 message_count, time_start, _ignored = self.message_counts.get(
43 user_id, (0., time_now_s, None),
46 key, (0., time_now_s, None),
4447 )
4548 time_delta = time_now_s - time_start
46 sent_count = message_count - time_delta * msg_rate_hz
49 sent_count = message_count - time_delta * rate_hz
4750 if sent_count < 0:
4851 allowed = True
4952 time_start = time_now_s
5558 message_count += 1
5659
5760 if update:
58 self.message_counts[user_id] = (
59 message_count, time_start, msg_rate_hz
61 self.message_counts[key] = (
62 message_count, time_start, rate_hz
6063 )
6164
62 if msg_rate_hz > 0:
65 if rate_hz > 0:
6366 time_allowed = (
64 time_start + (message_count - burst_count + 1) / msg_rate_hz
67 time_start + (message_count - burst_count + 1) / rate_hz
6568 )
6669 if time_allowed < time_now_s:
6770 time_allowed = time_now_s
7174 return allowed, time_allowed
7275
7376 def prune_message_counts(self, time_now_s):
74 for user_id in list(self.message_counts.keys()):
75 message_count, time_start, msg_rate_hz = (
76 self.message_counts[user_id]
77 for key in list(self.message_counts.keys()):
78 message_count, time_start, rate_hz = (
79 self.message_counts[key]
7780 )
7881 time_delta = time_now_s - time_start
79 if message_count - time_delta * msg_rate_hz > 0:
82 if message_count - time_delta * rate_hz > 0:
8083 break
8184 else:
82 del self.message_counts[user_id]
85 del self.message_counts[key]
86
87 def ratelimit(self, key, time_now_s, rate_hz, burst_count, update=True):
88 allowed, time_allowed = self.can_do_action(
89 key, time_now_s, rate_hz, burst_count, update
90 )
91
92 if not allowed:
93 raise LimitExceededError(
94 retry_after_ms=int(1000 * (time_allowed - time_now_s)),
95 )
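
For illustration, a minimal usage sketch of the renamed interface (the key
and rate values below are arbitrary examples):

    import time

    limiter = Ratelimiter()

    # Allow a burst of 3 actions, refilling at 0.17 actions per second.
    allowed, time_allowed = limiter.can_do_action(
        "203.0.113.7", time.time(), rate_hz=0.17, burst_count=3,
    )
    if not allowed:
        print("retry after %.1f seconds" % (time_allowed - time.time()))

    # Or have the limiter raise LimitExceededError on our behalf:
    limiter.ratelimit("@alice:example.com", time.time(), rate_hz=0.17, burst_count=3)
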
6262
6363 start_reactor(
6464 appname,
65 config.soft_file_limit,
66 config.gc_thresholds,
67 config.worker_pid_file,
68 config.worker_daemonize,
69 config.worker_cpu_affinity,
70 logger,
65 soft_file_limit=config.soft_file_limit,
66 gc_thresholds=config.gc_thresholds,
67 pid_file=config.worker_pid_file,
68 daemonize=config.worker_daemonize,
69 cpu_affinity=config.worker_cpu_affinity,
70 print_pidfile=config.print_pidfile,
71 logger=logger,
7172 )
7273
7374
7879 pid_file,
7980 daemonize,
8081 cpu_affinity,
82 print_pidfile,
8183 logger,
8284 ):
8385 """ Run the reactor in the main process
9294 pid_file (str): name of pid file to write to if daemonize is True
9395 daemonize (bool): true to run the reactor in a background process
9496 cpu_affinity (int|None): cpu affinity mask
97 print_pidfile (bool): whether to print the pid file, if daemonize is True
9598 logger (logging.Logger): logger instance to pass to Daemonize
9699 """
97100
123126 reactor.run()
124127
125128 if daemonize:
129 if print_pidfile:
130 print(pid_file)
131
126132 daemon = Daemonize(
127133 app=appname,
128134 pid=pid_file,
3232 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
3333 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
3434 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
35 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
36 from synapse.replication.slave.storage.devices import SlavedDeviceStore
3537 from synapse.replication.slave.storage.directory import DirectoryStore
3638 from synapse.replication.slave.storage.events import SlavedEventStore
3739 from synapse.replication.slave.storage.keys import SlavedKeyStore
40 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
41 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
3842 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
3943 from synapse.replication.slave.storage.room import RoomStore
4044 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
4751 RoomMemberListRestServlet,
4852 RoomStateRestServlet,
4953 )
54 from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
55 from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
5056 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
5157 from synapse.server import HomeServer
5258 from synapse.storage.engines import create_engine
5965
6066
6167 class ClientReaderSlavedStore(
68 SlavedDeviceInboxStore,
69 SlavedDeviceStore,
70 SlavedReceiptsStore,
71 SlavedPushRuleStore,
6272 SlavedAccountDataStore,
6373 SlavedEventStore,
6474 SlavedKeyStore,
95105 RoomEventContextServlet(self).register(resource)
96106 RegisterRestServlet(self).register(resource)
97107 LoginRestServlet(self).register(resource)
108 ThreepidRestServlet(self).register(resource)
109 KeyQueryServlet(self).register(resource)
110 KeyChangesServlet(self).register(resource)
98111
99112 resources.update({
100113 "/_matrix/client/r0": resource,
2020
2121 import synapse
2222 from synapse import events
23 from synapse.api.urls import FEDERATION_PREFIX
23 from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
2424 from synapse.app import _base
2525 from synapse.config._base import ConfigError
2626 from synapse.config.homeserver import HomeServerConfig
4343 from synapse.replication.slave.storage.room import RoomStore
4444 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
4545 from synapse.replication.tcp.client import ReplicationClientHandler
46 from synapse.rest.key.v2 import KeyApiV2Resource
4647 from synapse.server import HomeServer
4748 from synapse.storage.engines import create_engine
4849 from synapse.util.httpresourcetree import create_resource_tree
9798 servlet_groups=["openid"],
9899 ),
99100 })
101
102 if name in ["keys", "federation"]:
103 resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
100104
101105 root_resource = create_resource_tree(resources, NoResource())
102106
2727 from synapse.federation import send_queue
2828 from synapse.http.site import SynapseSite
2929 from synapse.metrics import RegistryProxy
30 from synapse.metrics.background_process_metrics import run_as_background_process
3031 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
3132 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
3233 from synapse.replication.slave.storage.devices import SlavedDeviceStore
3637 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
3738 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
3839 from synapse.replication.tcp.client import ReplicationClientHandler
40 from synapse.replication.tcp.streams import ReceiptsStream
3941 from synapse.server import HomeServer
4042 from synapse.storage.engines import create_engine
43 from synapse.types import ReadReceipt
4144 from synapse.util.async_helpers import Linearizer
4245 from synapse.util.httpresourcetree import create_resource_tree
4346 from synapse.util.logcontext import LoggingContext, run_in_background
201204 """
202205 def __init__(self, hs, replication_client):
203206 self.store = hs.get_datastore()
207 self._is_mine_id = hs.is_mine_id
204208 self.federation_sender = hs.get_federation_sender()
205209 self.replication_client = replication_client
206210
233237 elif stream_name == "events":
234238 self.federation_sender.notify_new_events(token)
235239
240 # ... and when new receipts happen
241 elif stream_name == ReceiptsStream.NAME:
242 run_as_background_process(
243 "process_receipts_for_federation", self._on_new_receipts, rows,
244 )
245
246 @defer.inlineCallbacks
247 def _on_new_receipts(self, rows):
248 """
249 Args:
250 rows (iterable[synapse.replication.tcp.streams.ReceiptsStreamRow]):
251 new receipts to be processed
252 """
253 for receipt in rows:
254 # we only want to send on receipts for our own users
255 if not self._is_mine_id(receipt.user_id):
256 continue
257 receipt_info = ReadReceipt(
258 receipt.room_id,
259 receipt.receipt_type,
260 receipt.user_id,
261 [receipt.event_id],
262 receipt.data,
263 )
264 yield self.federation_sender.send_read_receipt(receipt_info)
265
236266 @defer.inlineCallbacks
237267 def update_token(self, token):
238268 try:
375375 logger.info("Database prepared in %s.", config.database_config['name'])
376376
377377 hs.setup()
378 hs.setup_master()
378379
379380 @defer.inlineCallbacks
380381 def do_acme():
635636 # be quite busy the first few minutes
636637 clock.call_later(5 * 60, start_phone_stats_home)
637638
638 if hs.config.daemonize and hs.config.print_pidfile:
639 print(hs.config.pid_file)
640
641639 _base.start_reactor(
642640 "synapse-homeserver",
643 hs.config.soft_file_limit,
644 hs.config.gc_thresholds,
645 hs.config.pid_file,
646 hs.config.daemonize,
647 hs.config.cpu_affinity,
648 logger,
641 soft_file_limit=hs.config.soft_file_limit,
642 gc_thresholds=hs.config.gc_thresholds,
643 pid_file=hs.config.pid_file,
644 daemonize=hs.config.daemonize,
645 cpu_affinity=hs.config.cpu_affinity,
646 print_pidfile=hs.config.print_pidfile,
647 logger=logger,
649648 )
650649
651650
136136 @staticmethod
137137 def read_config_file(file_path):
138138 with open(file_path) as file_stream:
139 return yaml.load(file_stream)
139 return yaml.safe_load(file_stream)
140140
141141 def invoke_all(self, name, *args, **kargs):
142142 results = []
179179 Returns:
180180 str: the yaml config file
181181 """
182 default_config = "# vim:ft=yaml\n"
183
184 default_config += "\n\n".join(
182 default_config = "\n\n".join(
185183 dedent(conf)
186184 for conf in self.invoke_all(
187185 "default_config",
215213 " Defaults to the directory containing the last config file",
216214 )
217215
216 obj = cls()
217
218 obj.invoke_all("add_arguments", config_parser)
219
218220 config_args = config_parser.parse_args(argv)
219221
220222 config_files = find_config_files(search_paths=config_args.config_path)
221223
222 obj = cls()
223224 obj.read_config_files(
224225 config_files, keys_directory=config_args.keys_directory, generate_keys=False
225226 )
227
228 obj.invoke_all("read_arguments", config_args)
229
226230 return obj
227231
228232 @classmethod
296300 "Must specify a server_name to a generate config for."
297301 " Pass -H server.name."
298302 )
303
304 config_str = obj.generate_config(
305 config_dir_path=config_dir_path,
306 data_dir_path=os.getcwd(),
307 server_name=server_name,
308 report_stats=(config_args.report_stats == "yes"),
309 generate_secrets=True,
310 )
311
299312 if not cls.path_exists(config_dir_path):
300313 os.makedirs(config_dir_path)
301314 with open(config_path, "w") as config_file:
302 config_str = obj.generate_config(
303 config_dir_path=config_dir_path,
304 data_dir_path=os.getcwd(),
305 server_name=server_name,
306 report_stats=(config_args.report_stats == "yes"),
307 generate_secrets=True,
315 config_file.write(
316 "# vim:ft=yaml\n\n"
308317 )
309 config = yaml.load(config_str)
310 obj.invoke_all("generate_files", config)
311318 config_file.write(config_str)
319
320 config = yaml.safe_load(config_str)
321 obj.invoke_all("generate_files", config)
322
312323 print(
313324 (
314325 "A config file has been generated in %r for server name"
378389 server_name=server_name,
379390 generate_secrets=False,
380391 )
381 config = yaml.load(config_string)
392 config = yaml.safe_load(config_string)
382393 config.pop("log_config")
383394 config.update(specified_config)
384395
393404 self.invoke_all("generate_files", config)
394405 return
395406
396 self.invoke_all("read_config", config)
407 self.parse_config_dict(config)
408
409 def parse_config_dict(self, config_dict):
410 self.invoke_all("read_config", config_dict)
397411
398412
399413 def find_config_files(search_paths):
3333
3434 # A list of event types that will be included in the room_invite_state
3535 #
36 room_invite_state_types:
37 - "{JoinRules}"
38 - "{CanonicalAlias}"
39 - "{RoomAvatar}"
40 - "{RoomEncryption}"
41 - "{Name}"
36 #room_invite_state_types:
37 # - "{JoinRules}"
38 # - "{CanonicalAlias}"
39 # - "{RoomAvatar}"
40 # - "{RoomEncryption}"
41 # - "{Name}"
4242 """.format(**vars(EventTypes))
3636
3737 def default_config(cls, **kwargs):
3838 return """\
39 # A list of application service config file to use
39 # A list of application service config files to use
4040 #
41 app_service_config_files: []
41 #app_service_config_files:
42 # - app_service_1.yaml
43 # - app_service_2.yaml
4244
43 # Whether or not to track application service IP addresses. Implicitly
45 # Uncomment to enable tracking of application service IP addresses. Implicitly
4446 # enables MAU tracking for application service users.
4547 #
46 track_appservice_user_ips: False
48 #track_appservice_user_ips: True
4749 """
4850
4951
6567 try:
6668 with open(config_file, 'r') as f:
6769 appservice = _load_appservice(
68 hostname, yaml.load(f), config_file
70 hostname, yaml.safe_load(f), config_file
6971 )
7072 if appservice.id in seen_ids:
7173 raise ConfigError(
1717 class CaptchaConfig(Config):
1818
1919 def read_config(self, config):
20 self.recaptcha_private_key = config["recaptcha_private_key"]
21 self.recaptcha_public_key = config["recaptcha_public_key"]
22 self.enable_registration_captcha = config["enable_registration_captcha"]
20 self.recaptcha_private_key = config.get("recaptcha_private_key")
21 self.recaptcha_public_key = config.get("recaptcha_public_key")
22 self.enable_registration_captcha = config.get(
23 "enable_registration_captcha", False
24 )
2325 self.captcha_bypass_secret = config.get("captcha_bypass_secret")
24 self.recaptcha_siteverify_api = config["recaptcha_siteverify_api"]
26 self.recaptcha_siteverify_api = config.get(
27 "recaptcha_siteverify_api",
28 "https://www.recaptcha.net/recaptcha/api/siteverify",
29 )
2530
2631 def default_config(self, **kwargs):
2732 return """\
3035
3136 # This Home Server's ReCAPTCHA public key.
3237 #
33 recaptcha_public_key: "YOUR_PUBLIC_KEY"
38 #recaptcha_public_key: "YOUR_PUBLIC_KEY"
3439
3540 # This Home Server's ReCAPTCHA private key.
3641 #
37 recaptcha_private_key: "YOUR_PRIVATE_KEY"
42 #recaptcha_private_key: "YOUR_PRIVATE_KEY"
3843
3944 # Enables ReCaptcha checks when registering, preventing signup
4045 # unless a captcha is answered. Requires a valid ReCaptcha
4146 # public/private key.
4247 #
43 enable_registration_captcha: False
48 #enable_registration_captcha: false
4449
4550 # A secret key used to bypass the captcha test entirely.
51 #
4652 #captcha_bypass_secret: "YOUR_SECRET_HERE"
4753
4854 # The API endpoint to use for verifying m.login.recaptcha responses.
49 recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
55 #
56 #recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
5057 """
4848 def default_config(self, data_dir_path, **kwargs):
4949 database_path = os.path.join(data_dir_path, "homeserver.db")
5050 return """\
51 # Database configuration
51 ## Database ##
52
5253 database:
5354 # The database engine name
5455 name: "sqlite3"
5859 database: "%(database_path)s"
5960
6061 # Number of events to cache in memory.
61 event_cache_size: "10K"
62 #
63 #event_cache_size: 10K
6264 """ % locals()
6365
6466 def read_arguments(self, args):
2222
2323 def default_config(self, **kwargs):
2424 return """\
25 # Whether to allow non server admins to create groups on this server
25 # Uncomment to allow non-server-admin users to create groups on this server
2626 #
27 enable_group_creation: false
27 #enable_group_creation: true
2828
2929 # If enabled, non server admins can only create groups with local parts
3030 # starting with this prefix
3737 class KeyConfig(Config):
3838
3939 def read_config(self, config):
40 self.signing_key = self.read_signing_key(config["signing_key_path"])
40 # the signing key can be specified inline or in a separate file
41 if "signing_key" in config:
42 self.signing_key = read_signing_keys([config["signing_key"]])
43 else:
44 self.signing_key = self.read_signing_key(config["signing_key_path"])
45
4146 self.old_signing_keys = self.read_old_signing_keys(
4247 config.get("old_signing_keys", {})
4348 )
4449 self.key_refresh_interval = self.parse_duration(
45 config["key_refresh_interval"]
50 config.get("key_refresh_interval", "1d"),
4651 )
4752 self.perspectives = self.read_perspectives(
48 config["perspectives"]
53 config.get("perspectives", {}).get("servers", {
54 "matrix.org": {"verify_keys": {
55 "ed25519:auto": {
56 "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
57 }
58 }}
59 })
4960 )
5061
5162 self.macaroon_secret_key = config.get(
8798
8899 # Used to enable access token expiration.
89100 #
90 expire_access_token: False
101 #expire_access_token: False
91102
92103 # a secret which is used to calculate HMACs for form values, to stop
93104 # falsification of values. Must be specified for the User Consent
116127 # Determines how quickly servers will query to check which keys
117128 # are still valid.
118129 #
119 key_refresh_interval: "1d" # 1 Day.
130 #key_refresh_interval: 1d
120131
121132 # The trusted servers to download signing keys from.
122133 #
123 perspectives:
124 servers:
125 "matrix.org":
126 verify_keys:
127 "ed25519:auto":
128 key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
134 #perspectives:
135 # servers:
136 # "matrix.org":
137 # verify_keys:
138 # "ed25519:auto":
139 # key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
129140 """ % locals()
130141
131 def read_perspectives(self, perspectives_config):
142 def read_perspectives(self, perspectives_servers):
132143 servers = {}
133 for server_name, server_config in perspectives_config["servers"].items():
144 for server_name, server_config in perspectives_servers.items():
134145 for key_id, key_data in server_config["verify_keys"].items():
135146 if is_signing_algorithm_supported(key_id):
136147 key_base64 = key_data["key"]
8080
8181 def default_config(self, config_dir_path, server_name, **kwargs):
8282 log_config = os.path.join(config_dir_path, server_name + ".log.config")
83 return """
83 return """\
84 ## Logging ##
85
8486 # A yaml python logging config file
8587 #
8688 log_config: "%(log_config)s"
192194 else:
193195 def load_log_config():
194196 with open(log_config, 'r') as f:
195 logging.config.dictConfig(yaml.load(f))
197 logging.config.dictConfig(yaml.safe_load(f))
196198
197199 def sighup(*args):
198200 # it might be better to use a file watcher or something for this.
2323
2424 class MetricsConfig(Config):
2525 def read_config(self, config):
26 self.enable_metrics = config["enable_metrics"]
26 self.enable_metrics = config.get("enable_metrics", False)
2727 self.report_stats = config.get("report_stats", None)
2828 self.metrics_port = config.get("metrics_port")
2929 self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
4747
4848 # Enable collection and rendering of performance metrics
4949 #
50 enable_metrics: False
50 #enable_metrics: False
5151
5252 # Enable sentry integration
5353 # NOTE: While attempts are made to ensure that the logs don't contain
2121
2222 def read_config(self, config):
2323 password_config = config.get("password_config", {})
24 if password_config is None:
25 password_config = {}
26
2427 self.password_enabled = password_config.get("enabled", True)
2528 self.password_pepper = password_config.get("pepper", "")
2629
2730 def default_config(self, config_dir_path, server_name, **kwargs):
28 return """
29 # Enable password for login.
30 #
31 return """\
3132 password_config:
32 enabled: true
33 # Uncomment to disable password login
34 #
35 #enabled: false
36
3337 # Uncomment and change to a secret random string for extra security.
3438 # DO NOT CHANGE THIS AFTER INITIAL SETUP!
35 #pepper: ""
39 #
40 #pepper: "EVEN_MORE_SECRET"
3641 """
1414 from ._base import Config
1515
1616
17 class RateLimitConfig(object):
18 def __init__(self, config):
19 self.per_second = config.get("per_second", 0.17)
20 self.burst_count = config.get("burst_count", 3.0)
21
22
1723 class RatelimitConfig(Config):
1824
1925 def read_config(self, config):
20 self.rc_messages_per_second = config["rc_messages_per_second"]
21 self.rc_message_burst_count = config["rc_message_burst_count"]
26 self.rc_messages_per_second = config.get("rc_messages_per_second", 0.2)
27 self.rc_message_burst_count = config.get("rc_message_burst_count", 10.0)
2228
23 self.federation_rc_window_size = config["federation_rc_window_size"]
24 self.federation_rc_sleep_limit = config["federation_rc_sleep_limit"]
25 self.federation_rc_sleep_delay = config["federation_rc_sleep_delay"]
26 self.federation_rc_reject_limit = config["federation_rc_reject_limit"]
27 self.federation_rc_concurrent = config["federation_rc_concurrent"]
29 self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
30
31 rc_login_config = config.get("rc_login", {})
32 self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
33 self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {}))
34 self.rc_login_failed_attempts = RateLimitConfig(
35 rc_login_config.get("failed_attempts", {}),
36 )
37
38 self.federation_rc_window_size = config.get("federation_rc_window_size", 1000)
39 self.federation_rc_sleep_limit = config.get("federation_rc_sleep_limit", 10)
40 self.federation_rc_sleep_delay = config.get("federation_rc_sleep_delay", 500)
41 self.federation_rc_reject_limit = config.get("federation_rc_reject_limit", 50)
42 self.federation_rc_concurrent = config.get("federation_rc_concurrent", 3)
43
44 self.federation_rr_transactions_per_room_per_second = config.get(
45 "federation_rr_transactions_per_room_per_second", 50,
46 )
2847
2948 def default_config(self, **kwargs):
3049 return """\
3251
3352 # Number of messages a client can send per second
3453 #
35 rc_messages_per_second: 0.2
54 #rc_messages_per_second: 0.2
3655
3756 # Number of message a client can send before being throttled
3857 #
39 rc_message_burst_count: 10.0
58 #rc_message_burst_count: 10.0
59
60 # Ratelimiting settings for registration and login.
61 #
62 # Each ratelimiting configuration is made of two parameters:
63 # - per_second: number of requests a client can send per second.
64 # - burst_count: number of requests a client can send before being throttled.
65 #
66 # Synapse currently uses the following configurations:
67 # - one for registration that ratelimits registration requests based on the
68 # client's IP address.
69 # - one for login that ratelimits login requests based on the client's IP
70 # address.
71 # - one for login that ratelimits login requests based on the account the
72 # client is attempting to log into.
73 # - one for login that ratelimits login requests based on the account the
74 # client is attempting to log into, based on the amount of failed login
75 # attempts for this account.
76 #
77 # The defaults are as shown below.
78 #
79 #rc_registration:
80 # per_second: 0.17
81 # burst_count: 3
82 #
83 #rc_login:
84 # address:
85 # per_second: 0.17
86 # burst_count: 3
87 # account:
88 # per_second: 0.17
89 # burst_count: 3
90 # failed_attempts:
91 # per_second: 0.17
92 # burst_count: 3
4093
4194 # The federation window size in milliseconds
4295 #
43 federation_rc_window_size: 1000
96 #federation_rc_window_size: 1000
4497
4598 # The number of federation requests from a single server in a window
4699 # before the server will delay processing the request.
47100 #
48 federation_rc_sleep_limit: 10
101 #federation_rc_sleep_limit: 10
49102
50103 # The duration in milliseconds to delay processing events from
51104 # remote servers by if they go over the sleep limit.
52105 #
53 federation_rc_sleep_delay: 500
106 #federation_rc_sleep_delay: 500
54107
55108 # The maximum number of concurrent federation requests allowed
56109 # from a single server
57110 #
58 federation_rc_reject_limit: 50
111 #federation_rc_reject_limit: 50
59112
60113 # The number of federation requests to concurrently process from a
61114 # single server
62115 #
63 federation_rc_concurrent: 3
116 #federation_rc_concurrent: 3
117
118 # Target outgoing federation transaction frequency for sending read-receipts,
119 # per-room.
120 #
121 # If we end up trying to send out more read-receipts, they will get buffered up
122 # into fewer transactions.
123 #
124 #federation_rr_transactions_per_room_per_second: 50
64125 """
2323
2424 def read_config(self, config):
2525 self.enable_registration = bool(
26 strtobool(str(config["enable_registration"]))
26 strtobool(str(config.get("enable_registration", False)))
2727 )
2828 if "disable_registration" in config:
2929 self.enable_registration = not bool(
3535 self.registration_shared_secret = config.get("registration_shared_secret")
3636
3737 self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
38 self.trusted_third_party_id_servers = config["trusted_third_party_id_servers"]
38 self.trusted_third_party_id_servers = config.get(
39 "trusted_third_party_id_servers",
40 ["matrix.org", "vector.im"],
41 )
3942 self.default_identity_server = config.get("default_identity_server")
4043 self.allow_guest_access = config.get("allow_guest_access", False)
4144
6366
6467 return """\
6568 ## Registration ##
69 #
70 # Registration can be rate-limited using the parameters in the "Ratelimiting"
71 # section of this file.
6672
6773 # Enable registration for new users.
68 enable_registration: False
74 #
75 #enable_registration: false
6976
7077 # The user must provide all of the below types of 3PID when registering.
7178 #
7683 # Explicitly disable asking for MSISDNs from the registration
7784 # flow (overrides registrations_require_3pid if MSISDNs are set as required)
7885 #
79 #disable_msisdn_registration: True
86 #disable_msisdn_registration: true
8087
8188 # Mandate that users are only allowed to associate certain formats of
8289 # 3PIDs with accounts on this server.
8996 # - medium: msisdn
9097 # pattern: '\\+44'
9198
92 # If set, allows registration by anyone who also has the shared
93 # secret, even if registration is otherwise disabled.
99 # If set, allows registration of standard or admin accounts by anyone who
100 # has the shared secret, even if registration is otherwise disabled.
94101 #
95102 %(registration_shared_secret)s
96103
100107 # N.B. that increasing this will exponentially increase the time required
101108 # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
102109 #
103 bcrypt_rounds: 12
110 #bcrypt_rounds: 12
104111
105112 # Allows users to register as guests without a password/email/etc, and
106113 # participate in rooms hosted on this server which have been made
107114 # accessible to anonymous users.
108115 #
109 allow_guest_access: False
116 #allow_guest_access: false
110117
111118 # The identity server which we suggest that clients should use when users log
112119 # in on this server.
122129 # Also defines the ID server which will be called when an account is
123130 # deactivated (one will be picked arbitrarily).
124131 #
125 trusted_third_party_id_servers:
126 - matrix.org
127 - vector.im
132 #trusted_third_party_id_servers:
133 # - matrix.org
134 # - vector.im
128135
129136 # Users who register on this homeserver will automatically be joined
130137 # to these rooms
138145 # Setting to false means that if the rooms are not manually created,
139146 # users cannot be auto-joined since they do not exist.
140147 #
141 autocreate_auto_join_rooms: true
148 #autocreate_auto_join_rooms: true
142149 """ % locals()
143150
144151 def add_arguments(self, parser):
1717 from synapse.util.module_loader import load_module
1818
1919 from ._base import Config, ConfigError
20
21 DEFAULT_THUMBNAIL_SIZES = [
22 {
23 "width": 32,
24 "height": 32,
25 "method": "crop",
26 }, {
27 "width": 96,
28 "height": 96,
29 "method": "crop",
30 }, {
31 "width": 320,
32 "height": 240,
33 "method": "scale",
34 }, {
35 "width": 640,
36 "height": 480,
37 "method": "scale",
38 }, {
39 "width": 800,
40 "height": 600,
41 "method": "scale"
42 },
43 ]
44
45 THUMBNAIL_SIZE_YAML = """\
46 # - width: %(width)i
47 # height: %(height)i
48 # method: %(method)s
49 """
2050
2151 MISSING_NETADDR = (
2252 "Missing netaddr library. This is required for URL preview API."
76106
77107 class ContentRepositoryConfig(Config):
78108 def read_config(self, config):
79 self.max_upload_size = self.parse_size(config["max_upload_size"])
80 self.max_image_pixels = self.parse_size(config["max_image_pixels"])
81 self.max_spider_size = self.parse_size(config["max_spider_size"])
109 self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
110 self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
111 self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
82112
83113 self.media_store_path = self.ensure_directory(config["media_store_path"])
84114
138168 )
139169
140170 self.uploads_path = self.ensure_directory(config["uploads_path"])
141 self.dynamic_thumbnails = config["dynamic_thumbnails"]
171 self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
142172 self.thumbnail_requirements = parse_thumbnail_requirements(
143 config["thumbnail_sizes"]
173 config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES),
144174 )
145175 self.url_preview_enabled = config.get("url_preview_enabled", False)
146176 if self.url_preview_enabled:
155185 except ImportError:
156186 raise ConfigError(MISSING_NETADDR)
157187
158 if "url_preview_ip_range_blacklist" in config:
159 self.url_preview_ip_range_blacklist = IPSet(
160 config["url_preview_ip_range_blacklist"]
161 )
162 else:
188 if "url_preview_ip_range_blacklist" not in config:
163189 raise ConfigError(
164190 "For security, you must specify an explicit target IP address "
165191 "blacklist in url_preview_ip_range_blacklist for url previewing "
166192 "to work"
167193 )
168194
195 self.url_preview_ip_range_blacklist = IPSet(
196 config["url_preview_ip_range_blacklist"]
197 )
198
199 # we always blacklist '0.0.0.0' and '::', which are supposed to be
200 # unroutable addresses.
201 self.url_preview_ip_range_blacklist.update(['0.0.0.0', '::'])
202
169203 self.url_preview_ip_range_whitelist = IPSet(
170204 config.get("url_preview_ip_range_whitelist", ())
171205 )
177211 def default_config(self, data_dir_path, **kwargs):
178212 media_store = os.path.join(data_dir_path, "media_store")
179213 uploads_path = os.path.join(data_dir_path, "uploads")
214
215 formatted_thumbnail_sizes = "".join(
216 THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES
217 )
218 # strip final NL
219 formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1]
220
180221 return r"""
181222 # Directory where uploaded images and attachments are stored.
182223 #
203244
204245 # The largest allowed upload size in bytes
205246 #
206 max_upload_size: "10M"
247 #max_upload_size: 10M
207248
208249 # Maximum number of pixels that will be thumbnailed
209250 #
210 max_image_pixels: "32M"
251 #max_image_pixels: 32M
211252
212253 # Whether to generate new thumbnails on the fly to precisely match
213254 # the resolution requested by the client. If true then whenever
215256 # generate a new thumbnail. If false the server will pick a thumbnail
216257 # from a precalculated list.
217258 #
218 dynamic_thumbnails: false
259 #dynamic_thumbnails: false
219260
220261 # List of thumbnails to precalculate when an image is uploaded.
221262 #
222 thumbnail_sizes:
223 - width: 32
224 height: 32
225 method: crop
226 - width: 96
227 height: 96
228 method: crop
229 - width: 320
230 height: 240
231 method: scale
232 - width: 640
233 height: 480
234 method: scale
235 - width: 800
236 height: 600
237 method: scale
238
239 # Is the preview URL API enabled? If enabled, you *must* specify
240 # an explicit url_preview_ip_range_blacklist of IPs that the spider is
241 # denied from accessing.
242 #
243 url_preview_enabled: False
263 #thumbnail_sizes:
264 %(formatted_thumbnail_sizes)s
265
266 # Is the preview URL API enabled?
267 #
268 # 'false' by default: uncomment the following to enable it (and specify a
269 # url_preview_ip_range_blacklist blacklist).
270 #
271 #url_preview_enabled: true
244272
245273 # List of IP address CIDR ranges that the URL preview spider is denied
246274 # from accessing. There are no defaults: you must explicitly
249277 # to connect to, otherwise anyone in any Matrix room could cause your
250278 # synapse to issue arbitrary GET requests to your internal services,
251279 # causing serious security issues.
280 #
281 # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
282 # listed here, since they correspond to unroutable addresses.)
283 #
284 # This must be specified if url_preview_enabled is set. It is recommended that
285 # you uncomment the following list as a starting point.
252286 #
253287 #url_preview_ip_range_blacklist:
254288 # - '127.0.0.0/8'
260294 # - '::1/128'
261295 # - 'fe80::/64'
262296 # - 'fc00::/7'
263 #
297
264298 # List of IP address CIDR ranges that the URL preview spider is allowed
265299 # to access even if they are specified in url_preview_ip_range_blacklist.
266300 # This is useful for specifying exceptions to wide-ranging blacklisted
305339 # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
306340
307341 # The largest allowed URL preview spidering size in bytes
308 max_spider_size: "10M"
309
342 #
343 #max_spider_size: 10M
310344 """ % locals()
1919
2020 class RoomDirectoryConfig(Config):
2121 def read_config(self, config):
22 self.enable_room_list_search = config.get(
23 "enable_room_list_search", True,
24 )
25
2226 alias_creation_rules = config.get("alias_creation_rules")
2327
2428 if alias_creation_rules is not None:
5357
5458 def default_config(self, config_dir_path, server_name, **kwargs):
5559 return """
60 # Uncomment to disable searching the public room list. When disabled
61 # blocks searching local and remote room lists for local and remote
62 # users by always returning an empty list for all queries.
63 #
64 #enable_room_list_search: false
65
5666 # The `alias_creation` option controls who's allowed to create aliases
5767 # on this server.
5868 #
6363 }
6464
6565 def default_config(self, config_dir_path, server_name, **kwargs):
66 return """
66 return """\
6767 # Enable SAML2 for registration and login. Uses pysaml2.
6868 #
6969 # `sp_config` is the configuration for the pysaml2 Service Provider.
4444
4545 self.pid_file = self.abspath(config.get("pid_file"))
4646 self.web_client_location = config.get("web_client_location", None)
47 self.soft_file_limit = config["soft_file_limit"]
47 self.soft_file_limit = config.get("soft_file_limit", 0)
4848 self.daemonize = config.get("daemonize")
4949 self.print_pidfile = config.get("print_pidfile")
5050 self.user_agent_suffix = config.get("user_agent_suffix")
124124 if self.public_baseurl[-1] != '/':
125125 self.public_baseurl += '/'
126126 self.start_pushers = config.get("start_pushers", True)
127
128 # (undocumented) option for torturing the worker-mode replication a bit,
129 # for testing. The value defines the number of milliseconds to pause before
130 # sending out any replication updates.
131 self.replication_torture_level = config.get("replication_torture_level")
127132
128133 self.listeners = []
129134 for listener in config.get("listeners", []):
259264 # This is used by remote servers to connect to this server,
260265 # e.g. matrix.org, localhost:8080, etc.
261266 # This is also the last part of your UserID.
267 #
262268 server_name: "%(server_name)s"
263269
264270 # When running as a daemon, the file to store the pid in
271 #
265272 pid_file: %(pid_file)s
266273
267274 # CPU affinity mask. Setting this restricts the CPUs on which the
303310 # Set the soft limit on the number of file descriptors synapse can use
304311 # Zero is used to indicate synapse should set the soft limit to the
305312 # hard limit.
306 soft_file_limit: 0
313 #
314 #soft_file_limit: 0
307315
308316 # Set to false to disable presence tracking on this homeserver.
309 use_presence: true
317 #
318 #use_presence: false
310319
311320 # The GC threshold parameters to pass to `gc.set_threshold`, if defined
312321 #
180180 # See 'ACME support' below to enable auto-provisioning this certificate via
181181 # Let's Encrypt.
182182 #
183 # If supplying your own, be sure to use a `.pem` file that includes the
184 # full certificate chain including any intermediate certificates (for
185 # instance, if using certbot, use `fullchain.pem` as your certificate,
186 # not `cert.pem`).
187 #
183188 #tls_certificate_path: "%(tls_certificate_path)s"
184189
185190 # PEM-encoded private key for TLS
2121 """
2222
2323 def read_config(self, config):
24 self.user_directory_search_enabled = True
2425 self.user_directory_search_all_users = False
2526 user_directory_config = config.get("user_directory", None)
2627 if user_directory_config:
28 self.user_directory_search_enabled = (
29 user_directory_config.get("enabled", True)
30 )
2731 self.user_directory_search_all_users = (
2832 user_directory_config.get("search_all_users", False)
2933 )
3236 return """
3337 # User Directory configuration
3438 #
39 # 'enabled' defines whether users can search the user directory. If
40 # false then empty responses are returned to all queries. Defaults to
41 # true.
42 #
3543 # 'search_all_users' defines whether to search all users visible to your HS
3644 # when searching the user directory, rather than limiting to users visible
3745 # in public rooms. Defaults to false. If you set it True, you'll have to run
3947 # on your database to tell it to rebuild the user_directory search indexes.
4048 #
4149 #user_directory:
50 # enabled: true
4251 # search_all_users: false
4352 """
2121 self.turn_shared_secret = config.get("turn_shared_secret")
2222 self.turn_username = config.get("turn_username")
2323 self.turn_password = config.get("turn_password")
24 self.turn_user_lifetime = self.parse_duration(config["turn_user_lifetime"])
24 self.turn_user_lifetime = self.parse_duration(
25 config.get("turn_user_lifetime", "1h"),
26 )
2527 self.turn_allow_guests = config.get("turn_allow_guests", True)
2628
2729 def default_config(self, **kwargs):
4446
4547 # How long generated TURN credentials last
4648 #
47 turn_user_lifetime: "1h"
49 #turn_user_lifetime: 1h
4850
4951 # Whether guests should be allowed to use the TURN server.
5052 # This defaults to True, otherwise VoIP will be unreliable for guests.
5254 # connect to arbitrary endpoints without having first signed up for a
5355 # valid account (e.g. by passing a CAPTCHA).
5456 #
55 turn_allow_guests: True
57 #turn_allow_guests: True
5658 """
2727 if self.worker_app == "synapse.app.homeserver":
2828 self.worker_app = None
2929
30 self.worker_listeners = config.get("worker_listeners")
30 self.worker_listeners = config.get("worker_listeners", [])
3131 self.worker_daemonize = config.get("worker_daemonize")
3232 self.worker_pid_file = config.get("worker_pid_file")
3333 self.worker_log_file = config.get("worker_log_file")
4747 self.worker_main_http_uri = config.get("worker_main_http_uri", None)
4848 self.worker_cpu_affinity = config.get("worker_cpu_affinity")
4949
50 # This option is really only here to support `--manhole` command line
51 # argument.
52 manhole = config.get("worker_manhole")
53 if manhole:
54 self.worker_listeners.append({
55 "port": manhole,
56 "bind_addresses": ["127.0.0.1"],
57 "type": "manhole",
58 "tls": False,
59 })
60
5061 if self.worker_listeners:
5162 for listener in self.worker_listeners:
5263 bind_address = listener.pop("bind_address", None)
5667 bind_addresses.append(bind_address)
5768 elif not bind_addresses:
5869 bind_addresses.append('')
70
71 def read_arguments(self, args):
72 # We support a bunch of command line arguments that override options in
73 # the config. A lot of these options have a worker_* prefix when running
74 # on workers so we also have to override them when command line options
75 # are specified.
76
77 if args.daemonize is not None:
78 self.worker_daemonize = args.daemonize
79 if args.log_config is not None:
80 self.worker_log_config = args.log_config
81 if args.log_file is not None:
82 self.worker_log_file = args.log_file
83 if args.manhole is not None:
84 self.worker_manhole = args.worker_manhole
685685 try:
686686 with PreserveLoggingContext():
687687 _, key_id, verify_key = yield verify_request.deferred
688 except (IOError, RequestSendFailed) as e:
688 except KeyLookupError as e:
689689 logger.warn(
690 "Got IOError when downloading keys for %s: %s %s",
690 "Failed to download keys for %s: %s %s",
691691 server_name, type(e).__name__, str(e),
692692 )
693693 raise SynapseError(
7676 """
7777 return getattr(self, "recheck_redaction", False)
7878
79 def is_soft_failed(self):
80 """Whether the event has been soft failed.
81
82 Soft failed events should be handled as usual, except:
83 1. They should not go down sync or event streams, or generally
84 sent to clients.
85 2. They should not be added to the forward extremities (and
86 therefore not to current state).
87
88 Returns:
89 bool
90 """
91 return getattr(self, "soft_failed", False)
92
7993
8094 def _event_dict_property(key):
8195 # We want to be able to use hasattr with the event dict properties.
126140 origin = _event_dict_property("origin")
127141 origin_server_ts = _event_dict_property("origin_server_ts")
128142 prev_events = _event_dict_property("prev_events")
129 prev_state = _event_dict_property("prev_state")
130143 redacts = _event_dict_property("redacts")
131144 room_id = _event_dict_property("room_id")
132145 sender = _event_dict_property("sender")
885885 def on_edu(self, edu_type, origin, content):
886886 """Overrides FederationHandlerRegistry
887887 """
888 if not self.config.use_presence and edu_type == "m.presence":
889 return
890
888891 handler = self.edu_handlers.get(edu_type)
889892 if handler:
890893 return super(ReplicationFederationHandlerRegistry, self).on_edu(
4545
4646
4747 class FederationRemoteSendQueue(object):
48 """A drop in replacement for TransactionQueue"""
48 """A drop in replacement for FederationSender"""
4949
5050 def __init__(self, hs):
5151 self.server_name = hs.hostname
153153 del self.device_messages[key]
154154
155155 def notify_new_events(self, current_id):
156 """As per TransactionQueue"""
156 """As per FederationSender"""
157157 # We don't need to replicate this as it gets sent down a different
158158 # stream.
159159 pass
160160
161 def send_edu(self, destination, edu_type, content, key=None):
162 """As per TransactionQueue"""
161 def build_and_send_edu(self, destination, edu_type, content, key=None):
162 """As per FederationSender"""
163 if destination == self.server_name:
164 logger.info("Not sending EDU to ourselves")
165 return
166
163167 pos = self._next_pos()
164168
165169 edu = Edu(
178182
179183 self.notifier.on_new_replication_data()
180184
185 def send_read_receipt(self, receipt):
186 """As per FederationSender
187
188 Args:
189 receipt (synapse.types.ReadReceipt):
190 """
191 # nothing to do here: the replication listener will handle it.
192 pass
193
181194 def send_presence(self, states):
182 """As per TransactionQueue
195 """As per FederationSender
183196
184197 Args:
185198 states (list(UserPresenceState))
196209 self.notifier.on_new_replication_data()
197210
198211 def send_device_messages(self, destination):
199 """As per TransactionQueue"""
212 """As per FederationSender"""
200213 pos = self._next_pos()
201214 self.device_messages[pos] = destination
202215 self.notifier.on_new_replication_data()
434447 transaction queue ready for sending to the relevant homeservers.
435448
436449 Args:
437 transaction_queue (TransactionQueue)
450 transaction_queue (FederationSender)
438451 rows (list(synapse.replication.tcp.streams.FederationStreamRow))
439452 """
440453
464477
465478 for destination, edu_map in iteritems(buff.keyed_edus):
466479 for key, edu in edu_map.items():
467 transaction_queue.send_edu(
468 edu.destination, edu.edu_type, edu.content, key=key,
469 )
480 transaction_queue.send_edu(edu, key)
470481
471482 for destination, edu_list in iteritems(buff.edus):
472483 for edu in edu_list:
473 transaction_queue.send_edu(
474 edu.destination, edu.edu_type, edu.content, key=None,
475 )
484 transaction_queue.send_edu(edu, None)
476485
477486 for destination in buff.device_destinations:
478487 transaction_queue.send_device_messages(destination)
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16
17 from six import itervalues
18
19 from prometheus_client import Counter
20
21 from twisted.internet import defer
22
23 import synapse.metrics
24 from synapse.federation.sender.per_destination_queue import PerDestinationQueue
25 from synapse.federation.sender.transaction_manager import TransactionManager
26 from synapse.federation.units import Edu
27 from synapse.handlers.presence import get_interested_remotes
28 from synapse.metrics import (
29 LaterGauge,
30 event_processing_loop_counter,
31 event_processing_loop_room_count,
32 events_processed_counter,
33 )
34 from synapse.metrics.background_process_metrics import run_as_background_process
35 from synapse.util import logcontext
36 from synapse.util.metrics import measure_func
37
38 logger = logging.getLogger(__name__)
39
40 sent_pdus_destination_dist_count = Counter(
41 "synapse_federation_client_sent_pdu_destinations:count",
42 "Number of PDUs queued for sending to one or more destinations",
43 )
44
45 sent_pdus_destination_dist_total = Counter(
46 "synapse_federation_client_sent_pdu_destinations:total", ""
47 "Total number of PDUs queued for sending across all destinations",
48 )
49
50
51 class FederationSender(object):
52 def __init__(self, hs):
53 self.hs = hs
54 self.server_name = hs.hostname
55
56 self.store = hs.get_datastore()
57 self.state = hs.get_state_handler()
58
59 self.clock = hs.get_clock()
60 self.is_mine_id = hs.is_mine_id
61
62 self._transaction_manager = TransactionManager(hs)
63
64 # map from destination to PerDestinationQueue
65 self._per_destination_queues = {} # type: dict[str, PerDestinationQueue]
66
67 LaterGauge(
68 "synapse_federation_transaction_queue_pending_destinations",
69 "",
70 [],
71 lambda: sum(
72 1 for d in self._per_destination_queues.values()
73 if d.transmission_loop_running
74 ),
75 )
76
77 # Map of user_id -> UserPresenceState for all the pending presence
78 # to be sent out by user_id. Entries here get processed and put in
79 # pending_presence_by_dest
80 self.pending_presence = {}
81
82 LaterGauge(
83 "synapse_federation_transaction_queue_pending_pdus",
84 "",
85 [],
86 lambda: sum(
87 d.pending_pdu_count() for d in self._per_destination_queues.values()
88 ),
89 )
90 LaterGauge(
91 "synapse_federation_transaction_queue_pending_edus",
92 "",
93 [],
94 lambda: sum(
95 d.pending_edu_count() for d in self._per_destination_queues.values()
96 ),
97 )
98
99 self._order = 1
100
101 self._is_processing = False
102 self._last_poked_id = -1
103
104 self._processing_pending_presence = False
105
106 # map from room_id to a set of PerDestinationQueues which we believe are
107 # awaiting a call to flush_read_receipts_for_room. The presence of an entry
108 # here for a given room means that we are rate-limiting RR flushes to that room,
109 # and that there is a pending call to _flush_rrs_for_room in the system.
110 self._queues_awaiting_rr_flush_by_room = {
111 } # type: dict[str, set[PerDestinationQueue]]
112
113 self._rr_txn_interval_per_room_ms = (
114 1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second
115 )
116
117 def _get_per_destination_queue(self, destination):
118 """Get or create a PerDestinationQueue for the given destination
119
120 Args:
121 destination (str): server_name of remote server
122
123 Returns:
124 PerDestinationQueue
125 """
126 queue = self._per_destination_queues.get(destination)
127 if not queue:
128 queue = PerDestinationQueue(self.hs, self._transaction_manager, destination)
129 self._per_destination_queues[destination] = queue
130 return queue
131
132 def notify_new_events(self, current_id):
133 """This gets called when we have some new events we might want to
134 send out to other servers.
135 """
136 self._last_poked_id = max(current_id, self._last_poked_id)
137
138 if self._is_processing:
139 return
140
141 # fire off a processing loop in the background
142 run_as_background_process(
143 "process_event_queue_for_federation",
144 self._process_event_queue_loop,
145 )
146
147 @defer.inlineCallbacks
148 def _process_event_queue_loop(self):
149 try:
150 self._is_processing = True
151 while True:
152 last_token = yield self.store.get_federation_out_pos("events")
153 next_token, events = yield self.store.get_all_new_events_stream(
154 last_token, self._last_poked_id, limit=100,
155 )
156
157 logger.debug("Handling %s -> %s", last_token, next_token)
158
159 if not events and next_token >= self._last_poked_id:
160 break
161
162 @defer.inlineCallbacks
163 def handle_event(event):
164 # Only send events for this server.
165 send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
166 is_mine = self.is_mine_id(event.sender)
167 if not is_mine and send_on_behalf_of is None:
168 return
169
170 try:
171 # Get the state from before the event.
172 # We need to make sure that this is the state from before
173 # the event and not from after it.
174 # Otherwise if the last member on a server in a room is
175 # banned then it won't receive the event because it won't
176 # be in the room after the ban.
177 destinations = yield self.state.get_current_hosts_in_room(
178 event.room_id, latest_event_ids=event.prev_event_ids(),
179 )
180 except Exception:
181 logger.exception(
182 "Failed to calculate hosts in room for event: %s",
183 event.event_id,
184 )
185 return
186
187 destinations = set(destinations)
188
189 if send_on_behalf_of is not None:
190 # If we are sending the event on behalf of another server
191 # then it already has the event and there is no reason to
192 # send the event to it.
193 destinations.discard(send_on_behalf_of)
194
195 logger.debug("Sending %s to %r", event, destinations)
196
197 self._send_pdu(event, destinations)
198
199 @defer.inlineCallbacks
200 def handle_room_events(events):
201 for event in events:
202 yield handle_event(event)
203
204 events_by_room = {}
205 for event in events:
206 events_by_room.setdefault(event.room_id, []).append(event)
207
208 yield logcontext.make_deferred_yieldable(defer.gatherResults(
209 [
210 logcontext.run_in_background(handle_room_events, evs)
211 for evs in itervalues(events_by_room)
212 ],
213 consumeErrors=True
214 ))
215
216 yield self.store.update_federation_out_pos(
217 "events", next_token
218 )
219
220 if events:
221 now = self.clock.time_msec()
222 ts = yield self.store.get_received_ts(events[-1].event_id)
223
224 synapse.metrics.event_processing_lag.labels(
225 "federation_sender").set(now - ts)
226 synapse.metrics.event_processing_last_ts.labels(
227 "federation_sender").set(ts)
228
229 events_processed_counter.inc(len(events))
230
231 event_processing_loop_room_count.labels(
232 "federation_sender"
233 ).inc(len(events_by_room))
234
235 event_processing_loop_counter.labels("federation_sender").inc()
236
237 synapse.metrics.event_processing_positions.labels(
238 "federation_sender").set(next_token)
239
240 finally:
241 self._is_processing = False
242
243 def _send_pdu(self, pdu, destinations):
244 # We loop through all destinations to see whether we already have
245 # a transaction in progress. If we do, stick it in the pending_pdus
246 # table and we'll get back to it later.
247
248 order = self._order
249 self._order += 1
250
251 destinations = set(destinations)
252 destinations.discard(self.server_name)
253 logger.debug("Sending to: %s", str(destinations))
254
255 if not destinations:
256 return
257
258 sent_pdus_destination_dist_total.inc(len(destinations))
259 sent_pdus_destination_dist_count.inc()
260
261 for destination in destinations:
262 self._get_per_destination_queue(destination).send_pdu(pdu, order)
263
264 @defer.inlineCallbacks
265 def send_read_receipt(self, receipt):
266 """Send a RR to any other servers in the room
267
268 Args:
269 receipt (synapse.types.ReadReceipt): receipt to be sent
270 """
271
272 # Some background on the rate-limiting going on here.
273 #
274 # It turns out that if we attempt to send out RRs as soon as we get them from
275 # a client, then we end up trying to do several hundred Hz of federation
276 # transactions. (The number of transactions scales as O(N^2) on the size of a
277 # room, since in a large room we have both more RRs coming in, and more servers
278 # to send them to.)
279 #
280 # This leads to a lot of CPU load, and we end up getting behind. The solution
281 # currently adopted is as follows:
282 #
283 # The first receipt in a given room is sent out immediately, at time T0. Any
284 # further receipts are, in theory, batched up for N seconds, where N is calculated
285 # based on the number of servers in the room to achieve a transaction frequency
286 # of around 50Hz. So, for example, if there were 100 servers in the room, then
287 # N would be 100 / 50Hz = 2 seconds.
288 #
289 # Then, after T+N, we flush out any receipts that have accumulated, and restart
290 # the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
291 # we stop the cycle and go back to the start.
292 #
293 # However, in practice, it is often possible to flush out receipts earlier: in
294 # particular, if we are sending a transaction to a given server anyway (for
295 # example, because we have a PDU or a RR in another room to send), then we may
296 # as well send out all of the pending RRs for that server. So it may be that
297 # by the time we get to T+N, we don't actually have any RRs left to send out.
298 # Nevertheless we continue to buffer up RRs for the room in question until we
299 # reach the point that no RRs arrive between timer ticks.
300 #
301 # For even more background, see https://github.com/matrix-org/synapse/issues/4730.
302
303 room_id = receipt.room_id
304
305 # Work out which remote servers should be poked and poke them.
306 domains = yield self.state.get_current_hosts_in_room(room_id)
307 domains = [d for d in domains if d != self.server_name]
308 if not domains:
309 return
310
311 queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(
312 room_id
313 )
314
315 # if there is no flush yet scheduled, we will send out these receipts with
316 # immediate flushes, and schedule the next flush for this room.
317 if queues_pending_flush is not None:
318 logger.debug("Queuing receipt for: %r", domains)
319 else:
320 logger.debug("Sending receipt to: %r", domains)
321 self._schedule_rr_flush_for_room(room_id, len(domains))
322
323 for domain in domains:
324 queue = self._get_per_destination_queue(domain)
325 queue.queue_read_receipt(receipt)
326
327 # if there is already a RR flush pending for this room, then make sure this
328 # destination is registered for the flush
329 if queues_pending_flush is not None:
330 queues_pending_flush.add(queue)
331 else:
332 queue.flush_read_receipts_for_room(room_id)
333
334 def _schedule_rr_flush_for_room(self, room_id, n_domains):
335 # that is going to cause approximately len(domains) transactions, so now back
336 # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
337 backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
338
339 logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
340 self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
341 self._queues_awaiting_rr_flush_by_room[room_id] = set()
342
343 def _flush_rrs_for_room(self, room_id):
344 queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
345 logger.debug("Flushing RRs in %s to %s", room_id, queues)
346
347 if not queues:
348 # no more RRs arrived for this room; we are done.
349 return
350
351 # schedule the next flush
352 self._schedule_rr_flush_for_room(room_id, len(queues))
353
354 for queue in queues:
355 queue.flush_read_receipts_for_room(room_id)
356
357 @logcontext.preserve_fn # the caller should not yield on this
358 @defer.inlineCallbacks
359 def send_presence(self, states):
360 """Send the new presence states to the appropriate destinations.
361
362 This actually queues up the presence states ready for sending and
363 triggers a background task to process them and send out the transactions.
364
365 Args:
366 states (list(UserPresenceState))
367 """
368 if not self.hs.config.use_presence:
369 # No-op if presence is disabled.
370 return
371
372 # First we queue up the new presence by user ID, so multiple presence
373 # updates in quick successtion are correctly handled
374 # We only want to send presence for our own users, so lets always just
375 # filter here just in case.
376 self.pending_presence.update({
377 state.user_id: state for state in states
378 if self.is_mine_id(state.user_id)
379 })
380
381 # We then handle the new pending presence in batches, first figuring
382 # out the destinations we need to send each state to and then poking it
383 # to attempt a new transaction. We linearize this so that we don't
384 # accidentally mess up the ordering and send multiple presence updates
385 # in the wrong order
386 if self._processing_pending_presence:
387 return
388
389 self._processing_pending_presence = True
390 try:
391 while True:
392 states_map = self.pending_presence
393 self.pending_presence = {}
394
395 if not states_map:
396 break
397
398 yield self._process_presence_inner(list(states_map.values()))
399 except Exception:
400 logger.exception("Error sending presence states to servers")
401 finally:
402 self._processing_pending_presence = False
403
404 @measure_func("txnqueue._process_presence")
405 @defer.inlineCallbacks
406 def _process_presence_inner(self, states):
407 """Given a list of states populate self.pending_presence_by_dest and
408 poke to send a new transaction to each destination
409
410 Args:
411 states (list(UserPresenceState))
412 """
413 hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
414
415 for destinations, states in hosts_and_states:
416 for destination in destinations:
417 if destination == self.server_name:
418 continue
419 self._get_per_destination_queue(destination).send_presence(states)
420
421 def build_and_send_edu(self, destination, edu_type, content, key=None):
422 """Construct an Edu object, and queue it for sending
423
424 Args:
425 destination (str): name of server to send to
426 edu_type (str): type of EDU to send
427 content (dict): content of EDU
428 key (Any|None): clobbering key for this edu
429 """
430 if destination == self.server_name:
431 logger.info("Not sending EDU to ourselves")
432 return
433
434 edu = Edu(
435 origin=self.server_name,
436 destination=destination,
437 edu_type=edu_type,
438 content=content,
439 )
440
441 self.send_edu(edu, key)
442
443 def send_edu(self, edu, key):
444 """Queue an EDU for sending
445
446 Args:
447 edu (Edu): edu to send
448 key (Any|None): clobbering key for this edu
449 """
450 queue = self._get_per_destination_queue(edu.destination)
451 if key:
452 queue.send_keyed_edu(edu, key)
453 else:
454 queue.send_edu(edu)
455
456 def send_device_messages(self, destination):
457 if destination == self.server_name:
458 logger.info("Not sending device update to ourselves")
459 return
460
461 self._get_per_destination_queue(destination).attempt_new_transaction()
462
463 def get_current_token(self):
464 return 0
0 # -*- coding: utf-8 -*-
1 # Copyright 2014-2016 OpenMarket Ltd
2 # Copyright 2019 New Vector Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 import datetime
16 import logging
17
18 from prometheus_client import Counter
19
20 from twisted.internet import defer
21
22 from synapse.api.errors import (
23 FederationDeniedError,
24 HttpResponseException,
25 RequestSendFailed,
26 )
27 from synapse.events import EventBase
28 from synapse.federation.units import Edu
29 from synapse.handlers.presence import format_user_presence_state
30 from synapse.metrics import sent_transactions_counter
31 from synapse.metrics.background_process_metrics import run_as_background_process
32 from synapse.storage import UserPresenceState
33 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
34
35 logger = logging.getLogger(__name__)
36
37
38 sent_edus_counter = Counter(
39 "synapse_federation_client_sent_edus",
40 "Total number of EDUs successfully sent",
41 )
42
43 sent_edus_by_type = Counter(
44 "synapse_federation_client_sent_edus_by_type",
45 "Number of sent EDUs successfully sent, by event type",
46 ["type"],
47 )
48
49
50 class PerDestinationQueue(object):
51 """
52 Manages the per-destination transmission queues.
53
54 Args:
55 hs (synapse.HomeServer):
56 transaction_sender (TransactionManager):
57 destination (str): the server_name of the destination that we are managing
58 transmission for.
59 """
60 def __init__(self, hs, transaction_manager, destination):
61 self._server_name = hs.hostname
62 self._clock = hs.get_clock()
63 self._store = hs.get_datastore()
64 self._transaction_manager = transaction_manager
65
66 self._destination = destination
67 self.transmission_loop_running = False
68
69 # a list of tuples of (pending pdu, order)
70 self._pending_pdus = [] # type: list[tuple[EventBase, int]]
71 self._pending_edus = [] # type: list[Edu]
72
73 # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
74 # based on their key (e.g. typing events by room_id)
75 # Map of (edu_type, key) -> Edu
76 self._pending_edus_keyed = {} # type: dict[tuple[str, str], Edu]
77
78 # Map of user_id -> UserPresenceState of pending presence to be sent to this
79 # destination
80 self._pending_presence = {} # type: dict[str, UserPresenceState]
81
82 # room_id -> receipt_type -> user_id -> receipt_dict
83 self._pending_rrs = {}
84 self._rrs_pending_flush = False
85
86 # stream_id of last successfully sent to-device message.
87 # NB: may be a long or an int.
88 self._last_device_stream_id = 0
89
90 # stream_id of last successfully sent device list update.
91 self._last_device_list_stream_id = 0
92
93 def __str__(self):
94 return "PerDestinationQueue[%s]" % self._destination
95
96 def pending_pdu_count(self):
97 return len(self._pending_pdus)
98
99 def pending_edu_count(self):
100 return (
101 len(self._pending_edus)
102 + len(self._pending_presence)
103 + len(self._pending_edus_keyed)
104 )
105
106 def send_pdu(self, pdu, order):
107 """Add a PDU to the queue, and start the transmission loop if neccessary
108
109 Args:
110 pdu (EventBase): pdu to send
111 order (int):
112 """
113 self._pending_pdus.append((pdu, order))
114 self.attempt_new_transaction()
115
116 def send_presence(self, states):
117 """Add presence updates to the queue. Start the transmission loop if neccessary.
118
119 Args:
120 states (iterable[UserPresenceState]): presence to send
121 """
122 self._pending_presence.update({
123 state.user_id: state for state in states
124 })
125 self.attempt_new_transaction()
126
127 def queue_read_receipt(self, receipt):
128 """Add a RR to the list to be sent. Doesn't start the transmission loop yet
129 (see flush_read_receipts_for_room)
130
131 Args:
132 receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued
133 """
134 self._pending_rrs.setdefault(
135 receipt.room_id, {},
136 ).setdefault(
137 receipt.receipt_type, {}
138 )[receipt.user_id] = {
139 "event_ids": receipt.event_ids,
140 "data": receipt.data,
141 }
142
143 def flush_read_receipts_for_room(self, room_id):
144 # if we don't have any read-receipts for this room, it may be that we've already
145 # sent them out, so we don't need to flush.
146 if room_id not in self._pending_rrs:
147 return
148 self._rrs_pending_flush = True
149 self.attempt_new_transaction()
150
151 def send_keyed_edu(self, edu, key):
152 self._pending_edus_keyed[(edu.edu_type, key)] = edu
153 self.attempt_new_transaction()
154
155 def send_edu(self, edu):
156 self._pending_edus.append(edu)
157 self.attempt_new_transaction()
158
159 def attempt_new_transaction(self):
160 """Try to start a new transaction to this destination
161
162 If there is already a transaction in progress to this destination,
163 returns immediately. Otherwise kicks off the process of sending a
164 transaction in the background.
165 """
166 # list of (pending_pdu, deferred, order)
167 if self.transmission_loop_running:
168 # XXX: this can get stuck on by a never-ending
169 # request at which point pending_pdus just keeps growing.
170 # we need application-layer timeouts of some flavour for these
171 # requests
172 logger.debug(
173 "TX [%s] Transaction already in progress",
174 self._destination
175 )
176 return
177
178 logger.debug("TX [%s] Starting transaction loop", self._destination)
179
180 run_as_background_process(
181 "federation_transaction_transmission_loop",
182 self._transaction_transmission_loop,
183 )
184
185 @defer.inlineCallbacks
186 def _transaction_transmission_loop(self):
187 pending_pdus = []
188 try:
189 self.transmission_loop_running = True
190
191 # This will throw if we wouldn't retry. We do this here so we fail
192 # quickly, but we will later check this again in the http client,
193 # hence why we throw the result away.
194 yield get_retry_limiter(self._destination, self._clock, self._store)
195
196 pending_pdus = []
197 while True:
198 device_message_edus, device_stream_id, dev_list_id = (
199 yield self._get_new_device_messages()
200 )
201
202 # BEGIN CRITICAL SECTION
203 #
204 # In order to avoid a race condition, we need to make sure that
205 # the following code (from popping the queues up to the point
206 # where we decide if we actually have any pending messages) is
207 # atomic - otherwise new PDUs or EDUs might arrive in the
208 # meantime, but not get sent because we hold the
209 # transmission_loop_running flag.
210
211 pending_pdus = self._pending_pdus
212
213 # We can include at most 50 PDUs per transaction
214 pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]
215
216 pending_edus = []
217
218 pending_edus.extend(self._get_rr_edus(force_flush=False))
219
220 # We can include at most 100 EDUs per transaction
221 pending_edus.extend(self._pop_pending_edus(100 - len(pending_edus)))
222
223 pending_edus.extend(
224 self._pending_edus_keyed.values()
225 )
226
227 self._pending_edus_keyed = {}
228
229 pending_edus.extend(device_message_edus)
230
231 pending_presence = self._pending_presence
232 self._pending_presence = {}
233 if pending_presence:
234 pending_edus.append(
235 Edu(
236 origin=self._server_name,
237 destination=self._destination,
238 edu_type="m.presence",
239 content={
240 "push": [
241 format_user_presence_state(
242 presence, self._clock.time_msec()
243 )
244 for presence in pending_presence.values()
245 ]
246 },
247 )
248 )
249
250 if pending_pdus:
251 logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
252 self._destination, len(pending_pdus))
253
254 if not pending_pdus and not pending_edus:
255 logger.debug("TX [%s] Nothing to send", self._destination)
256 self._last_device_stream_id = device_stream_id
257 return
258
259 # if we've decided to send a transaction anyway, and we have room, we
260 # may as well send any pending RRs
261 if len(pending_edus) < 100:
262 pending_edus.extend(self._get_rr_edus(force_flush=True))
263
264 # END CRITICAL SECTION
265
266 success = yield self._transaction_manager.send_new_transaction(
267 self._destination, pending_pdus, pending_edus
268 )
269 if success:
270 sent_transactions_counter.inc()
271 sent_edus_counter.inc(len(pending_edus))
272 for edu in pending_edus:
273 sent_edus_by_type.labels(edu.edu_type).inc()
274 # Remove the acknowledged device messages from the database
275 # Only bother if we actually sent some device messages
276 if device_message_edus:
277 yield self._store.delete_device_msgs_for_remote(
278 self._destination, device_stream_id
279 )
280 logger.info(
281 "Marking as sent %r %r", self._destination, dev_list_id
282 )
283 yield self._store.mark_as_sent_devices_by_remote(
284 self._destination, dev_list_id
285 )
286
287 self._last_device_stream_id = device_stream_id
288 self._last_device_list_stream_id = dev_list_id
289 else:
290 break
291 except NotRetryingDestination as e:
292 logger.debug(
293 "TX [%s] not ready for retry yet (next retry at %s) - "
294 "dropping transaction for now",
295 self._destination,
296 datetime.datetime.fromtimestamp(
297 (e.retry_last_ts + e.retry_interval) / 1000.0
298 ),
299 )
300 except FederationDeniedError as e:
301 logger.info(e)
302 except HttpResponseException as e:
303 logger.warning(
304 "TX [%s] Received %d response to transaction: %s",
305 self._destination, e.code, e,
306 )
307 except RequestSendFailed as e:
308 logger.warning("TX [%s] Failed to send transaction: %s", self._destination, e)
309
310 for p, _ in pending_pdus:
311 logger.info("Failed to send event %s to %s", p.event_id,
312 self._destination)
313 except Exception:
314 logger.exception(
315 "TX [%s] Failed to send transaction",
316 self._destination,
317 )
318 for p, _ in pending_pdus:
319 logger.info("Failed to send event %s to %s", p.event_id,
320 self._destination)
321 finally:
322 # We want to be *very* sure we clear this after we stop processing
323 self.transmission_loop_running = False
324
325 def _get_rr_edus(self, force_flush):
326 if not self._pending_rrs:
327 return
328 if not force_flush and not self._rrs_pending_flush:
329 # not yet time for this lot
330 return
331
332 edu = Edu(
333 origin=self._server_name,
334 destination=self._destination,
335 edu_type="m.receipt",
336 content=self._pending_rrs,
337 )
338 self._pending_rrs = {}
339 self._rrs_pending_flush = False
340 yield edu
341
342 def _pop_pending_edus(self, limit):
343 pending_edus = self._pending_edus
344 pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
345 return pending_edus
346
347 @defer.inlineCallbacks
348 def _get_new_device_messages(self):
349 last_device_stream_id = self._last_device_stream_id
350 to_device_stream_id = self._store.get_to_device_stream_token()
351 contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
352 self._destination, last_device_stream_id, to_device_stream_id
353 )
354 edus = [
355 Edu(
356 origin=self._server_name,
357 destination=self._destination,
358 edu_type="m.direct_to_device",
359 content=content,
360 )
361 for content in contents
362 ]
363
364 last_device_list = self._last_device_list_stream_id
365 now_stream_id, results = yield self._store.get_devices_by_remote(
366 self._destination, last_device_list
367 )
368 edus.extend(
369 Edu(
370 origin=self._server_name,
371 destination=self._destination,
372 edu_type="m.device_list_update",
373 content=content,
374 )
375 for content in results
376 )
377 defer.returnValue((edus, stream_id, now_stream_id))
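
To make the keyed-EDU semantics above concrete: an EDU queued via send_keyed_edu replaces any earlier EDU with the same (edu_type, key), so a stale typing notification for a room is dropped rather than transmitted. A minimal standalone sketch, not Synapse code; FakeEdu is a hypothetical stand-in for synapse.federation.units.Edu:

# Minimal sketch of keyed-EDU clobbering, mirroring _pending_edus_keyed.
from collections import namedtuple

FakeEdu = namedtuple("FakeEdu", ["edu_type", "content"])

pending_edus_keyed = {}  # (edu_type, key) -> FakeEdu

def send_keyed_edu(edu, key):
    # A newer EDU with the same (type, key) replaces the older one, so
    # only the latest typing state per room is ever sent.
    pending_edus_keyed[(edu.edu_type, key)] = edu

send_keyed_edu(FakeEdu("m.typing", {"user_ids": ["@a:hs"]}), "!room:hs")
send_keyed_edu(FakeEdu("m.typing", {"user_ids": []}), "!room:hs")
assert len(pending_edus_keyed) == 1  # the earlier update was clobbered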
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15
16 from twisted.internet import defer
17
18 from synapse.api.errors import HttpResponseException
19 from synapse.federation.persistence import TransactionActions
20 from synapse.federation.units import Transaction
21 from synapse.util.metrics import measure_func
22
23 logger = logging.getLogger(__name__)
24
25
26 class TransactionManager(object):
27 """Helper class which handles building and sending transactions
28
29 shared between PerDestinationQueue objects
30 """
31 def __init__(self, hs):
32 self._server_name = hs.hostname
33 self.clock = hs.get_clock() # nb must be called this for @measure_func
34 self._store = hs.get_datastore()
35 self._transaction_actions = TransactionActions(self._store)
36 self._transport_layer = hs.get_federation_transport_client()
37
38 # HACK to get unique tx id
39 self._next_txn_id = int(self.clock.time_msec())
40
41 @measure_func("_send_new_transaction")
42 @defer.inlineCallbacks
43 def send_new_transaction(self, destination, pending_pdus, pending_edus):
44
45 # Sort based on the order field
46 pending_pdus.sort(key=lambda t: t[1])
47 pdus = [x[0] for x in pending_pdus]
48 edus = pending_edus
49
50 success = True
51
52 logger.debug("TX [%s] _attempt_new_transaction", destination)
53
54 txn_id = str(self._next_txn_id)
55
56 logger.debug(
57 "TX [%s] {%s} Attempting new transaction"
58 " (pdus: %d, edus: %d)",
59 destination, txn_id,
60 len(pdus),
61 len(edus),
62 )
63
64 logger.debug("TX [%s] Persisting transaction...", destination)
65
66 transaction = Transaction.create_new(
67 origin_server_ts=int(self.clock.time_msec()),
68 transaction_id=txn_id,
69 origin=self._server_name,
70 destination=destination,
71 pdus=pdus,
72 edus=edus,
73 )
74
75 self._next_txn_id += 1
76
77 yield self._transaction_actions.prepare_to_send(transaction)
78
79 logger.debug("TX [%s] Persisted transaction", destination)
80 logger.info(
81 "TX [%s] {%s} Sending transaction [%s],"
82 " (PDUs: %d, EDUs: %d)",
83 destination, txn_id,
84 transaction.transaction_id,
85 len(pdus),
86 len(edus),
87 )
88
89 # Actually send the transaction
90
91 # FIXME (erikj): This is a bit of a hack to make the Pdu age
92 # keys work
93 def json_data_cb():
94 data = transaction.get_dict()
95 now = int(self.clock.time_msec())
96 if "pdus" in data:
97 for p in data["pdus"]:
98 if "age_ts" in p:
99 unsigned = p.setdefault("unsigned", {})
100 unsigned["age"] = now - int(p["age_ts"])
101 del p["age_ts"]
102 return data
103
104 try:
105 response = yield self._transport_layer.send_transaction(
106 transaction, json_data_cb
107 )
108 code = 200
109 except HttpResponseException as e:
110 code = e.code
111 response = e.response
112
113 if e.code in (401, 404, 429) or 500 <= e.code:
114 logger.info(
115 "TX [%s] {%s} got %d response",
116 destination, txn_id, code
117 )
118 raise e
119
120 logger.info(
121 "TX [%s] {%s} got %d response",
122 destination, txn_id, code
123 )
124
125 yield self._transaction_actions.delivered(
126 transaction, code, response
127 )
128
129 logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
130
131 if code == 200:
132 for e_id, r in response.get("pdus", {}).items():
133 if "error" in r:
134 logger.warn(
135 "TX [%s] {%s} Remote returned error for %s: %s",
136 destination, txn_id, e_id, r,
137 )
138 else:
139 for p in pdus:
140 logger.warn(
141 "TX [%s] {%s} Failed to send event %s",
142 destination, txn_id, p.event_id,
143 )
144 success = False
145
146 defer.returnValue(success)
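
The json_data_cb hack above converts each PDU's absolute origin timestamp (age_ts) into a relative age in milliseconds at the moment the transaction is actually sent. The rewrite in isolation, as a runnable sketch of the same logic:

# Standalone sketch of the age_ts -> age rewrite done by json_data_cb.
def rewrite_ages(data, now_ms):
    for p in data.get("pdus", []):
        if "age_ts" in p:
            unsigned = p.setdefault("unsigned", {})
            # The age is relative to transmission time, so it stays
            # accurate however long the PDU sat in the queue.
            unsigned["age"] = now_ms - int(p.pop("age_ts"))
    return data

data = {"pdus": [{"age_ts": 1000000}]}
assert rewrite_ages(data, 1005000)["pdus"][0]["unsigned"]["age"] == 5000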
synapse/federation/transaction_queue.py (+0, -699: file removed)
0 # -*- coding: utf-8 -*-
1 # Copyright 2014-2016 OpenMarket Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import datetime
15 import logging
16
17 from six import itervalues
18
19 from prometheus_client import Counter
20
21 from twisted.internet import defer
22
23 import synapse.metrics
24 from synapse.api.errors import (
25 FederationDeniedError,
26 HttpResponseException,
27 RequestSendFailed,
28 )
29 from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
30 from synapse.metrics import (
31 LaterGauge,
32 event_processing_loop_counter,
33 event_processing_loop_room_count,
34 events_processed_counter,
35 sent_transactions_counter,
36 )
37 from synapse.metrics.background_process_metrics import run_as_background_process
38 from synapse.util import logcontext
39 from synapse.util.metrics import measure_func
40 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
41
42 from .persistence import TransactionActions
43 from .units import Edu, Transaction
44
45 logger = logging.getLogger(__name__)
46
47 sent_pdus_destination_dist_count = Counter(
48 "synapse_federation_client_sent_pdu_destinations:count",
49 "Number of PDUs queued for sending to one or more destinations",
50 )
51
52 sent_pdus_destination_dist_total = Counter(
53 "synapse_federation_client_sent_pdu_destinations:total", ""
54 "Total number of PDUs queued for sending across all destinations",
55 )
56
57 sent_edus_counter = Counter(
58 "synapse_federation_client_sent_edus",
59 "Total number of EDUs successfully sent",
60 )
61
62 sent_edus_by_type = Counter(
63 "synapse_federation_client_sent_edus_by_type",
64 "Number of sent EDUs successfully sent, by event type",
65 ["type"],
66 )
67
68
69 class TransactionQueue(object):
70 """This class makes sure we only have one transaction in flight at
71 a time for a given destination.
72
73 It batches pending PDUs into single transactions.
74 """
75
76 def __init__(self, hs):
77 self.hs = hs
78 self.server_name = hs.hostname
79
80 self.store = hs.get_datastore()
81 self.state = hs.get_state_handler()
82 self.transaction_actions = TransactionActions(self.store)
83
84 self.transport_layer = hs.get_federation_transport_client()
85
86 self.clock = hs.get_clock()
87 self.is_mine_id = hs.is_mine_id
88
89 # Is a mapping from destinations -> deferreds. Used to keep track
90 # of which destinations have transactions in flight and when they are
91 # done
92 self.pending_transactions = {}
93
94 LaterGauge(
95 "synapse_federation_transaction_queue_pending_destinations",
96 "",
97 [],
98 lambda: len(self.pending_transactions),
99 )
100
101 # Is a mapping from destination -> list of
102 # tuple(pending pdus, deferred, order)
103 self.pending_pdus_by_dest = pdus = {}
104 # destination -> list of tuple(edu, deferred)
105 self.pending_edus_by_dest = edus = {}
106
107 # Map of user_id -> UserPresenceState for all the pending presence
108 # to be sent out by user_id. Entries here get processed and put in
109 # pending_presence_by_dest
110 self.pending_presence = {}
111
112 # Map of destination -> user_id -> UserPresenceState of pending presence
113 # to be sent to each destinations
114 self.pending_presence_by_dest = presence = {}
115
116 # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
117 # based on their key (e.g. typing events by room_id)
118 # Map of destination -> (edu_type, key) -> Edu
119 self.pending_edus_keyed_by_dest = edus_keyed = {}
120
121 LaterGauge(
122 "synapse_federation_transaction_queue_pending_pdus",
123 "",
124 [],
125 lambda: sum(map(len, pdus.values())),
126 )
127 LaterGauge(
128 "synapse_federation_transaction_queue_pending_edus",
129 "",
130 [],
131 lambda: (
132 sum(map(len, edus.values()))
133 + sum(map(len, presence.values()))
134 + sum(map(len, edus_keyed.values()))
135 ),
136 )
137
138 # destination -> stream_id of last successfully sent to-device message.
139 # NB: may be a long or an int.
140 self.last_device_stream_id_by_dest = {}
141
142 # destination -> stream_id of last successfully sent device list
143 # update.
144 self.last_device_list_stream_id_by_dest = {}
145
146 # HACK to get unique tx id
147 self._next_txn_id = int(self.clock.time_msec())
148
149 self._order = 1
150
151 self._is_processing = False
152 self._last_poked_id = -1
153
154 self._processing_pending_presence = False
155
156 def notify_new_events(self, current_id):
157 """This gets called when we have some new events we might want to
158 send out to other servers.
159 """
160 self._last_poked_id = max(current_id, self._last_poked_id)
161
162 if self._is_processing:
163 return
164
165 # fire off a processing loop in the background
166 run_as_background_process(
167 "process_event_queue_for_federation",
168 self._process_event_queue_loop,
169 )
170
171 @defer.inlineCallbacks
172 def _process_event_queue_loop(self):
173 try:
174 self._is_processing = True
175 while True:
176 last_token = yield self.store.get_federation_out_pos("events")
177 next_token, events = yield self.store.get_all_new_events_stream(
178 last_token, self._last_poked_id, limit=100,
179 )
180
181 logger.debug("Handling %s -> %s", last_token, next_token)
182
183 if not events and next_token >= self._last_poked_id:
184 break
185
186 @defer.inlineCallbacks
187 def handle_event(event):
188 # Only send events for this server.
189 send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
190 is_mine = self.is_mine_id(event.sender)
191 if not is_mine and send_on_behalf_of is None:
192 return
193
194 try:
195 # Get the state from before the event.
196 # We need to make sure that this is the state from before
197 # the event and not from after it.
198 # Otherwise if the last member on a server in a room is
199 # banned then it won't receive the event because it won't
200 # be in the room after the ban.
201 destinations = yield self.state.get_current_hosts_in_room(
202 event.room_id, latest_event_ids=event.prev_event_ids(),
203 )
204 except Exception:
205 logger.exception(
206 "Failed to calculate hosts in room for event: %s",
207 event.event_id,
208 )
209 return
210
211 destinations = set(destinations)
212
213 if send_on_behalf_of is not None:
214 # If we are sending the event on behalf of another server
215 # then it already has the event and there is no reason to
216 # send the event to it.
217 destinations.discard(send_on_behalf_of)
218
219 logger.debug("Sending %s to %r", event, destinations)
220
221 self._send_pdu(event, destinations)
222
223 @defer.inlineCallbacks
224 def handle_room_events(events):
225 for event in events:
226 yield handle_event(event)
227
228 events_by_room = {}
229 for event in events:
230 events_by_room.setdefault(event.room_id, []).append(event)
231
232 yield logcontext.make_deferred_yieldable(defer.gatherResults(
233 [
234 logcontext.run_in_background(handle_room_events, evs)
235 for evs in itervalues(events_by_room)
236 ],
237 consumeErrors=True
238 ))
239
240 yield self.store.update_federation_out_pos(
241 "events", next_token
242 )
243
244 if events:
245 now = self.clock.time_msec()
246 ts = yield self.store.get_received_ts(events[-1].event_id)
247
248 synapse.metrics.event_processing_lag.labels(
249 "federation_sender").set(now - ts)
250 synapse.metrics.event_processing_last_ts.labels(
251 "federation_sender").set(ts)
252
253 events_processed_counter.inc(len(events))
254
255 event_processing_loop_room_count.labels(
256 "federation_sender"
257 ).inc(len(events_by_room))
258
259 event_processing_loop_counter.labels("federation_sender").inc()
260
261 synapse.metrics.event_processing_positions.labels(
262 "federation_sender").set(next_token)
263
264 finally:
265 self._is_processing = False
266
267 def _send_pdu(self, pdu, destinations):
268 # We loop through all destinations to see whether we already have
269 # a transaction in progress. If we do, stick it in the pending_pdus
270 # table and we'll get back to it later.
271
272 order = self._order
273 self._order += 1
274
275 destinations = set(destinations)
276 destinations.discard(self.server_name)
277 logger.debug("Sending to: %s", str(destinations))
278
279 if not destinations:
280 return
281
282 sent_pdus_destination_dist_total.inc(len(destinations))
283 sent_pdus_destination_dist_count.inc()
284
285 for destination in destinations:
286 self.pending_pdus_by_dest.setdefault(destination, []).append(
287 (pdu, order)
288 )
289
290 self._attempt_new_transaction(destination)
291
292 @logcontext.preserve_fn # the caller should not yield on this
293 @defer.inlineCallbacks
294 def send_presence(self, states):
295 """Send the new presence states to the appropriate destinations.
296
297 This actually queues up the presence states ready for sending and
298 triggers a background task to process them and send out the transactions.
299
300 Args:
301 states (list(UserPresenceState))
302 """
303 if not self.hs.config.use_presence:
304 # No-op if presence is disabled.
305 return
306
307 # First we queue up the new presence by user ID, so multiple presence
308 # updates in quick succession are correctly handled
309 # We only want to send presence for our own users, so let's always
310 # filter here, just in case.
311 self.pending_presence.update({
312 state.user_id: state for state in states
313 if self.is_mine_id(state.user_id)
314 })
315
316 # We then handle the new pending presence in batches, first figuring
317 # out the destinations we need to send each state to and then poking it
318 # to attempt a new transaction. We linearize this so that we don't
319 # accidentally mess up the ordering and send multiple presence updates
320 # in the wrong order
321 if self._processing_pending_presence:
322 return
323
324 self._processing_pending_presence = True
325 try:
326 while True:
327 states_map = self.pending_presence
328 self.pending_presence = {}
329
330 if not states_map:
331 break
332
333 yield self._process_presence_inner(list(states_map.values()))
334 except Exception:
335 logger.exception("Error sending presence states to servers")
336 finally:
337 self._processing_pending_presence = False
338
339 @measure_func("txnqueue._process_presence")
340 @defer.inlineCallbacks
341 def _process_presence_inner(self, states):
342 """Given a list of states populate self.pending_presence_by_dest and
343 poke to send a new transaction to each destination
344
345 Args:
346 states (list(UserPresenceState))
347 """
348 hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
349
350 for destinations, states in hosts_and_states:
351 for destination in destinations:
352 if destination == self.server_name:
353 continue
354
355 self.pending_presence_by_dest.setdefault(
356 destination, {}
357 ).update({
358 state.user_id: state for state in states
359 })
360
361 self._attempt_new_transaction(destination)
362
363 def send_edu(self, destination, edu_type, content, key=None):
364 edu = Edu(
365 origin=self.server_name,
366 destination=destination,
367 edu_type=edu_type,
368 content=content,
369 )
370
371 if destination == self.server_name:
372 logger.info("Not sending EDU to ourselves")
373 return
374
375 if key:
376 self.pending_edus_keyed_by_dest.setdefault(
377 destination, {}
378 )[(edu.edu_type, key)] = edu
379 else:
380 self.pending_edus_by_dest.setdefault(destination, []).append(edu)
381
382 self._attempt_new_transaction(destination)
383
384 def send_device_messages(self, destination):
385 if destination == self.server_name:
386 logger.info("Not sending device update to ourselves")
387 return
388
389 self._attempt_new_transaction(destination)
390
391 def get_current_token(self):
392 return 0
393
394 def _attempt_new_transaction(self, destination):
395 """Try to start a new transaction to this destination
396
397 If there is already a transaction in progress to this destination,
398 returns immediately. Otherwise kicks off the process of sending a
399 transaction in the background.
400
401 Args:
402 destination (str):
403
404 Returns:
405 None
406 """
407 # list of (pending_pdu, deferred, order)
408 if destination in self.pending_transactions:
409 # XXX: pending_transactions can get stuck on by a never-ending
410 # request at which point pending_pdus_by_dest just keeps growing.
411 # we need application-layer timeouts of some flavour for these
412 # requests
413 logger.debug(
414 "TX [%s] Transaction already in progress",
415 destination
416 )
417 return
418
419 logger.debug("TX [%s] Starting transaction loop", destination)
420
421 run_as_background_process(
422 "federation_transaction_transmission_loop",
423 self._transaction_transmission_loop,
424 destination,
425 )
426
427 @defer.inlineCallbacks
428 def _transaction_transmission_loop(self, destination):
429 pending_pdus = []
430 try:
431 self.pending_transactions[destination] = 1
432
433 # This will throw if we wouldn't retry. We do this here so we fail
434 # quickly, but we will later check this again in the http client,
435 # hence why we throw the result away.
436 yield get_retry_limiter(destination, self.clock, self.store)
437
438 pending_pdus = []
439 while True:
440 device_message_edus, device_stream_id, dev_list_id = (
441 yield self._get_new_device_messages(destination)
442 )
443
444 # BEGIN CRITICAL SECTION
445 #
446 # In order to avoid a race condition, we need to make sure that
447 # the following code (from popping the queues up to the point
448 # where we decide if we actually have any pending messages) is
449 # atomic - otherwise new PDUs or EDUs might arrive in the
450 # meantime, but not get sent because we hold the
451 # pending_transactions flag.
452
453 pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
454
455 # We can include at most 50 PDUs per transaction
456 pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
457 if leftover_pdus:
458 self.pending_pdus_by_dest[destination] = leftover_pdus
459
460 pending_edus = self.pending_edus_by_dest.pop(destination, [])
461
462 # We can include at most 100 EDUs per transaction
463 pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
464 if leftover_edus:
465 self.pending_edus_by_dest[destination] = leftover_edus
466
467 pending_presence = self.pending_presence_by_dest.pop(destination, {})
468
469 pending_edus.extend(
470 self.pending_edus_keyed_by_dest.pop(destination, {}).values()
471 )
472
473 pending_edus.extend(device_message_edus)
474 if pending_presence:
475 pending_edus.append(
476 Edu(
477 origin=self.server_name,
478 destination=destination,
479 edu_type="m.presence",
480 content={
481 "push": [
482 format_user_presence_state(
483 presence, self.clock.time_msec()
484 )
485 for presence in pending_presence.values()
486 ]
487 },
488 )
489 )
490
491 if pending_pdus:
492 logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
493 destination, len(pending_pdus))
494
495 if not pending_pdus and not pending_edus:
496 logger.debug("TX [%s] Nothing to send", destination)
497 self.last_device_stream_id_by_dest[destination] = (
498 device_stream_id
499 )
500 return
501
502 # END CRITICAL SECTION
503
504 success = yield self._send_new_transaction(
505 destination, pending_pdus, pending_edus,
506 )
507 if success:
508 sent_transactions_counter.inc()
509 sent_edus_counter.inc(len(pending_edus))
510 for edu in pending_edus:
511 sent_edus_by_type.labels(edu.edu_type).inc()
512 # Remove the acknowledged device messages from the database
513 # Only bother if we actually sent some device messages
514 if device_message_edus:
515 yield self.store.delete_device_msgs_for_remote(
516 destination, device_stream_id
517 )
518 logger.info("Marking as sent %r %r", destination, dev_list_id)
519 yield self.store.mark_as_sent_devices_by_remote(
520 destination, dev_list_id
521 )
522
523 self.last_device_stream_id_by_dest[destination] = device_stream_id
524 self.last_device_list_stream_id_by_dest[destination] = dev_list_id
525 else:
526 break
527 except NotRetryingDestination as e:
528 logger.debug(
529 "TX [%s] not ready for retry yet (next retry at %s) - "
530 "dropping transaction for now",
531 destination,
532 datetime.datetime.fromtimestamp(
533 (e.retry_last_ts + e.retry_interval) / 1000.0
534 ),
535 )
536 except FederationDeniedError as e:
537 logger.info(e)
538 except HttpResponseException as e:
539 logger.warning(
540 "TX [%s] Received %d response to transaction: %s",
541 destination, e.code, e,
542 )
543 except RequestSendFailed as e:
544 logger.warning("TX [%s] Failed to send transaction: %s", destination, e)
545
546 for p, _ in pending_pdus:
547 logger.info("Failed to send event %s to %s", p.event_id,
548 destination)
549 except Exception:
550 logger.exception(
551 "TX [%s] Failed to send transaction",
552 destination,
553 )
554 for p, _ in pending_pdus:
555 logger.info("Failed to send event %s to %s", p.event_id,
556 destination)
557 finally:
558 # We want to be *very* sure we delete this after we stop processing
559 self.pending_transactions.pop(destination, None)
560
561 @defer.inlineCallbacks
562 def _get_new_device_messages(self, destination):
563 last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
564 to_device_stream_id = self.store.get_to_device_stream_token()
565 contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
566 destination, last_device_stream_id, to_device_stream_id
567 )
568 edus = [
569 Edu(
570 origin=self.server_name,
571 destination=destination,
572 edu_type="m.direct_to_device",
573 content=content,
574 )
575 for content in contents
576 ]
577
578 last_device_list = self.last_device_list_stream_id_by_dest.get(destination, 0)
579 now_stream_id, results = yield self.store.get_devices_by_remote(
580 destination, last_device_list
581 )
582 edus.extend(
583 Edu(
584 origin=self.server_name,
585 destination=destination,
586 edu_type="m.device_list_update",
587 content=content,
588 )
589 for content in results
590 )
591 defer.returnValue((edus, stream_id, now_stream_id))
592
593 @measure_func("_send_new_transaction")
594 @defer.inlineCallbacks
595 def _send_new_transaction(self, destination, pending_pdus, pending_edus):
596
597 # Sort based on the order field
598 pending_pdus.sort(key=lambda t: t[1])
599 pdus = [x[0] for x in pending_pdus]
600 edus = pending_edus
601
602 success = True
603
604 logger.debug("TX [%s] _attempt_new_transaction", destination)
605
606 txn_id = str(self._next_txn_id)
607
608 logger.debug(
609 "TX [%s] {%s} Attempting new transaction"
610 " (pdus: %d, edus: %d)",
611 destination, txn_id,
612 len(pdus),
613 len(edus),
614 )
615
616 logger.debug("TX [%s] Persisting transaction...", destination)
617
618 transaction = Transaction.create_new(
619 origin_server_ts=int(self.clock.time_msec()),
620 transaction_id=txn_id,
621 origin=self.server_name,
622 destination=destination,
623 pdus=pdus,
624 edus=edus,
625 )
626
627 self._next_txn_id += 1
628
629 yield self.transaction_actions.prepare_to_send(transaction)
630
631 logger.debug("TX [%s] Persisted transaction", destination)
632 logger.info(
633 "TX [%s] {%s} Sending transaction [%s],"
634 " (PDUs: %d, EDUs: %d)",
635 destination, txn_id,
636 transaction.transaction_id,
637 len(pdus),
638 len(edus),
639 )
640
641 # Actually send the transaction
642
643 # FIXME (erikj): This is a bit of a hack to make the Pdu age
644 # keys work
645 def json_data_cb():
646 data = transaction.get_dict()
647 now = int(self.clock.time_msec())
648 if "pdus" in data:
649 for p in data["pdus"]:
650 if "age_ts" in p:
651 unsigned = p.setdefault("unsigned", {})
652 unsigned["age"] = now - int(p["age_ts"])
653 del p["age_ts"]
654 return data
655
656 try:
657 response = yield self.transport_layer.send_transaction(
658 transaction, json_data_cb
659 )
660 code = 200
661 except HttpResponseException as e:
662 code = e.code
663 response = e.response
664
665 if e.code in (401, 404, 429) or 500 <= e.code:
666 logger.info(
667 "TX [%s] {%s} got %d response",
668 destination, txn_id, code
669 )
670 raise e
671
672 logger.info(
673 "TX [%s] {%s} got %d response",
674 destination, txn_id, code
675 )
676
677 yield self.transaction_actions.delivered(
678 transaction, code, response
679 )
680
681 logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
682
683 if code == 200:
684 for e_id, r in response.get("pdus", {}).items():
685 if "error" in r:
686 logger.warn(
687 "TX [%s] {%s} Remote returned error for %s: %s",
688 destination, txn_id, e_id, r,
689 )
690 else:
691 for p in pdus:
692 logger.warn(
693 "TX [%s] {%s} Failed to send event %s",
694 destination, txn_id, p.event_id,
695 )
696 success = False
697
698 defer.returnValue(success)
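
Both the removed TransactionQueue and its PerDestinationQueue replacement cap each transaction at 50 PDUs and 100 EDUs, requeueing the overflow for the next loop iteration. The slice-and-requeue pattern in isolation, as a small sketch:

# Sketch of the per-transaction batching shared by both implementations.
MAX_PDUS_PER_TXN = 50
MAX_EDUS_PER_TXN = 100

def take_batch(pending, limit):
    # Return (batch to send now, remainder to leave on the queue).
    return pending[:limit], pending[limit:]

queue = list(range(120))
batch, queue = take_batch(queue, MAX_PDUS_PER_TXN)
assert len(batch) == 50 and len(queue) == 70

edus = list(range(150))
edu_batch, edus = take_batch(edus, MAX_EDUS_PER_TXN)
assert len(edu_batch) == 100 and len(edus) == 50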
5050 logger.debug("get_room_state dest=%s, room=%s",
5151 destination, room_id)
5252
53 path = _create_v1_path("/state/%s/", room_id)
53 path = _create_v1_path("/state/%s", room_id)
5454 return self.client.get_json(
5555 destination, path=path, args={"event_id": event_id},
56 try_trailing_slash_on_400=True,
5657 )
5758
5859 @log_function
7273 logger.debug("get_room_state_ids dest=%s, room=%s",
7374 destination, room_id)
7475
75 path = _create_v1_path("/state_ids/%s/", room_id)
76 path = _create_v1_path("/state_ids/%s", room_id)
7677 return self.client.get_json(
7778 destination, path=path, args={"event_id": event_id},
79 try_trailing_slash_on_400=True,
7880 )
7981
8082 @log_function
9496 logger.debug("get_pdu dest=%s, event_id=%s",
9597 destination, event_id)
9698
97 path = _create_v1_path("/event/%s/", event_id)
98 return self.client.get_json(destination, path=path, timeout=timeout)
99 path = _create_v1_path("/event/%s", event_id)
100 return self.client.get_json(
101 destination, path=path, timeout=timeout,
102 try_trailing_slash_on_400=True,
103 )
99104
100105 @log_function
101106 def backfill(self, destination, room_id, event_tuples, limit):
120125 # TODO: raise?
121126 return
122127
123 path = _create_v1_path("/backfill/%s/", room_id)
128 path = _create_v1_path("/backfill/%s", room_id)
124129
125130 args = {
126131 "v": event_tuples,
131136 destination,
132137 path=path,
133138 args=args,
139 try_trailing_slash_on_400=True,
134140 )
135141
136142 @defer.inlineCallbacks
166172 # generated by the json_data_callback.
167173 json_data = transaction.get_dict()
168174
169 path = _create_v1_path("/send/%s/", transaction.transaction_id)
175 path = _create_v1_path("/send/%s", transaction.transaction_id)
170176
171177 response = yield self.client.put_json(
172178 transaction.destination,
175181 json_data_callback=json_data_callback,
176182 long_retries=True,
177183 backoff_on_404=True, # If we get a 404 the other side has gone
184 try_trailing_slash_on_400=True,
178185 )
179186
180187 defer.returnValue(response)
958965
959966 Example:
960967
961 _create_v1_path("/event/%s/", event_id)
968 _create_v1_path("/event/%s", event_id)
962969
963970 Args:
964971 path (str): String template for the path
979986
980987 Example:
981988
982 _create_v2_path("/event/%s/", event_id)
989 _create_v2_path("/event/%s", event_id)
983990
984991 Args:
985992 path (str): String template for the path
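
The client-side changes above drop the trailing slash from the v1 federation paths and set try_trailing_slash_on_400, so the request is retried with the old slashed spelling if the remote (an older server) rejects the new form with a 400. A rough, self-contained model of that fallback, not the real MatrixFederationHttpClient:

# Sketch of the try_trailing_slash_on_400 fallback under the assumption
# that a 400 indicates the remote only knows the old slashed path.
class HttpResponseError(Exception):  # stand-in for HttpResponseException
    def __init__(self, code):
        self.code = code

def get_with_slash_fallback(do_request, path):
    try:
        return do_request(path)        # canonical, un-slashed path first
    except HttpResponseError as e:
        if e.code != 400:
            raise
        return do_request(path + "/")  # retry once with trailing slash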
311311
312312
313313 class FederationSendServlet(BaseFederationServlet):
314 PATH = "/send/(?P<transaction_id>[^/]*)/"
314 PATH = "/send/(?P<transaction_id>[^/]*)/?"
315315
316316 def __init__(self, handler, server_name, **kwargs):
317317 super(FederationSendServlet, self).__init__(
377377
378378
379379 class FederationEventServlet(BaseFederationServlet):
380 PATH = "/event/(?P<event_id>[^/]*)/"
380 PATH = "/event/(?P<event_id>[^/]*)/?"
381381
382382 # This is when someone asks for a data item for a given server data_id pair.
383383 def on_GET(self, origin, content, query, event_id):
385385
386386
387387 class FederationStateServlet(BaseFederationServlet):
388 PATH = "/state/(?P<context>[^/]*)/"
388 PATH = "/state/(?P<context>[^/]*)/?"
389389
390390 # This is when someone asks for all data for a given context.
391391 def on_GET(self, origin, content, query, context):
392392 return self.handler.on_context_state_request(
393393 origin,
394394 context,
395 parse_string_from_args(query, "event_id", None),
395 parse_string_from_args(query, "event_id", None, required=True),
396396 )
397397
398398
399399 class FederationStateIdsServlet(BaseFederationServlet):
400 PATH = "/state_ids/(?P<room_id>[^/]*)/"
400 PATH = "/state_ids/(?P<room_id>[^/]*)/?"
401401
402402 def on_GET(self, origin, content, query, room_id):
403403 return self.handler.on_state_ids_request(
404404 origin,
405405 room_id,
406 parse_string_from_args(query, "event_id", None),
406 parse_string_from_args(query, "event_id", None, required=True),
407407 )
408408
409409
410410 class FederationBackfillServlet(BaseFederationServlet):
411 PATH = "/backfill/(?P<context>[^/]*)/"
411 PATH = "/backfill/(?P<context>[^/]*)/?"
412412
413413 def on_GET(self, origin, content, query, context):
414414 versions = [x.decode('ascii') for x in query[b"v"]]
758758 class FederationGroupsProfileServlet(BaseFederationServlet):
759759 """Get/set the basic profile of a group on behalf of a user
760760 """
761 PATH = "/groups/(?P<group_id>[^/]*)/profile$"
761 PATH = "/groups/(?P<group_id>[^/]*)/profile"
762762
763763 @defer.inlineCallbacks
764764 def on_GET(self, origin, content, query, group_id):
786786
787787
788788 class FederationGroupsSummaryServlet(BaseFederationServlet):
789 PATH = "/groups/(?P<group_id>[^/]*)/summary$"
789 PATH = "/groups/(?P<group_id>[^/]*)/summary"
790790
791791 @defer.inlineCallbacks
792792 def on_GET(self, origin, content, query, group_id):
804804 class FederationGroupsRoomsServlet(BaseFederationServlet):
805805 """Get the rooms in a group on behalf of a user
806806 """
807 PATH = "/groups/(?P<group_id>[^/]*)/rooms$"
807 PATH = "/groups/(?P<group_id>[^/]*)/rooms"
808808
809809 @defer.inlineCallbacks
810810 def on_GET(self, origin, content, query, group_id):
822822 class FederationGroupsAddRoomsServlet(BaseFederationServlet):
823823 """Add/remove room from group
824824 """
825 PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)$"
825 PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
826826
827827 @defer.inlineCallbacks
828828 def on_POST(self, origin, content, query, group_id, room_id):
854854 """
855855 PATH = (
856856 "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
857 "/config/(?P<config_key>[^/]*)$"
857 "/config/(?P<config_key>[^/]*)"
858858 )
859859
860860 @defer.inlineCallbacks
873873 class FederationGroupsUsersServlet(BaseFederationServlet):
874874 """Get the users in a group on behalf of a user
875875 """
876 PATH = "/groups/(?P<group_id>[^/]*)/users$"
876 PATH = "/groups/(?P<group_id>[^/]*)/users"
877877
878878 @defer.inlineCallbacks
879879 def on_GET(self, origin, content, query, group_id):
891891 class FederationGroupsInvitedUsersServlet(BaseFederationServlet):
892892 """Get the users that have been invited to a group
893893 """
894 PATH = "/groups/(?P<group_id>[^/]*)/invited_users$"
894 PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
895895
896896 @defer.inlineCallbacks
897897 def on_GET(self, origin, content, query, group_id):
909909 class FederationGroupsInviteServlet(BaseFederationServlet):
910910 """Ask a group server to invite someone to the group
911911 """
912 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
912 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
913913
914914 @defer.inlineCallbacks
915915 def on_POST(self, origin, content, query, group_id, user_id):
927927 class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
928928 """Accept an invitation from the group server
929929 """
930 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite$"
930 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
931931
932932 @defer.inlineCallbacks
933933 def on_POST(self, origin, content, query, group_id, user_id):
944944 class FederationGroupsJoinServlet(BaseFederationServlet):
945945 """Attempt to join a group
946946 """
947 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join$"
947 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
948948
949949 @defer.inlineCallbacks
950950 def on_POST(self, origin, content, query, group_id, user_id):
961961 class FederationGroupsRemoveUserServlet(BaseFederationServlet):
962962 """Leave or kick a user from the group
963963 """
964 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
964 PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
965965
966966 @defer.inlineCallbacks
967967 def on_POST(self, origin, content, query, group_id, user_id):
979979 class FederationGroupsLocalInviteServlet(BaseFederationServlet):
980980 """A group server has invited a local user
981981 """
982 PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite$"
982 PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
983983
984984 @defer.inlineCallbacks
985985 def on_POST(self, origin, content, query, group_id, user_id):
996996 class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet):
997997 """A group server has removed a local user
998998 """
999 PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove$"
999 PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
10001000
10011001 @defer.inlineCallbacks
10021002 def on_POST(self, origin, content, query, group_id, user_id):
10131013 class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
10141014 """A group or user's server renews their attestation
10151015 """
1016 PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)$"
1016 PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
10171017
10181018 @defer.inlineCallbacks
10191019 def on_POST(self, origin, content, query, group_id, user_id):
10361036 PATH = (
10371037 "/groups/(?P<group_id>[^/]*)/summary"
10381038 "(/categories/(?P<category_id>[^/]+))?"
1039 "/rooms/(?P<room_id>[^/]*)$"
1039 "/rooms/(?P<room_id>[^/]*)"
10401040 )
10411041
10421042 @defer.inlineCallbacks
10791079 """Get all categories for a group
10801080 """
10811081 PATH = (
1082 "/groups/(?P<group_id>[^/]*)/categories/$"
1082 "/groups/(?P<group_id>[^/]*)/categories/?"
10831083 )
10841084
10851085 @defer.inlineCallbacks
10991099 """Add/remove/get a category in a group
11001100 """
11011101 PATH = (
1102 "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)$"
1102 "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
11031103 )
11041104
11051105 @defer.inlineCallbacks
11491149 """Get roles in a group
11501150 """
11511151 PATH = (
1152 "/groups/(?P<group_id>[^/]*)/roles/$"
1152 "/groups/(?P<group_id>[^/]*)/roles/?"
11531153 )
11541154
11551155 @defer.inlineCallbacks
11691169 """Add/remove/get a role in a group
11701170 """
11711171 PATH = (
1172 "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$"
1172 "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
11731173 )
11741174
11751175 @defer.inlineCallbacks
12251225 PATH = (
12261226 "/groups/(?P<group_id>[^/]*)/summary"
12271227 "(/roles/(?P<role_id>[^/]+))?"
1228 "/users/(?P<user_id>[^/]*)$"
1228 "/users/(?P<user_id>[^/]*)"
12291229 )
12301230
12311231 @defer.inlineCallbacks
12681268 """Get roles in a group
12691269 """
12701270 PATH = (
1271 "/get_groups_publicised$"
1271 "/get_groups_publicised"
12721272 )
12731273
12741274 @defer.inlineCallbacks
12831283 class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
12841284 """Sets whether a group is joinable without an invite or knock
12851285 """
1286 PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy$"
1286 PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
12871287
12881288 @defer.inlineCallbacks
12891289 def on_PUT(self, origin, content, query, group_id):
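
On the server side, the servlet PATH regexes swap a mandatory trailing slash for an optional one ("/?"), so both the old slashed and the new un-slashed request forms are accepted. A quick check with the standard re module (anchors are added here for illustration only; the real servlets embed these patterns in a larger prefix):

import re

old = re.compile("^/send/(?P<transaction_id>[^/]*)/$")
new = re.compile("^/send/(?P<transaction_id>[^/]*)/?$")

assert old.match("/send/123/") and not old.match("/send/123")
assert new.match("/send/123/") and new.match("/send/123")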
9292 messages_per_second = self.hs.config.rc_messages_per_second
9393 burst_count = self.hs.config.rc_message_burst_count
9494
95 allowed, time_allowed = self.ratelimiter.send_message(
95 allowed, time_allowed = self.ratelimiter.can_do_action(
9696 user_id, time_now,
97 msg_rate_hz=messages_per_second,
97 rate_hz=messages_per_second,
9898 burst_count=burst_count,
9999 update=update,
100100 )
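
The rename above (send_message to can_do_action, msg_rate_hz to rate_hz) generalises the ratelimiter so the same primitive can guard logins as well as message sends (see the auth changes below). A simplified token-bucket model of the call, not Synapse's actual Ratelimiter internals:

# Simplified can_do_action-style check: allow up to burst_count actions
# at once, refilling at rate_hz tokens per second. Returns the same
# (allowed, time_allowed) pair as the call above.
buckets = {}  # key -> (tokens_left, last_checked_ts)

def can_do_action(key, time_now, rate_hz, burst_count, update=True):
    tokens, last = buckets.get(key, (burst_count, time_now))
    tokens = min(burst_count, tokens + (time_now - last) * rate_hz)
    allowed = tokens >= 1
    if update:
        buckets[key] = (tokens - 1 if allowed else tokens, time_now)
    time_allowed = time_now if allowed else time_now + (1 - tokens) / rate_hz
    return allowed, time_allowed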
164164 member_event.room_id,
165165 "leave",
166166 ratelimit=False,
167 require_consent=False,
167168 )
168169 except Exception as e:
169170 logger.exception("Error kicking guest user: %s" % (e,))
3434 StoreError,
3535 SynapseError,
3636 )
37 from synapse.api.ratelimiting import Ratelimiter
3738 from synapse.module_api import ModuleApi
3839 from synapse.types import UserID
3940 from synapse.util import logcontext
9798 if t not in login_types:
9899 login_types.append(t)
99100 self._supported_login_types = login_types
101
102 self._account_ratelimiter = Ratelimiter()
103 self._failed_attempts_ratelimiter = Ratelimiter()
104
105 self._clock = self.hs.get_clock()
100106
101107 @defer.inlineCallbacks
102108 def validate_user_via_ui_auth(self, requester, request_body, clientip):
567573 Returns:
568574 defer.Deferred: (unicode) canonical_user_id, or None if zero or
569575 multiple matches
570 """
576
577 Raises:
578 LimitExceededError if the ratelimiter's login requests count for this
579 user is too high too proceed.
580 """
581 self.ratelimit_login_per_account(user_id)
571582 res = yield self._find_user_id_and_pwd_hash(user_id)
572583 if res is not None:
573584 defer.returnValue(res[0])
633644 StoreError if there was a problem accessing the database
634645 SynapseError if there was a problem with the request
635646 LoginError if there was an authentication problem.
647 LimitExceededError if the ratelimiter's login requests count for this
648 user is too high to proceed.
636649 """
637650
638651 if username.startswith('@'):
641654 qualified_user_id = UserID(
642655 username, self.hs.hostname
643656 ).to_string()
657
658 self.ratelimit_login_per_account(qualified_user_id)
644659
645660 login_type = login_submission.get("type")
646661 known_login_type = False
714729 if not known_login_type:
715730 raise SynapseError(400, "Unknown login type %s" % login_type)
716731
717 # unknown username or invalid password. We raise a 403 here, but note
718 # that if we're doing user-interactive login, it turns all LoginErrors
719 # into a 401 anyway.
732 # unknown username or invalid password.
733 self._failed_attempts_ratelimiter.ratelimit(
734 qualified_user_id.lower(), time_now_s=self._clock.time(),
735 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
736 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
737 update=True,
738 )
739
740 # We raise a 403 here, but note that if we're doing user-interactive
741 # login, it turns all LoginErrors into a 401 anyway.
720742 raise LoginError(
721743 403, "Invalid password",
722744 errcode=Codes.FORBIDDEN
723745 )
724746
725747 @defer.inlineCallbacks
748 def check_password_provider_3pid(self, medium, address, password):
749 """Check if a password provider is able to validate a thirdparty login
750
751 Args:
752 medium (str): The medium of the 3pid (ex. email).
753 address (str): The address of the 3pid (ex. jdoe@example.com).
754 password (str): The password of the user.
755
756 Returns:
757 Deferred[(str|None, func|None)]: A tuple of `(user_id,
758 callback)`. If authentication is successful, `user_id` is a `str`
759 containing the authenticated, canonical user ID. `callback` is
760 then either a function to be later run after the server has
761 completed login/registration, or `None`. If authentication was
762 unsuccessful, `user_id` and `callback` are both `None`.
763 """
764 for provider in self.password_providers:
765 if hasattr(provider, "check_3pid_auth"):
766 # This function is able to return a deferred that either
767 # resolves None, meaning authentication failure, or upon
768 # success, to a str (which is the user_id) or a tuple of
769 # (user_id, callback_func), where callback_func should be run
770 # after we've finished everything else
771 result = yield provider.check_3pid_auth(
772 medium, address, password,
773 )
774 if result:
775 # Check if the return value is a str or a tuple
776 if isinstance(result, str):
777 # If it's a str, set callback function to None
778 result = (result, None)
779 defer.returnValue(result)
780
781 defer.returnValue((None, None))
782
783 @defer.inlineCallbacks
726784 def _check_local_password(self, user_id, password):
727785 """Authenticate a user against the local password database.
728786
733791 user_id (unicode): complete @user:id
734792 password (unicode): the provided password
735793 Returns:
736 (unicode) the canonical_user_id, or None if unknown user / bad password
794 Deferred[unicode] the canonical_user_id, or Deferred[None] if
795 unknown user/bad password
796
797 Raises:
798 LimitExceededError if the ratelimiter's login requests count for this
799 user is too high to proceed.
737800 """
738801 lookupres = yield self._find_user_id_and_pwd_hash(user_id)
739802 if not lookupres:
762825 auth_api.validate_macaroon(macaroon, "login", True, user_id)
763826 except Exception:
764827 raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
828 self.ratelimit_login_per_account(user_id)
765829 yield self.auth.check_auth_blocking(user_id)
766830 defer.returnValue(user_id)
767831
933997 else:
934998 return defer.succeed(False)
935999
1000 def ratelimit_login_per_account(self, user_id):
1001 """Checks whether the process must be stopped because of ratelimiting.
1002
1003 Checks against two ratelimiters: the generic one for login attempts per
1004 account and the one specific to failed attempts.
1005
1006 Args:
1007 user_id (unicode): complete @user:id
1008
1009 Raises:
1010 LimitExceededError if one of the ratelimiters' login requests count
1011 for this user is too high to proceed.
1012 """
1013 self._failed_attempts_ratelimiter.ratelimit(
1014 user_id.lower(), time_now_s=self._clock.time(),
1015 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
1016 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
1017 update=False,
1018 )
1019
1020 self._account_ratelimiter.ratelimit(
1021 user_id.lower(), time_now_s=self._clock.time(),
1022 rate_hz=self.hs.config.rc_login_account.per_second,
1023 burst_count=self.hs.config.rc_login_account.burst_count,
1024 update=True,
1025 )
1026
9361027
9371028 @attr.s
9381029 class MacaroonGenerator(object):
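
The ordering in ratelimit_login_per_account above matters: the failed-attempts limiter is consulted read-only (update=False, it is only bumped in the failure path shown earlier), while the generic per-account limiter is bumped on every login attempt (update=True). A condensed, self-contained model of that two-limiter check, with simple counts standing in for the real rate_hz/burst_count buckets:

# Condensed model of the dual-ratelimiter login check (not Synapse code).
class LimitExceeded(Exception):  # stand-in for LimitExceededError
    pass

def ratelimit(counts, key, burst, update):
    if counts.get(key, 0) >= burst:
        raise LimitExceeded()
    if update:
        counts[key] = counts.get(key, 0) + 1

failed_attempts, account_attempts = {}, {}

def ratelimit_login_per_account(user_id):
    # Read-only check against failures; bumped elsewhere, on failure.
    ratelimit(failed_attempts, user_id.lower(), burst=3, update=False)
    # Every attempt counts against the per-account limiter.
    ratelimit(account_attempts, user_id.lower(), burst=5, update=True)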
163163 room_id,
164164 "leave",
165165 ratelimit=False,
166 require_consent=False,
166167 )
167168 except Exception:
168169 logger.exception(
3636 logger = logging.getLogger(__name__)
3737
3838
39 class DeviceHandler(BaseHandler):
39 class DeviceWorkerHandler(BaseHandler):
4040 def __init__(self, hs):
41 super(DeviceHandler, self).__init__(hs)
41 super(DeviceWorkerHandler, self).__init__(hs)
4242
4343 self.hs = hs
4444 self.state = hs.get_state_handler()
4545 self._auth_handler = hs.get_auth_handler()
46 self.federation_sender = hs.get_federation_sender()
47
48 self._edu_updater = DeviceListEduUpdater(hs, self)
49
50 federation_registry = hs.get_federation_registry()
51
52 federation_registry.register_edu_handler(
53 "m.device_list_update", self._edu_updater.incoming_device_list_update,
54 )
55 federation_registry.register_query_handler(
56 "user_devices", self.on_federation_query_user_devices,
57 )
58
59 hs.get_distributor().observe("user_left_room", self.user_left_room)
60
61 @defer.inlineCallbacks
62 def check_device_registered(self, user_id, device_id,
63 initial_device_display_name=None):
64 """
65 If the given device has not been registered, register it with the
66 supplied display name.
67
68 If no device_id is supplied, we make one up.
69
70 Args:
71 user_id (str): @user:id
72 device_id (str | None): device id supplied by client
73 initial_device_display_name (str | None): device display name from
74 client
75 Returns:
76 str: device id (generated if none was supplied)
77 """
78 if device_id is not None:
79 new_device = yield self.store.store_device(
80 user_id=user_id,
81 device_id=device_id,
82 initial_device_display_name=initial_device_display_name,
83 )
84 if new_device:
85 yield self.notify_device_update(user_id, [device_id])
86 defer.returnValue(device_id)
87
88 # if the device id is not specified, we'll autogen one, but loop a few
89 # times in case of a clash.
90 attempts = 0
91 while attempts < 5:
92 device_id = stringutils.random_string(10).upper()
93 new_device = yield self.store.store_device(
94 user_id=user_id,
95 device_id=device_id,
96 initial_device_display_name=initial_device_display_name,
97 )
98 if new_device:
99 yield self.notify_device_update(user_id, [device_id])
100 defer.returnValue(device_id)
101 attempts += 1
102
103 raise errors.StoreError(500, "Couldn't generate a device ID.")
10446
10547 @defer.inlineCallbacks
10648 def get_devices_by_user(self, user_id):
14890 _update_device_from_client_ips(device, ips)
14991 defer.returnValue(device)
15092
151 @defer.inlineCallbacks
152 def delete_device(self, user_id, device_id):
153 """ Delete the given device
154
155 Args:
156 user_id (str):
157 device_id (str):
158
159 Returns:
160 defer.Deferred:
161 """
162
163 try:
164 yield self.store.delete_device(user_id, device_id)
165 except errors.StoreError as e:
166 if e.code == 404:
167 # no match
168 pass
169 else:
170 raise
171
172 yield self._auth_handler.delete_access_tokens_for_user(
173 user_id, device_id=device_id,
174 )
175
176 yield self.store.delete_e2e_keys_by_device(
177 user_id=user_id, device_id=device_id
178 )
179
180 yield self.notify_device_update(user_id, [device_id])
181
182 @defer.inlineCallbacks
183 def delete_all_devices_for_user(self, user_id, except_device_id=None):
184 """Delete all of the user's devices
185
186 Args:
187 user_id (str):
188 except_device_id (str|None): optional device id which should not
189 be deleted
190
191 Returns:
192 defer.Deferred:
193 """
194 device_map = yield self.store.get_devices_by_user(user_id)
195 device_ids = list(device_map)
196 if except_device_id is not None:
197 device_ids = [d for d in device_ids if d != except_device_id]
198 yield self.delete_devices(user_id, device_ids)
199
200 @defer.inlineCallbacks
201 def delete_devices(self, user_id, device_ids):
202 """ Delete several devices
203
204 Args:
205 user_id (str):
206 device_ids (List[str]): The list of device IDs to delete
207
208 Returns:
209 defer.Deferred:
210 """
211
212 try:
213 yield self.store.delete_devices(user_id, device_ids)
214 except errors.StoreError as e:
215 if e.code == 404:
216 # no match
217 pass
218 else:
219 raise
220
221 # Delete access tokens and e2e keys for each device. Not optimised as it is not
222 # considered as part of a critical path.
223 for device_id in device_ids:
224 yield self._auth_handler.delete_access_tokens_for_user(
225 user_id, device_id=device_id,
226 )
227 yield self.store.delete_e2e_keys_by_device(
228 user_id=user_id, device_id=device_id
229 )
230
231 yield self.notify_device_update(user_id, device_ids)
232
233 @defer.inlineCallbacks
234 def update_device(self, user_id, device_id, content):
235 """ Update the given device
236
237 Args:
238 user_id (str):
239 device_id (str):
240 content (dict): body of update request
241
242 Returns:
243 defer.Deferred:
244 """
245
246 try:
247 yield self.store.update_device(
248 user_id,
249 device_id,
250 new_display_name=content.get("display_name")
251 )
252 yield self.notify_device_update(user_id, [device_id])
253 except errors.StoreError as e:
254 if e.code == 404:
255 raise errors.NotFoundError()
256 else:
257 raise
258
259 @measure_func("notify_device_update")
260 @defer.inlineCallbacks
261 def notify_device_update(self, user_id, device_ids):
262 """Notify that a user's device(s) has changed. Pokes the notifier, and
263 remote servers if the user is local.
264 """
265 users_who_share_room = yield self.store.get_users_who_share_room_with_user(
266 user_id
267 )
268
269 hosts = set()
270 if self.hs.is_mine_id(user_id):
271 hosts.update(get_domain_from_id(u) for u in users_who_share_room)
272 hosts.discard(self.server_name)
273
274 position = yield self.store.add_device_change_to_streams(
275 user_id, device_ids, list(hosts)
276 )
277
278 room_ids = yield self.store.get_rooms_for_user(user_id)
279
280 yield self.notifier.on_new_event(
281 "device_list_key", position, rooms=room_ids,
282 )
283
284 if hosts:
285 logger.info("Sending device list update notif to: %r", hosts)
286 for host in hosts:
287 self.federation_sender.send_device_messages(host)
288
28993 @measure_func("device.get_user_ids_changed")
29094 @defer.inlineCallbacks
29195 def get_user_ids_changed(self, user_id, from_token):
296100 user_id (str)
297101 from_token (StreamToken)
298102 """
299 now_token = yield self.hs.get_event_sources().get_current_token()
103 now_room_key = yield self.store.get_room_events_max_id()
300104
301105 room_ids = yield self.store.get_rooms_for_user(user_id)
302106
309113 rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
310114
311115 member_events = yield self.store.get_membership_changes_for_user(
312 user_id, from_token.room_key, now_token.room_key
116 user_id, from_token.room_key, now_room_key,
313117 )
314118 rooms_changed.update(event.room_id for event in member_events)
315119
406210 "left": list(possibly_left),
407211 })
408212
213
214 class DeviceHandler(DeviceWorkerHandler):
215 def __init__(self, hs):
216 super(DeviceHandler, self).__init__(hs)
217
218 self.federation_sender = hs.get_federation_sender()
219
220 self._edu_updater = DeviceListEduUpdater(hs, self)
221
222 federation_registry = hs.get_federation_registry()
223
224 federation_registry.register_edu_handler(
225 "m.device_list_update", self._edu_updater.incoming_device_list_update,
226 )
227 federation_registry.register_query_handler(
228 "user_devices", self.on_federation_query_user_devices,
229 )
230
231 hs.get_distributor().observe("user_left_room", self.user_left_room)
232
233 @defer.inlineCallbacks
234 def check_device_registered(self, user_id, device_id,
235 initial_device_display_name=None):
236 """
237 If the given device has not been registered, register it with the
238 supplied display name.
239
240 If no device_id is supplied, we make one up.
241
242 Args:
243 user_id (str): @user:id
244 device_id (str | None): device id supplied by client
245 initial_device_display_name (str | None): device display name from
246 client
247 Returns:
248 str: device id (generated if none was supplied)
249 """
250 if device_id is not None:
251 new_device = yield self.store.store_device(
252 user_id=user_id,
253 device_id=device_id,
254 initial_device_display_name=initial_device_display_name,
255 )
256 if new_device:
257 yield self.notify_device_update(user_id, [device_id])
258 defer.returnValue(device_id)
259
260 # if the device id is not specified, we'll autogen one, but loop a few
261 # times in case of a clash.
262 attempts = 0
263 while attempts < 5:
264 device_id = stringutils.random_string(10).upper()
265 new_device = yield self.store.store_device(
266 user_id=user_id,
267 device_id=device_id,
268 initial_device_display_name=initial_device_display_name,
269 )
270 if new_device:
271 yield self.notify_device_update(user_id, [device_id])
272 defer.returnValue(device_id)
273 attempts += 1
274
275 raise errors.StoreError(500, "Couldn't generate a device ID.")
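The generate-and-retry pattern above is worth seeing in isolation. Below is a minimal, synchronous sketch under the same assumptions; generate_device_id and is_free are hypothetical names, and the real code uses stringutils.random_string(10).upper() plus Twisted deferreds rather than plain calls:

import random
import string

def generate_device_id(is_free, attempts=5, length=10):
    """Try a few random candidate IDs; give up rather than loop forever.

    is_free is assumed to be a callable returning True when the candidate
    ID is not already registered for this user.
    """
    for _ in range(attempts):
        candidate = "".join(
            random.choice(string.ascii_uppercase) for _ in range(length)
        )
        if is_free(candidate):
            return candidate
    # Mirrors the StoreError(500) above: surface a server error instead of spinning.
    raise RuntimeError("Couldn't generate a device ID.")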
276
277 @defer.inlineCallbacks
278 def delete_device(self, user_id, device_id):
279 """ Delete the given device
280
281 Args:
282 user_id (str):
283 device_id (str):
284
285 Returns:
286 defer.Deferred:
287 """
288
289 try:
290 yield self.store.delete_device(user_id, device_id)
291 except errors.StoreError as e:
292 if e.code == 404:
293 # no match
294 pass
295 else:
296 raise
297
298 yield self._auth_handler.delete_access_tokens_for_user(
299 user_id, device_id=device_id,
300 )
301
302 yield self.store.delete_e2e_keys_by_device(
303 user_id=user_id, device_id=device_id
304 )
305
306 yield self.notify_device_update(user_id, [device_id])
307
308 @defer.inlineCallbacks
309 def delete_all_devices_for_user(self, user_id, except_device_id=None):
310 """Delete all of the user's devices
311
312 Args:
313 user_id (str):
314 except_device_id (str|None): optional device id which should not
315 be deleted
316
317 Returns:
318 defer.Deferred:
319 """
320 device_map = yield self.store.get_devices_by_user(user_id)
321 device_ids = list(device_map)
322 if except_device_id is not None:
323 device_ids = [d for d in device_ids if d != except_device_id]
324 yield self.delete_devices(user_id, device_ids)
325
326 @defer.inlineCallbacks
327 def delete_devices(self, user_id, device_ids):
328 """ Delete several devices
329
330 Args:
331 user_id (str):
332 device_ids (List[str]): The list of device IDs to delete
333
334 Returns:
335 defer.Deferred:
336 """
337
338 try:
339 yield self.store.delete_devices(user_id, device_ids)
340 except errors.StoreError as e:
341 if e.code == 404:
342 # no match
343 pass
344 else:
345 raise
346
347 # Delete access tokens and e2e keys for each device. Not optimised as it is not
348 # considered part of a critical path.
349 for device_id in device_ids:
350 yield self._auth_handler.delete_access_tokens_for_user(
351 user_id, device_id=device_id,
352 )
353 yield self.store.delete_e2e_keys_by_device(
354 user_id=user_id, device_id=device_id
355 )
356
357 yield self.notify_device_update(user_id, device_ids)
358
359 @defer.inlineCallbacks
360 def update_device(self, user_id, device_id, content):
361 """ Update the given device
362
363 Args:
364 user_id (str):
365 device_id (str):
366 content (dict): body of update request
367
368 Returns:
369 defer.Deferred:
370 """
371
372 try:
373 yield self.store.update_device(
374 user_id,
375 device_id,
376 new_display_name=content.get("display_name")
377 )
378 yield self.notify_device_update(user_id, [device_id])
379 except errors.StoreError as e:
380 if e.code == 404:
381 raise errors.NotFoundError()
382 else:
383 raise
384
385 @measure_func("notify_device_update")
386 @defer.inlineCallbacks
387 def notify_device_update(self, user_id, device_ids):
388 """Notify that a user's device(s) has changed. Pokes the notifier, and
389 remote servers if the user is local.
390 """
391 users_who_share_room = yield self.store.get_users_who_share_room_with_user(
392 user_id
393 )
394
395 hosts = set()
396 if self.hs.is_mine_id(user_id):
397 hosts.update(get_domain_from_id(u) for u in users_who_share_room)
398 hosts.discard(self.server_name)
399
400 position = yield self.store.add_device_change_to_streams(
401 user_id, device_ids, list(hosts)
402 )
403
404 for device_id in device_ids:
405 logger.debug(
406 "Notifying about update %r/%r, ID: %r", user_id, device_id,
407 position,
408 )
409
410 room_ids = yield self.store.get_rooms_for_user(user_id)
411
412 yield self.notifier.on_new_event(
413 "device_list_key", position, rooms=room_ids,
414 )
415
416 if hosts:
417 logger.info("Sending device list update notif for %r to: %r", user_id, hosts)
418 for host in hosts:
419 self.federation_sender.send_device_messages(host)
420
409421 @defer.inlineCallbacks
410422 def on_federation_query_user_devices(self, user_id):
411423 stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
472484
473485 if get_domain_from_id(user_id) != origin:
474486 # TODO: Raise?
475 logger.warning("Got device list update edu for %r from %r", user_id, origin)
487 logger.warning(
488 "Got device list update edu for %r/%r from %r",
489 user_id, device_id, origin,
490 )
476491 return
477492
478493 room_ids = yield self.store.get_rooms_for_user(user_id)
479494 if not room_ids:
480495 # We don't share any rooms with this user. Ignore update, as we
481496 # probably won't get any further updates.
497 logger.warning(
498 "Got device list update edu for %r/%r, but don't share a room",
499 user_id, device_id,
500 )
482501 return
502
503 logger.debug(
504 "Received device list update for %r/%r", user_id, device_id,
505 )
483506
484507 self._pending_updates.setdefault(user_id, []).append(
485508 (device_id, stream_id, prev_ids, edu_content)
498521 # This can happen since we batch updates
499522 return
500523
524 for device_id, stream_id, prev_ids, content in pending_updates:
525 logger.debug(
526 "Handling update %r/%r, ID: %r, prev: %r ",
527 user_id, device_id, stream_id, prev_ids,
528 )
529
501530 # Given a list of updates we check if we need to resync. This
502531 # happens if we've missed updates.
503532 resync = yield self._need_to_do_resync(user_id, pending_updates)
533
534 logger.debug("Need to re-sync devices for %r? %r", user_id, resync)
504535
505536 if resync:
506537 # Fetch all devices for the user.
554585 )
555586 devices = []
556587
588 for device in devices:
589 logger.debug(
590 "Handling resync update %r/%r, ID: %r",
591 user_id, device["device_id"], stream_id,
592 )
593
557594 yield self.store.update_remote_device_list_cache(
558595 user_id, devices, stream_id,
559596 )
560597 device_ids = [device["device_id"] for device in devices]
561598 yield self.device_handler.notify_device_update(user_id, device_ids)
599
600 # We clobber the seen updates since we've re-synced from a given
601 # point.
602 self._seen_updates[user_id] = set([stream_id])
562603 else:
563604 # Simply update the single device, since we know that is the only
564605 # change (because of the single prev_id matching the current cache)
571612 user_id, [device_id for device_id, _, _, _ in pending_updates]
572613 )
573614
574 self._seen_updates.setdefault(user_id, set()).update(
575 stream_id for _, stream_id, _, _ in pending_updates
576 )
615 self._seen_updates.setdefault(user_id, set()).update(
616 stream_id for _, stream_id, _, _ in pending_updates
617 )
577618
578619 @defer.inlineCallbacks
579620 def _need_to_do_resync(self, user_id, updates):
584625
585626 extremity = yield self.store.get_device_list_last_stream_id_for_remote(
586627 user_id
628 )
629
630 logger.debug(
631 "Current extremity for %r: %r",
632 user_id, extremity,
587633 )
588634
589635 stream_id_in_updates = set() # stream_ids in updates list
4343 self.appservice_handler = hs.get_application_service_handler()
4444 self.event_creation_handler = hs.get_event_creation_handler()
4545 self.config = hs.config
46 self.enable_room_list_search = hs.config.enable_room_list_search
4647
4748 self.federation = hs.get_federation_client()
4849 hs.get_federation_registry().register_query_handler(
410411 if visibility not in ["public", "private"]:
411412 raise SynapseError(400, "Invalid visibility setting")
412413
414 if visibility == "public" and not self.enable_room_list_search:
415 # The room list has been disabled.
416 raise AuthError(
417 403,
418 "This user is not permitted to publish rooms to the room list"
419 )
420
413421 room = yield self.store.get_room(room_id)
414422 if room is None:
415423 raise SynapseError(400, "Unknown room")
1818 from twisted.internet import defer
1919
2020 from synapse.api.constants import EventTypes, Membership
21 from synapse.api.errors import AuthError
21 from synapse.api.errors import AuthError, SynapseError
2222 from synapse.events import EventBase
2323 from synapse.events.utils import serialize_event
2424 from synapse.types import UserID
5959
6060 If `only_keys` is not None, events from keys will be sent down.
6161 """
62
63 if room_id:
64 blocked = yield self.store.is_room_blocked(room_id)
65 if blocked:
66 raise SynapseError(403, "This room has been blocked on this server")
6267
6368 # send any outstanding server notices to the user.
6469 yield self._server_notices_sender.on_user_syncing(auth_user_id)
4444 SynapseError,
4545 )
4646 from synapse.crypto.event_signing import compute_event_signature
47 from synapse.event_auth import auth_types_for_event
4748 from synapse.events.validator import EventValidator
4849 from synapse.replication.http.federation import (
4950 ReplicationCleanRoomRestServlet,
857858 logger.debug("Not backfilling as no extremities found.")
858859 return
859860
861 # We only want to paginate if we can actually see the events we'll get,
862 # as otherwise we'll just spend a lot of resources to get redacted
863 # events.
864 #
865 # We do this by filtering all the backwards extremities and seeing if
866 # any remain. Given we don't have the extremity events themselves, we
867 # need to actually check the events that reference them.
868 #
869 # *Note*: the spec wants us to keep backfilling until we reach the start
870 # of the room in case we are allowed to see some of the history. However
871 # in practice that causes more issues than it's worth, as a) it's
872 # relatively rare for there to be any visible history and b) even when
873 # there is, it's often sufficiently long ago that clients would stop
874 # attempting to paginate before backfill reached the visible history.
875 #
876 # TODO: If we do do a backfill then we should filter the backwards
877 # extremities to only include those that point to visible portions of
878 # history.
879 #
880 # TODO: Correctly handle the case where we are allowed to see the
881 # forward event but not the backward extremity, e.g. in the case of
882 # initial join of the server where we are allowed to see the join
883 # event but not anything before it. This would require looking at the
884 # state *before* the event, ignoring the special casing certain event
885 # types have.
886
887 forward_events = yield self.store.get_successor_events(
888 list(extremities),
889 )
890
891 extremities_events = yield self.store.get_events(
892 forward_events,
893 check_redacted=False,
894 get_prev_content=False,
895 )
896
897 # We set `check_history_visibility_only` as we might otherwise get false
898 # positives from users having been erased.
899 filtered_extremities = yield filter_events_for_server(
900 self.store, self.server_name, list(extremities_events.values()),
901 redact=False, check_history_visibility_only=True,
902 )
903
904 if not filtered_extremities:
905 defer.returnValue(False)
906
860907 # Check if we reached a point where we should start backfilling.
861908 sorted_extremeties_tuple = sorted(
862909 extremities.items(),
15811628 origin, event,
15821629 state=state,
15831630 auth_events=auth_events,
1631 backfilled=backfilled,
15841632 )
15851633
15861634 # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
16251673 event,
16261674 state=ev_info.get("state"),
16271675 auth_events=ev_info.get("auth_events"),
1676 backfilled=backfilled,
16281677 )
16291678 defer.returnValue(res)
16301679
17471796 )
17481797
17491798 @defer.inlineCallbacks
1750 def _prep_event(self, origin, event, state=None, auth_events=None):
1799 def _prep_event(self, origin, event, state, auth_events, backfilled):
17511800 """
17521801
17531802 Args:
17551804 event:
17561805 state:
17571806 auth_events:
1807 backfilled (bool)
17581808
17591809 Returns:
17601810 Deferred, which resolves to synapse.events.snapshot.EventContext
17961846
17971847 context.rejected = RejectedReason.AUTH_ERROR
17981848
1849 if not context.rejected:
1850 yield self._check_for_soft_fail(event, state, backfilled)
1851
17991852 if event.type == EventTypes.GuestAccess and not context.rejected:
18001853 yield self.maybe_kick_guest_users(event)
18011854
18021855 defer.returnValue(context)
1856
1857 @defer.inlineCallbacks
1858 def _check_for_soft_fail(self, event, state, backfilled):
1859 """Checks if we should soft fail the event; if so, marks the event as
1860 such.
1861
1862 Args:
1863 event (FrozenEvent)
1864 state (dict|None): The state at the event if we don't have all the
1865 event's prev events
1866 backfilled (bool): Whether the event is from backfill
1867
1868 Returns:
1869 Deferred
1870 """
1871 # For new (non-backfilled and non-outlier) events we check if the event
1872 # passes auth based on the current state. If it doesn't then we
1873 # "soft-fail" the event.
1874 do_soft_fail_check = not backfilled and not event.internal_metadata.is_outlier()
1875 if do_soft_fail_check:
1876 extrem_ids = yield self.store.get_latest_event_ids_in_room(
1877 event.room_id,
1878 )
1879
1880 extrem_ids = set(extrem_ids)
1881 prev_event_ids = set(event.prev_event_ids())
1882
1883 if extrem_ids == prev_event_ids:
1884 # If they're the same then the current state is the same as the
1885 # state at the event, so no point rechecking auth for soft fail.
1886 do_soft_fail_check = False
1887
1888 if do_soft_fail_check:
1889 room_version = yield self.store.get_room_version(event.room_id)
1890
1891 # Calculate the "current state".
1892 if state is not None:
1893 # If we're explicitly given the state then we won't have all the
1894 # prev events, and so we have a gap in the graph. In this case
1895 # we want to be a little careful as we might have been down for
1896 # a while and have an incorrect view of the current state,
1897 # however we still want to do checks as gaps are easy to
1898 # maliciously manufacture.
1899 #
1900 # So we use a "current state" that is actually a state
1901 # resolution across the current forward extremities and the
1902 # given state at the event. This should correctly handle cases
1903 # like bans, especially with state res v2.
1904
1905 state_sets = yield self.store.get_state_groups(
1906 event.room_id, extrem_ids,
1907 )
1908 state_sets = list(state_sets.values())
1909 state_sets.append(state)
1910 current_state_ids = yield self.state_handler.resolve_events(
1911 room_version, state_sets, event,
1912 )
1913 current_state_ids = {
1914 k: e.event_id for k, e in iteritems(current_state_ids)
1915 }
1916 else:
1917 current_state_ids = yield self.state_handler.get_current_state_ids(
1918 event.room_id, latest_event_ids=extrem_ids,
1919 )
1920
1921 # Now check if the event passes auth against said current state
1922 auth_types = auth_types_for_event(event)
1923 current_state_ids = [
1924 e for k, e in iteritems(current_state_ids)
1925 if k in auth_types
1926 ]
1927
1928 current_auth_events = yield self.store.get_events(current_state_ids)
1929 current_auth_events = {
1930 (e.type, e.state_key): e for e in current_auth_events.values()
1931 }
1932
1933 try:
1934 self.auth.check(room_version, event, auth_events=current_auth_events)
1935 except AuthError as e:
1936 logger.warn(
1937 "Failed current state auth resolution for %r because %s",
1938 event, e,
1939 )
1940 event.internal_metadata.soft_failed = True
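The extremity shortcut above can be read on its own. A minimal sketch (needs_soft_fail_check is a hypothetical helper, not part of the diff) of when the soft-fail re-auth is actually needed:

def needs_soft_fail_check(event, current_extremity_ids, backfilled):
    # Only new (non-backfilled), non-outlier events are candidates.
    if backfilled or event.internal_metadata.is_outlier():
        return False
    # If the event's prev_events are exactly the current forward
    # extremities, the state at the event *is* the current state, so
    # re-checking auth against the current state would be redundant.
    return set(current_extremity_ids) != set(event.prev_event_ids())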
18031941
18041942 @defer.inlineCallbacks
18051943 def on_query_auth(self, origin, event_id, room_id, remote_auth_chain, rejects,
1717 from twisted.internet import defer
1818
1919 from synapse.api.constants import EventTypes, Membership
20 from synapse.api.errors import AuthError, Codes
20 from synapse.api.errors import AuthError, Codes, SynapseError
2121 from synapse.events.utils import serialize_event
2222 from synapse.events.validator import EventValidator
2323 from synapse.handlers.presence import format_user_presence_state
261261 A JSON serialisable dict with the snapshot of the room.
262262 """
263263
264 blocked = yield self.store.is_room_blocked(room_id)
265 if blocked:
266 raise SynapseError(403, "This room has been blocked on this server")
267
264268 user_id = requester.user.to_string()
265269
266270 membership, member_event_id = yield self._check_in_room_or_world_readable(
242242
243243 self.spam_checker = hs.get_spam_checker()
244244
245 if self.config.block_events_without_consent_error is not None:
245 self._block_events_without_consent_error = (
246 self.config.block_events_without_consent_error
247 )
248
249 # we need to construct a ConsentURIBuilder here, as it checks that the necessary
250 # config options are set, but *only* if we have a configuration for which we are
251 # going to need it.
252 if self._block_events_without_consent_error:
246253 self._consent_uri_builder = ConsentURIBuilder(self.config)
247254
248255 @defer.inlineCallbacks
249256 def create_event(self, requester, event_dict, token_id=None, txn_id=None,
250 prev_events_and_hashes=None):
257 prev_events_and_hashes=None, require_consent=True):
251258 """
252259 Given a dict from a client, create a new event.
253260
268275 where *hashes* is a map from algorithm to hash.
269276
270277 If None, they will be requested from the database.
278
279 require_consent (bool): Whether to check if the requester has
280 consented to the privacy policy.
271281 Raises:
272282 ResourceLimitError if server is blocked to some resource being
273283 exceeded
309319 )
310320
311321 is_exempt = yield self._is_exempt_from_privacy_policy(builder, requester)
312 if not is_exempt:
322 if require_consent and not is_exempt:
313323 yield self.assert_accepted_privacy_policy(requester)
314324
315325 if token_id is not None:
377387 Raises:
378388 ConsentNotGivenError: if the user has not given consent yet
379389 """
380 if self.config.block_events_without_consent_error is None:
390 if self._block_events_without_consent_error is None:
381391 return
382392
383393 # exempt AS users from needing consent
404414 consent_uri = self._consent_uri_builder.build_user_consent_uri(
405415 requester.user.localpart,
406416 )
407 msg = self.config.block_events_without_consent_error % {
417 msg = self._block_events_without_consent_error % {
408418 'consent_uri': consent_uri,
409419 }
410420 raise ConsentNotGivenError(
435445
436446 if event.is_state():
437447 prev_state = yield self.deduplicate_state_event(event, context)
438 logger.info(
439 "Not bothering to persist duplicate state event %s", event.event_id,
440 )
441448 if prev_state is not None:
449 logger.info(
450 "Not bothering to persist state event %s duplicated by %s",
451 event.event_id, prev_state.event_id,
452 )
442453 defer.returnValue(prev_state)
443454
444455 yield self.handle_new_client_event(
815815 if self.is_mine(observed_user):
816816 yield self.invite_presence(observed_user, observer_user)
817817 else:
818 yield self.federation.send_edu(
818 yield self.federation.build_and_send_edu(
819819 destination=observed_user.domain,
820820 edu_type="m.presence_invite",
821821 content={
835835 if self.is_mine(observer_user):
836836 yield self.accept_presence(observed_user, observer_user)
837837 else:
838 self.federation.send_edu(
838 self.federation.build_and_send_edu(
839839 destination=observer_user.domain,
840840 edu_type="m.presence_accept",
841841 content={
847847 state_dict = yield self.get_state(observed_user, as_event=False)
848848 state_dict = format_user_presence_state(state_dict, self.clock.time_msec())
849849
850 self.federation.send_edu(
850 self.federation.build_and_send_edu(
851851 destination=observer_user.domain,
852852 edu_type="m.presence",
853853 content={
146146
147147 @defer.inlineCallbacks
148148 def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
149 """target_user is the user whose displayname is to be changed;
150 auth_user is the user attempting to make this change."""
149 """Set the displayname of a user
150
151 Args:
152 target_user (UserID): the user whose displayname is to be changed.
153 requester (Requester): The user attempting to make this change.
154 new_displayname (str): The displayname to give this user.
155 by_admin (bool): Whether this change was made by an administrator.
156 """
151157 if not self.hs.is_mine(target_user):
152158 raise SynapseError(400, "User is not hosted on this Home Server")
153159
1515
1616 from twisted.internet import defer
1717
18 from synapse.metrics.background_process_metrics import run_as_background_process
19 from synapse.types import get_domain_from_id
20
21 from ._base import BaseHandler
18 from synapse.handlers._base import BaseHandler
19 from synapse.types import ReadReceipt
2220
2321 logger = logging.getLogger(__name__)
2422
3836 self.state = hs.get_state_handler()
3937
4038 @defer.inlineCallbacks
41 def received_client_receipt(self, room_id, receipt_type, user_id,
42 event_id):
43 """Called when a client tells us a local user has read up to the given
44 event_id in the room.
45 """
46 receipt = {
47 "room_id": room_id,
48 "receipt_type": receipt_type,
49 "user_id": user_id,
50 "event_ids": [event_id],
51 "data": {
52 "ts": int(self.clock.time_msec()),
53 }
54 }
55
56 is_new = yield self._handle_new_receipts([receipt])
57
58 if is_new:
59 # fire off a process in the background to send the receipt to
60 # remote servers
61 run_as_background_process(
62 'push_receipts_to_remotes', self._push_remotes, receipt
63 )
64
65 @defer.inlineCallbacks
6639 def _received_remote_receipt(self, origin, content):
6740 """Called when we receive an EDU of type m.receipt from a remote HS.
6841 """
6942 receipts = [
70 {
71 "room_id": room_id,
72 "receipt_type": receipt_type,
73 "user_id": user_id,
74 "event_ids": user_values["event_ids"],
75 "data": user_values.get("data", {}),
76 }
43 ReadReceipt(
44 room_id=room_id,
45 receipt_type=receipt_type,
46 user_id=user_id,
47 event_ids=user_values["event_ids"],
48 data=user_values.get("data", {}),
49 )
7750 for room_id, room_values in content.items()
7851 for receipt_type, users in room_values.items()
7952 for user_id, user_values in users.items()
8962 max_batch_id = None
9063
9164 for receipt in receipts:
92 room_id = receipt["room_id"]
93 receipt_type = receipt["receipt_type"]
94 user_id = receipt["user_id"]
95 event_ids = receipt["event_ids"]
96 data = receipt["data"]
97
9865 res = yield self.store.insert_receipt(
99 room_id, receipt_type, user_id, event_ids, data
66 receipt.room_id,
67 receipt.receipt_type,
68 receipt.user_id,
69 receipt.event_ids,
70 receipt.data,
10071 )
10172
10273 if not res:
11485 # no new receipts
11586 defer.returnValue(False)
11687
117 affected_room_ids = list(set([r["room_id"] for r in receipts]))
88 affected_room_ids = list(set([r.room_id for r in receipts]))
11889
11990 self.notifier.on_new_event(
12091 "receipt_key", max_batch_id, rooms=affected_room_ids
12798 defer.returnValue(True)
12899
129100 @defer.inlineCallbacks
130 def _push_remotes(self, receipt):
131 """Given a receipt, works out which remote servers should be
132 poked and pokes them.
101 def received_client_receipt(self, room_id, receipt_type, user_id,
102 event_id):
103 """Called when a client tells us a local user has read up to the given
104 event_id in the room.
133105 """
134 try:
135 # TODO: optimise this to move some of the work to the workers.
136 room_id = receipt["room_id"]
137 receipt_type = receipt["receipt_type"]
138 user_id = receipt["user_id"]
139 event_ids = receipt["event_ids"]
140 data = receipt["data"]
106 receipt = ReadReceipt(
107 room_id=room_id,
108 receipt_type=receipt_type,
109 user_id=user_id,
110 event_ids=[event_id],
111 data={
112 "ts": int(self.clock.time_msec()),
113 },
114 )
141115
142 users = yield self.state.get_current_user_in_room(room_id)
143 remotedomains = set(get_domain_from_id(u) for u in users)
144 remotedomains = remotedomains.copy()
145 remotedomains.discard(self.server_name)
116 is_new = yield self._handle_new_receipts([receipt])
117 if not is_new:
118 return
146119
147 logger.debug("Sending receipt to: %r", remotedomains)
148
149 for domain in remotedomains:
150 self.federation.send_edu(
151 destination=domain,
152 edu_type="m.receipt",
153 content={
154 room_id: {
155 receipt_type: {
156 user_id: {
157 "event_ids": event_ids,
158 "data": data,
159 }
160 }
161 },
162 },
163 key=(room_id, receipt_type, user_id),
164 )
165 except Exception:
166 logger.exception("Error pushing receipts to remote servers")
120 yield self.federation.send_read_receipt(receipt)
167121
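For illustration, a sketch with hypothetical values of the ReadReceipt that received_client_receipt now builds before handing it off to the federation layer; the field names match the construction above:

from synapse.types import ReadReceipt

receipt = ReadReceipt(
    room_id="!somewhere:example.org",  # hypothetical room
    receipt_type="m.read",
    user_id="@alice:example.org",  # hypothetical local user
    event_ids=["$some_event_id"],
    data={"ts": 1550000000000},  # client read marker timestamp in ms
)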
168122 @defer.inlineCallbacks
169123 def get_receipts_for_room(self, room_id, to_key):
2222 from synapse.api.errors import (
2323 AuthError,
2424 Codes,
25 ConsentNotGivenError,
2526 InvalidCaptchaError,
27 LimitExceededError,
2628 RegistrationError,
2729 SynapseError,
2830 )
5961 self.user_directory_handler = hs.get_user_directory_handler()
6062 self.captcha_client = CaptchaServerHttpClient(hs)
6163 self.identity_handler = self.hs.get_handlers().identity_handler
64 self.ratelimiter = hs.get_registration_ratelimiter()
6265
6366 self._next_generated_user_id = None
6467
148151 threepid=None,
149152 user_type=None,
150153 default_display_name=None,
154 address=None,
151155 ):
152156 """Registers a new client on the server.
153157
166170 api.constants.UserTypes, or None for a normal user.
167171 default_display_name (unicode|None): if set, the new user's displayname
168172 will be set to this. Defaults to 'localpart'.
173 address (str|None): the IP address used to perform the registration.
169174 Returns:
170175 A tuple of (user_id, access_token).
171176 Raises:
205210 token = None
206211 if generate_token:
207212 token = self.macaroon_gen.generate_access_token(user_id)
208 yield self._register_with_store(
213 yield self.register_with_store(
209214 user_id=user_id,
210215 token=token,
211216 password_hash=password_hash,
214219 create_profile_with_displayname=default_display_name,
215220 admin=admin,
216221 user_type=user_type,
222 address=address,
217223 )
218224
219225 if self.hs.config.user_directory_search_all_users:
237243 if default_display_name is None:
238244 default_display_name = localpart
239245 try:
240 yield self._register_with_store(
246 yield self.register_with_store(
241247 user_id=user_id,
242248 token=token,
243249 password_hash=password_hash,
244250 make_guest=make_guest,
245251 create_profile_with_displayname=default_display_name,
252 address=address,
246253 )
247254 except SynapseError:
248255 # if user id is taken, just generate another
304311 )
305312 else:
306313 yield self._join_user_to_room(fake_requester, r)
314 except ConsentNotGivenError as e:
315 # Technically not necessary to pull out this error, though
316 # moving away from bare excepts is a good thing to do.
317 logger.error("Failed to join new user to %r: %r", r, e)
307318 except Exception as e:
308319 logger.error("Failed to join new user to %r: %r", r, e)
309320
336347 user_id, allowed_appservice=service
337348 )
338349
339 yield self._register_with_store(
350 yield self.register_with_store(
340351 user_id=user_id,
341352 password_hash="",
342353 appservice_id=service_id,
512523 token = self.macaroon_gen.generate_access_token(user_id)
513524
514525 if need_register:
515 yield self._register_with_store(
526 yield self.register_with_store(
516527 user_id=user_id,
517528 token=token,
518529 password_hash=password_hash,
589600 ratelimit=False,
590601 )
591602
592 def _register_with_store(self, user_id, token=None, password_hash=None,
593 was_guest=False, make_guest=False, appservice_id=None,
594 create_profile_with_displayname=None, admin=False,
595 user_type=None):
603 def register_with_store(self, user_id, token=None, password_hash=None,
604 was_guest=False, make_guest=False, appservice_id=None,
605 create_profile_with_displayname=None, admin=False,
606 user_type=None, address=None):
596607 """Register user in the datastore.
597608
598609 Args:
611622 admin (boolean): is an admin user?
612623 user_type (str|None): type of user. One of the values from
613624 api.constants.UserTypes, or None for a normal user.
625 address (str|None): the IP address used to perform the registration.
614626
615627 Returns:
616628 Deferred
617629 """
630 # Don't rate limit for app services
631 if appservice_id is None and address is not None:
632 time_now = self.clock.time()
633
634 allowed, time_allowed = self.ratelimiter.can_do_action(
635 address, time_now_s=time_now,
636 rate_hz=self.hs.config.rc_registration.per_second,
637 burst_count=self.hs.config.rc_registration.burst_count,
638 )
639
640 if not allowed:
641 raise LimitExceededError(
642 retry_after_ms=int(1000 * (time_allowed - time_now)),
643 )
644
618645 if self.hs.config.worker_app:
619646 return self._register_client(
620647 user_id=user_id,
626653 create_profile_with_displayname=create_profile_with_displayname,
627654 admin=admin,
628655 user_type=user_type,
656 address=address,
629657 )
630658 else:
631659 return self.store.register(
692720 access_token (str|None): The access token of the newly logged in
693721 device, or None if `inhibit_login` enabled.
694722 bind_email (bool): Whether to bind the email with the identity
695 server
723 server.
696724 bind_msisdn (bool): Whether to bind the msisdn with the identity
697 server
725 server.
698726 """
699727 if self.hs.config.worker_app:
700728 yield self._post_registration_client(
736764 """A user consented to the terms on registration
737765
738766 Args:
739 user_id (str): The user ID that consented
767 user_id (str): The user ID that consented.
740768 consent_version (str): version of the policy the user has
741769 consented to.
742770 """
4343 class RoomListHandler(BaseHandler):
4444 def __init__(self, hs):
4545 super(RoomListHandler, self).__init__(hs)
46 self.enable_room_list_search = hs.config.enable_room_list_search
4647 self.response_cache = ResponseCache(hs, "room_list")
4748 self.remote_response_cache = ResponseCache(hs, "remote_room_list",
4849 timeout_ms=30 * 1000)
6566 appservice and network id to use an appservice specific one.
6667 Setting to None returns all public rooms across all lists.
6768 """
69 if not self.enable_room_list_search:
70 return defer.succeed({
71 "chunk": [],
72 "total_room_count_estimate": 0,
73 })
74
6875 logger.info(
6976 "Getting public room list: limit=%r, since=%r, search=%r, network=%r",
7077 limit, since_token, bool(search_filter), network_tuple,
7178 )
79
7280 if search_filter:
7381 # We explicitly don't bother caching searches or requests for
7482 # appservice specific lists.
440448 def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
441449 search_filter=None, include_all_networks=False,
442450 third_party_instance_id=None,):
451 if not self.enable_room_list_search:
452 defer.returnValue({
453 "chunk": [],
454 "total_room_count_estimate": 0,
455 })
456
443457 if search_filter:
444458 # We currently don't support searching across federation, so we have
445459 # to do it manually without pagination
159159 txn_id=None,
160160 ratelimit=True,
161161 content=None,
162 require_consent=True,
162163 ):
163164 user_id = target.to_string()
164165
184185 token_id=requester.access_token_id,
185186 txn_id=txn_id,
186187 prev_events_and_hashes=prev_events_and_hashes,
188 require_consent=require_consent,
187189 )
188190
189191 # Check if this event matches the previous membership event for the user.
229231 if predecessor:
230232 # It is an upgraded room. Copy over old tags
231233 self.copy_room_tags_and_direct_to_room(
234 predecessor["room_id"], room_id, user_id,
235 )
236 # Move over old push rules
237 self.store.move_push_rules_from_room_to_room_for_user(
232238 predecessor["room_id"], room_id, user_id,
233239 )
234240 elif event.membership == Membership.LEAVE:
300306 third_party_signed=None,
301307 ratelimit=True,
302308 content=None,
309 require_consent=True,
303310 ):
304311 key = (room_id,)
305312
314321 third_party_signed=third_party_signed,
315322 ratelimit=ratelimit,
316323 content=content,
324 require_consent=require_consent,
317325 )
318326
319327 defer.returnValue(result)
330338 third_party_signed=None,
331339 ratelimit=True,
332340 content=None,
341 require_consent=True,
333342 ):
334343 content_specified = bool(content)
335344 if content is None:
511520 ratelimit=ratelimit,
512521 prev_events_and_hashes=prev_events_and_hashes,
513522 content=content,
523 require_consent=require_consent,
514524 )
515525 defer.returnValue(res)
516526
0 # -*- coding: utf-8 -*-
1 # Copyright 2018 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16
17 from twisted.internet import defer
18
19 logger = logging.getLogger(__name__)
20
21
22 class StateDeltasHandler(object):
23
24 def __init__(self, hs):
25 self.store = hs.get_datastore()
26
27 @defer.inlineCallbacks
28 def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
29 """Given two events check if the `key_name` field in content changed
30 from not matching `public_value` to doing so.
31
32 For example, check if `history_visibility` (`key_name`) changed from
33 `shared` to `world_readable` (`public_value`).
34
35 Returns:
36 None if the field in the events either both match `public_value`
37 or if neither do, i.e. there has been no change.
38 True if it didn't match `public_value` but now does
39 False if it did match `public_value` but now doesn't
40 """
41 prev_event = None
42 event = None
43 if prev_event_id:
44 prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
45
46 if event_id:
47 event = yield self.store.get_event(event_id, allow_none=True)
48
49 if not event and not prev_event:
50 logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
51 defer.returnValue(None)
52
53 prev_value = None
54 value = None
55
56 if prev_event:
57 prev_value = prev_event.content.get(key_name)
58
59 if event:
60 value = event.content.get(key_name)
61
62 logger.debug("prev_value: %r -> value: %r", prev_value, value)
63
64 if value == public_value and prev_value != public_value:
65 defer.returnValue(True)
66 elif value != public_value and prev_value == public_value:
67 defer.returnValue(False)
68 else:
69 defer.returnValue(None)
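A minimal usage sketch (the caller _on_history_visibility_delta is hypothetical, not part of the diff), matching the docstring's own example of watching history_visibility flip to or from world_readable:

@defer.inlineCallbacks
def _on_history_visibility_delta(self, room_id, prev_event_id, event_id):
    change = yield self._get_key_change(
        prev_event_id, event_id,
        key_name="history_visibility",
        public_value="world_readable",
    )
    if change is True:
        logger.debug("Room %r became world-readable", room_id)
    elif change is False:
        logger.debug("Room %r stopped being world-readable", room_id)
    # change is None means no change either way, so nothing to do.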
3737 from synapse.visibility import filter_events_for_client
3838
3939 logger = logging.getLogger(__name__)
40
41 # Debug logger for https://github.com/matrix-org/synapse/issues/4422
42 issue4422_logger = logging.getLogger("synapse.handler.sync.4422_debug")
4043
4144
4245 # Counts the number of times we returned a non-empty sync. `type` is one of
961964
962965 yield self._generate_sync_entry_for_groups(sync_result_builder)
963966
967 # debug for https://github.com/matrix-org/synapse/issues/4422
968 for joined_room in sync_result_builder.joined:
969 room_id = joined_room.room_id
970 if room_id in newly_joined_rooms:
971 issue4422_logger.debug(
972 "Sync result for newly joined room %s: %r",
973 room_id, joined_room,
974 )
975
964976 defer.returnValue(SyncResult(
965977 presence=sync_result_builder.presence,
966978 account_data=sync_result_builder.account_data,
14241436 old_mem_ev = yield self.store.get_event(
14251437 old_mem_ev_id, allow_none=True
14261438 )
1439
1440 # debug for #4422
1441 if has_join:
1442 prev_membership = None
1443 if old_mem_ev:
1444 prev_membership = old_mem_ev.membership
1445 issue4422_logger.debug(
1446 "Previous membership for room %s with join: %s (event %s)",
1447 room_id, prev_membership, old_mem_ev_id,
1448 )
1449
14271450 if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
14281451 newly_joined_rooms.append(room_id)
14291452
15181541 for room_id in sync_result_builder.joined_room_ids:
15191542 room_entry = room_to_events.get(room_id, None)
15201543
1544 newly_joined = room_id in newly_joined_rooms
15211545 if room_entry:
15221546 events, start_key = room_entry
15231547
15241548 prev_batch_token = now_token.copy_and_replace("room_key", start_key)
15251549
1526 room_entries.append(RoomSyncResultBuilder(
1550 entry = RoomSyncResultBuilder(
15271551 room_id=room_id,
15281552 rtype="joined",
15291553 events=events,
1530 newly_joined=room_id in newly_joined_rooms,
1554 newly_joined=newly_joined,
15311555 full_state=False,
1532 since_token=None if room_id in newly_joined_rooms else since_token,
1556 since_token=None if newly_joined else since_token,
15331557 upto_token=prev_batch_token,
1534 ))
1558 )
15351559 else:
1536 room_entries.append(RoomSyncResultBuilder(
1560 entry = RoomSyncResultBuilder(
15371561 room_id=room_id,
15381562 rtype="joined",
15391563 events=[],
1540 newly_joined=room_id in newly_joined_rooms,
1564 newly_joined=newly_joined,
15411565 full_state=False,
15421566 since_token=since_token,
15431567 upto_token=since_token,
1544 ))
1568 )
1569
1570 if newly_joined:
1571 # debugging for https://github.com/matrix-org/synapse/issues/4422
1572 issue4422_logger.debug(
1573 "RoomSyncResultBuilder events for newly joined room %s: %r",
1574 room_id, entry.events,
1575 )
1576 room_entries.append(entry)
15451577
15461578 defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms))
15471579
16621694 newly_joined_room=newly_joined,
16631695 )
16641696
1697 if newly_joined:
1698 # debug for https://github.com/matrix-org/synapse/issues/4422
1699 issue4422_logger.debug(
1700 "Timeline events after filtering in newly-joined room %s: %r",
1701 room_id, batch,
1702 )
1703
16651704 # When we join the room (or the client requests full_state), we should
16661705 # send down any existing tags. Usually the user won't have tags in a
16671706 # newly joined room, unless either a) they've joined before or b) the
18931932
18941933
18951934 class SyncResultBuilder(object):
1896 "Used to help build up a new SyncResult for a user"
1935 """Used to help build up a new SyncResult for a user
1936
1937 Attributes:
1938 sync_config (SyncConfig)
1939 full_state (bool)
1940 since_token (StreamToken)
1941 now_token (StreamToken)
1942 joined_room_ids (list[str])
1943
1944 # The following mirror the fields in a sync response
1945 presence (list)
1946 account_data (list)
1947 joined (list[JoinedSyncResult])
1948 invited (list[InvitedSyncResult])
1949 archived (list[ArchivedSyncResult])
1950 device (list)
1951 groups (GroupsSyncResult|None)
1952 to_device (list)
1953 """
18971954 def __init__(self, sync_config, full_state, since_token, now_token,
18981955 joined_room_ids):
18991956 """
19001957 Args:
1901 sync_config(SyncConfig)
1902 full_state(bool): The full_state flag as specified by user
1903 since_token(StreamToken): The token supplied by user, or None.
1904 now_token(StreamToken): The token to sync up to.
1958 sync_config (SyncConfig)
1959 full_state (bool): The full_state flag as specified by user
1960 since_token (StreamToken): The token supplied by user, or None.
1961 now_token (StreamToken): The token to sync up to.
1962 joined_room_ids (list[str]): List of rooms the user is joined to
19051963 """
19061964 self.sync_config = sync_config
19071965 self.full_state = full_state
19291987 Args:
19301988 room_id(str)
19311989 rtype(str): One of `"joined"` or `"archived"`
1932 events(list): List of events to include in the room, (more events
1933 may be added when generating result).
1990 events(list[FrozenEvent]): List of events to include in the room
1991 (more events may be added when generating result).
19341992 newly_joined(bool): If the user has newly joined the room
19351993 full_state(bool): Whether the full state should be sent in result
19361994 since_token(StreamToken): Earliest point to return events from, or None
230230 for domain in set(get_domain_from_id(u) for u in users):
231231 if domain != self.server_name:
232232 logger.debug("sending typing update to %s", domain)
233 self.federation.send_edu(
233 self.federation.build_and_send_edu(
234234 destination=domain,
235235 edu_type="m.typing",
236236 content={
1414
1515 import logging
1616
17 from six import iteritems
17 from six import iteritems, iterkeys
1818
1919 from twisted.internet import defer
2020
2121 import synapse.metrics
2222 from synapse.api.constants import EventTypes, JoinRules, Membership
23 from synapse.handlers.state_deltas import StateDeltasHandler
2324 from synapse.metrics.background_process_metrics import run_as_background_process
2425 from synapse.storage.roommember import ProfileInfo
2526 from synapse.types import get_localpart_from_id
2829 logger = logging.getLogger(__name__)
2930
3031
31 class UserDirectoryHandler(object):
32 class UserDirectoryHandler(StateDeltasHandler):
3233 """Handles querying of and keeping updated the user_directory.
3334
3435 N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY
3738 world_readable or publicly joinable room. We keep a database table up to date
3839 by streaming changes of the current state and recalculating whether users should
3940 be in the directory or not when necessary.
40
41 For each user in the directory we also store a room_id which is public and that the
42 user is joined to. This allows us to ignore history_visibility and join_rules changes
43 for that user in all other public rooms, as we know they'll still be in at least
44 one public room.
4541 """
4642
47 INITIAL_ROOM_SLEEP_MS = 50
48 INITIAL_ROOM_SLEEP_COUNT = 100
49 INITIAL_ROOM_BATCH_SIZE = 100
50 INITIAL_USER_SLEEP_MS = 10
51
5243 def __init__(self, hs):
44 super(UserDirectoryHandler, self).__init__(hs)
45
5346 self.store = hs.get_datastore()
5447 self.state = hs.get_state_handler()
5548 self.server_name = hs.hostname
5851 self.is_mine_id = hs.is_mine_id
5952 self.update_user_directory = hs.config.update_user_directory
6053 self.search_all_users = hs.config.user_directory_search_all_users
61
62 # When start up for the first time we need to populate the user_directory.
63 # This is a set of user_id's we've inserted already
64 self.initially_handled_users = set()
65 self.initially_handled_users_in_public = set()
66
67 self.initially_handled_users_share = set()
68 self.initially_handled_users_share_private_room = set()
69
7054 # The current position in the current_state_delta stream
7155 self.pos = None
7256
129113 # Support users are for diagnostics and should not appear in the user directory.
130114 if not is_support:
131115 yield self.store.update_profile_in_user_dir(
132 user_id, profile.display_name, profile.avatar_url, None
116 user_id, profile.display_name, profile.avatar_url
133117 )
134118
135119 @defer.inlineCallbacks
139123 # FIXME(#3714): We should probably do this in the same worker as all
140124 # the other changes.
141125 yield self.store.remove_from_user_dir(user_id)
142 yield self.store.remove_from_user_in_public_room(user_id)
143126
144127 @defer.inlineCallbacks
145128 def _unsafe_process(self):
147130 if self.pos is None:
148131 self.pos = yield self.store.get_user_directory_stream_pos()
149132
150 # If still None then we need to do the initial fill of directory
133 # If still None then the initial background update hasn't happened yet
151134 if self.pos is None:
152 yield self._do_initial_spam()
153 self.pos = yield self.store.get_user_directory_stream_pos()
135 defer.returnValue(None)
154136
155137 # Loop round handling deltas until we're up to date
156138 while True:
170152 )
171153
172154 yield self.store.update_user_directory_stream_pos(self.pos)
173
174 @defer.inlineCallbacks
175 def _do_initial_spam(self):
176 """Populates the user_directory from the current state of the DB, used
177 when synapse first starts with user_directory support
178 """
179 new_pos = yield self.store.get_max_stream_id_in_current_state_deltas()
180
181 # Delete any existing entries just in case there are any
182 yield self.store.delete_all_from_user_dir()
183
184 # We process by going through each existing room at a time.
185 room_ids = yield self.store.get_all_rooms()
186
187 logger.info("Doing initial update of user directory. %d rooms", len(room_ids))
188 num_processed_rooms = 0
189
190 for room_id in room_ids:
191 logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
192 yield self._handle_initial_room(room_id)
193 num_processed_rooms += 1
194 yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
195
196 logger.info("Processed all rooms.")
197
198 if self.search_all_users:
199 num_processed_users = 0
200 user_ids = yield self.store.get_all_local_users()
201 logger.info(
202 "Doing initial update of user directory. %d users", len(user_ids)
203 )
204 for user_id in user_ids:
205 # We add profiles for all users even if they don't match the
206 # include pattern, just in case we want to change it in future
207 logger.info(
208 "Handling user %d/%d", num_processed_users + 1, len(user_ids)
209 )
210 yield self._handle_local_user(user_id)
211 num_processed_users += 1
212 yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.0)
213
214 logger.info("Processed all users")
215
216 self.initially_handled_users = None
217 self.initially_handled_users_in_public = None
218 self.initially_handled_users_share = None
219 self.initially_handled_users_share_private_room = None
220
221 yield self.store.update_user_directory_stream_pos(new_pos)
222
223 @defer.inlineCallbacks
224 def _handle_initial_room(self, room_id):
225 """Called when we initially fill out user_directory one room at a time
226 """
227 is_in_room = yield self.store.is_host_joined(room_id, self.server_name)
228 if not is_in_room:
229 return
230
231 is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
232 room_id
233 )
234
235 users_with_profile = yield self.state.get_current_user_in_room(room_id)
236 user_ids = set(users_with_profile)
237 unhandled_users = user_ids - self.initially_handled_users
238
239 yield self.store.add_profiles_to_user_dir(
240 room_id,
241 {user_id: users_with_profile[user_id] for user_id in unhandled_users},
242 )
243
244 self.initially_handled_users |= unhandled_users
245
246 if is_public:
247 yield self.store.add_users_to_public_room(
248 room_id, user_ids=user_ids - self.initially_handled_users_in_public
249 )
250 self.initially_handled_users_in_public |= user_ids
251
252 # We now go and figure out the new users who share rooms with user entries
253 # We sleep aggressively here as otherwise it can starve resources.
254 # We also batch up inserts/updates, but try to avoid too many at once.
255 to_insert = set()
256 to_update = set()
257 count = 0
258 for user_id in user_ids:
259 if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
260 yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
261
262 if not self.is_mine_id(user_id):
263 count += 1
264 continue
265
266 if self.store.get_if_app_services_interested_in_user(user_id):
267 count += 1
268 continue
269
270 for other_user_id in user_ids:
271 if user_id == other_user_id:
272 continue
273
274 if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
275 yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
276 count += 1
277
278 user_set = (user_id, other_user_id)
279
280 if user_set in self.initially_handled_users_share_private_room:
281 continue
282
283 if user_set in self.initially_handled_users_share:
284 if is_public:
285 continue
286 to_update.add(user_set)
287 else:
288 to_insert.add(user_set)
289
290 if is_public:
291 self.initially_handled_users_share.add(user_set)
292 else:
293 self.initially_handled_users_share_private_room.add(user_set)
294
295 if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
296 yield self.store.add_users_who_share_room(
297 room_id, not is_public, to_insert
298 )
299 to_insert.clear()
300
301 if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
302 yield self.store.update_users_who_share_room(
303 room_id, not is_public, to_update
304 )
305 to_update.clear()
306
307 if to_insert:
308 yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
309 to_insert.clear()
310
311 if to_update:
312 yield self.store.update_users_who_share_room(
313 room_id, not is_public, to_update
314 )
315 to_update.clear()
316155
317156 @defer.inlineCallbacks
318157 def _handle_deltas(self, deltas):
355194 user_ids = yield self.store.get_users_in_dir_due_to_room(
356195 room_id
357196 )
197
358198 for user_id in user_ids:
359199 yield self._handle_remove_user(room_id, user_id)
360200 return
435275 # ignore the change
436276 return
437277
438 if change:
439 users_with_profile = yield self.state.get_current_user_in_room(room_id)
440 for user_id, profile in iteritems(users_with_profile):
441 yield self._handle_new_user(room_id, user_id, profile)
442 else:
443 users = yield self.store.get_users_in_public_due_to_room(room_id)
444 for user_id in users:
445 yield self._handle_remove_user(room_id, user_id)
278 users_with_profile = yield self.state.get_current_user_in_room(room_id)
279
280 # Remove every user from the sharing tables for that room.
281 for user_id in iterkeys(users_with_profile):
282 yield self.store.remove_user_who_share_room(user_id, room_id)
283
284 # Then, re-add them to the tables.
285 # NOTE: this is not the most efficient method, as handle_new_user sets
286 # up local_user -> other_user and other_user_whos_local -> local_user,
287 # which, when run over an entire room, will result in the same values
288 # being added multiple times. The batching upserts shouldn't make this
289 # too bad, though.
290 for user_id, profile in iteritems(users_with_profile):
291 yield self._handle_new_user(room_id, user_id, profile)
446292
447293 @defer.inlineCallbacks
448294 def _handle_local_user(self, user_id):
456302
457303 row = yield self.store.get_user_in_directory(user_id)
458304 if not row:
459 yield self.store.add_profiles_to_user_dir(None, {user_id: profile})
305 yield self.store.update_profile_in_user_dir(
306 user_id, profile.display_name, profile.avatar_url
307 )
460308
461309 @defer.inlineCallbacks
462310 def _handle_new_user(self, room_id, user_id, profile):
468316 """
469317 logger.debug("Adding new user to dir, %r", user_id)
470318
471 row = yield self.store.get_user_in_directory(user_id)
472 if not row:
473 yield self.store.add_profiles_to_user_dir(room_id, {user_id: profile})
319 yield self.store.update_profile_in_user_dir(
320 user_id, profile.display_name, profile.avatar_url
321 )
474322
475323 is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
476324 room_id
477325 )
326 # Now we update users who share rooms with users.
327 users_with_profile = yield self.state.get_current_user_in_room(room_id)
478328
479329 if is_public:
480 row = yield self.store.get_user_in_public_room(user_id)
481 if not row:
482 yield self.store.add_users_to_public_room(room_id, [user_id])
330 yield self.store.add_users_in_public_rooms(room_id, (user_id,))
483331 else:
484 logger.debug("Not adding new user to public dir, %r", user_id)
485
486 # Now we update users who share rooms with users. We do this by getting
487 # all the current users in the room and seeing which aren't already
488 # marked in the database as sharing with `user_id`
489
490 users_with_profile = yield self.state.get_current_user_in_room(room_id)
491
492 to_insert = set()
493 to_update = set()
494
495 is_appservice = self.store.get_if_app_services_interested_in_user(user_id)
496
497 # First, if they're our user then we need to update for every user
498 if self.is_mine_id(user_id) and not is_appservice:
499 # Returns a map of other_user_id -> shared_private. We only need
500 # to update mappings if for users that either don't share a room
501 # already (aren't in the map) or, if the room is private, those that
502 # only share a public room.
503 user_ids_shared = yield self.store.get_users_who_share_room_from_dir(
504 user_id
505 )
506
332 to_insert = set()
333
334 # First, if they're our user then we need to update for every user
335 if self.is_mine_id(user_id):
336
337 is_appservice = self.store.get_if_app_services_interested_in_user(
338 user_id
339 )
340
341 # We don't care about appservice users.
342 if not is_appservice:
343 for other_user_id in users_with_profile:
344 if user_id == other_user_id:
345 continue
346
347 to_insert.add((user_id, other_user_id))
348
349 # Next we need to update for every local user in the room
507350 for other_user_id in users_with_profile:
508351 if user_id == other_user_id:
509352 continue
510353
511 shared_is_private = user_ids_shared.get(other_user_id)
512 if shared_is_private is True:
513 # We've already marked in the database they share a private room
514 continue
515 elif shared_is_private is False:
516 # They already share a public room, so only update if this is
517 # a private room
518 if not is_public:
519 to_update.add((user_id, other_user_id))
520 elif shared_is_private is None:
521 # This is the first time they both share a room
522 to_insert.add((user_id, other_user_id))
523
524 # Next we need to update for every local user in the room
525 for other_user_id in users_with_profile:
526 if user_id == other_user_id:
527 continue
528
529 is_appservice = self.store.get_if_app_services_interested_in_user(
530 other_user_id
531 )
532 if self.is_mine_id(other_user_id) and not is_appservice:
533 shared_is_private = yield self.store.get_if_users_share_a_room(
534 other_user_id, user_id
354 is_appservice = self.store.get_if_app_services_interested_in_user(
355 other_user_id
535356 )
536 if shared_is_private is True:
537 # We've already marked in the database they share a private room
538 continue
539 elif shared_is_private is False:
540 # They already share a public room, so only update if this is
541 # a private room
542 if not is_public:
543 to_update.add((other_user_id, user_id))
544 elif shared_is_private is None:
545 # This is the first time they both share a room
357 if self.is_mine_id(other_user_id) and not is_appservice:
546358 to_insert.add((other_user_id, user_id))
547359
548 if to_insert:
549 yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
550
551 if to_update:
552 yield self.store.update_users_who_share_room(
553 room_id, not is_public, to_update
554 )
360 if to_insert:
361 yield self.store.add_users_who_share_private_room(room_id, to_insert)
555362
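As an illustrative aside (not part of the upstream diff): the simplified private-room sharing update above, reduced to a pure function. For a private room, every (local user, other user) pairing that involves the new joiner is recorded; public rooms are tracked separately via add_users_in_public_rooms. `is_mine` and `is_appservice` are hypothetical stand-ins for the homeserver checks.

    def pairs_to_insert(user_id, users_in_room, is_mine, is_appservice):
        to_insert = set()
        # if the new joiner is ours, pair them with everyone else in the room
        if is_mine(user_id) and not is_appservice(user_id):
            to_insert.update(
                (user_id, other) for other in users_in_room if other != user_id
            )
        # then pair every other local, non-appservice user with the new joiner
        for other in users_in_room:
            if other != user_id and is_mine(other) and not is_appservice(other):
                to_insert.add((other, user_id))
        return to_insert

    assert pairs_to_insert(
        "@new:example.com",
        {"@new:example.com", "@old:example.com", "@remote:other.org"},
        is_mine=lambda u: u.endswith(":example.com"),
        is_appservice=lambda u: False,
    ) == {
        ("@new:example.com", "@old:example.com"),
        ("@old:example.com", "@new:example.com"),
    }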
556363 @defer.inlineCallbacks
557364 def _handle_remove_user(self, room_id, user_id):
558 """Called when we might need to remove user to directory
365 """Called when we might need to remove user from directory
559366
560367 Args:
561368 room_id (str): room_id that user left or stopped being public that
562369 user_id (str)
563370 """
564 logger.debug("Maybe removing user %r", user_id)
565
566 row = yield self.store.get_user_in_directory(user_id)
567 update_user_dir = row and row["room_id"] == room_id
568
569 row = yield self.store.get_user_in_public_room(user_id)
570 update_user_in_public = row and row["room_id"] == room_id
571
572 if update_user_in_public or update_user_dir:
573 # XXX: Make this faster?
574 rooms = yield self.store.get_rooms_for_user(user_id)
575 for j_room_id in rooms:
576 if not update_user_in_public and not update_user_dir:
577 break
578
579 is_in_room = yield self.store.is_host_joined(
580 j_room_id, self.server_name
581 )
582
583 if not is_in_room:
584 continue
585
586 if update_user_dir:
587 update_user_dir = False
588 yield self.store.update_user_in_user_dir(user_id, j_room_id)
589
590 is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
591 j_room_id
592 )
593
594 if update_user_in_public and is_public:
595 yield self.store.update_user_in_public_user_list(user_id, j_room_id)
596 update_user_in_public = False
597
598 if update_user_dir:
371 logger.debug("Removing user %r", user_id)
372
373 # Remove user from sharing tables
374 yield self.store.remove_user_who_share_room(user_id, room_id)
375
376 # Are they still in any rooms? If not, remove them entirely.
377 rooms_user_is_in = yield self.store.get_user_dir_rooms_user_is_in(user_id)
378
379 if len(rooms_user_is_in) == 0:
599380 yield self.store.remove_from_user_dir(user_id)
600 elif update_user_in_public:
601 yield self.store.remove_from_user_in_public_room(user_id)
602
603 # Now handle users_who_share_rooms.
604
605 # Get a list of user tuples that were in the DB due to this room and
606 # users (this includes tuples where the other user matches `user_id`)
607 user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
608 user_id, room_id
609 )
610
611 for user_id, other_user_id in user_tuples:
612 # For each user tuple get a list of rooms that they still share,
613 # trying to find a private room, and update the entry in the DB
614 rooms = yield self.store.get_rooms_in_common_for_users(
615 user_id, other_user_id
616 )
617
618 # If they don't share a room anymore, remove the mapping
619 if not rooms:
620 yield self.store.remove_user_who_share_room(user_id, other_user_id)
621 continue
622
623 found_public_share = None
624 for j_room_id in rooms:
625 is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
626 j_room_id
627 )
628
629 if is_public:
630 found_public_share = j_room_id
631 else:
632 found_public_share = None
633 yield self.store.update_users_who_share_room(
634 room_id, not is_public, [(user_id, other_user_id)]
635 )
636 break
637
638 if found_public_share:
639 yield self.store.update_users_who_share_room(
640 room_id, not is_public, [(user_id, other_user_id)]
641 )
642381
643382 @defer.inlineCallbacks
644383 def _handle_profile_change(self, user_id, room_id, prev_event_id, event_id):
664403 new_avatar = event.content.get("avatar_url")
665404
666405 if prev_name != new_name or prev_avatar != new_avatar:
667 yield self.store.update_profile_in_user_dir(
668 user_id, new_name, new_avatar, room_id
669 )
670
671 @defer.inlineCallbacks
672 def _get_key_change(self, prev_event_id, event_id, key_name, public_value):
673 """Given two events check if the `key_name` field in content changed
674 from not matching `public_value` to doing so.
675
676 For example, check if `history_visibility` (`key_name`) changed from
677 `shared` to `world_readable` (`public_value`).
678
679 Returns:
680 None if the field in the events either both match `public_value`
681 or if neither do, i.e. there has been no change.
682 True if it didn't match `public_value` but now does
683 False if it did match `public_value` but now doesn't
684 """
685 prev_event = None
686 event = None
687 if prev_event_id:
688 prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
689
690 if event_id:
691 event = yield self.store.get_event(event_id, allow_none=True)
692
693 if not event and not prev_event:
694 logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
695 defer.returnValue(None)
696
697 prev_value = None
698 value = None
699
700 if prev_event:
701 prev_value = prev_event.content.get(key_name)
702
703 if event:
704 value = event.content.get(key_name)
705
706 logger.debug("prev_value: %r -> value: %r", prev_value, value)
707
708 if value == public_value and prev_value != public_value:
709 defer.returnValue(True)
710 elif value != public_value and prev_value == public_value:
711 defer.returnValue(False)
712 else:
713 defer.returnValue(None)
406 yield self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
6767 TLS policy to use for fetching .well-known files. None to use a default
6868 (browser-like) implementation.
6969
70 srv_resolver (SrvResolver|None):
70 _srv_resolver (SrvResolver|None):
7171 SRVResolver impl to use for looking up SRV records. None to use a default
72 implementation.
73
74 _well_known_cache (TTLCache|None):
75 TTLCache impl for storing cached well-known lookups. None to use a default
7276 implementation.
7377 """
7478
188188 self._cooperator = Cooperator(scheduler=schedule)
189189
190190 @defer.inlineCallbacks
191 def _send_request_with_optional_trailing_slash(
192 self,
193 request,
194 try_trailing_slash_on_400=False,
195 **send_request_args
196 ):
197 """Wrapper for _send_request which can optionally retry the request
198 upon receiving a combination of a 400 HTTP response code and an
199 'M_UNRECOGNIZED' errcode. This is a workaround for Synapse <= v0.99.3
200 due to #3622.
201
202 Args:
203 request (MatrixFederationRequest): details of request to be sent
204 try_trailing_slash_on_400 (bool): Whether, on receiving a 400
205 'M_UNRECOGNIZED' from the server, to retry the request with a
206 trailing slash appended to the request path.
207 send_request_args (Dict): A dictionary of arguments to pass to
208 `_send_request()`.
209
210 Raises:
211 HttpResponseException: If we get an HTTP response code >= 300
212 (except 429).
213
214 Returns:
215 Deferred[Dict]: Parsed JSON response body.
216 """
217 try:
218 response = yield self._send_request(
219 request, **send_request_args
220 )
221 except HttpResponseException as e:
222 # Received an HTTP error >= 300. Check if it meets the requirements
223 # to retry with a trailing slash
224 if not try_trailing_slash_on_400:
225 raise
226
227 if e.code != 400 or e.to_synapse_error().errcode != "M_UNRECOGNIZED":
228 raise
229
230 # Retry with a trailing slash if we received a 400 with
231 # 'M_UNRECOGNIZED' which some endpoints can return when omitting a
232 # trailing slash on Synapse <= v0.99.3.
233 logger.info("Retrying request with trailing slash")
234 request.path += "/"
235
236 response = yield self._send_request(
237 request, **send_request_args
238 )
239
240 defer.returnValue(response)
241
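A minimal, self-contained sketch of the retry rule implemented above (not part of the upstream diff; `FakeError` and `send` are hypothetical stand-ins for HttpResponseException and _send_request):

    class FakeError(Exception):
        def __init__(self, code, errcode):
            self.code = code
            self.errcode = errcode

    def send(path):
        # pretend the remote only understands the trailing-slash form
        if not path.endswith("/"):
            raise FakeError(400, "M_UNRECOGNIZED")
        return {"ok": True, "path": path}

    def send_with_optional_trailing_slash(path, try_trailing_slash_on_400=True):
        try:
            return send(path)
        except FakeError as e:
            if not try_trailing_slash_on_400:
                raise
            if e.code != 400 or e.errcode != "M_UNRECOGNIZED":
                raise
            # retry once with the trailing slash appended
            return send(path + "/")

    assert send_with_optional_trailing_slash("/send/1")["path"] == "/send/1/"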
242 @defer.inlineCallbacks
191243 def _send_request(
192244 self,
193245 request,
195247 timeout=None,
196248 long_retries=False,
197249 ignore_backoff=False,
198 backoff_on_404=False
250 backoff_on_404=False,
199251 ):
200252 """
201253 Sends a request to the given server.
472524 json_data_callback=None,
473525 long_retries=False, timeout=None,
474526 ignore_backoff=False,
475 backoff_on_404=False):
527 backoff_on_404=False,
528 try_trailing_slash_on_400=False):
476529 """ Sends the specifed json data using PUT
477530
478531 Args:
492545 and try the request anyway.
493546 backoff_on_404 (bool): True if we should count a 404 response as
494547 a failure of the server (and should therefore back off future
495 requests)
548 requests).
549 try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED
550 response we should try appending a trailing slash to the end
551 of the request. Workaround for #3622 in Synapse <= v0.99.3. This
552 will be attempted before backing off if backing off has been
553 enabled.
496554
497555 Returns:
498556 Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
508566 RequestSendFailed: If there were problems connecting to the
509567 remote, due to e.g. DNS failures, connection timeouts etc.
510568 """
511
512569 request = MatrixFederationRequest(
513570 method="PUT",
514571 destination=destination,
518575 json=data,
519576 )
520577
521 response = yield self._send_request(
578 response = yield self._send_request_with_optional_trailing_slash(
522579 request,
580 try_trailing_slash_on_400,
581 backoff_on_404=backoff_on_404,
582 ignore_backoff=ignore_backoff,
523583 long_retries=long_retries,
524584 timeout=timeout,
525 ignore_backoff=ignore_backoff,
526 backoff_on_404=backoff_on_404,
527585 )
528586
529587 body = yield _handle_json_response(
530588 self.hs.get_reactor(), self.default_timeout, request, response,
531589 )
590
532591 defer.returnValue(body)
533592
534593 @defer.inlineCallbacks
591650
592651 @defer.inlineCallbacks
593652 def get_json(self, destination, path, args=None, retry_on_dns_fail=True,
594 timeout=None, ignore_backoff=False):
653 timeout=None, ignore_backoff=False,
654 try_trailing_slash_on_400=False):
595655 """ GETs some json from the given host homeserver and path
596656
597657 Args:
605665 be retried.
606666 ignore_backoff (bool): true to ignore the historical backoff data
607667 and try the request anyway.
668 try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED
669 response we should try appending a trailing slash to the end of
670 the request. Workaround for #3622 in Synapse <= v0.99.3.
608671 Returns:
609672 Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The
610673 result will be the decoded JSON body.
630693 query=args,
631694 )
632695
633 response = yield self._send_request(
696 response = yield self._send_request_with_optional_trailing_slash(
634697 request,
698 try_trailing_slash_on_400,
699 backoff_on_404=False,
700 ignore_backoff=ignore_backoff,
635701 retry_on_dns_fail=retry_on_dns_fail,
636702 timeout=timeout,
637 ignore_backoff=ignore_backoff,
638703 )
639704
640705 body = yield _handle_json_response(
641706 self.hs.get_reactor(), self.default_timeout, request, response,
642707 )
708
643709 defer.returnValue(body)
644710
645711 @defer.inlineCallbacks
7272 """
7373 return self._auth_handler.check_user_exists(user_id)
7474
75 def register(self, localpart):
76 """Registers a new user with given localpart
75 @defer.inlineCallbacks
76 def register(self, localpart, displayname=None):
77 """Registers a new user with given localpart and optional
78 displayname.
79
80 Args:
81 localpart (str): The localpart of the new user.
82 displayname (str|None): The displayname of the new user. If None,
83 the user's displayname will default to `localpart`.
7784
7885 Returns:
7986 Deferred: a 2-tuple of (user_id, access_token)
8087 """
88 # Register the user
8189 reg = self.hs.get_registration_handler()
82 return reg.register(localpart=localpart)
90 user_id, access_token = yield reg.register(
91 localpart=localpart, default_display_name=displayname,
92 )
93
94 defer.returnValue((user_id, access_token))
8395
8496 @defer.inlineCallbacks
8597 def invalidate_access_token(self, access_token):
177177 self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
178178 )
179179
180 self.replication_deferred = ObservableDeferred(defer.Deferred())
181
182180 # This is not a very cheap test to perform, but it's only executed
183181 # when rendering the metrics page, which is likely once per minute at
184182 # most when scraping it.
204202
205203 def add_replication_callback(self, cb):
206204 """Add a callback that will be called when some new data is available.
207 Callback is not given any arguments.
205 Callback is not given any arguments. It should *not* return a Deferred - if
206 it needs to do any asynchronous work, it should be kicked off as a
207 background process via run_as_background_process.
208208 """
209209 self.replication_callbacks.append(cb)
210210
516516
517517 def notify_replication(self):
518518 """Notify the any replication listeners that there's a new event"""
519 with PreserveLoggingContext():
520 deferred = self.replication_deferred
521 self.replication_deferred = ObservableDeferred(defer.Deferred())
522 deferred.callback(None)
523
524 # the callbacks may well outlast the current request, so we run
525 # them in the sentinel logcontext.
526 #
527 # (ideally it would be up to the callbacks to know if they were
528 # starting off background processes and drop the logcontext
529 # accordingly, but that requires more changes)
530 for cb in self.replication_callbacks:
531 cb()
532
533 @defer.inlineCallbacks
534 def wait_for_replication(self, callback, timeout):
535 """Wait for an event to happen.
536
537 Args:
538 callback: Gets called whenever an event happens. If this returns a
539 truthy value then ``wait_for_replication`` returns, otherwise
540 it waits for another event.
541 timeout: How many milliseconds to wait for callback return a truthy
542 value.
543
544 Returns:
545 A deferred that resolves with the value returned by the callback.
546 """
547 listener = _NotificationListener(None)
548
549 end_time = self.clock.time_msec() + timeout
550
551 while True:
552 listener.deferred = self.replication_deferred.observe()
553 result = yield callback()
554 if result:
555 break
556
557 now = self.clock.time_msec()
558 if end_time <= now:
559 break
560
561 listener.deferred = timeout_deferred(
562 listener.deferred,
563 timeout=(end_time - now) / 1000.,
564 reactor=self.hs.get_reactor(),
565 )
566
567 try:
568 with PreserveLoggingContext():
569 yield listener.deferred
570 except defer.TimeoutError:
571 break
572 except defer.CancelledError:
573 break
574
575 defer.returnValue(result)
519 for cb in self.replication_callbacks:
520 cb()
6868 "attrs>=17.4.0",
6969
7070 "netaddr>=0.7.18",
71
72 # requests is a transitive dep of treq, and urllib3 is a transitive dep
73 # of requests, as well as of sentry-sdk.
74 #
75 # As of requests 2.21, requests does not yet support urllib3 1.25.
76 # (If we do not pin it here, pip will give us the latest urllib3
77 # due to the dep via sentry-sdk.)
78 "urllib3<1.25",
7179 ]
7280
7381 CONDITIONAL_REQUIREMENTS = {
3232 def __init__(self, hs):
3333 super(ReplicationRegisterServlet, self).__init__(hs)
3434 self.store = hs.get_datastore()
35 self.registration_handler = hs.get_registration_handler()
3536
3637 @staticmethod
3738 def _serialize_payload(
3839 user_id, token, password_hash, was_guest, make_guest, appservice_id,
39 create_profile_with_displayname, admin, user_type,
40 create_profile_with_displayname, admin, user_type, address,
4041 ):
4142 """
4243 Args:
5556 admin (boolean): is an admin user?
5657 user_type (str|None): type of user. One of the values from
5758 api.constants.UserTypes, or None for a normal user.
59 address (str|None): the IP address used to perform the registration.
5860 """
5961 return {
6062 "token": token,
6567 "create_profile_with_displayname": create_profile_with_displayname,
6668 "admin": admin,
6769 "user_type": user_type,
70 "address": address,
6871 }
6972
7073 @defer.inlineCallbacks
7174 def _handle_request(self, request, user_id):
7275 content = parse_json_object_from_request(request)
7376
74 yield self.store.register(
77 yield self.registration_handler.register_with_store(
7578 user_id=user_id,
7679 token=content["token"],
7780 password_hash=content["password_hash"],
8184 create_profile_with_displayname=content["create_profile_with_displayname"],
8285 admin=content["admin"],
8386 user_type=content["user_type"],
87 address=content["address"]
8488 )
8589
8690 defer.returnValue((200, {}))
4242 if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
4343 return
4444
45 self.client_ip_last_seen.prefill(key, now)
46
4547 self.hs.get_tcp_replication().send_user_ip(
4648 user_id, access_token, ip, user_agent, device_id, now
4749 )
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
1414
15 from synapse.storage import DataStore
15 from synapse.replication.slave.storage._base import BaseSlavedStore
16 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
17 from synapse.storage.deviceinbox import DeviceInboxWorkerStore
1618 from synapse.util.caches.expiringcache import ExpiringCache
1719 from synapse.util.caches.stream_change_cache import StreamChangeCache
1820
19 from ._base import BaseSlavedStore, __func__
20 from ._slaved_id_tracker import SlavedIdTracker
2121
22
23 class SlavedDeviceInboxStore(BaseSlavedStore):
22 class SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore):
2423 def __init__(self, db_conn, hs):
2524 super(SlavedDeviceInboxStore, self).__init__(db_conn, hs)
2625 self._device_inbox_id_gen = SlavedIdTracker(
4241 expiry_ms=30 * 60 * 1000,
4342 )
4443
45 get_to_device_stream_token = __func__(DataStore.get_to_device_stream_token)
46 get_new_messages_for_device = __func__(DataStore.get_new_messages_for_device)
47 get_new_device_msgs_for_remote = __func__(DataStore.get_new_device_msgs_for_remote)
48 delete_messages_for_device = __func__(DataStore.delete_messages_for_device)
49 delete_device_msgs_for_remote = __func__(DataStore.delete_device_msgs_for_remote)
50
5144 def stream_positions(self):
5245 result = super(SlavedDeviceInboxStore, self).stream_positions()
5346 result["to_device"] = self._device_inbox_id_gen.get_current_token()
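The shape of this refactor in isolation (an editor's sketch, not part of the upstream diff): the method implementations move into a worker mixin that both the master store and the slaved store inherit, replacing the per-method __func__ copies:

    class DeviceInboxWorkerStoreSketch(object):
        # shared implementation, usable by master and worker alike
        def get_to_device_stream_token(self):
            return self._device_inbox_id_gen.get_current_token()

    class SlavedDeviceInboxStoreSketch(DeviceInboxWorkerStoreSketch):
        pass  # inherits the worker methods instead of copying them via __func__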
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
1414
15 from synapse.storage import DataStore
16 from synapse.storage.end_to_end_keys import EndToEndKeyStore
15 from synapse.replication.slave.storage._base import BaseSlavedStore
16 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
17 from synapse.storage.devices import DeviceWorkerStore
18 from synapse.storage.end_to_end_keys import EndToEndKeyWorkerStore
1719 from synapse.util.caches.stream_change_cache import StreamChangeCache
1820
19 from ._base import BaseSlavedStore, __func__
20 from ._slaved_id_tracker import SlavedIdTracker
2121
22
23 class SlavedDeviceStore(BaseSlavedStore):
22 class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore):
2423 def __init__(self, db_conn, hs):
2524 super(SlavedDeviceStore, self).__init__(db_conn, hs)
2625
3736 "DeviceListFederationStreamChangeCache", device_list_max,
3837 )
3938
40 get_device_stream_token = __func__(DataStore.get_device_stream_token)
41 get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed)
42 get_devices_by_remote = __func__(DataStore.get_devices_by_remote)
43 _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn)
44 _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn)
45 mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote)
46 _mark_as_sent_devices_by_remote_txn = (
47 __func__(DataStore._mark_as_sent_devices_by_remote_txn)
48 )
49 count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"]
50
5139 def stream_positions(self):
5240 result = super(SlavedDeviceStore, self).stream_positions()
5341 result["device_lists"] = self._device_list_id_gen.get_current_token()
5745 if stream_name == "device_lists":
5846 self._device_list_id_gen.advance(token)
5947 for row in rows:
60 self._device_list_stream_cache.entity_has_changed(
61 row.user_id, token
48 self._invalidate_caches_for_devices(
49 token, row.user_id, row.destination,
6250 )
63
64 if row.destination:
65 self._device_list_federation_stream_cache.entity_has_changed(
66 row.destination, token
67 )
6851 return super(SlavedDeviceStore, self).process_replication_rows(
6952 stream_name, token, rows
7053 )
54
55 def _invalidate_caches_for_devices(self, token, user_id, destination):
56 self._device_list_stream_cache.entity_has_changed(
57 user_id, token
58 )
59
60 if destination:
61 self._device_list_federation_stream_cache.entity_has_changed(
62 destination, token
63 )
64
65 self._get_cached_devices_for_user.invalidate((user_id,))
66 self._get_cached_user_device.invalidate_many((user_id,))
67 self.get_device_list_last_stream_id_for_remote.invalidate((user_id,))
5353
5454 def stream_positions(self):
5555 result = super(SlavedPresenceStore, self).stream_positions()
56 position = self._presence_id_gen.get_current_token()
57 result["presence"] = position
56
57 if self.hs.config.use_presence:
58 position = self._presence_id_gen.get_current_token()
59 result["presence"] = position
60
5861 return result
5962
6063 def process_replication_rows(self, stream_name, token, rows):
1919 from .events import SlavedEventStore
2020
2121
22 class SlavedPushRuleStore(PushRulesWorkerStore, SlavedEventStore):
22 class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
2323 def __init__(self, db_conn, hs):
2424 self._push_rules_stream_id_gen = SlavedIdTracker(
2525 db_conn, "push_rules_stream", "stream_id",
3838 Accepts a handler that will be called when new data is available or data
3939 is required.
4040 """
41 maxDelay = 5 # Try at least once every N seconds
41 maxDelay = 30 # Try at least once every N seconds
4242
4343 def __init__(self, hs, client_name, handler):
4444 self.client_name = client_name
5353
5454 def buildProtocol(self, addr):
5555 logger.info("Connected to replication: %r", addr)
56 self.resetDelay()
5756 return ClientReplicationStreamProtocol(
5857 self.client_name, self.server_name, self._clock, self.handler
5958 )
8988 # Used for tests.
9089 self.awaiting_syncs = {}
9190
91 # The factory used to create connections.
92 self.factory = None
93
9294 def start_replication(self, hs):
9395 """Helper method to start a replication connection to the remote server
9496 using TCP.
9597 """
9698 client_name = hs.config.worker_name
97 factory = ReplicationClientFactory(hs, client_name, self)
99 self.factory = ReplicationClientFactory(hs, client_name, self)
98100 host = hs.config.worker_replication_host
99101 port = hs.config.worker_replication_port
100 hs.get_reactor().connectTCP(host, port, factory)
102 hs.get_reactor().connectTCP(host, port, self.factory)
101103
102104 def on_rdata(self, stream_name, token, rows):
103105 """Called when we get new replication data. By default this just pokes
139141 args["account_data"] = user_account_data
140142 elif room_account_data:
141143 args["account_data"] = room_account_data
144
142145 return args
143146
144147 def get_currently_syncing_users(self):
203206 for cmd in self.pending_commands:
204207 connection.send_command(cmd)
205208 self.pending_commands = []
209
210 def finished_connecting(self):
211 """Called when we have successfully subscribed and caught up to all
212 streams we're interested in.
213 """
214 logger.info("Finished connecting to server")
215
216 # We don't reset the delay any earlier, as otherwise, if there is a
217 # problem during start-up, we'll end up tight-looping connecting to the
218 # server.
219 self.factory.resetDelay()
126126
127127
128128 class PositionCommand(Command):
129 """Sent by the client to tell the client the stream postition without
129 """Sent by the server to tell the client the stream postition without
130130 needing to send an RDATA.
131
132 Sent to the client after all missing updates for a stream have been sent
133 to the client and they're now up to date.
131134 """
132135 NAME = "POSITION"
133136
222222 return
223223
224224 # Now lets try and call on_<CMD_NAME> function
225 try:
226 run_as_background_process(
227 "replication-" + cmd.get_logcontext_id(),
228 getattr(self, "on_%s" % (cmd_name,)),
229 cmd,
230 )
231 except Exception:
232 logger.exception("[%s] Failed to handle line: %r", self.id(), line)
225 run_as_background_process(
226 "replication-" + cmd.get_logcontext_id(),
227 self.handle_command,
228 cmd,
229 )
230
231 def handle_command(self, cmd):
232 """Handle a command we have received over the replication stream.
233
234 By default delegates to on_<COMMAND>
235
236 Args:
237 cmd (synapse.replication.tcp.commands.Command): received command
238
239 Returns:
240 Deferred
241 """
242 handler = getattr(self, "on_%s" % (cmd.NAME,))
243 return handler(cmd)
233244
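Because dispatch now funnels through a single handle_command method, a subclass can wrap every inbound command. A hypothetical sketch (not part of the upstream diff; assumes the module-level `logger` defined in this file):

    import time

    class TimingMixin(object):
        def handle_command(self, cmd):
            # Times the synchronous part only: if the handler returns a
            # Deferred, the clock stops when it is returned, not when it fires.
            start = time.time()
            try:
                return super(TimingMixin, self).handle_command(cmd)
            finally:
                logger.info("handled %s in %.3fs", cmd.NAME, time.time() - start)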
234245 def close(self):
235246 logger.warn("[%s] Closing connection", self.id())
363374 self.transport.unregisterProducer()
364375
365376 def __str__(self):
377 addr = None
378 if self.transport:
379 addr = str(self.transport.getPeer())
366380 return "ReplicationConnection<name=%s,conn_id=%s,addr=%s>" % (
367 self.name, self.conn_id, self.addr,
381 self.name, self.conn_id, addr,
368382 )
369383
370384 def id(self):
380394 VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
381395 VALID_OUTBOUND_COMMANDS = VALID_SERVER_COMMANDS
382396
383 def __init__(self, server_name, clock, streamer, addr):
397 def __init__(self, server_name, clock, streamer):
384398 BaseReplicationStreamProtocol.__init__(self, clock) # Old style class
385399
386400 self.server_name = server_name
387401 self.streamer = streamer
388 self.addr = addr
389402
390403 # The streams the client has subscribed to and is up to date with
391404 self.replication_streams = set()
450463
451464 @defer.inlineCallbacks
452465 def subscribe_to_stream(self, stream_name, token):
453 """Subscribe the remote to a streams.
466 """Subscribe the remote to a stream.
454467
455468 This involves checking if they've missed anything and sending those
456469 updates down if they have. During that time new updates for the stream
477490
478491 # Now we can send any updates that came in while we were subscribing
479492 pending_rdata = self.pending_rdata.pop(stream_name, [])
493 updates = []
480494 for token, update in pending_rdata:
481 # Only send updates newer than the current token
482 if token > current_token:
495 # If the token is null, it is part of a batch update. Batches
496 # are multiple updates that share a single token. To denote
497 # this, the token is set to None for all tokens in the batch
498 # except for the last. If we find a None token, we keep looking
499 # through tokens until we find one that is not None and then
500 # process all previous updates in the batch as if they had the
501 # final token.
502 if token is None:
503 # Store this update as part of a batch
504 updates.append(update)
505 continue
506
507 if token <= current_token:
508 # This update or batch of updates is older than
509 # current_token, dismiss it
510 updates = []
511 continue
512
513 updates.append(update)
514
515 # Send all updates that are part of this batch with the
516 # found token
517 for update in updates:
483518 self.send_command(RdataCommand(stream_name, token, update))
519
520 # Clear stored updates
521 updates = []
484522
485523 # They're now fully subscribed
486524 self.replication_streams.add(stream_name)
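How the None-token batching above plays out on sample data, as a standalone sketch (not part of the upstream diff):

    def flush_pending(pending_rdata, current_token):
        sent = []
        updates = []
        for token, update in pending_rdata:
            if token is None:
                # part of a batch; hold until the closing token arrives
                updates.append(update)
                continue
            if token <= current_token:
                # stale update (or whole batch); discard
                updates = []
                continue
            updates.append(update)
            # send everything in the batch under the closing token
            sent.extend((token, u) for u in updates)
            updates = []
        return sent

    pending = [(5, "a"), (None, "b"), (None, "c"), (7, "d")]
    assert flush_pending(pending, current_token=5) == [(7, "b"), (7, "c"), (7, "d")]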
525563 self.server_name = server_name
526564 self.handler = handler
527565
566 # Set of stream names that have been subscribe to, but haven't yet
567 # caught up with. This is used to track when the client has been fully
568 # connected to the remote.
569 self.streams_connecting = set()
570
528571 # Map of stream to batched updates. See RdataCommand for info on how
529572 # batching works.
530573 self.pending_batches = {}
546589
547590 # We've now finished connecting, so inform the client handler
548591 self.handler.update_connection(self)
592
593 # This will happen if we don't actually subscribe to any streams
594 if not self.streams_connecting:
595 self.handler.finished_connecting()
549596
550597 def on_SERVER(self, cmd):
551598 if cmd.data != self.server_name:
576623 return self.handler.on_rdata(stream_name, cmd.token, rows)
577624
578625 def on_POSITION(self, cmd):
626 # When we get a `POSITION` command it means we've finished getting
627 # missing updates for the given stream, and are now up to date.
628 self.streams_connecting.discard(cmd.stream_name)
629 if not self.streams_connecting:
630 self.handler.finished_connecting()
631
579632 return self.handler.on_position(cmd.stream_name, cmd.token)
580633
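The connect-time bookkeeping in miniature (an editor's sketch, not part of the upstream diff): each REPLICATE adds a stream to streams_connecting, the matching POSITION removes it, and finished_connecting fires once the set drains.

    streams_connecting = set()

    def on_replicate(stream_name):
        streams_connecting.add(stream_name)

    def on_position(stream_name):
        streams_connecting.discard(stream_name)
        if not streams_connecting:
            print("finished connecting")

    for s in ("events", "presence"):
        on_replicate(s)
    on_position("events")
    on_position("presence")  # prints "finished connecting"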
581634 def on_SYNC(self, cmd):
591644 "[%s] Subscribing to replication stream: %r from %r",
592645 self.id(), stream_name, token
593646 )
647
648 self.streams_connecting.add(stream_name)
594649
595650 self.send_command(ReplicateCommand(stream_name, token))
596651
1515 """
1616
1717 import logging
18 import random
1819
1920 from six import itervalues
2021
5556 self.server_name,
5657 self.clock,
5758 self.streamer,
58 addr
5959 )
6060
6161
7272 self.clock = hs.get_clock()
7373 self.notifier = hs.get_notifier()
7474 self._server_notices_sender = hs.get_server_notices_sender()
75
76 self._replication_torture_level = hs.config.replication_torture_level
7577
7678 # Current connections.
7779 self.connections = []
156158 for stream in self.streams:
157159 stream.advance_current_token()
158160
159 for stream in self.streams:
161 all_streams = self.streams
162
163 if self._replication_torture_level is not None:
164 # there is no guarantee about ordering between the streams,
165 # so let's shuffle them around a bit when we are in torture mode.
166 all_streams = list(all_streams)
167 random.shuffle(all_streams)
168
169 for stream in all_streams:
160170 if stream.last_token == stream.upto_token:
161171 continue
172
173 if self._replication_torture_level:
174 yield self.clock.sleep(
175 self._replication_torture_level / 1000.0
176 )
162177
163178 logger.debug(
164179 "Getting stream: %s: %s -> %s",
2222 current_token: The function that returns the current token for the stream
2323 update_function: The function that returns a list of updates between two tokens
2424 """
25
25 import itertools
2626 import logging
2727 from collections import namedtuple
2828
194194 limit=MAX_EVENTS_BEHIND + 1,
195195 )
196196
197 if len(rows) >= MAX_EVENTS_BEHIND:
198 raise Exception("stream %s has fallen behind" % (self.NAME))
197 # never turn more than MAX_EVENTS_BEHIND + 1 into updates.
198 rows = itertools.islice(rows, MAX_EVENTS_BEHIND + 1)
199199 else:
200200 rows = yield self.update_function(
201201 from_token, current_token,
202202 )
203203
204204 updates = [(row[0], self.ROW_TYPE(*row[1:])) for row in rows]
205
206 # check we didn't get more rows than the limit.
207 # doing it like this allows the update_function to be a generator.
208 if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
209 raise Exception("stream %s has fallen behind" % (self.NAME))
205210
206211 defer.returnValue((updates, current_token))
207212
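Why islice plus a post-hoc length check works even for generator-based update functions, sketched standalone (not part of the upstream diff):

    import itertools

    MAX_EVENTS_BEHIND = 3

    def rows_forever(start):
        # a lazy row source; islice stops it after MAX_EVENTS_BEHIND + 1 rows
        while True:
            yield (start,)
            start += 1

    rows = itertools.islice(rows_forever(100), MAX_EVENTS_BEHIND + 1)
    updates = [row[0] for row in rows]
    assert len(updates) >= MAX_EVENTS_BEHIND  # i.e. the stream has fallen behind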
55 <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
66 {% else %}
77 {% if message.sender_hash % 3 == 0 %}
8 <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
8 <img class="sender_avatar" src="https://riot.im/img/external/avatar-1.png" />
99 {% elif message.sender_hash % 3 == 1 %}
10 <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
10 <img class="sender_avatar" src="https://riot.im/img/external/avatar-2.png" />
1111 {% else %}
12 <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
12 <img class="sender_avatar" src="https://riot.im/img/external/avatar-3.png" />
1313 {% endif %}
1414 {% endif %}
1515 {% endif %}
1818 </td>
1919 <td class="logo">
2020 {% if app_name == "Riot" %}
21 <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
21 <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
2222 {% elif app_name == "Vector" %}
2323 <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
2424 {% else %}
44 <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
55 {% else %}
66 {% if room.hash % 3 == 0 %}
7 <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
7 <img alt="" src="https://riot.im/img/external/avatar-1.png" />
88 {% elif room.hash % 3 == 1 %}
9 <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
9 <img alt="" src="https://riot.im/img/external/avatar-2.png" />
1010 {% else %}
11 <img alt="" src="https://vector.im/beta/img/f4c371.png" />
11 <img alt="" src="https://riot.im/img/external/avatar-3.png" />
1212 {% endif %}
1313 {% endif %}
1414 </td>
1616 import hashlib
1717 import hmac
1818 import logging
19 import platform
1920
2021 from six import text_type
2122 from six.moves import http_client
2223
2324 from twisted.internet import defer
2425
26 import synapse
2527 from synapse.api.constants import Membership, UserTypes
2628 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
2729 from synapse.http.servlet import (
3133 parse_string,
3234 )
3335 from synapse.types import UserID, create_requester
36 from synapse.util.versionstring import get_version_string
3437
3538 from .base import ClientV1RestServlet, client_path_patterns
3639
6164 raise SynapseError(400, "Can only list local users")
6265
6366 ret = yield self.handlers.admin_handler.get_users()
67
68 defer.returnValue((200, ret))
69
70
71 class VersionServlet(ClientV1RestServlet):
72 PATTERNS = client_path_patterns("/admin/server_version")
73
74 @defer.inlineCallbacks
75 def on_GET(self, request):
76 requester = yield self.auth.get_user_by_req(request)
77 is_admin = yield self.auth.is_server_admin(requester.user)
78
79 if not is_admin:
80 raise AuthError(403, "You are not a server admin")
81
82 ret = {
83 'server_version': get_version_string(synapse),
84 'python_version': platform.python_version(),
85 }
6486
6587 defer.returnValue((200, ret))
6688
465487 )
466488 new_room_id = info["room_id"]
467489
490 requester_user_id = requester.user.to_string()
491
492 logger.info(
493 "Shutting down room %r, joining to new room: %r",
494 room_id, new_room_id,
495 )
496
497 # This will work even if the room is already blocked, but that is
498 # desirable in case the first attempt at blocking the room failed below.
499 yield self.store.block_room(room_id, requester_user_id)
500
501 users = yield self.state.get_current_user_in_room(room_id)
502 kicked_users = []
503 failed_to_kick_users = []
504 for user_id in users:
505 if not self.hs.is_mine_id(user_id):
506 continue
507
508 logger.info("Kicking %r from %r...", user_id, room_id)
509
510 try:
511 target_requester = create_requester(user_id)
512 yield self.room_member_handler.update_membership(
513 requester=target_requester,
514 target=target_requester.user,
515 room_id=room_id,
516 action=Membership.LEAVE,
517 content={},
518 ratelimit=False,
519 require_consent=False,
520 )
521
522 yield self.room_member_handler.forget(target_requester.user, room_id)
523
524 yield self.room_member_handler.update_membership(
525 requester=target_requester,
526 target=target_requester.user,
527 room_id=new_room_id,
528 action=Membership.JOIN,
529 content={},
530 ratelimit=False,
531 require_consent=False,
532 )
533
534 kicked_users.append(user_id)
535 except Exception:
536 logger.exception(
537 "Failed to leave old room and join new room for %r", user_id,
538 )
539 failed_to_kick_users.append(user_id)
540
468541 yield self.event_creation_handler.create_and_send_nonmember_event(
469542 room_creator_requester,
470543 {
476549 ratelimit=False,
477550 )
478551
479 requester_user_id = requester.user.to_string()
480
481 logger.info("Shutting down room %r", room_id)
482
483 yield self.store.block_room(room_id, requester_user_id)
484
485 users = yield self.state.get_current_user_in_room(room_id)
486 kicked_users = []
487 for user_id in users:
488 if not self.hs.is_mine_id(user_id):
489 continue
490
491 logger.info("Kicking %r from %r...", user_id, room_id)
492
493 target_requester = create_requester(user_id)
494 yield self.room_member_handler.update_membership(
495 requester=target_requester,
496 target=target_requester.user,
497 room_id=room_id,
498 action=Membership.LEAVE,
499 content={},
500 ratelimit=False
501 )
502
503 yield self.room_member_handler.forget(target_requester.user, room_id)
504
505 yield self.room_member_handler.update_membership(
506 requester=target_requester,
507 target=target_requester.user,
508 room_id=new_room_id,
509 action=Membership.JOIN,
510 content={},
511 ratelimit=False
512 )
513
514 kicked_users.append(user_id)
515
516552 aliases_for_room = yield self.store.get_aliases_for_room(room_id)
517553
518554 yield self.store.update_aliases_for_room(
521557
522558 defer.returnValue((200, {
523559 "kicked_users": kicked_users,
560 "failed_to_kick_users": failed_to_kick_users,
524561 "local_aliases": aliases_for_room,
525562 "new_room_id": new_room_id,
526563 }))
762799 QuarantineMediaInRoom(hs).register(http_server)
763800 ListMediaInRoom(hs).register(http_server)
764801 UserRegisterServlet(hs).register(http_server)
802 VersionServlet(hs).register(http_server)
2121 from twisted.web.client import PartialDownloadError
2222
2323 from synapse.api.errors import Codes, LoginError, SynapseError
24 from synapse.api.ratelimiting import Ratelimiter
2425 from synapse.http.server import finish_request
2526 from synapse.http.servlet import (
2627 RestServlet,
9697 self.registration_handler = hs.get_registration_handler()
9798 self.handlers = hs.get_handlers()
9899 self._well_known_builder = WellKnownBuilder(hs)
100 self._address_ratelimiter = Ratelimiter()
99101
100102 def on_GET(self, request):
101103 flows = []
128130
129131 @defer.inlineCallbacks
130132 def on_POST(self, request):
133 self._address_ratelimiter.ratelimit(
134 request.getClientIP(), time_now_s=self.hs.clock.time(),
135 rate_hz=self.hs.config.rc_login_address.per_second,
136 burst_count=self.hs.config.rc_login_address.burst_count,
137 update=True,
138 )
139
131140 login_submission = parse_json_object_from_request(request)
132141 try:
133142 if self.jwt_enabled and (login_submission["type"] ==
191200 # We store all email addresses as lowercase in the DB.
192201 # (See add_threepid in synapse/handlers/auth.py)
193202 address = address.lower()
203
204 # Check for login providers that support 3pid login types
205 canonical_user_id, callback_3pid = (
206 yield self.auth_handler.check_password_provider_3pid(
207 medium,
208 address,
209 login_submission["password"],
210 )
211 )
212 if canonical_user_id:
213 # Authentication through password provider and 3pid succeeded
214 result = yield self._register_device_with_callback(
215 canonical_user_id, login_submission, callback_3pid,
216 )
217 defer.returnValue(result)
218
219 # No password providers were able to handle this 3pid
220 # Check local store
194221 user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
195222 medium, address,
196223 )
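The 3pid login order of precedence, reduced to a sketch (not part of the upstream diff; `providers` and `local_lookup` are hypothetical callables): password providers get first refusal, then the homeserver's own threepid table.

    def login_with_3pid(providers, local_lookup, medium, address, password):
        for provider in providers:
            user_id = provider(medium, address, password)
            if user_id:
                return user_id  # a provider authenticated the 3pid directly
        # no provider handled it; fall back to the local mapping
        return local_lookup(medium, address)  # may be None -> normal auth path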
213240 if "user" not in identifier:
214241 raise SynapseError(400, "User identifier is missing 'user' key")
215242
216 auth_handler = self.auth_handler
217 canonical_user_id, callback = yield auth_handler.validate_login(
243 canonical_user_id, callback = yield self.auth_handler.validate_login(
218244 identifier["user"],
219245 login_submission,
220246 )
221247
248 result = yield self._register_device_with_callback(
249 canonical_user_id, login_submission, callback,
250 )
251 defer.returnValue(result)
252
253 @defer.inlineCallbacks
254 def _register_device_with_callback(
255 self,
256 user_id,
257 login_submission,
258 callback=None,
259 ):
260 """ Registers a device with a given user_id. Optionally run a callback
261 function after registration has completed.
262
263 Args:
264 user_id (str): ID of the user to register.
265 login_submission (dict): Dictionary of login information.
266 callback (func|None): Callback function to run after registration.
267
268 Returns:
269 result (Dict[str,str]): Dictionary of account information after
270 successful registration.
271 """
222272 device_id = login_submission.get("device_id")
223273 initial_display_name = login_submission.get("initial_device_display_name")
224274 device_id, access_token = yield self.registration_handler.register_device(
225 canonical_user_id, device_id, initial_display_name,
275 user_id, device_id, initial_display_name,
226276 )
227277
228278 result = {
229 "user_id": canonical_user_id,
279 "user_id": user_id,
230280 "access_token": access_token,
231281 "home_server": self.hs.hostname,
232282 "device_id": device_id,
284334 raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED)
285335
286336 user_id = UserID(user, self.hs.hostname).to_string()
337
287338 auth_handler = self.auth_handler
288339 registered_user_id = yield auth_handler.check_user_exists(user_id)
289340 if registered_user_id:
2424 import synapse
2525 import synapse.types
2626 from synapse.api.constants import LoginType
27 from synapse.api.errors import Codes, SynapseError, UnrecognizedRequestError
27 from synapse.api.errors import (
28 Codes,
29 LimitExceededError,
30 SynapseError,
31 UnrecognizedRequestError,
32 )
2833 from synapse.config.server import is_threepid_reserved
2934 from synapse.http.servlet import (
3035 RestServlet,
190195 self.identity_handler = hs.get_handlers().identity_handler
191196 self.room_member_handler = hs.get_room_member_handler()
192197 self.macaroon_gen = hs.get_macaroon_generator()
198 self.ratelimiter = hs.get_registration_ratelimiter()
199 self.clock = hs.get_clock()
193200
194201 @interactive_auth_handler
195202 @defer.inlineCallbacks
196203 def on_POST(self, request):
197204 body = parse_json_object_from_request(request)
205
206 client_addr = request.getClientIP()
207
208 time_now = self.clock.time()
209
210 allowed, time_allowed = self.ratelimiter.can_do_action(
211 client_addr, time_now_s=time_now,
212 rate_hz=self.hs.config.rc_registration.per_second,
213 burst_count=self.hs.config.rc_registration.burst_count,
214 update=False,
215 )
216
217 if not allowed:
218 raise LimitExceededError(
219 retry_after_ms=int(1000 * (time_allowed - time_now)),
220 )
198221
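The Ratelimiter calls used here, in isolation (an editor's sketch, not part of the upstream diff; the rate values are illustrative, not the configured defaults):

    from synapse.api.ratelimiting import Ratelimiter

    limiter = Ratelimiter()
    allowed, time_allowed = limiter.can_do_action(
        "203.0.113.1", time_now_s=1000.0, rate_hz=0.17, burst_count=3,
        update=False,  # peek only; the action is recorded later if it proceeds
    )
    if not allowed:
        retry_after_ms = int(1000 * (time_allowed - 1000.0))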
199222 kind = b"user"
200223 if b"kind" in request.args:
201224 kind = request.args[b"kind"][0]
202225
203226 if kind == b"guest":
204 ret = yield self._do_guest_registration(body)
227 ret = yield self._do_guest_registration(body, address=client_addr)
205228 defer.returnValue(ret)
206229 return
207230 elif kind != b"user":
410433 guest_access_token=guest_access_token,
411434 generate_token=False,
412435 threepid=threepid,
436 address=client_addr,
413437 )
414438 # Necessary due to auth checks prior to the threepid being
415439 # written to the db
521545 defer.returnValue(result)
522546
523547 @defer.inlineCallbacks
524 def _do_guest_registration(self, params):
548 def _do_guest_registration(self, params, address=None):
525549 if not self.hs.config.allow_guest_access:
526550 raise SynapseError(403, "Guest access is disabled")
527551 user_id, _ = yield self.registration_handler.register(
528552 generate_token=False,
529 make_guest=True
553 make_guest=True,
554 address=address,
530555 )
531556
532557 # we don't allow guests to specify their own device_id, because
5858 requester = yield self.auth.get_user_by_req(request, allow_guest=False)
5959 user_id = requester.user.to_string()
6060
61 if not self.hs.config.user_directory_search_enabled:
62 defer.returnValue((200, {
63 "limited": False,
64 "results": [],
65 }))
66
6167 body = parse_json_object_from_request(request)
6268
6369 limit = body.get("limit", 10)
00 # -*- coding: utf-8 -*-
11 # Copyright 2014-2016 OpenMarket Ltd
2 # Copyright 2019 New Vector Ltd.
23 #
34 # Licensed under the Apache License, Version 2.0 (the "License");
45 # you may not use this file except in compliance with the License.
9899
99100 request.setHeader(b"Content-Type", media_type.encode("UTF-8"))
100101 if upload_name:
101 if is_ascii(upload_name):
102 disposition = "inline; filename=%s" % (_quote(upload_name),)
102 # RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
103 #
104 # `filename` is defined to be a `value`, which is defined by RFC2616
105 # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
106 # is (essentially) a single US-ASCII word, and a `quoted-string` is a
107 # US-ASCII string surrounded by double-quotes, using backslash as an
108 # escape character. Note that %-encoding is *not* permitted.
109 #
110 # `filename*` is defined to be an `ext-value`, which is defined in
111 # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
112 # where `value-chars` is essentially a %-encoded string in the given charset.
113 #
114 # [1]: https://tools.ietf.org/html/rfc6266#section-4.1
115 # [2]: https://tools.ietf.org/html/rfc2616#section-3.6
116 # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1
117
118 # We avoid the quoted-string version of `filename`, because (a) synapse didn't
119 # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
120 # may as well just do the filename* version.
121 if _can_encode_filename_as_token(upload_name):
122 disposition = 'inline; filename=%s' % (upload_name, )
103123 else:
104 disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),)
124 disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name), )
105125
106126 request.setHeader(b"Content-Disposition", disposition.encode('ascii'))
107127
112132 # clients are smart enough to be happy with Cache-Control
113133 request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
114134 request.setHeader(b"Content-Length", b"%d" % (file_size,))
135
136
137 # separators as defined in RFC2616. SP and HT are handled separately.
138 # see _can_encode_filename_as_token.
139 _FILENAME_SEPARATOR_CHARS = set((
140 "(", ")", "<", ">", "@", ",", ";", ":", "\\", '"',
141 "/", "[", "]", "?", "=", "{", "}",
142 ))
143
144
145 def _can_encode_filename_as_token(x):
146 for c in x:
147 # from RFC2616:
148 #
149 # token = 1*<any CHAR except CTLs or separators>
150 #
151 # separators = "(" | ")" | "<" | ">" | "@"
152 # | "," | ";" | ":" | "\" | <">
153 # | "/" | "[" | "]" | "?" | "="
154 # | "{" | "}" | SP | HT
155 #
156 # CHAR = <any US-ASCII character (octets 0 - 127)>
157 #
158 # CTL = <any US-ASCII control character
159 # (octets 0 - 31) and DEL (127)>
160 #
161 if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
162 return False
163 return True
115164
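Putting the two branches together (an editor's sketch, not part of the upstream diff; Python 3 shown for brevity, reusing _can_encode_filename_as_token from above):

    from urllib.parse import quote

    def content_disposition(upload_name):
        if _can_encode_filename_as_token(upload_name):
            # plain token form
            return "inline; filename=%s" % (upload_name,)
        # RFC5987 ext-value form
        return "inline; filename*=utf-8''%s" % (quote(upload_name),)

    # content_disposition("foo.png")  -> "inline; filename=foo.png"
    # content_disposition("café.png") -> "inline; filename*=utf-8''caf%C3%A9.png"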
116165
117166 @defer.inlineCallbacks
212261 Content-Disposition HTTP header.
213262
214263 Args:
215 headers (twisted.web.http_headers.Headers): The HTTP
216 request headers.
264 headers (dict[bytes, list[bytes]]): The HTTP request headers.
217265
218266 Returns:
219267 A Unicode string of the filename, or None.
224272 if not content_disposition[0]:
225273 return
226274
227 # dict of unicode: bytes, corresponding to the key value sections of the
228 # Content-Disposition header.
229 params = {}
230 parts = content_disposition[0].split(b";")
231 for i in parts:
232 # Split into key-value pairs, if able
233 # We don't care about things like `inline`, so throw it out
234 if b"=" not in i:
235 continue
236
237 key, value = i.strip().split(b"=")
238 params[key.decode('ascii')] = value
275 _, params = _parse_header(content_disposition[0])
239276
240277 upload_name = None
241278
242279 # First check if there is a valid UTF-8 filename
243 upload_name_utf8 = params.get("filename*", None)
280 upload_name_utf8 = params.get(b"filename*", None)
244281 if upload_name_utf8:
245282 if upload_name_utf8.lower().startswith(b"utf-8''"):
246283 upload_name_utf8 = upload_name_utf8[7:]
266303
267304 # If there isn't check for an ascii name.
268305 if not upload_name:
269 upload_name_ascii = params.get("filename", None)
306 upload_name_ascii = params.get(b"filename", None)
270307 if upload_name_ascii and is_ascii(upload_name_ascii):
271 # Make sure there's no %-quoted bytes. If there is, reject it as
272 # non-valid ASCII.
273 if b"%" not in upload_name_ascii:
274 upload_name = upload_name_ascii.decode('ascii')
308 upload_name = upload_name_ascii.decode('ascii')
275309
276310 # This may be None here, indicating we did not find a matching name.
277311 return upload_name
312
313
314 def _parse_header(line):
315 """Parse a Content-type like header.
316
317 Cargo-culted from `cgi`, but works on bytes rather than strings.
318
319 Args:
320 line (bytes): header to be parsed
321
322 Returns:
323 Tuple[bytes, dict[bytes, bytes]]:
324 the main content-type, followed by the parameter dictionary
325 """
326 parts = _parseparam(b';' + line)
327 key = next(parts)
328 pdict = {}
329 for p in parts:
330 i = p.find(b'=')
331 if i >= 0:
332 name = p[:i].strip().lower()
333 value = p[i + 1:].strip()
334
335 # strip double-quotes
336 if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
337 value = value[1:-1]
338 value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
339 pdict[name] = value
340
341 return key, pdict
342
343
344 def _parseparam(s):
345 """Generator which splits the input on ;, respecting double-quoted sequences
346
347 Cargo-culted from `cgi`, but works on bytes rather than strings.
348
349 Args:
350 s (bytes): header to be parsed
351
352 Returns:
353 Iterable[bytes]: the split input
354 """
355 while s[:1] == b';':
356 s = s[1:]
357
358 # look for the next ;
359 end = s.find(b';')
360
361 # if there is an odd number of " marks between here and the next ;, skip to the
362 # next ; instead
363 while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
364 end = s.find(b';', end + 1)
365
366 if end < 0:
367 end = len(s)
368 f = s[:end]
369 yield f.strip()
370 s = s[end:]
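A quick check of the bytes-oriented parser above (an editor's sketch, not part of the upstream diff), showing a quoted semicolon surviving the split:

    key, params = _parse_header(b'inline; filename="foo; bar.png"')
    assert key == b'inline'
    assert params == {b'filename': b'foo; bar.png'}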
4141 ReplicationFederationHandlerRegistry,
4242 )
4343 from synapse.federation.send_queue import FederationRemoteSendQueue
44 from synapse.federation.transaction_queue import TransactionQueue
44 from synapse.federation.sender import FederationSender
4545 from synapse.federation.transport.client import TransportLayerClient
4646 from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
4747 from synapse.groups.groups_server import GroupsServerHandler
5050 from synapse.handlers.appservice import ApplicationServicesHandler
5151 from synapse.handlers.auth import AuthHandler, MacaroonGenerator
5252 from synapse.handlers.deactivate_account import DeactivateAccountHandler
53 from synapse.handlers.device import DeviceHandler
53 from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler
5454 from synapse.handlers.devicemessage import DeviceMessageHandler
5555 from synapse.handlers.e2e_keys import E2eKeysHandler
5656 from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler
184184 'registration_handler',
185185 ]
186186
187 REQUIRED_ON_MASTER_STARTUP = [
188 "user_directory_handler",
189 ]
190
187191 # This is overridden in derived application classes
188192 # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
189193 # instantiated during setup() for future return by get_datastore()
205209 self.clock = Clock(reactor)
206210 self.distributor = Distributor()
207211 self.ratelimiter = Ratelimiter()
212 self.registration_ratelimiter = Ratelimiter()
208213
209214 self.datastore = None
210215
219224 conn.commit()
220225 logger.info("Finished setting up.")
221226
227 def setup_master(self):
228 """
229 Some handlers have side effects on instantiation (like registering
230 background updates). This function causes them to be fetched, and
231 therefore instantiated, to run those side effects.
232 """
233 for i in self.REQUIRED_ON_MASTER_STARTUP:
234 getattr(self, "get_" + i)()
235
222236 def get_reactor(self):
223237 """
224238 Fetch the Twisted reactor in use by this HomeServer.
249263
250264 def get_ratelimiter(self):
251265 return self.ratelimiter
266
267 def get_registration_ratelimiter(self):
268 return self.registration_ratelimiter
252269
253270 def build_federation_client(self):
254271 return FederationClient(self)
306323 return MacaroonGenerator(self)
307324
308325 def build_device_handler(self):
309 return DeviceHandler(self)
326 if self.config.worker_app:
327 return DeviceWorkerHandler(self)
328 else:
329 return DeviceHandler(self)
310330
311331 def build_device_message_handler(self):
312332 return DeviceMessageHandler(self)
413433
414434 def build_federation_sender(self):
415435 if self.should_send_federation():
416 return TransactionQueue(self)
436 return FederationSender(self)
417437 elif not self.config.worker_app:
418438 return FederationRemoteSendQueue(self)
419439 else:
00 import synapse.api.auth
11 import synapse.config.homeserver
2 import synapse.federation.sender
23 import synapse.federation.transaction_queue
34 import synapse.federation.transport.client
45 import synapse.handlers
67 import synapse.handlers.deactivate_account
78 import synapse.handlers.device
89 import synapse.handlers.e2e_keys
10 import synapse.handlers.message
911 import synapse.handlers.room
1012 import synapse.handlers.room_member
11 import synapse.handlers.message
1213 import synapse.handlers.set_password
1314 import synapse.rest.media.v1.media_repository
1415 import synapse.server_notices.server_notices_manager
6162 def get_set_password_handler(self) -> synapse.handlers.set_password.SetPasswordHandler:
6263 pass
6364
64 def get_federation_sender(self) -> synapse.federation.transaction_queue.TransactionQueue:
65 def get_federation_sender(self) -> synapse.federation.sender.FederationSender:
6566 pass
6667
6768 def get_federation_transport_client(self) -> synapse.federation.transport.client.TransportLayerClient:
766766 """
767767 allvalues = {}
768768 allvalues.update(keyvalues)
769 allvalues.update(values)
770769 allvalues.update(insertion_values)
770
771 if not values:
772 latter = "NOTHING"
773 else:
774 allvalues.update(values)
775 latter = (
776 "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
777 )
771778
772779 sql = (
773780 "INSERT INTO %s (%s) VALUES (%s) "
774 "ON CONFLICT (%s) DO UPDATE SET %s"
781 "ON CONFLICT (%s) DO %s"
775782 ) % (
776783 table,
777784 ", ".join(k for k in allvalues),
778785 ", ".join("?" for _ in allvalues),
779786 ", ".join(k for k in keyvalues),
780 ", ".join(k + "=EXCLUDED." + k for k in values),
787 latter
781788 )
782789 txn.execute(sql, list(allvalues.values()))
783790
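The two statement shapes this branch now emits, rebuilt as a standalone helper for a hypothetical table (an editor's sketch, not part of the upstream diff; Python 3.7+ dict ordering assumed):

    def build_upsert_sql(table, keyvalues, values, insertion_values):
        allvalues = dict(keyvalues)
        allvalues.update(insertion_values)
        if not values:
            # nothing to update on conflict
            latter = "NOTHING"
        else:
            allvalues.update(values)
            latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
        return "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
            table,
            ", ".join(allvalues),
            ", ".join("?" for _ in allvalues),
            ", ".join(keyvalues),
            latter,
        )

    assert build_upsert_sql("t", {"k": 1}, {}, {"v": 2}) == (
        "INSERT INTO t (k, v) VALUES (?, ?) ON CONFLICT (k) DO NOTHING"
    )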
5151 Returns:
5252 A duration in ms as a float
5353 """
54 if self.total_item_count == 0:
54 if self.avg_duration_ms == 0:
55 return 0
56 elif self.total_item_count == 0:
5557 return None
5658 else:
5759 # Use the exponential moving average so that we can adapt to
6365 Returns:
6466 The rate of items per millisecond, as a float
6567 """
66 if self.total_item_count == 0:
68 if self.total_duration_ms == 0:
69 return 0
70 elif self.total_item_count == 0:
6771 return None
6872 else:
6973 return float(self.total_item_count) / float(self.total_duration_ms)
1818
1919 from twisted.internet import defer
2020
21 from synapse.storage._base import SQLBaseStore
22 from synapse.storage.background_updates import BackgroundUpdateStore
2123 from synapse.util.caches.expiringcache import ExpiringCache
2224
23 from .background_updates import BackgroundUpdateStore
24
2525 logger = logging.getLogger(__name__)
2626
2727
28 class DeviceInboxStore(BackgroundUpdateStore):
28 class DeviceInboxWorkerStore(SQLBaseStore):
29 def get_to_device_stream_token(self):
30 return self._device_inbox_id_gen.get_current_token()
31
32 def get_new_messages_for_device(
33 self, user_id, device_id, last_stream_id, current_stream_id, limit=100
34 ):
35 """
36 Args:
37 user_id(str): The recipient user_id.
38 device_id(str): The recipient device_id.
39 current_stream_id(int): The current position of the to device
40 message stream.
41 Returns:
42 Deferred ([dict], int): List of messages for the device and where
43 in the stream the messages got to.
44 """
45 has_changed = self._device_inbox_stream_cache.has_entity_changed(
46 user_id, last_stream_id
47 )
48 if not has_changed:
49 return defer.succeed(([], current_stream_id))
50
51 def get_new_messages_for_device_txn(txn):
52 sql = (
53 "SELECT stream_id, message_json FROM device_inbox"
54 " WHERE user_id = ? AND device_id = ?"
55 " AND ? < stream_id AND stream_id <= ?"
56 " ORDER BY stream_id ASC"
57 " LIMIT ?"
58 )
59 txn.execute(sql, (
60 user_id, device_id, last_stream_id, current_stream_id, limit
61 ))
62 messages = []
63 for row in txn:
64 stream_pos = row[0]
65 messages.append(json.loads(row[1]))
66 if len(messages) < limit:
67 stream_pos = current_stream_id
68 return (messages, stream_pos)
69
70 return self.runInteraction(
71 "get_new_messages_for_device", get_new_messages_for_device_txn,
72 )
73
74 @defer.inlineCallbacks
75 def delete_messages_for_device(self, user_id, device_id, up_to_stream_id):
76 """
77 Args:
78 user_id(str): The recipient user_id.
79 device_id(str): The recipient device_id.
80 up_to_stream_id(int): Where to delete messages up to.
81 Returns:
82 A deferred that resolves to the number of messages deleted.
83 """
84 # If we have cached the last stream id we've deleted up to, we can
85 # check if there is likely to be anything that needs deleting
86 last_deleted_stream_id = self._last_device_delete_cache.get(
87 (user_id, device_id), None
88 )
89 if last_deleted_stream_id:
90 has_changed = self._device_inbox_stream_cache.has_entity_changed(
91 user_id, last_deleted_stream_id
92 )
93 if not has_changed:
94 defer.returnValue(0)
95
96 def delete_messages_for_device_txn(txn):
97 sql = (
98 "DELETE FROM device_inbox"
99 " WHERE user_id = ? AND device_id = ?"
100 " AND stream_id <= ?"
101 )
102 txn.execute(sql, (user_id, device_id, up_to_stream_id))
103 return txn.rowcount
104
105 count = yield self.runInteraction(
106 "delete_messages_for_device", delete_messages_for_device_txn
107 )
108
109 # Update the cache, ensuring that we only ever increase the value
110 last_deleted_stream_id = self._last_device_delete_cache.get(
111 (user_id, device_id), 0
112 )
113 self._last_device_delete_cache[(user_id, device_id)] = max(
114 last_deleted_stream_id, up_to_stream_id
115 )
116
117 defer.returnValue(count)
118
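A hedged usage sketch of the worker-store read path above: fetch a device's pending to-device messages up to the current stream token, deliver them, then acknowledge by deleting. The caller and `store` are assumed for illustration; this is not Synapse code:

from twisted.internet import defer

@defer.inlineCallbacks
def drain_device_inbox(store, user_id, device_id, last_stream_id):
    current_id = store.get_to_device_stream_token()
    messages, upto = yield store.get_new_messages_for_device(
        user_id, device_id, last_stream_id, current_id,
    )
    # ... hand `messages` to the client; once it has them, drop our copy.
    deleted = yield store.delete_messages_for_device(user_id, device_id, upto)
    defer.returnValue((upto, deleted))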
119 def get_new_device_msgs_for_remote(
120 self, destination, last_stream_id, current_stream_id, limit=100
121 ):
122 """
123 Args:
124 destination(str): The name of the remote server.
125 last_stream_id(int|long): The last position of the device message stream
126 that the server sent up to.
127 current_stream_id(int|long): The current position of the device
128 message stream.
129 Returns:
130 Deferred ([dict], int|long): List of messages for the device and where
131 in the stream the messages got to.
132 """
133
134 has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
135 destination, last_stream_id
136 )
137 if not has_changed or last_stream_id == current_stream_id:
138 return defer.succeed(([], current_stream_id))
139
140 def get_new_messages_for_remote_destination_txn(txn):
141 sql = (
142 "SELECT stream_id, messages_json FROM device_federation_outbox"
143 " WHERE destination = ?"
144 " AND ? < stream_id AND stream_id <= ?"
145 " ORDER BY stream_id ASC"
146 " LIMIT ?"
147 )
148 txn.execute(sql, (
149 destination, last_stream_id, current_stream_id, limit
150 ))
151 messages = []
152 for row in txn:
153 stream_pos = row[0]
154 messages.append(json.loads(row[1]))
155 if len(messages) < limit:
156 stream_pos = current_stream_id
157 return (messages, stream_pos)
158
159 return self.runInteraction(
160 "get_new_device_msgs_for_remote",
161 get_new_messages_for_remote_destination_txn,
162 )
163
164 def delete_device_msgs_for_remote(self, destination, up_to_stream_id):
165 """Used to delete messages when the remote destination acknowledges
166 their receipt.
167
168 Args:
169 destination(str): The destination server_name
170 up_to_stream_id(int): Where to delete messages up to.
171 Returns:
172 A deferred that resolves when the messages have been deleted.
173 """
174 def delete_messages_for_remote_destination_txn(txn):
175 sql = (
176 "DELETE FROM device_federation_outbox"
177 " WHERE destination = ?"
178 " AND stream_id <= ?"
179 )
180 txn.execute(sql, (destination, up_to_stream_id))
181
182 return self.runInteraction(
183 "delete_device_msgs_for_remote",
184 delete_messages_for_remote_destination_txn
185 )
186
187
188 class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
29189 DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
30190
31191 def __init__(self, db_conn, hs):
219379
220380 txn.executemany(sql, rows)
221381
222 def get_new_messages_for_device(
223 self, user_id, device_id, last_stream_id, current_stream_id, limit=100
224 ):
225 """
226 Args:
227 user_id(str): The recipient user_id.
228 device_id(str): The recipient device_id.
229 current_stream_id(int): The current position of the to device
230 message stream.
231 Returns:
232 Deferred ([dict], int): List of messages for the device and where
233 in the stream the messages got to.
234 """
235 has_changed = self._device_inbox_stream_cache.has_entity_changed(
236 user_id, last_stream_id
237 )
238 if not has_changed:
239 return defer.succeed(([], current_stream_id))
240
241 def get_new_messages_for_device_txn(txn):
242 sql = (
243 "SELECT stream_id, message_json FROM device_inbox"
244 " WHERE user_id = ? AND device_id = ?"
245 " AND ? < stream_id AND stream_id <= ?"
246 " ORDER BY stream_id ASC"
247 " LIMIT ?"
248 )
249 txn.execute(sql, (
250 user_id, device_id, last_stream_id, current_stream_id, limit
251 ))
252 messages = []
253 for row in txn:
254 stream_pos = row[0]
255 messages.append(json.loads(row[1]))
256 if len(messages) < limit:
257 stream_pos = current_stream_id
258 return (messages, stream_pos)
259
260 return self.runInteraction(
261 "get_new_messages_for_device", get_new_messages_for_device_txn,
262 )
263
264 @defer.inlineCallbacks
265 def delete_messages_for_device(self, user_id, device_id, up_to_stream_id):
266 """
267 Args:
268 user_id(str): The recipient user_id.
269 device_id(str): The recipient device_id.
270 up_to_stream_id(int): Where to delete messages up to.
271 Returns:
272 A deferred that resolves to the number of messages deleted.
273 """
274 # If we have cached the last stream id we've deleted up to, we can
275 # check if there is likely to be anything that needs deleting
276 last_deleted_stream_id = self._last_device_delete_cache.get(
277 (user_id, device_id), None
278 )
279 if last_deleted_stream_id:
280 has_changed = self._device_inbox_stream_cache.has_entity_changed(
281 user_id, last_deleted_stream_id
282 )
283 if not has_changed:
284 defer.returnValue(0)
285
286 def delete_messages_for_device_txn(txn):
287 sql = (
288 "DELETE FROM device_inbox"
289 " WHERE user_id = ? AND device_id = ?"
290 " AND stream_id <= ?"
291 )
292 txn.execute(sql, (user_id, device_id, up_to_stream_id))
293 return txn.rowcount
294
295 count = yield self.runInteraction(
296 "delete_messages_for_device", delete_messages_for_device_txn
297 )
298
299 # Update the cache, ensuring that we only ever increase the value
300 last_deleted_stream_id = self._last_device_delete_cache.get(
301 (user_id, device_id), 0
302 )
303 self._last_device_delete_cache[(user_id, device_id)] = max(
304 last_deleted_stream_id, up_to_stream_id
305 )
306
307 defer.returnValue(count)
308
309382 def get_all_new_device_messages(self, last_pos, current_pos, limit):
310383 """
311384 Args:
348421
349422 return self.runInteraction(
350423 "get_all_new_device_messages", get_all_new_device_messages_txn
351 )
352
353 def get_to_device_stream_token(self):
354 return self._device_inbox_id_gen.get_current_token()
355
356 def get_new_device_msgs_for_remote(
357 self, destination, last_stream_id, current_stream_id, limit=100
358 ):
359 """
360 Args:
361 destination(str): The name of the remote server.
362 last_stream_id(int|long): The last position of the device message stream
363 that the server sent up to.
364 current_stream_id(int|long): The current position of the device
365 message stream.
366 Returns:
367 Deferred ([dict], int|long): List of messages for the device and where
368 in the stream the messages got to.
369 """
370
371 has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
372 destination, last_stream_id
373 )
374 if not has_changed or last_stream_id == current_stream_id:
375 return defer.succeed(([], current_stream_id))
376
377 def get_new_messages_for_remote_destination_txn(txn):
378 sql = (
379 "SELECT stream_id, messages_json FROM device_federation_outbox"
380 " WHERE destination = ?"
381 " AND ? < stream_id AND stream_id <= ?"
382 " ORDER BY stream_id ASC"
383 " LIMIT ?"
384 )
385 txn.execute(sql, (
386 destination, last_stream_id, current_stream_id, limit
387 ))
388 messages = []
389 for row in txn:
390 stream_pos = row[0]
391 messages.append(json.loads(row[1]))
392 if len(messages) < limit:
393 stream_pos = current_stream_id
394 return (messages, stream_pos)
395
396 return self.runInteraction(
397 "get_new_device_msgs_for_remote",
398 get_new_messages_for_remote_destination_txn,
399 )
400
401 def delete_device_msgs_for_remote(self, destination, up_to_stream_id):
402 """Used to delete messages when the remote destination acknowledges
403 their receipt.
404
405 Args:
406 destination(str): The destination server_name
407 up_to_stream_id(int): Where to delete messages up to.
408 Returns:
409 A deferred that resolves when the messages have been deleted.
410 """
411 def delete_messages_for_remote_destination_txn(txn):
412 sql = (
413 "DELETE FROM device_federation_outbox"
414 " WHERE destination = ?"
415 " AND stream_id <= ?"
416 )
417 txn.execute(sql, (destination, up_to_stream_id))
418
419 return self.runInteraction(
420 "delete_device_msgs_for_remote",
421 delete_messages_for_remote_destination_txn
422424 )
423425
424426 @defer.inlineCallbacks
2121
2222 from synapse.api.errors import StoreError
2323 from synapse.metrics.background_process_metrics import run_as_background_process
24 from synapse.storage._base import Cache, SQLBaseStore, db_to_json
2425 from synapse.storage.background_updates import BackgroundUpdateStore
2526 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
26
27 from ._base import Cache, db_to_json
2827
2928 logger = logging.getLogger(__name__)
3029
3332 )
3433
3534
36 class DeviceStore(BackgroundUpdateStore):
35 class DeviceWorkerStore(SQLBaseStore):
36 def get_device(self, user_id, device_id):
37 """Retrieve a device.
38
39 Args:
40 user_id (str): The ID of the user which owns the device
41 device_id (str): The ID of the device to retrieve
42 Returns:
43 defer.Deferred for a dict containing the device information
44 Raises:
45 StoreError: if the device is not found
46 """
47 return self._simple_select_one(
48 table="devices",
49 keyvalues={"user_id": user_id, "device_id": device_id},
50 retcols=("user_id", "device_id", "display_name"),
51 desc="get_device",
52 )
53
54 @defer.inlineCallbacks
55 def get_devices_by_user(self, user_id):
56 """Retrieve all of a user's registered devices.
57
58 Args:
59 user_id (str):
60 Returns:
61 defer.Deferred: resolves to a dict from device_id to a dict
62 containing "device_id", "user_id" and "display_name" for each
63 device.
64 """
65 devices = yield self._simple_select_list(
66 table="devices",
67 keyvalues={"user_id": user_id},
68 retcols=("user_id", "device_id", "display_name"),
69 desc="get_devices_by_user"
70 )
71
72 defer.returnValue({d["device_id"]: d for d in devices})
73
74 def get_devices_by_remote(self, destination, from_stream_id):
75 """Get stream of updates to send to remote servers
76
77 Returns:
78 (int, list[dict]): current stream id and list of updates
79 """
80 now_stream_id = self._device_list_id_gen.get_current_token()
81
82 has_changed = self._device_list_federation_stream_cache.has_entity_changed(
83 destination, int(from_stream_id)
84 )
85 if not has_changed:
86 return (now_stream_id, [])
87
88 return self.runInteraction(
89 "get_devices_by_remote", self._get_devices_by_remote_txn,
90 destination, from_stream_id, now_stream_id,
91 )
92
93 def _get_devices_by_remote_txn(self, txn, destination, from_stream_id,
94 now_stream_id):
95 sql = """
96 SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes
97 WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
98 GROUP BY user_id, device_id
99 LIMIT 20
100 """
101 txn.execute(
102 sql, (destination, from_stream_id, now_stream_id, False)
103 )
104
105 # maps (user_id, device_id) -> stream_id
106 query_map = {(r[0], r[1]): r[2] for r in txn}
107 if not query_map:
108 return (now_stream_id, [])
109
110 if len(query_map) >= 20:
111 now_stream_id = max(stream_id for stream_id in itervalues(query_map))
112
113 devices = self._get_e2e_device_keys_txn(
114 txn, query_map.keys(), include_all_devices=True, include_deleted_devices=True
115 )
116
117 prev_sent_id_sql = """
118 SELECT coalesce(max(stream_id), 0) as stream_id
119 FROM device_lists_outbound_last_success
120 WHERE destination = ? AND user_id = ? AND stream_id <= ?
121 """
122
123 results = []
124 for user_id, user_devices in iteritems(devices):
125 # The prev_id for the first row is always the last row before
126 # `from_stream_id`
127 txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
128 rows = txn.fetchall()
129 prev_id = rows[0][0]
130 for device_id, device in iteritems(user_devices):
131 stream_id = query_map[(user_id, device_id)]
132 result = {
133 "user_id": user_id,
134 "device_id": device_id,
135 "prev_id": [prev_id] if prev_id else [],
136 "stream_id": stream_id,
137 }
138
139 prev_id = stream_id
140
141 if device is not None:
142 key_json = device.get("key_json", None)
143 if key_json:
144 result["keys"] = db_to_json(key_json)
145 device_display_name = device.get("device_display_name", None)
146 if device_display_name:
147 result["device_display_name"] = device_display_name
148 else:
149 result["deleted"] = True
150
151 results.append(result)
152
153 return (now_stream_id, results)
154
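The transaction above emits one update per (user, device), threading prev_id through consecutive rows so the receiving server can detect gaps in a user's device-list history. An illustrative result shape (all values assumed):

EXAMPLE_UPDATES = [
    {
        "user_id": "@alice:example.org",
        "device_id": "DEVICE1",
        "prev_id": [],            # first row after from_stream_id
        "stream_id": 10,
        "keys": {"algorithms": ["m.olm.v1.curve25519-aes-sha2"]},
    },
    {
        "user_id": "@alice:example.org",
        "device_id": "DEVICE2",
        "prev_id": [10],          # chained to the previous update's stream_id
        "stream_id": 12,
        "deleted": True,          # no cached device row -> reported deleted
    },
]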
155 def mark_as_sent_devices_by_remote(self, destination, stream_id):
156 """Mark that updates have successfully been sent to the destination.
157 """
158 return self.runInteraction(
159 "mark_as_sent_devices_by_remote", self._mark_as_sent_devices_by_remote_txn,
160 destination, stream_id,
161 )
162
163 def _mark_as_sent_devices_by_remote_txn(self, txn, destination, stream_id):
164 # We update the device_lists_outbound_last_success with the successfully
165 # poked users. We do the join to see which users need to be inserted and
166 # which updated.
167 sql = """
168 SELECT user_id, coalesce(max(o.stream_id), 0), (max(s.stream_id) IS NOT NULL)
169 FROM device_lists_outbound_pokes as o
170 LEFT JOIN device_lists_outbound_last_success as s
171 USING (destination, user_id)
172 WHERE destination = ? AND o.stream_id <= ?
173 GROUP BY user_id
174 """
175 txn.execute(sql, (destination, stream_id,))
176 rows = txn.fetchall()
177
178 sql = """
179 UPDATE device_lists_outbound_last_success
180 SET stream_id = ?
181 WHERE destination = ? AND user_id = ?
182 """
183 txn.executemany(
184 sql, ((row[1], destination, row[0],) for row in rows if row[2])
185 )
186
187 sql = """
188 INSERT INTO device_lists_outbound_last_success
189 (destination, user_id, stream_id) VALUES (?, ?, ?)
190 """
191 txn.executemany(
192 sql, ((destination, row[0], row[1],) for row in rows if not row[2])
193 )
194
195 # Delete all sent outbound pokes
196 sql = """
197 DELETE FROM device_lists_outbound_pokes
198 WHERE destination = ? AND stream_id <= ?
199 """
200 txn.execute(sql, (destination, stream_id,))
201
202 def get_device_stream_token(self):
203 return self._device_list_id_gen.get_current_token()
204
205 @defer.inlineCallbacks
206 def get_user_devices_from_cache(self, query_list):
207 """Get the devices (and keys if any) for remote users from the cache.
208
209 Args:
210 query_list(list): List of (user_id, device_ids), if device_ids is
211 falsey then return all device ids for that user.
212
213 Returns:
214 (user_ids_not_in_cache, results_map), where user_ids_not_in_cache is
215 a set of user_ids and results_map is a mapping of
216 user_id -> device_id -> device_info
217 """
218 user_ids = set(user_id for user_id, _ in query_list)
219 user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids))
220 user_ids_in_cache = set(
221 user_id for user_id, stream_id in user_map.items() if stream_id
222 )
223 user_ids_not_in_cache = user_ids - user_ids_in_cache
224
225 results = {}
226 for user_id, device_id in query_list:
227 if user_id not in user_ids_in_cache:
228 continue
229
230 if device_id:
231 device = yield self._get_cached_user_device(user_id, device_id)
232 results.setdefault(user_id, {})[device_id] = device
233 else:
234 results[user_id] = yield self._get_cached_devices_for_user(user_id)
235
236 defer.returnValue((user_ids_not_in_cache, results))
237
238 @cachedInlineCallbacks(num_args=2, tree=True)
239 def _get_cached_user_device(self, user_id, device_id):
240 content = yield self._simple_select_one_onecol(
241 table="device_lists_remote_cache",
242 keyvalues={
243 "user_id": user_id,
244 "device_id": device_id,
245 },
246 retcol="content",
247 desc="_get_cached_user_device",
248 )
249 defer.returnValue(db_to_json(content))
250
251 @cachedInlineCallbacks()
252 def _get_cached_devices_for_user(self, user_id):
253 devices = yield self._simple_select_list(
254 table="device_lists_remote_cache",
255 keyvalues={
256 "user_id": user_id,
257 },
258 retcols=("device_id", "content"),
259 desc="_get_cached_devices_for_user",
260 )
261 defer.returnValue({
262 device["device_id"]: db_to_json(device["content"])
263 for device in devices
264 })
265
266 def get_devices_with_keys_by_user(self, user_id):
267 """Get all devices (with any device keys) for a user
268
269 Returns:
270 (stream_id, devices)
271 """
272 return self.runInteraction(
273 "get_devices_with_keys_by_user",
274 self._get_devices_with_keys_by_user_txn, user_id,
275 )
276
277 def _get_devices_with_keys_by_user_txn(self, txn, user_id):
278 now_stream_id = self._device_list_id_gen.get_current_token()
279
280 devices = self._get_e2e_device_keys_txn(
281 txn, [(user_id, None)], include_all_devices=True
282 )
283
284 if devices:
285 user_devices = devices[user_id]
286 results = []
287 for device_id, device in iteritems(user_devices):
288 result = {
289 "device_id": device_id,
290 }
291
292 key_json = device.get("key_json", None)
293 if key_json:
294 result["keys"] = db_to_json(key_json)
295 device_display_name = device.get("device_display_name", None)
296 if device_display_name:
297 result["device_display_name"] = device_display_name
298
299 results.append(result)
300
301 return now_stream_id, results
302
303 return now_stream_id, []
304
305 @defer.inlineCallbacks
306 def get_user_whose_devices_changed(self, from_key):
307 """Get set of users whose devices have changed since `from_key`.
308 """
309 from_key = int(from_key)
310 changed = self._device_list_stream_cache.get_all_entities_changed(from_key)
311 if changed is not None:
312 defer.returnValue(set(changed))
313
314 sql = """
315 SELECT DISTINCT user_id FROM device_lists_stream WHERE stream_id > ?
316 """
317 rows = yield self._execute("get_user_whose_devices_changed", None, sql, from_key)
318 defer.returnValue(set(row[0] for row in rows))
319
320 def get_all_device_list_changes_for_remotes(self, from_key, to_key):
321 """Return a list of `(stream_id, user_id, destination)` which is the
322 combined list of changes to devices, and which destinations need to be
323 poked. `destination` may be None if no destinations need to be poked.
324 """
325 # We do a group by here as there can be a large number of duplicate
326 # entries, since we throw away device IDs.
327 sql = """
328 SELECT MAX(stream_id) AS stream_id, user_id, destination
329 FROM device_lists_stream
330 LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id)
331 WHERE ? < stream_id AND stream_id <= ?
332 GROUP BY user_id, destination
333 """
334 return self._execute(
335 "get_all_device_list_changes_for_remotes", None,
336 sql, from_key, to_key
337 )
338
339 @cached(max_entries=10000)
340 def get_device_list_last_stream_id_for_remote(self, user_id):
341 """Get the last stream_id we got for a user. May be None if we haven't
342 got any information for them.
343 """
344 return self._simple_select_one_onecol(
345 table="device_lists_remote_extremeties",
346 keyvalues={"user_id": user_id},
347 retcol="stream_id",
348 desc="get_device_list_last_stream_id_for_remote",
349 allow_none=True,
350 )
351
352 @cachedList(cached_method_name="get_device_list_last_stream_id_for_remote",
353 list_name="user_ids", inlineCallbacks=True)
354 def get_device_list_last_stream_id_for_remotes(self, user_ids):
355 rows = yield self._simple_select_many_batch(
356 table="device_lists_remote_extremeties",
357 column="user_id",
358 iterable=user_ids,
359 retcols=("user_id", "stream_id",),
360 desc="get_device_list_last_stream_id_for_remotes",
361 )
362
363 results = {user_id: None for user_id in user_ids}
364 results.update({
365 row["user_id"]: row["stream_id"] for row in rows
366 })
367
368 defer.returnValue(results)
369
370
371 class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
37372 def __init__(self, db_conn, hs):
38373 super(DeviceStore, self).__init__(db_conn, hs)
39374
120455 initial_device_display_name, e)
121456 raise StoreError(500, "Problem storing device.")
122457
123 def get_device(self, user_id, device_id):
124 """Retrieve a device.
125
126 Args:
127 user_id (str): The ID of the user which owns the device
128 device_id (str): The ID of the device to retrieve
129 Returns:
130 defer.Deferred for a dict containing the device information
131 Raises:
132 StoreError: if the device is not found
133 """
134 return self._simple_select_one(
135 table="devices",
136 keyvalues={"user_id": user_id, "device_id": device_id},
137 retcols=("user_id", "device_id", "display_name"),
138 desc="get_device",
139 )
140
141458 @defer.inlineCallbacks
142459 def delete_device(self, user_id, device_id):
143460 """Delete a device.
200517 updatevalues=updates,
201518 desc="update_device",
202519 )
203
204 @defer.inlineCallbacks
205 def get_devices_by_user(self, user_id):
206 """Retrieve all of a user's registered devices.
207
208 Args:
209 user_id (str):
210 Returns:
211 defer.Deferred: resolves to a dict from device_id to a dict
212 containing "device_id", "user_id" and "display_name" for each
213 device.
214 """
215 devices = yield self._simple_select_list(
216 table="devices",
217 keyvalues={"user_id": user_id},
218 retcols=("user_id", "device_id", "display_name"),
219 desc="get_devices_by_user"
220 )
221
222 defer.returnValue({d["device_id"]: d for d in devices})
223
224 @cached(max_entries=10000)
225 def get_device_list_last_stream_id_for_remote(self, user_id):
226 """Get the last stream_id we got for a user. May be None if we haven't
227 got any information for them.
228 """
229 return self._simple_select_one_onecol(
230 table="device_lists_remote_extremeties",
231 keyvalues={"user_id": user_id},
232 retcol="stream_id",
233 desc="get_device_list_remote_extremity",
234 allow_none=True,
235 )
236
237 @cachedList(cached_method_name="get_device_list_last_stream_id_for_remote",
238 list_name="user_ids", inlineCallbacks=True)
239 def get_device_list_last_stream_id_for_remotes(self, user_ids):
240 rows = yield self._simple_select_many_batch(
241 table="device_lists_remote_extremeties",
242 column="user_id",
243 iterable=user_ids,
244 retcols=("user_id", "stream_id",),
245 desc="get_user_devices_from_cache",
246 )
247
248 results = {user_id: None for user_id in user_ids}
249 results.update({
250 row["user_id"]: row["stream_id"] for row in rows
251 })
252
253 defer.returnValue(results)
254520
255521 @defer.inlineCallbacks
256522 def mark_remote_user_device_list_as_unsubscribed(self, user_id):
402668 # we don't need to lock, because we can assume we are the only thread
403669 # updating this user's extremity.
404670 lock=False,
405 )
406
407 def get_devices_by_remote(self, destination, from_stream_id):
408 """Get stream of updates to send to remote servers
409
410 Returns:
411 (int, list[dict]): current stream id and list of updates
412 """
413 now_stream_id = self._device_list_id_gen.get_current_token()
414
415 has_changed = self._device_list_federation_stream_cache.has_entity_changed(
416 destination, int(from_stream_id)
417 )
418 if not has_changed:
419 return (now_stream_id, [])
420
421 return self.runInteraction(
422 "get_devices_by_remote", self._get_devices_by_remote_txn,
423 destination, from_stream_id, now_stream_id,
424 )
425
426 def _get_devices_by_remote_txn(self, txn, destination, from_stream_id,
427 now_stream_id):
428 sql = """
429 SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes
430 WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
431 GROUP BY user_id, device_id
432 LIMIT 20
433 """
434 txn.execute(
435 sql, (destination, from_stream_id, now_stream_id, False)
436 )
437
438 # maps (user_id, device_id) -> stream_id
439 query_map = {(r[0], r[1]): r[2] for r in txn}
440 if not query_map:
441 return (now_stream_id, [])
442
443 if len(query_map) >= 20:
444 now_stream_id = max(stream_id for stream_id in itervalues(query_map))
445
446 devices = self._get_e2e_device_keys_txn(
447 txn, query_map.keys(), include_all_devices=True, include_deleted_devices=True
448 )
449
450 prev_sent_id_sql = """
451 SELECT coalesce(max(stream_id), 0) as stream_id
452 FROM device_lists_outbound_last_success
453 WHERE destination = ? AND user_id = ? AND stream_id <= ?
454 """
455
456 results = []
457 for user_id, user_devices in iteritems(devices):
458 # The prev_id for the first row is always the last row before
459 # `from_stream_id`
460 txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id))
461 rows = txn.fetchall()
462 prev_id = rows[0][0]
463 for device_id, device in iteritems(user_devices):
464 stream_id = query_map[(user_id, device_id)]
465 result = {
466 "user_id": user_id,
467 "device_id": device_id,
468 "prev_id": [prev_id] if prev_id else [],
469 "stream_id": stream_id,
470 }
471
472 prev_id = stream_id
473
474 if device is not None:
475 key_json = device.get("key_json", None)
476 if key_json:
477 result["keys"] = db_to_json(key_json)
478 device_display_name = device.get("device_display_name", None)
479 if device_display_name:
480 result["device_display_name"] = device_display_name
481 else:
482 result["deleted"] = True
483
484 results.append(result)
485
486 return (now_stream_id, results)
487
488 @defer.inlineCallbacks
489 def get_user_devices_from_cache(self, query_list):
490 """Get the devices (and keys if any) for remote users from the cache.
491
492 Args:
493 query_list(list): List of (user_id, device_ids), if device_ids is
494 falsey then return all device ids for that user.
495
496 Returns:
497 (user_ids_not_in_cache, results_map), where user_ids_not_in_cache is
498 a set of user_ids and results_map is a mapping of
499 user_id -> device_id -> device_info
500 """
501 user_ids = set(user_id for user_id, _ in query_list)
502 user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids))
503 user_ids_in_cache = set(
504 user_id for user_id, stream_id in user_map.items() if stream_id
505 )
506 user_ids_not_in_cache = user_ids - user_ids_in_cache
507
508 results = {}
509 for user_id, device_id in query_list:
510 if user_id not in user_ids_in_cache:
511 continue
512
513 if device_id:
514 device = yield self._get_cached_user_device(user_id, device_id)
515 results.setdefault(user_id, {})[device_id] = device
516 else:
517 results[user_id] = yield self._get_cached_devices_for_user(user_id)
518
519 defer.returnValue((user_ids_not_in_cache, results))
520
521 @cachedInlineCallbacks(num_args=2, tree=True)
522 def _get_cached_user_device(self, user_id, device_id):
523 content = yield self._simple_select_one_onecol(
524 table="device_lists_remote_cache",
525 keyvalues={
526 "user_id": user_id,
527 "device_id": device_id,
528 },
529 retcol="content",
530 desc="_get_cached_user_device",
531 )
532 defer.returnValue(db_to_json(content))
533
534 @cachedInlineCallbacks()
535 def _get_cached_devices_for_user(self, user_id):
536 devices = yield self._simple_select_list(
537 table="device_lists_remote_cache",
538 keyvalues={
539 "user_id": user_id,
540 },
541 retcols=("device_id", "content"),
542 desc="_get_cached_devices_for_user",
543 )
544 defer.returnValue({
545 device["device_id"]: db_to_json(device["content"])
546 for device in devices
547 })
548
549 def get_devices_with_keys_by_user(self, user_id):
550 """Get all devices (with any device keys) for a user
551
552 Returns:
553 (stream_id, devices)
554 """
555 return self.runInteraction(
556 "get_devices_with_keys_by_user",
557 self._get_devices_with_keys_by_user_txn, user_id,
558 )
559
560 def _get_devices_with_keys_by_user_txn(self, txn, user_id):
561 now_stream_id = self._device_list_id_gen.get_current_token()
562
563 devices = self._get_e2e_device_keys_txn(
564 txn, [(user_id, None)], include_all_devices=True
565 )
566
567 if devices:
568 user_devices = devices[user_id]
569 results = []
570 for device_id, device in iteritems(user_devices):
571 result = {
572 "device_id": device_id,
573 }
574
575 key_json = device.get("key_json", None)
576 if key_json:
577 result["keys"] = db_to_json(key_json)
578 device_display_name = device.get("device_display_name", None)
579 if device_display_name:
580 result["device_display_name"] = device_display_name
581
582 results.append(result)
583
584 return now_stream_id, results
585
586 return now_stream_id, []
587
588 def mark_as_sent_devices_by_remote(self, destination, stream_id):
589 """Mark that updates have successfully been sent to the destination.
590 """
591 return self.runInteraction(
592 "mark_as_sent_devices_by_remote", self._mark_as_sent_devices_by_remote_txn,
593 destination, stream_id,
594 )
595
596 def _mark_as_sent_devices_by_remote_txn(self, txn, destination, stream_id):
597 # We update the device_lists_outbound_last_success with the successfully
598 # poked users. We do the join to see which users need to be inserted and
599 # which updated.
600 sql = """
601 SELECT user_id, coalesce(max(o.stream_id), 0), (max(s.stream_id) IS NOT NULL)
602 FROM device_lists_outbound_pokes as o
603 LEFT JOIN device_lists_outbound_last_success as s
604 USING (destination, user_id)
605 WHERE destination = ? AND o.stream_id <= ?
606 GROUP BY user_id
607 """
608 txn.execute(sql, (destination, stream_id,))
609 rows = txn.fetchall()
610
611 sql = """
612 UPDATE device_lists_outbound_last_success
613 SET stream_id = ?
614 WHERE destination = ? AND user_id = ?
615 """
616 txn.executemany(
617 sql, ((row[1], destination, row[0],) for row in rows if row[2])
618 )
619
620 sql = """
621 INSERT INTO device_lists_outbound_last_success
622 (destination, user_id, stream_id) VALUES (?, ?, ?)
623 """
624 txn.executemany(
625 sql, ((destination, row[0], row[1],) for row in rows if not row[2])
626 )
627
628 # Delete all sent outbound pokes
629 sql = """
630 DELETE FROM device_lists_outbound_pokes
631 WHERE destination = ? AND stream_id <= ?
632 """
633 txn.execute(sql, (destination, stream_id,))
634
635 @defer.inlineCallbacks
636 def get_user_whose_devices_changed(self, from_key):
637 """Get set of users whose devices have changed since `from_key`.
638 """
639 from_key = int(from_key)
640 changed = self._device_list_stream_cache.get_all_entities_changed(from_key)
641 if changed is not None:
642 defer.returnValue(set(changed))
643
644 sql = """
645 SELECT DISTINCT user_id FROM device_lists_stream WHERE stream_id > ?
646 """
647 rows = yield self._execute("get_user_whose_devices_changed", None, sql, from_key)
648 defer.returnValue(set(row[0] for row in rows))
649
650 def get_all_device_list_changes_for_remotes(self, from_key, to_key):
651 """Return a list of `(stream_id, user_id, destination)` which is the
652 combined list of changes to devices, and which destinations need to be
653 poked. `destination` may be None if no destinations need to be poked.
654 """
655 # We do a group by here as there can be a large number of duplicate
656 # entries, since we throw away device IDs.
657 sql = """
658 SELECT MAX(stream_id) AS stream_id, user_id, destination
659 FROM device_lists_stream
660 LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id)
661 WHERE ? < stream_id AND stream_id <= ?
662 GROUP BY user_id, destination
663 """
664 return self._execute(
665 "get_all_device_list_changes_for_remotes", None,
666 sql, from_key, to_key
667671 )
668672
669673 @defer.inlineCallbacks
731735 ]
732736 )
733737
734 def get_device_stream_token(self):
735 return self._device_list_id_gen.get_current_token()
736
737738 def _prune_old_outbound_device_pokes(self):
738739 """Delete old entries out of the device_lists_outbound_pokes to ensure
739740 that we don't fill up due to dead servers. We keep one entry per
2222 from ._base import SQLBaseStore, db_to_json
2323
2424
25 class EndToEndKeyStore(SQLBaseStore):
26 def set_e2e_device_keys(self, user_id, device_id, time_now, device_keys):
27 """Stores device keys for a device. Returns whether there was a change
28 or the keys were already in the database.
29 """
30 def _set_e2e_device_keys_txn(txn):
31 old_key_json = self._simple_select_one_onecol_txn(
32 txn,
33 table="e2e_device_keys_json",
34 keyvalues={
35 "user_id": user_id,
36 "device_id": device_id,
37 },
38 retcol="key_json",
39 allow_none=True,
40 )
41
42 # In py3 we need old_key_json to match new_key_json type. The DB
43 # returns unicode while encode_canonical_json returns bytes.
44 new_key_json = encode_canonical_json(device_keys).decode("utf-8")
45
46 if old_key_json == new_key_json:
47 return False
48
49 self._simple_upsert_txn(
50 txn,
51 table="e2e_device_keys_json",
52 keyvalues={
53 "user_id": user_id,
54 "device_id": device_id,
55 },
56 values={
57 "ts_added_ms": time_now,
58 "key_json": new_key_json,
59 }
60 )
61
62 return True
63
64 return self.runInteraction(
65 "set_e2e_device_keys", _set_e2e_device_keys_txn
66 )
67
25 class EndToEndKeyWorkerStore(SQLBaseStore):
6826 @defer.inlineCallbacks
6927 def get_e2e_device_keys(
7028 self, query_list, include_all_devices=False,
237195 "count_e2e_one_time_keys", _count_e2e_one_time_keys
238196 )
239197
198
199 class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
200 def set_e2e_device_keys(self, user_id, device_id, time_now, device_keys):
201 """Stores device keys for a device. Returns whether there was a change
202 or the keys were already in the database.
203 """
204 def _set_e2e_device_keys_txn(txn):
205 old_key_json = self._simple_select_one_onecol_txn(
206 txn,
207 table="e2e_device_keys_json",
208 keyvalues={
209 "user_id": user_id,
210 "device_id": device_id,
211 },
212 retcol="key_json",
213 allow_none=True,
214 )
215
216 # In py3 we need old_key_json to match new_key_json type. The DB
217 # returns unicode while encode_canonical_json returns bytes.
218 new_key_json = encode_canonical_json(device_keys).decode("utf-8")
219
220 if old_key_json == new_key_json:
221 return False
222
223 self._simple_upsert_txn(
224 txn,
225 table="e2e_device_keys_json",
226 keyvalues={
227 "user_id": user_id,
228 "device_id": device_id,
229 },
230 values={
231 "ts_added_ms": time_now,
232 "key_json": new_key_json,
233 }
234 )
235
236 return True
237
238 return self.runInteraction(
239 "set_e2e_device_keys", _set_e2e_device_keys_txn
240 )
241
240242 def claim_e2e_one_time_keys(self, query_list):
241243 """Take a list of one time keys out of the database"""
242244 def _claim_e2e_one_time_keys(txn):
440440 # reverse it so that the events are approximately chronological.
441441 event_results.reverse()
442442 return event_results
443
444 @defer.inlineCallbacks
445 def get_successor_events(self, event_ids):
446 """Fetch all events that have the given events as a prev event
447
448 Args:
449 event_ids (iterable[str])
450
451 Returns:
452 Deferred[list[str]]
453 """
454 rows = yield self._simple_select_many_batch(
455 table="event_edges",
456 column="prev_event_id",
457 iterable=event_ids,
458 retcols=("event_id",),
459 desc="get_successor_events"
460 )
461
462 defer.returnValue([
463 row["event_id"] for row in rows
464 ])
443465
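The new get_successor_events inverts the usual prev-event lookup: event_edges stores (event_id, prev_event_id) pairs, so matching on prev_event_id yields an event's children in the DAG. A small usage sketch (the helper and event IDs are illustrative):

from twisted.internet import defer

@defer.inlineCallbacks
def children_of(store, event_id):
    # Events that list `event_id` among their prev_events.
    successors = yield store.get_successor_events([event_id])
    defer.returnValue(successors)  # e.g. ["$child1:example.org", ...]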
444466
445467 class EventFederationStore(EventFederationWorkerStore):
536536 new_events = [
537537 event for event, ctx in event_contexts
538538 if not event.internal_metadata.is_outlier() and not ctx.rejected
539 and not event.internal_metadata.is_soft_failed()
539540 ]
540541
541542 # start with the existing forward extremities
14051406 values=state_values,
14061407 )
14071408
1408 self._simple_insert_many_txn(
1409 txn,
1410 table="event_edges",
1411 values=[
1412 {
1413 "event_id": event.event_id,
1414 "prev_event_id": prev_id,
1415 "room_id": event.room_id,
1416 "is_state": True,
1417 }
1418 for event, _ in state_events_and_contexts
1419 for prev_id, _ in event.prev_state
1420 ],
1421 )
1422
14231409 # Prefill the event cache
14241410 self._add_to_cache(txn, events_and_contexts)
14251411
183183 )
184184
185185 defer.returnValue(results)
186
187 @defer.inlineCallbacks
188 def move_push_rule_from_room_to_room(
189 self, new_room_id, user_id, rule,
190 ):
191 """Move a single push rule from one room to another for a specific user.
192
193 Args:
194 new_room_id (str): ID of the new room.
195 user_id (str): ID of user the push rule belongs to.
196 rule (Dict): A push rule.
197 """
198 # Create new rule id
199 rule_id_scope = '/'.join(rule["rule_id"].split('/')[:-1])
200 new_rule_id = rule_id_scope + "/" + new_room_id
201
202 # Change room id in each condition
203 for condition in rule.get("conditions", []):
204 if condition.get("key") == "room_id":
205 condition["pattern"] = new_room_id
206
207 # Add the rule for the new room
208 yield self.add_push_rule(
209 user_id=user_id,
210 rule_id=new_rule_id,
211 priority_class=rule["priority_class"],
212 conditions=rule["conditions"],
213 actions=rule["actions"],
214 )
215
216 # Delete push rule for the old room
217 yield self.delete_push_rule(user_id, rule["rule_id"])
218
219 @defer.inlineCallbacks
220 def move_push_rules_from_room_to_room_for_user(
221 self, old_room_id, new_room_id, user_id,
222 ):
223 """Move all of the push rules from one room to another for a specific
224 user.
225
226 Args:
227 old_room_id (str): ID of the old room.
228 new_room_id (str): ID of the new room.
229 user_id (str): ID of user to copy push rules for.
230 """
231 # Retrieve push rules for this user
232 user_push_rules = yield self.get_push_rules_for_user(user_id)
233
234 # Get rules relating to the old room, move them to the new room, then
235 # delete them from the old room
236 for rule in user_push_rules:
237 conditions = rule.get("conditions", [])
238 if any((c.get("key") == "room_id" and
239 c.get("pattern") == old_room_id) for c in conditions):
240 self.move_push_rule_from_room_to_room(
241 new_room_id, user_id, rule,
242 )
186243
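The two helpers above rewrite a room-scoped rule during a room upgrade: the last path segment of rule_id is swapped for the new room ID and every room_id condition is re-pointed. An illustrative before/after (the rule contents are assumed):

old_rule = {
    "rule_id": "global/room/!old:example.org",
    "priority_class": 0,
    "conditions": [
        {"kind": "event_match", "key": "room_id", "pattern": "!old:example.org"},
    ],
    "actions": ["dont_notify"],
}

# After move_push_rule_from_room_to_room(new_room_id="!new:example.org", ...):
new_rule = {
    "rule_id": "global/room/!new:example.org",   # scope kept, room swapped
    "priority_class": 0,
    "conditions": [
        {"kind": "event_match", "key": "room_id", "pattern": "!new:example.org"},
    ],
    "actions": ["dont_notify"],
}
# ...and the old rule_id is then removed via delete_push_rule().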
187244 @defer.inlineCallbacks
188245 def bulk_get_push_rules_for_room(self, event, context):
300300 args.append(limit)
301301 txn.execute(sql, args)
302302
303 return txn.fetchall()
303 return (
304 r[0:5] + (json.loads(r[5]), ) for r in txn
305 )
304306 return self.runInteraction(
305307 "get_all_updated_receipts", get_all_updated_receipts_txn
306308 )
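With the change above, each row now arrives with its receipt payload already decoded from JSON rather than as a raw string. Assuming the query selects (stream_id, room_id, receipt_type, user_id, event_id, data), a returned row would look like:

example_row = (
    2017,                    # stream_id
    "!room:example.org",     # room_id
    "m.read",                # receipt_type
    "@alice:example.org",    # user_id
    "$event:example.org",    # event_id
    {"ts": 1550000000000},   # data, decoded by json.loads above
)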
345347
346348 def insert_linearized_receipt_txn(self, txn, room_id, receipt_type,
347349 user_id, event_id, data, stream_id):
350 """Inserts a read-receipt into the database if it's newer than the current RR
351
352 Returns: int|None
353 None if the RR is older than the current RR
354 otherwise, the rx timestamp of the event that the RR corresponds to
355 (or 0 if the event is unknown)
356 """
348357 res = self._simple_select_one_txn(
349358 txn,
350359 table="events",
351 retcols=["topological_ordering", "stream_ordering"],
360 retcols=["stream_ordering", "received_ts"],
352361 keyvalues={"event_id": event_id},
353362 allow_none=True
354363 )
355364
356365 stream_ordering = int(res["stream_ordering"]) if res else None
366 rx_ts = res["received_ts"] if res else 0
357367
358368 # We don't want to clobber receipts for more recent events, so we
359369 # have to compare orderings of existing receipts
372382 "one for later event %s",
373383 event_id, eid,
374384 )
375 return False
385 return None
376386
377387 txn.call_after(
378388 self.get_receipts_for_room.invalidate, (room_id, receipt_type)
428438 stream_ordering=stream_ordering,
429439 )
430440
431 return True
441 return rx_ts
432442
433443 @defer.inlineCallbacks
434444 def insert_receipt(self, room_id, receipt_type, user_id, event_ids, data):
465475
466476 stream_id_manager = self._receipts_id_gen.get_next()
467477 with stream_id_manager as stream_id:
468 have_persisted = yield self.runInteraction(
478 event_ts = yield self.runInteraction(
469479 "insert_linearized_receipt",
470480 self.insert_linearized_receipt_txn,
471481 room_id, receipt_type, user_id, linearized_event_id,
473483 stream_id=stream_id,
474484 )
475485
476 if not have_persisted:
477 defer.returnValue(None)
486 if event_ts is None:
487 defer.returnValue(None)
488
489 now = self._clock.time_msec()
490 logger.debug(
491 "RR for event %s in %s (%i ms old)",
492 linearized_event_id, room_id, now - event_ts,
493 )
478494
479495 yield self.insert_graph_receipt(
480496 room_id, receipt_type, user_id, event_ids, data
293293 if ret:
294294 return ret['user_id']
295295 return None
296
297 @defer.inlineCallbacks
298 def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
299 yield self._simple_upsert("user_threepids", {
300 "medium": medium,
301 "address": address,
302 }, {
303 "user_id": user_id,
304 "validated_at": validated_at,
305 "added_at": added_at,
306 })
307
308 @defer.inlineCallbacks
309 def user_get_threepids(self, user_id):
310 ret = yield self._simple_select_list(
311 "user_threepids", {
312 "user_id": user_id
313 },
314 ['medium', 'address', 'validated_at', 'added_at'],
315 'user_get_threepids'
316 )
317 defer.returnValue(ret)
318
319 def user_delete_threepid(self, user_id, medium, address):
320 return self._simple_delete(
321 "user_threepids",
322 keyvalues={
323 "user_id": user_id,
324 "medium": medium,
325 "address": address,
326 },
327 desc="user_delete_threepids",
328 )
296329
297330
298331 class RegistrationStore(RegistrationWorkerStore,
632665 defer.returnValue(res if res else False)
633666
634667 @defer.inlineCallbacks
635 def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
636 yield self._simple_upsert("user_threepids", {
637 "medium": medium,
638 "address": address,
639 }, {
640 "user_id": user_id,
641 "validated_at": validated_at,
642 "added_at": added_at,
643 })
644
645 @defer.inlineCallbacks
646 def user_get_threepids(self, user_id):
647 ret = yield self._simple_select_list(
648 "user_threepids", {
649 "user_id": user_id
650 },
651 ['medium', 'address', 'validated_at', 'added_at'],
652 'user_get_threepids'
653 )
654 defer.returnValue(ret)
655
656 def user_delete_threepid(self, user_id, medium, address):
657 return self._simple_delete(
658 "user_threepids",
659 keyvalues={
660 "user_id": user_id,
661 "medium": medium,
662 "address": address,
663 },
664 desc="user_delete_threepids",
665 )
666
667 @defer.inlineCallbacks
668668 def save_or_get_3pid_guest_access_token(
669669 self, medium, address, access_token, inviter_user_id
670670 ):
499499
500500 @defer.inlineCallbacks
501501 def block_room(self, room_id, user_id):
502 yield self._simple_insert(
502 """Marks the room as blocked. Can be called multiple times.
503
504 Args:
505 room_id (str): Room to block
506 user_id (str): Who blocked it
507
508 Returns:
509 Deferred
510 """
511 yield self._simple_upsert(
503512 table="blocked_rooms",
504 values={
513 keyvalues={
505514 "room_id": room_id,
515 },
516 values={},
517 insertion_values={
506518 "user_id": user_id,
507519 },
508520 desc="block_room",
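As the new docstring says, block_room is now idempotent: an upsert with empty `values` compiles down to an insert-or-ignore, so repeated blocks neither error nor change who blocked first. A usage sketch (the store and IDs are assumed):

from twisted.internet import defer

@defer.inlineCallbacks
def block_room_twice(store):
    yield store.block_room("!room:example.org", "@admin:example.org")
    # The second call is a no-op: values={} means nothing is updated on
    # conflict, so the original user_id from insertion_values is kept.
    yield store.block_room("!room:example.org", "@other:example.org")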
0 /* Copyright 2019 New Vector Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 -- Set up staging tables
16 INSERT INTO background_updates (update_name, progress_json) VALUES
17 ('populate_user_directory_createtables', '{}');
18
19 -- Run through each room and update the user directory according to who is in it
20 INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
21 ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables');
22
23 -- Insert all users, if search_all_users is on
24 INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
25 ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms');
26
27 -- Clean up staging tables
28 INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
29 ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users');
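The depends_on column chains these four updates so each only runs after its predecessor completes: createtables -> process_rooms -> process_users -> cleanup. A toy model of that gating (not the actual BackgroundUpdateStore scheduler):

DEPENDS = {
    "populate_user_directory_process_rooms": "populate_user_directory_createtables",
    "populate_user_directory_process_users": "populate_user_directory_process_rooms",
    "populate_user_directory_cleanup": "populate_user_directory_process_users",
}

def is_runnable(update_name, completed):
    """True once the update's depends_on (if any) has finished."""
    dep = DEPENDS.get(update_name)
    return dep is None or dep in completed

# is_runnable("populate_user_directory_createtables", set()) -> True
# is_runnable("populate_user_directory_cleanup", set())      -> False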
0 /* Copyright 2017 Vector Creations Ltd, 2019 New Vector Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 -- Old disused version of the tables below.
16 DROP TABLE IF EXISTS users_who_share_rooms;
17
18 -- Tables keeping track of what users share rooms. This is a map of local users
19 -- to local or remote users, per room. Remote users cannot be in the user_id
20 -- column, only the other_user_id column. There are two tables, one for public
21 -- rooms and those for private rooms.
22 CREATE TABLE IF NOT EXISTS users_who_share_public_rooms (
23 user_id TEXT NOT NULL,
24 other_user_id TEXT NOT NULL,
25 room_id TEXT NOT NULL
26 );
27
28 CREATE TABLE IF NOT EXISTS users_who_share_private_rooms (
29 user_id TEXT NOT NULL,
30 other_user_id TEXT NOT NULL,
31 room_id TEXT NOT NULL
32 );
33
34 CREATE UNIQUE INDEX users_who_share_public_rooms_u_idx ON users_who_share_public_rooms(user_id, other_user_id, room_id);
35 CREATE INDEX users_who_share_public_rooms_r_idx ON users_who_share_public_rooms(room_id);
36 CREATE INDEX users_who_share_public_rooms_o_idx ON users_who_share_public_rooms(other_user_id);
37
38 CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id);
39 CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id);
40 CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id);
41
42 -- Make sure that we populate the tables initially by resetting the stream ID
43 UPDATE user_directory_stream_pos SET stream_id = NULL;
0 /* Copyright 2019 New Vector Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 -- We don't need the old version of this table.
16 DROP TABLE IF EXISTS users_in_public_rooms;
17
18 -- Old version of users_in_public_rooms
19 DROP TABLE IF EXISTS users_who_share_public_rooms;
20
21 -- Track what users are in public rooms.
22 CREATE TABLE IF NOT EXISTS users_in_public_rooms (
23 user_id TEXT NOT NULL,
24 room_id TEXT NOT NULL
25 );
26
27 CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id);
3636 event_id TEXT NOT NULL,
3737 prev_event_id TEXT NOT NULL,
3838 room_id TEXT NOT NULL,
39 -- We no longer insert prev_state into this table, so all new rows will have
40 -- is_state as false.
3941 is_state BOOL NOT NULL,
4042 UNIQUE (event_id, prev_event_id, room_id, is_state)
4143 );
0 # -*- coding: utf-8 -*-
1 # Copyright 2018 Vector Creations Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16
17 from synapse.storage._base import SQLBaseStore
18
19 logger = logging.getLogger(__name__)
20
21
22 class StateDeltasStore(SQLBaseStore):
23
24 def get_current_state_deltas(self, prev_stream_id):
25 prev_stream_id = int(prev_stream_id)
26 if not self._curr_state_delta_stream_cache.has_any_entity_changed(prev_stream_id):
27 return []
28
29 def get_current_state_deltas_txn(txn):
30 # First we calculate the max stream id that will give us less than
31 # N results.
32 # We arbitrarily limit to 100 stream_id entries to ensure we don't
33 # select too many.
34 sql = """
35 SELECT stream_id, count(*)
36 FROM current_state_delta_stream
37 WHERE stream_id > ?
38 GROUP BY stream_id
39 ORDER BY stream_id ASC
40 LIMIT 100
41 """
42 txn.execute(sql, (prev_stream_id,))
43
44 total = 0
45 max_stream_id = prev_stream_id
46 for max_stream_id, count in txn:
47 total += count
48 if total > 100:
49 # We arbitrarily limit to 100 entries to ensure we don't
50 # select too many.
51 break
52
53 # Now actually get the deltas
54 sql = """
55 SELECT stream_id, room_id, type, state_key, event_id, prev_event_id
56 FROM current_state_delta_stream
57 WHERE ? < stream_id AND stream_id <= ?
58 ORDER BY stream_id ASC
59 """
60 txn.execute(sql, (prev_stream_id, max_stream_id,))
61 return self.cursor_to_dict(txn)
62
63 return self.runInteraction(
64 "get_current_state_deltas", get_current_state_deltas_txn
65 )
66
67 def get_max_stream_id_in_current_state_deltas(self):
68 return self._simple_select_one_onecol(
69 table="current_state_delta_stream",
70 keyvalues={},
71 retcol="COALESCE(MAX(stream_id), -1)",
72 desc="get_max_stream_id_in_current_state_deltas",
73 )
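A hedged sketch of how a consumer such as the user directory might poll this store: each delta is a dict with stream_id, room_id, type, state_key, event_id and prev_event_id, and each call is capped at roughly 100 stream positions. The `store` argument and the processing loop are assumed:

from twisted.internet import defer

@defer.inlineCallbacks
def advance_through_deltas(store, prev_stream_id):
    deltas = yield store.get_current_state_deltas(prev_stream_id)
    for delta in deltas:
        # e.g. react to membership or profile state changes here.
        pass
    if deltas:
        prev_stream_id = deltas[-1]["stream_id"]
    defer.returnValue(prev_stream_id)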
190190 @defer.inlineCallbacks
191191 def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0,
192192 order='DESC'):
193 """Get new room events in stream ordering since `from_key`.
194
195 Args:
196 room_ids (list[str])
197 from_key (str): Token from which no events are returned before
198 to_key (str): Token from which no events are returned after. (This
199 is typically the current stream token)
200 limit (int): Maximum number of events to return
201 order (str): Either "DESC" or "ASC". Determines which events are
202 returned when the result is limited. If "DESC" then the most
203 recent `limit` events are returned, otherwise returns the
204 oldest `limit` events.
205
206 Returns:
207 Deferred[dict[str,tuple[list[FrozenEvent], str]]]
208 A map from room id to a tuple containing:
209 - list of recent events in the room
210 - stream ordering key for the start of the chunk of events returned.
211 """
193212 from_id = RoomStreamToken.parse_stream_token(from_key).stream
194213
195214 room_ids = yield self._events_stream_cache.get_entities_changed(
1515 import logging
1616 import re
1717
18 from six import iteritems
19
2018 from twisted.internet import defer
2119
2220 from synapse.api.constants import EventTypes, JoinRules
21 from synapse.storage.background_updates import BackgroundUpdateStore
2322 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
2423 from synapse.storage.state import StateFilter
24 from synapse.storage.state_deltas import StateDeltasStore
2525 from synapse.types import get_domain_from_id, get_localpart_from_id
26 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
27
28 from ._base import SQLBaseStore
26 from synapse.util.caches.descriptors import cached
2927
3028 logger = logging.getLogger(__name__)
3129
3230
33 class UserDirectoryStore(SQLBaseStore):
31 TEMP_TABLE = "_temp_populate_user_directory"
32
33
34 class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore):
35
36 # How many records do we calculate before sending it to
37 # add_users_who_share_private_rooms?
38 SHARE_PRIVATE_WORKING_SET = 500
39
40 def __init__(self, db_conn, hs):
41 super(UserDirectoryStore, self).__init__(db_conn, hs)
42
43 self.server_name = hs.hostname
44
45 self.register_background_update_handler(
46 "populate_user_directory_createtables",
47 self._populate_user_directory_createtables,
48 )
49 self.register_background_update_handler(
50 "populate_user_directory_process_rooms",
51 self._populate_user_directory_process_rooms,
52 )
53 self.register_background_update_handler(
54 "populate_user_directory_process_users",
55 self._populate_user_directory_process_users,
56 )
57 self.register_background_update_handler(
58 "populate_user_directory_cleanup", self._populate_user_directory_cleanup
59 )
60
61 @defer.inlineCallbacks
62 def _populate_user_directory_createtables(self, progress, batch_size):
63
64 # Get all the rooms that we want to process.
65 def _make_staging_area(txn):
66 sql = (
67 "CREATE TABLE IF NOT EXISTS "
68 + TEMP_TABLE
69 + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)"
70 )
71 txn.execute(sql)
72
73 sql = (
74 "CREATE TABLE IF NOT EXISTS "
75 + TEMP_TABLE
76 + "_position(position TEXT NOT NULL)"
77 )
78 txn.execute(sql)
79
80 # Get rooms we want to process from the database
81 sql = """
82 SELECT room_id, count(*) FROM current_state_events
83 GROUP BY room_id
84 """
85 txn.execute(sql)
86 rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()]
87 self._simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms)
88 del rooms
89
90 # If search all users is on, get all the users we want to add.
91 if self.hs.config.user_directory_search_all_users:
92 sql = (
93 "CREATE TABLE IF NOT EXISTS "
94 + TEMP_TABLE
95 + "_users(user_id TEXT NOT NULL)"
96 )
97 txn.execute(sql)
98
99 txn.execute("SELECT name FROM users")
100 users = [{"user_id": x[0]} for x in txn.fetchall()]
101
102 self._simple_insert_many_txn(txn, TEMP_TABLE + "_users", users)
103
104 new_pos = yield self.get_max_stream_id_in_current_state_deltas()
105 yield self.runInteraction(
106 "populate_user_directory_temp_build", _make_staging_area
107 )
108 yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos})
109
110 yield self._end_background_update("populate_user_directory_createtables")
111 defer.returnValue(1)
112
113 @defer.inlineCallbacks
114 def _populate_user_directory_cleanup(self, progress, batch_size):
115 """
116 Update the user directory stream position, then clean up the old tables.
117 """
118 position = yield self._simple_select_one_onecol(
119 TEMP_TABLE + "_position", None, "position"
120 )
121 yield self.update_user_directory_stream_pos(position)
122
123 def _delete_staging_area(txn):
124 txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
125 txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_users")
126 txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
127
128 yield self.runInteraction(
129 "populate_user_directory_cleanup", _delete_staging_area
130 )
131
132 yield self._end_background_update("populate_user_directory_cleanup")
133 defer.returnValue(1)
134
135 @defer.inlineCallbacks
136 def _populate_user_directory_process_rooms(self, progress, batch_size):
137 """
138 Args:
139 progress (dict)
140 batch_size (int): Maximum number of state events to process
141 per cycle.
142 """
143 state = self.hs.get_state_handler()
144
145 # If we don't have any progress on file, delete everything.
146 if not progress:
147 yield self.delete_all_from_user_dir()
148
149 def _get_next_batch(txn):
150 # Only fetch 250 rooms, so we don't fetch too many at once, even
151 # if those 250 rooms have fewer than batch_size state events.
152 sql = """
153 SELECT room_id, events FROM %s
154 ORDER BY events DESC
155 LIMIT 250
156 """ % (
157 TEMP_TABLE + "_rooms",
158 )
159 txn.execute(sql)
160 rooms_to_work_on = txn.fetchall()
161
162 if not rooms_to_work_on:
163 return None
164
165 # Get how many are left to process, so we can give status on how
166 # far we are in processing
167 txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
168 progress["remaining"] = txn.fetchone()[0]
169
170 return rooms_to_work_on
171
172 rooms_to_work_on = yield self.runInteraction(
173 "populate_user_directory_temp_read", _get_next_batch
174 )
175
176 # No more rooms -- complete the transaction.
177 if not rooms_to_work_on:
178 yield self._end_background_update("populate_user_directory_process_rooms")
179 defer.returnValue(1)
180
181 logger.info(
182 "Processing the next %d rooms of %d remaining"
183 % (len(rooms_to_work_on), progress["remaining"])
184 )
185
186 processed_event_count = 0
187
188 for room_id, event_count in rooms_to_work_on:
189 is_in_room = yield self.is_host_joined(room_id, self.server_name)
190
191 if is_in_room:
192 is_public = yield self.is_room_world_readable_or_publicly_joinable(
193 room_id
194 )
195
196 users_with_profile = yield state.get_current_user_in_room(room_id)
197 user_ids = set(users_with_profile)
198
199 # Update each user in the user directory.
200 for user_id, profile in users_with_profile.items():
201 yield self.update_profile_in_user_dir(
202 user_id, profile.display_name, profile.avatar_url
203 )
204
205 to_insert = set()
206
207 if is_public:
208 for user_id in user_ids:
209 if self.get_if_app_services_interested_in_user(user_id):
210 continue
211
212 to_insert.add(user_id)
213
214 if to_insert:
215 yield self.add_users_in_public_rooms(room_id, to_insert)
216 to_insert.clear()
217 else:
218 for user_id in user_ids:
219 if not self.hs.is_mine_id(user_id):
220 continue
221
222 if self.get_if_app_services_interested_in_user(user_id):
223 continue
224
225 for other_user_id in user_ids:
226 if user_id == other_user_id:
227 continue
228
229 user_set = (user_id, other_user_id)
230 to_insert.add(user_set)
231
232 # If it gets too big, stop and write to the database
233 # to prevent storing too much in RAM.
234 if len(to_insert) >= self.SHARE_PRIVATE_WORKING_SET:
235 yield self.add_users_who_share_private_room(
236 room_id, to_insert
237 )
238 to_insert.clear()
239
240 if to_insert:
241 yield self.add_users_who_share_private_room(room_id, to_insert)
242 to_insert.clear()
243
244 # We've finished a room. Delete it from the table.
245 yield self._simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id})
246 # Update the remaining counter.
247 progress["remaining"] -= 1
248 yield self.runInteraction(
249 "populate_user_directory",
250 self._background_update_progress_txn,
251 "populate_user_directory_process_rooms",
252 progress,
253 )
254
255 processed_event_count += event_count
256
257 if processed_event_count > batch_size:
258 # Don't process any more rooms, we've hit our batch size.
259 defer.returnValue(processed_event_count)
260
261 defer.returnValue(processed_event_count)
262
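The room loop above bounds memory by flushing the pair working set whenever it reaches SHARE_PRIVATE_WORKING_SET; a self-contained restatement of that chunked-flush pattern (illustrative; flush stands in for add_users_who_share_private_room):

def chunked_pairs(user_ids, flush, limit=500):
    # limit mirrors SHARE_PRIVATE_WORKING_SET.
    to_insert = set()
    for user_id in user_ids:
        for other_user_id in user_ids:
            if user_id == other_user_id:
                continue
            to_insert.add((user_id, other_user_id))
            if len(to_insert) >= limit:
                flush(set(to_insert))  # pass a copy; the buffer is cleared next
                to_insert.clear()
    if to_insert:
        flush(set(to_insert))

chunked_pairs(["@a:hs", "@b:hs", "@c:hs"], flush=print, limit=2)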
263 @defer.inlineCallbacks
264 def _populate_user_directory_process_users(self, progress, batch_size):
265 """
266 If search_all_users is enabled, add all of the users to the user directory.
267 """
268 if not self.hs.config.user_directory_search_all_users:
269 yield self._end_background_update("populate_user_directory_process_users")
270 defer.returnValue(1)
271
272 def _get_next_batch(txn):
273 sql = "SELECT user_id FROM %s LIMIT %s" % (
274 TEMP_TABLE + "_users",
275 str(batch_size),
276 )
277 txn.execute(sql)
278 users_to_work_on = txn.fetchall()
279
280 if not users_to_work_on:
281 return None
282
283 users_to_work_on = [x[0] for x in users_to_work_on]
284
285 # Get how many are left to process, so we can give status on how
286 # far we are in processing
287 sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users"
288 txn.execute(sql)
289 progress["remaining"] = txn.fetchone()[0]
290
291 return users_to_work_on
292
293 users_to_work_on = yield self.runInteraction(
294 "populate_user_directory_temp_read", _get_next_batch
295 )
296
297 # No more users -- complete the transaction.
298 if not users_to_work_on:
299 yield self._end_background_update("populate_user_directory_process_users")
300 defer.returnValue(1)
301
302 logger.info(
303 "Processing the next %d users of %d remaining"
304 % (len(users_to_work_on), progress["remaining"])
305 )
306
307 for user_id in users_to_work_on:
308 profile = yield self.get_profileinfo(get_localpart_from_id(user_id))
309 yield self.update_profile_in_user_dir(
310 user_id, profile.display_name, profile.avatar_url
311 )
312
313 # We've finished processing a user. Delete it from the table.
314 yield self._simple_delete_one(TEMP_TABLE + "_users", {"user_id": user_id})
315 # Update the remaining counter.
316 progress["remaining"] -= 1
317 yield self.runInteraction(
318 "populate_user_directory",
319 self._background_update_progress_txn,
320 "populate_user_directory_process_users",
321 progress,
322 )
323
324 defer.returnValue(len(users_to_work_on))
325
34326 @defer.inlineCallbacks
35327 def is_room_world_readable_or_publicly_joinable(self, room_id):
36328 """Check if the room is either world_readable or publicly joinable
62354
63355 defer.returnValue(False)
64356
65 @defer.inlineCallbacks
66 def add_users_to_public_room(self, room_id, user_ids):
67 """Add users to the list of users in public rooms
68
69 Args:
70 room_id (str): A room_id that all users are in that is world_readable
71 or publicly joinable
72 user_ids (list(str)): Users to add
73 """
74 yield self._simple_insert_many(
75 table="users_in_public_rooms",
76 values=[{"user_id": user_id, "room_id": room_id} for user_id in user_ids],
77 desc="add_users_to_public_room",
78 )
79 for user_id in user_ids:
80 self.get_user_in_public_room.invalidate((user_id,))
81
82 def add_profiles_to_user_dir(self, room_id, users_with_profile):
83 """Add profiles to the user directory
84
85 Args:
86 room_id (str): A room_id that all users are joined to
87 users_with_profile (dict): Users to add to directory in the form of
88 mapping of user_id -> ProfileInfo
89 """
90 if isinstance(self.database_engine, PostgresEngine):
91 # We weight the localpart most highly, then display name and finally
92 # server name
93 sql = """
94 INSERT INTO user_directory_search(user_id, vector)
95 VALUES (?,
96 setweight(to_tsvector('english', ?), 'A')
97 || setweight(to_tsvector('english', ?), 'D')
98 || setweight(to_tsvector('english', COALESCE(?, '')), 'B')
99 )
100 """
101 args = (
102 (
103 user_id,
104 get_localpart_from_id(user_id),
105 get_domain_from_id(user_id),
106 profile.display_name,
107 )
108 for user_id, profile in iteritems(users_with_profile)
109 )
110 elif isinstance(self.database_engine, Sqlite3Engine):
111 sql = """
112 INSERT INTO user_directory_search(user_id, value)
113 VALUES (?,?)
114 """
115 args = (
116 (
117 user_id,
118 "%s %s" % (user_id, p.display_name) if p.display_name else user_id,
119 )
120 for user_id, p in iteritems(users_with_profile)
121 )
122 else:
123 # This should be unreachable.
124 raise Exception("Unrecognized database engine")
125
126 def _add_profiles_to_user_dir_txn(txn):
127 txn.executemany(sql, args)
128 self._simple_insert_many_txn(
129 txn,
130 table="user_directory",
131 values=[
132 {
133 "user_id": user_id,
134 "room_id": room_id,
135 "display_name": profile.display_name,
136 "avatar_url": profile.avatar_url,
137 }
138 for user_id, profile in iteritems(users_with_profile)
139 ],
140 )
141 for user_id in users_with_profile:
142 txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
143
144 return self.runInteraction(
145 "add_profiles_to_user_dir", _add_profiles_to_user_dir_txn
146 )
147
148 @defer.inlineCallbacks
149 def update_user_in_user_dir(self, user_id, room_id):
150 yield self._simple_update_one(
151 table="user_directory",
152 keyvalues={"user_id": user_id},
153 updatevalues={"room_id": room_id},
154 desc="update_user_in_user_dir",
155 )
156 self.get_user_in_directory.invalidate((user_id,))
157
158 def update_profile_in_user_dir(self, user_id, display_name, avatar_url, room_id):
357 def update_profile_in_user_dir(self, user_id, display_name, avatar_url):
358 """
359 Update or add a user's profile in the user directory.
360 """
361
159362 def _update_profile_in_user_dir_txn(txn):
160363 new_entry = self._simple_upsert_txn(
161364 txn,
162365 table="user_directory",
163366 keyvalues={"user_id": user_id},
164 insertion_values={"room_id": room_id},
165367 values={"display_name": display_name, "avatar_url": avatar_url},
166368 lock=False, # We're the only inserter
167369 )
249451 "update_profile_in_user_dir", _update_profile_in_user_dir_txn
250452 )
251453
252 @defer.inlineCallbacks
253 def update_user_in_public_user_list(self, user_id, room_id):
254 yield self._simple_update_one(
255 table="users_in_public_rooms",
256 keyvalues={"user_id": user_id},
257 updatevalues={"room_id": room_id},
258 desc="update_user_in_public_user_list",
259 )
260 self.get_user_in_public_room.invalidate((user_id,))
261
262454 def remove_from_user_dir(self, user_id):
263455 def _remove_from_user_dir_txn(txn):
264456 self._simple_delete_txn(
270462 self._simple_delete_txn(
271463 txn, table="users_in_public_rooms", keyvalues={"user_id": user_id}
272464 )
465 self._simple_delete_txn(
466 txn,
467 table="users_who_share_private_rooms",
468 keyvalues={"user_id": user_id},
469 )
470 self._simple_delete_txn(
471 txn,
472 table="users_who_share_private_rooms",
473 keyvalues={"other_user_id": user_id},
474 )
273475 txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
274 txn.call_after(self.get_user_in_public_room.invalidate, (user_id,))
275476
276477 return self.runInteraction("remove_from_user_dir", _remove_from_user_dir_txn)
277
278 @defer.inlineCallbacks
279 def remove_from_user_in_public_room(self, user_id):
280 yield self._simple_delete(
281 table="users_in_public_rooms",
282 keyvalues={"user_id": user_id},
283 desc="remove_from_user_in_public_room",
284 )
285 self.get_user_in_public_room.invalidate((user_id,))
286
287 def get_users_in_public_due_to_room(self, room_id):
288 """Get all user_ids that are in the room directory because they're
289 in the given room_id
290 """
291 return self._simple_select_onecol(
292 table="users_in_public_rooms",
293 keyvalues={"room_id": room_id},
294 retcol="user_id",
295 desc="get_users_in_public_due_to_room",
296 )
297478
298479 @defer.inlineCallbacks
299480 def get_users_in_dir_due_to_room(self, room_id):
300481 """Get all user_ids that are in the room directory because they're
301482 in the given room_id
302483 """
303 user_ids_dir = yield self._simple_select_onecol(
304 table="user_directory",
305 keyvalues={"room_id": room_id},
306 retcol="user_id",
307 desc="get_users_in_dir_due_to_room",
308 )
309
310 user_ids_pub = yield self._simple_select_onecol(
484 user_ids_share_pub = yield self._simple_select_onecol(
311485 table="users_in_public_rooms",
312486 keyvalues={"room_id": room_id},
313487 retcol="user_id",
314488 desc="get_users_in_dir_due_to_room",
315489 )
316490
317 user_ids_share = yield self._simple_select_onecol(
318 table="users_who_share_rooms",
491 user_ids_share_priv = yield self._simple_select_onecol(
492 table="users_who_share_private_rooms",
319493 keyvalues={"room_id": room_id},
320 retcol="user_id",
494 retcol="other_user_id",
321495 desc="get_users_in_dir_due_to_room",
322496 )
323497
324 user_ids = set(user_ids_dir)
325 user_ids.update(user_ids_pub)
326 user_ids.update(user_ids_share)
498 user_ids = set(user_ids_share_pub)
499 user_ids.update(user_ids_share_priv)
327500
328501 defer.returnValue(user_ids)
329502
330 @defer.inlineCallbacks
331 def get_all_rooms(self):
332 """Get all room_ids we've ever known about, in ascending order of "size"
333 """
334 sql = """
335 SELECT room_id FROM current_state_events
336 GROUP BY room_id
337 ORDER BY count(*) ASC
338 """
339 rows = yield self._execute("get_all_rooms", None, sql)
340 defer.returnValue([room_id for room_id, in rows])
341
342 @defer.inlineCallbacks
343 def get_all_local_users(self):
344 """Get all local users
345 """
346 sql = """
347 SELECT name FROM users
348 """
349 rows = yield self._execute("get_all_local_users", None, sql)
350 defer.returnValue([name for name, in rows])
351
352 def add_users_who_share_room(self, room_id, share_private, user_id_tuples):
353 """Insert entries into the users_who_share_rooms table. The first
503 def add_users_who_share_private_room(self, room_id, user_id_tuples):
504 """Insert entries into the users_who_share_private_rooms table. The first
354505 user should be a local user.
355506
356507 Args:
357508 room_id (str)
358 share_private (bool): Is the room private
359509 user_id_tuples([(str, str)]): iterable of 2-tuple of user IDs.
360510 """
361511
362512 def _add_users_who_share_room_txn(txn):
363 self._simple_insert_many_txn(
513 self._simple_upsert_many_txn(
364514 txn,
365 table="users_who_share_rooms",
366 values=[
367 {
368 "user_id": user_id,
369 "other_user_id": other_user_id,
370 "room_id": room_id,
371 "share_private": share_private,
372 }
515 table="users_who_share_private_rooms",
516 key_names=["user_id", "other_user_id", "room_id"],
517 key_values=[
518 (user_id, other_user_id, room_id)
373519 for user_id, other_user_id in user_id_tuples
374520 ],
375 )
376 for user_id, other_user_id in user_id_tuples:
377 txn.call_after(
378 self.get_users_who_share_room_from_dir.invalidate, (user_id,)
379 )
380 txn.call_after(
381 self.get_if_users_share_a_room.invalidate, (user_id, other_user_id)
382 )
521 value_names=(),
522 value_values=None,
523 )
383524
384525 return self.runInteraction(
385526 "add_users_who_share_room", _add_users_who_share_room_txn
386527 )
387528
388 def update_users_who_share_room(self, room_id, share_private, user_id_sets):
389 """Updates entries in the users_who_share_rooms table. The first
529 def add_users_in_public_rooms(self, room_id, user_ids):
530 """Insert entries into the users_in_public_rooms table. The first
390531 user should be a local user.
391532
392533 Args:
393534 room_id (str)
394 share_private (bool): Is the room private
395 user_id_tuples([(str, str)]): iterable of 2-tuple of user IDs.
396 """
397
398 def _update_users_who_share_room_txn(txn):
399 sql = """
400 UPDATE users_who_share_rooms
401 SET room_id = ?, share_private = ?
402 WHERE user_id = ? AND other_user_id = ?
403 """
404 txn.executemany(
405 sql, ((room_id, share_private, uid, oid) for uid, oid in user_id_sets)
406 )
407 for user_id, other_user_id in user_id_sets:
408 txn.call_after(
409 self.get_users_who_share_room_from_dir.invalidate, (user_id,)
410 )
411 txn.call_after(
412 self.get_if_users_share_a_room.invalidate, (user_id, other_user_id)
413 )
535 user_ids (list[str])
536 """
537
538 def _add_users_in_public_rooms_txn(txn):
539
540 self._simple_upsert_many_txn(
541 txn,
542 table="users_in_public_rooms",
543 key_names=["user_id", "room_id"],
544 key_values=[(user_id, room_id) for user_id in user_ids],
545 value_names=(),
546 value_values=None,
547 )
414548
415549 return self.runInteraction(
416 "update_users_who_share_room", _update_users_who_share_room_txn
417 )
418
419 def remove_user_who_share_room(self, user_id, other_user_id):
420 """Deletes entries in the users_who_share_rooms table. The first
550 "add_users_in_public_rooms", _add_users_in_public_rooms_txn
551 )
552
553 def remove_user_who_share_room(self, user_id, room_id):
554 """
555 Deletes entries in the users_who_share_private_rooms and users_in_public_rooms tables. The first
421556 user should be a local user.
422557
423558 Args:
559 user_id (str)
424560 room_id (str)
425 share_private (bool): Is the room private
426 user_id_tuples([(str, str)]): iterable of 2-tuple of user IDs.
427561 """
428562
429563 def _remove_user_who_share_room_txn(txn):
430564 self._simple_delete_txn(
431565 txn,
432 table="users_who_share_rooms",
433 keyvalues={"user_id": user_id, "other_user_id": other_user_id},
434 )
435 txn.call_after(
436 self.get_users_who_share_room_from_dir.invalidate, (user_id,)
437 )
438 txn.call_after(
439 self.get_if_users_share_a_room.invalidate, (user_id, other_user_id)
566 table="users_who_share_private_rooms",
567 keyvalues={"user_id": user_id, "room_id": room_id},
568 )
569 self._simple_delete_txn(
570 txn,
571 table="users_who_share_private_rooms",
572 keyvalues={"other_user_id": user_id, "room_id": room_id},
573 )
574 self._simple_delete_txn(
575 txn,
576 table="users_in_public_rooms",
577 keyvalues={"user_id": user_id, "room_id": room_id},
440578 )
441579
442580 return self.runInteraction(
443581 "remove_user_who_share_room", _remove_user_who_share_room_txn
444582 )
445583
446 @cached(max_entries=500000)
447 def get_if_users_share_a_room(self, user_id, other_user_id):
448 """Gets if users share a room.
449
450 Args:
451 user_id (str): Must be a local user_id
452 other_user_id (str)
453
454 Returns:
455 bool|None: None if they don't share a room, otherwise whether they
456 share a private room or not.
457 """
458 return self._simple_select_one_onecol(
459 table="users_who_share_rooms",
460 keyvalues={"user_id": user_id, "other_user_id": other_user_id},
461 retcol="share_private",
462 allow_none=True,
463 desc="get_if_users_share_a_room",
464 )
465
466 @cachedInlineCallbacks(max_entries=500000, iterable=True)
467 def get_users_who_share_room_from_dir(self, user_id):
468 """Returns the set of users who share a room with `user_id`
584 @defer.inlineCallbacks
585 def get_user_dir_rooms_user_is_in(self, user_id):
586 """
587 Returns the rooms that a user is in.
469588
470589 Args:
471590 user_id(str): Must be a local user
472591
473592 Returns:
474 dict: user_id -> share_private mapping
475 """
476 rows = yield self._simple_select_list(
477 table="users_who_share_rooms",
593 list: room_id
594 """
595 rows = yield self._simple_select_onecol(
596 table="users_who_share_private_rooms",
478597 keyvalues={"user_id": user_id},
479 retcols=("other_user_id", "share_private"),
480 desc="get_users_who_share_room_with_user",
481 )
482
483 defer.returnValue({row["other_user_id"]: row["share_private"] for row in rows})
484
485 def get_users_in_share_dir_with_room_id(self, user_id, room_id):
486 """Get all user tuples that are in the users_who_share_rooms due to the
487 given room_id.
488
489 Returns:
490 [(user_id, other_user_id)]: where one of the two will match the given
491 user_id.
492 """
493 sql = """
494 SELECT user_id, other_user_id FROM users_who_share_rooms
495 WHERE room_id = ? AND (user_id = ? OR other_user_id = ?)
496 """
497 return self._execute(
498 "get_users_in_share_dir_with_room_id", None, sql, room_id, user_id, user_id
499 )
598 retcol="room_id",
599 desc="get_rooms_user_is_in",
600 )
601
602 pub_rows = yield self._simple_select_onecol(
603 table="users_in_public_rooms",
604 keyvalues={"user_id": user_id},
605 retcol="room_id",
606 desc="get_rooms_user_is_in",
607 )
608
609 rooms = set(pub_rows)
610 rooms.update(rows)
611 defer.returnValue(list(rooms))
500612
501613 @defer.inlineCallbacks
502614 def get_rooms_in_common_for_users(self, user_id, other_user_id):
532644 txn.execute("DELETE FROM user_directory")
533645 txn.execute("DELETE FROM user_directory_search")
534646 txn.execute("DELETE FROM users_in_public_rooms")
535 txn.execute("DELETE FROM users_who_share_rooms")
647 txn.execute("DELETE FROM users_who_share_private_rooms")
536648 txn.call_after(self.get_user_in_directory.invalidate_all)
537 txn.call_after(self.get_user_in_public_room.invalidate_all)
538 txn.call_after(self.get_users_who_share_room_from_dir.invalidate_all)
539 txn.call_after(self.get_if_users_share_a_room.invalidate_all)
540649
541650 return self.runInteraction(
542651 "delete_all_from_user_dir", _delete_all_from_user_dir_txn
547656 return self._simple_select_one(
548657 table="user_directory",
549658 keyvalues={"user_id": user_id},
550 retcols=("room_id", "display_name", "avatar_url"),
659 retcols=("display_name", "avatar_url"),
551660 allow_none=True,
552661 desc="get_user_in_directory",
553 )
554
555 @cached()
556 def get_user_in_public_room(self, user_id):
557 return self._simple_select_one(
558 table="users_in_public_rooms",
559 keyvalues={"user_id": user_id},
560 retcols=("room_id",),
561 allow_none=True,
562 desc="get_user_in_public_room",
563662 )
564663
565664 def get_user_directory_stream_pos(self):
576675 keyvalues={},
577676 updatevalues={"stream_id": stream_id},
578677 desc="update_user_directory_stream_pos",
579 )
580
581 def get_current_state_deltas(self, prev_stream_id):
582 prev_stream_id = int(prev_stream_id)
583 if not self._curr_state_delta_stream_cache.has_any_entity_changed(
584 prev_stream_id
585 ):
586 return []
587
588 def get_current_state_deltas_txn(txn):
589 # First we calculate the max stream id that will give us fewer than
590 # N results.
591 # We arbitrarily limit to 100 stream_id entries to ensure we don't
592 # select too many.
593 sql = """
594 SELECT stream_id, count(*)
595 FROM current_state_delta_stream
596 WHERE stream_id > ?
597 GROUP BY stream_id
598 ORDER BY stream_id ASC
599 LIMIT 100
600 """
601 txn.execute(sql, (prev_stream_id,))
602
603 total = 0
604 max_stream_id = prev_stream_id
605 for max_stream_id, count in txn:
606 total += count
607 if total > 100:
608 # We arbitrarily limit to 100 entries to ensure we don't
609 # select too many.
610 break
611
612 # Now actually get the deltas
613 sql = """
614 SELECT stream_id, room_id, type, state_key, event_id, prev_event_id
615 FROM current_state_delta_stream
616 WHERE ? < stream_id AND stream_id <= ?
617 ORDER BY stream_id ASC
618 """
619 txn.execute(sql, (prev_stream_id, max_stream_id))
620 return self.cursor_to_dict(txn)
621
622 return self.runInteraction(
623 "get_current_state_deltas", get_current_state_deltas_txn
624 )
625
626 def get_max_stream_id_in_current_state_deltas(self):
627 return self._simple_select_one_onecol(
628 table="current_state_delta_stream",
629 keyvalues={},
630 retcol="COALESCE(MAX(stream_id), -1)",
631 desc="get_max_stream_id_in_current_state_deltas",
632678 )
633679
634680 @defer.inlineCallbacks
651697 """
652698
653699 if self.hs.config.user_directory_search_all_users:
654 # make s.user_id null to keep the ordering algorithm happy
655 join_clause = """
656 CROSS JOIN (SELECT NULL as user_id) AS s
700 join_args = (user_id,)
701 where_clause = "user_id != ?"
702 else:
703 join_args = (user_id,)
704 where_clause = """
705 (
706 EXISTS (SELECT 1 FROM users_in_public_rooms WHERE user_id = t.user_id)
707 OR EXISTS (
708 SELECT 1 FROM users_who_share_private_rooms
709 WHERE user_id = ? AND other_user_id = t.user_id
710 )
711 )
657712 """
658 join_args = ()
659 where_clause = "1=1"
660 else:
661 join_clause = """
662 LEFT JOIN users_in_public_rooms AS p USING (user_id)
663 LEFT JOIN (
664 SELECT other_user_id AS user_id FROM users_who_share_rooms
665 WHERE user_id = ? AND share_private
666 ) AS s USING (user_id)
667 """
668 join_args = (user_id,)
669 where_clause = "(s.user_id IS NOT NULL OR p.user_id IS NOT NULL)"
670713
671714 if isinstance(self.database_engine, PostgresEngine):
672715 full_query, exact_query, prefix_query = _parse_query_postgres(search_term)
678721 # search: (domain, _, display name, localpart)
679722 sql = """
680723 SELECT d.user_id AS user_id, display_name, avatar_url
681 FROM user_directory_search
724 FROM user_directory_search AS t
682725 INNER JOIN user_directory AS d USING (user_id)
683 %s
684726 WHERE
685727 %s
686728 AND vector @@ to_tsquery('english', ?)
687729 ORDER BY
688 (CASE WHEN s.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END)
730 (CASE WHEN d.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END)
689731 * (CASE WHEN display_name IS NOT NULL THEN 1.2 ELSE 1.0 END)
690732 * (CASE WHEN avatar_url IS NOT NULL THEN 1.2 ELSE 1.0 END)
691733 * (
707749 avatar_url IS NULL
708750 LIMIT ?
709751 """ % (
710 join_clause,
711752 where_clause,
712753 )
713754 args = join_args + (full_query, exact_query, prefix_query, limit + 1)
716757
717758 sql = """
718759 SELECT d.user_id AS user_id, display_name, avatar_url
719 FROM user_directory_search
760 FROM user_directory_search AS t
720761 INNER JOIN user_directory AS d USING (user_id)
721 %s
722762 WHERE
723763 %s
724764 AND value MATCH ?
728768 avatar_url IS NULL
729769 LIMIT ?
730770 """ % (
731 join_clause,
732771 where_clause,
733772 )
734773 args = join_args + (search_query, limit + 1)
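When search_all_users is off, the rewritten WHERE clause only admits a candidate t.user_id who appears in some public room or shares a private room with the searcher; a Python restatement of that predicate (names are illustrative, not from the codebase):

def candidate_visible(searcher, candidate, public_room_users, private_pairs):
    # public_room_users: user_ids with a row in users_in_public_rooms.
    # private_pairs: (user_id, other_user_id) rows from
    # users_who_share_private_rooms.
    return (
        candidate in public_room_users
        or (searcher, candidate) in private_pairs
    )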
1515 import string
1616 from collections import namedtuple
1717
18 import attr
19
1820 from synapse.api.errors import SynapseError
1921
2022
454456 @classmethod
455457 def create(cls, appservice_id, network_id,):
456458 return cls(appservice_id=appservice_id, network_id=network_id)
459
460
461 @attr.s(slots=True)
462 class ReadReceipt(object):
463 """Information about a read-receipt"""
464 room_id = attr.ib()
465 receipt_type = attr.ib()
466 user_id = attr.ib()
467 event_ids = attr.ib()
468 data = attr.ib()
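Constructing one mirrors the federation sender tests later in this diff (the values here are illustrative):

from synapse.types import ReadReceipt

receipt = ReadReceipt(
    room_id="!room:example.org",
    receipt_type="m.read",
    user_id="@alice:example.org",
    event_ids=["$event1:example.org"],
    data={"ts": 1234},
)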
2323 string.digits + string.ascii_letters + ".,;:^&*-_+=#~@"
2424 )
2525
26 # random_string and random_string_with_symbols are used for a range of things,
27 # some cryptographically important, some less so. We use SystemRandom to make sure
28 # we get cryptographically-secure randoms.
29 rand = random.SystemRandom()
30
2631
2732 def random_string(length):
28 return ''.join(random.choice(string.ascii_letters) for _ in range(length))
33 return ''.join(rand.choice(string.ascii_letters) for _ in range(length))
2934
3035
3136 def random_string_with_symbols(length):
3237 return ''.join(
33 random.choice(_string_with_symbols) for _ in range(length)
38 rand.choice(_string_with_symbols) for _ in range(length)
3439 )
3540
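A quick illustration of the distinction the comment above draws (standard-library behaviour): the module-level functions use a seedable Mersenne Twister, whereas SystemRandom reads from os.urandom and cannot be seeded.

import random

rand = random.SystemRandom()  # backed by os.urandom(); suitable for tokens
token = ''.join(rand.choice('abcdef0123456789') for _ in range(32))

random.seed(42)  # the default generator *is* seedable,
guessable = random.choice('abcdef')  # which makes it unsafe for secrets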
3641
6666 Returns:
6767 Deferred[list[synapse.events.EventBase]]
6868 """
69 # Filter out events that have been soft failed so that we don't relay them
70 # to clients.
71 events = list(e for e in events if not e.internal_metadata.is_soft_failed())
72
6973 types = (
7074 (EventTypes.RoomHistoryVisibility, ""),
7175 (EventTypes.Member, user_id),
215219
216220
217221 @defer.inlineCallbacks
218 def filter_events_for_server(store, server_name, events):
219 # Whatever else we do, we need to check for senders which have requested
220 # erasure of their data.
221 erased_senders = yield store.are_users_erased(
222 (e.sender for e in events),
223 )
224
225 def redact_disallowed(event, state):
226 # if the sender has been gdpr17ed, always return a redacted
227 # copy of the event.
228 if erased_senders[event.sender]:
222 def filter_events_for_server(store, server_name, events, redact=True,
223 check_history_visibility_only=False):
224 """Filter a list of events based on whether given server is allowed to
225 see them.
226
227 Args:
228 store (DataStore)
229 server_name (str)
230 events (iterable[FrozenEvent])
231 redact (bool): Whether to return a redacted version of disallowed
232 events, or to filter them out entirely.
233 check_history_visibility_only (bool): Whether to only check the
234 history visibility, rather than things like if the sender has been
235 erased. This is used e.g. during pagination to decide whether to
236 backfill or not.
237
238 Returns:
239 Deferred[list[FrozenEvent]]
240 """
241
242 def is_sender_erased(event, erased_senders):
243 if erased_senders and erased_senders[event.sender]:
229244 logger.info(
230245 "Sender of %s has been erased, redacting",
231246 event.event_id,
232247 )
233 return prune_event(event)
234
235 # state will be None if we decided we didn't need to filter by
236 # room membership.
237 if not state:
238 return event
239
248 return True
249 return False
250
251 def check_event_is_visible(event, state):
240252 history = state.get((EventTypes.RoomHistoryVisibility, ''), None)
241253 if history:
242254 visibility = history.content.get("history_visibility", "shared")
258270
259271 memtype = ev.membership
260272 if memtype == Membership.JOIN:
261 return event
273 return True
262274 elif memtype == Membership.INVITE:
263275 if visibility == "invited":
264 return event
276 return True
265277 else:
266278 # server has no users in the room: redact
267 return prune_event(event)
268
269 return event
270
271 # Next let's check to see if all the events have a history visibility
279 return False
280
281 return True
282
283 # Let's check to see if all the events have a history visibility
272284 # of "shared" or "world_readable". If that's the case then we don't
273285 # need to check membership (as we know the server is in the room).
274286 event_to_state_ids = yield store.get_state_ids_for_events(
295307 for e in itervalues(event_map)
296308 )
297309
310 if not check_history_visibility_only:
311 erased_senders = yield store.are_users_erased(
312 (e.sender for e in events),
313 )
314 else:
315 # We don't want to check whether users are erased, which is equivalent
316 # to no users having been erased.
317 erased_senders = {}
318
298319 if all_open:
299320 # all the history_visibility state affecting these events is open, so
300321 # we don't need to filter by membership state. We *do* need to check
301322 # for user erasure, though.
302323 if erased_senders:
303 events = [
304 redact_disallowed(e, None)
305 for e in events
306 ]
307
324 to_return = []
325 for e in events:
326 if not is_sender_erased(e, erased_senders):
327 to_return.append(e)
328 elif redact:
329 to_return.append(prune_event(e))
330
331 defer.returnValue(to_return)
332
333 # If there are no erased users then we can just return the given list
334 # of events without having to copy it.
308335 defer.returnValue(events)
309336
310337 # Ok, so we're dealing with events that have non-trivial visibility
360387 for e_id, key_to_eid in iteritems(event_to_state_ids)
361388 }
362389
363 defer.returnValue([
364 redact_disallowed(e, event_to_state[e.event_id])
365 for e in events
366 ])
390 to_return = []
391 for e in events:
392 erased = is_sender_erased(e, erased_senders)
393 visible = check_event_is_visible(e, event_to_state[e.event_id])
394 if visible and not erased:
395 to_return.append(e)
396 elif redact:
397 to_return.append(prune_event(e))
398
399 defer.returnValue(to_return)
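Hypothetical call sites showing the two modes added above (inside an @defer.inlineCallbacks function; the server name is made up):

# Normal federation send: redact events the remote server may not see.
events = yield filter_events_for_server(store, "remote.example.com", events)

# Deciding whether to backfill: consult history visibility only, and drop
# rather than redact anything invisible.
visible = yield filter_events_for_server(
    store, "remote.example.com", events,
    redact=False, check_history_visibility_only=True,
)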
163163 sys.exit(1)
164164
165165 with open(configfile) as stream:
166 config = yaml.load(stream)
166 config = yaml.safe_load(stream)
167167
168168 pidfile = config["pid_file"]
169169 cache_factor = config.get("synctl_cache_factor")
205205 workers = []
206206 for worker_configfile in worker_configfiles:
207207 with open(worker_configfile) as stream:
208 worker_config = yaml.load(stream)
208 worker_config = yaml.safe_load(stream)
209209 worker_app = worker_config["worker_app"]
210210 if worker_app == "synapse.app.homeserver":
211211 # We need to special case all of this to pick up options that may
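The yaml.load to yaml.safe_load changes close a well-known hole: with the default loader, tags such as !!python/object can instantiate arbitrary Python objects from attacker-supplied YAML. A minimal illustration:

import yaml

config = yaml.safe_load("pid_file: /var/run/synapse.pid")
assert config["pid_file"] == "/var/run/synapse.pid"

# safe_load parses plain documents identically but refuses dangerous tags;
# the following raises yaml.constructor.ConstructorError rather than
# executing anything:
#   yaml.safe_load("!!python/object/apply:os.system ['true']")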
344344 self.assertEquals(e.exception.code, 403)
345345
346346 @defer.inlineCallbacks
347 def test_hs_disabled_no_server_notices_user(self):
348 """Check that 'hs_disabled_message' works correctly when there is no
349 server_notices user.
350 """
351 # this should be the default, but we had a bug where the test was doing the wrong
352 # thing, so let's make it explicit
353 self.hs.config.server_notices_mxid = None
354
355 self.hs.config.hs_disabled = True
356 self.hs.config.hs_disabled_message = "Reason for being disabled"
357 with self.assertRaises(ResourceLimitError) as e:
358 yield self.auth.check_auth_blocking()
359 self.assertEquals(e.exception.admin_contact, self.hs.config.admin_contact)
360 self.assertEquals(e.exception.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
361 self.assertEquals(e.exception.code, 403)
362
363 @defer.inlineCallbacks
347364 def test_server_notices_mxid_special_cased(self):
348365 self.hs.config.hs_disabled = True
349366 user = "@user:server"
55 class TestRatelimiter(unittest.TestCase):
66 def test_allowed(self):
77 limiter = Ratelimiter()
8 allowed, time_allowed = limiter.send_message(
9 user_id="test_id", time_now_s=0, msg_rate_hz=0.1, burst_count=1
8 allowed, time_allowed = limiter.can_do_action(
9 key="test_id", time_now_s=0, rate_hz=0.1, burst_count=1
1010 )
1111 self.assertTrue(allowed)
1212 self.assertEquals(10., time_allowed)
1313
14 allowed, time_allowed = limiter.send_message(
15 user_id="test_id", time_now_s=5, msg_rate_hz=0.1, burst_count=1
14 allowed, time_allowed = limiter.can_do_action(
15 key="test_id", time_now_s=5, rate_hz=0.1, burst_count=1
1616 )
1717 self.assertFalse(allowed)
1818 self.assertEquals(10., time_allowed)
1919
20 allowed, time_allowed = limiter.send_message(
21 user_id="test_id", time_now_s=10, msg_rate_hz=0.1, burst_count=1
20 allowed, time_allowed = limiter.can_do_action(
21 key="test_id", time_now_s=10, rate_hz=0.1, burst_count=1
2222 )
2323 self.assertTrue(allowed)
2424 self.assertEquals(20., time_allowed)
2525
2626 def test_pruning(self):
2727 limiter = Ratelimiter()
28 allowed, time_allowed = limiter.send_message(
29 user_id="test_id_1", time_now_s=0, msg_rate_hz=0.1, burst_count=1
28 allowed, time_allowed = limiter.can_do_action(
29 key="test_id_1", time_now_s=0, rate_hz=0.1, burst_count=1
3030 )
3131
3232 self.assertIn("test_id_1", limiter.message_counts)
3333
34 allowed, time_allowed = limiter.send_message(
35 user_id="test_id_2", time_now_s=10, msg_rate_hz=0.1, burst_count=1
34 allowed, time_allowed = limiter.can_do_action(
35 key="test_id_2", time_now_s=10, rate_hz=0.1, burst_count=1
3636 )
3737
3838 self.assertNotIn("test_id_1", limiter.message_counts)
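Reading the numbers in the tests above: with rate_hz=0.1 and burst_count=1 the bucket holds one action and refills at 0.1 actions per second, so the second element of the return value is the time at which the next action becomes allowed. A worked restatement:

rate_hz, burst_count = 0.1, 1

# t=0:  bucket full, action allowed; it drains the bucket.
#       next allowed time = 0 + burst_count / rate_hz = 10.0
# t=5:  only 5 * 0.1 = 0.5 of an action has refilled -> denied, still 10.0
# t=10: a full action has refilled -> allowed; next slot at 20.0
print(burst_count / rate_hz)  # 10.0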
4242 self.generate_config()
4343
4444 with open(self.file, "r") as f:
45 raw = yaml.load(f)
45 raw = yaml.safe_load(f)
4646 self.assertIn("macaroon_secret_key", raw)
4747
4848 config = HomeServerConfig.load_config("", ["-c", self.file])
2121
2222 class RoomDirectoryConfigTestCase(unittest.TestCase):
2323 def test_alias_creation_acl(self):
24 config = yaml.load("""
24 config = yaml.safe_load("""
2525 alias_creation_rules:
2626 - user_id: "*bob*"
2727 alias: "*"
7373 ))
7474
7575 def test_room_publish_acl(self):
76 config = yaml.load("""
76 config = yaml.safe_load("""
7777 alias_creation_rules: []
7878
7979 room_list_publication_rules:
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from mock import Mock
16
17 from twisted.internet import defer
18
19 from synapse.types import ReadReceipt
20
21 from tests.unittest import HomeserverTestCase
22
23
24 class FederationSenderTestCases(HomeserverTestCase):
25 def make_homeserver(self, reactor, clock):
26 return super(FederationSenderTestCases, self).setup_test_homeserver(
27 state_handler=Mock(spec=["get_current_hosts_in_room"]),
28 federation_transport_client=Mock(spec=["send_transaction"]),
29 )
30
31 def test_send_receipts(self):
32 mock_state_handler = self.hs.get_state_handler()
33 mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"]
34
35 mock_send_transaction = self.hs.get_federation_transport_client().send_transaction
36 mock_send_transaction.return_value = defer.succeed({})
37
38 sender = self.hs.get_federation_sender()
39 receipt = ReadReceipt("room_id", "m.read", "user_id", ["event_id"], {"ts": 1234})
40 self.successResultOf(sender.send_read_receipt(receipt))
41
42 self.pump()
43
44 # expect a call to send_transaction
45 mock_send_transaction.assert_called_once()
46 json_cb = mock_send_transaction.call_args[0][1]
47 data = json_cb()
48 self.assertEqual(data['edus'], [
49 {
50 'edu_type': 'm.receipt',
51 'content': {
52 'room_id': {
53 'm.read': {
54 'user_id': {
55 'event_ids': ['event_id'],
56 'data': {'ts': 1234},
57 },
58 },
59 },
60 },
61 },
62 ])
63
64 def test_send_receipts_with_backoff(self):
65 """Send two receipts in quick succession; the second should be flushed, but
66 only after 20ms"""
67 mock_state_handler = self.hs.get_state_handler()
68 mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"]
69
70 mock_send_transaction = self.hs.get_federation_transport_client().send_transaction
71 mock_send_transaction.return_value = defer.succeed({})
72
73 sender = self.hs.get_federation_sender()
74 receipt = ReadReceipt("room_id", "m.read", "user_id", ["event_id"], {"ts": 1234})
75 self.successResultOf(sender.send_read_receipt(receipt))
76
77 self.pump()
78
79 # expect a call to send_transaction
80 mock_send_transaction.assert_called_once()
81 json_cb = mock_send_transaction.call_args[0][1]
82 data = json_cb()
83 self.assertEqual(data['edus'], [
84 {
85 'edu_type': 'm.receipt',
86 'content': {
87 'room_id': {
88 'm.read': {
89 'user_id': {
90 'event_ids': ['event_id'],
91 'data': {'ts': 1234},
92 },
93 },
94 },
95 },
96 },
97 ])
98 mock_send_transaction.reset_mock()
99
100 # send the second RR
101 receipt = ReadReceipt("room_id", "m.read", "user_id", ["other_id"], {"ts": 1234})
102 self.successResultOf(sender.send_read_receipt(receipt))
103 self.pump()
104 mock_send_transaction.assert_not_called()
105
106 self.reactor.advance(19)
107 mock_send_transaction.assert_not_called()
108
109 self.reactor.advance(10)
110 mock_send_transaction.assert_called_once()
111 json_cb = mock_send_transaction.call_args[0][1]
112 data = json_cb()
113 self.assertEqual(data['edus'], [
114 {
115 'edu_type': 'm.receipt',
116 'content': {
117 'room_id': {
118 'm.read': {
119 'user_id': {
120 'event_ids': ['other_id'],
121 'data': {'ts': 1234},
122 },
123 },
124 },
125 },
126 },
127 ])
110110
111111 servlets = [directory.register_servlets, room.register_servlets]
112112
113 def prepare(self, hs, reactor, clock):
113 def prepare(self, reactor, clock, hs):
114114 # We cheekily override the config to add custom alias creation rules
115115 config = {}
116116 config["alias_creation_rules"] = [
150150 )
151151 self.render(request)
152152 self.assertEquals(200, channel.code, channel.result)
153
154
155 class TestRoomListSearchDisabled(unittest.HomeserverTestCase):
156 user_id = "@test:test"
157
158 servlets = [directory.register_servlets, room.register_servlets]
159
160 def prepare(self, reactor, clock, hs):
161 room_id = self.helper.create_room_as(self.user_id)
162
163 request, channel = self.make_request(
164 "PUT",
165 b"directory/list/room/%s" % (room_id.encode('ascii'),),
166 b'{}',
167 )
168 self.render(request)
169 self.assertEquals(200, channel.code, channel.result)
170
171 self.room_list_handler = hs.get_room_list_handler()
172 self.directory_handler = hs.get_handlers().directory_handler
173
174 return hs
175
176 def test_disabling_room_list(self):
177 self.room_list_handler.enable_room_list_search = True
178 self.directory_handler.enable_room_list_search = True
179
180 # Room list is enabled so we should get some results
181 request, channel = self.make_request(
182 "GET",
183 b"publicRooms",
184 )
185 self.render(request)
186 self.assertEquals(200, channel.code, channel.result)
187 self.assertTrue(len(channel.json_body["chunk"]) > 0)
188
189 self.room_list_handler.enable_room_list_search = False
190 self.directory_handler.enable_room_list_search = False
191
192 # Room list disabled so we should get no results
193 request, channel = self.make_request(
194 "GET",
195 b"publicRooms",
196 )
197 self.render(request)
198 self.assertEquals(200, channel.code, channel.result)
199 self.assertTrue(len(channel.json_body["chunk"]) == 0)
200
201 # Room list disabled so we shouldn't be allowed to publish rooms
202 room_id = self.helper.create_room_as(self.user_id)
203 request, channel = self.make_request(
204 "PUT",
205 b"directory/list/room/%s" % (room_id.encode('ascii'),),
206 b'{}',
207 )
208 self.render(request)
209 self.assertEquals(403, channel.code, channel.result)
5454 federation_client=self.mock_federation,
5555 federation_server=Mock(),
5656 federation_registry=self.mock_registry,
57 ratelimiter=NonCallableMock(spec_set=["send_message"]),
57 ratelimiter=NonCallableMock(spec_set=["can_do_action"]),
5858 )
5959
6060 self.ratelimiter = hs.get_ratelimiter()
61 self.ratelimiter.send_message.return_value = (True, 0)
61 self.ratelimiter.can_do_action.return_value = (True, 0)
6262
6363 self.store = hs.get_datastore()
6464
2121 from synapse.handlers.register import RegistrationHandler
2222 from synapse.types import RoomAlias, UserID, create_requester
2323
24 from tests.utils import setup_test_homeserver
25
2624 from .. import unittest
2725
2826
3129 self.registration_handler = RegistrationHandler(hs)
3230
3331
34 class RegistrationTestCase(unittest.TestCase):
32 class RegistrationTestCase(unittest.HomeserverTestCase):
3533 """ Tests the RegistrationHandler. """
3634
37 @defer.inlineCallbacks
38 def setUp(self):
35 def make_homeserver(self, reactor, clock):
36 hs_config = self.default_config("test")
37
38 # some of the tests rely on us having a user consent version
39 hs_config.user_consent_version = "test_consent_version"
40 hs_config.max_mau_value = 50
41
42 hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True)
43 return hs
44
45 def prepare(self, reactor, clock, hs):
3946 self.mock_distributor = Mock()
4047 self.mock_distributor.declare("registered_user")
4148 self.mock_captcha_client = Mock()
42 self.hs = yield setup_test_homeserver(
43 self.addCleanup,
44 expire_access_token=True,
45 )
4649 self.macaroon_generator = Mock(
4750 generate_access_token=Mock(return_value='secret')
4851 )
4952 self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator)
5053 self.handler = self.hs.get_registration_handler()
5154 self.store = self.hs.get_datastore()
52 self.hs.config.max_mau_value = 50
5355 self.lots_of_users = 100
5456 self.small_number_of_users = 1
5557
5658 self.requester = create_requester("@requester:test")
5759
58 @defer.inlineCallbacks
5960 def test_user_is_created_and_logged_in_if_doesnt_exist(self):
6061 frank = UserID.from_string("@frank:test")
6162 user_id = frank.to_string()
6263 requester = create_requester(user_id)
63 result_user_id, result_token = yield self.handler.get_or_create_user(
64 requester, frank.localpart, "Frankie"
64 result_user_id, result_token = self.get_success(
65 self.handler.get_or_create_user(requester, frank.localpart, "Frankie")
6566 )
6667 self.assertEquals(result_user_id, user_id)
6768 self.assertTrue(result_token is not None)
6869 self.assertEquals(result_token, 'secret')
6970
70 @defer.inlineCallbacks
7171 def test_if_user_exists(self):
7272 store = self.hs.get_datastore()
7373 frank = UserID.from_string("@frank:test")
74 yield store.register(
75 user_id=frank.to_string(),
76 token="jkv;g498752-43gj['eamb!-5",
77 password_hash=None,
74 self.get_success(
75 store.register(
76 user_id=frank.to_string(),
77 token="jkv;g498752-43gj['eamb!-5",
78 password_hash=None,
79 )
7880 )
7981 local_part = frank.localpart
8082 user_id = frank.to_string()
8183 requester = create_requester(user_id)
82 result_user_id, result_token = yield self.handler.get_or_create_user(
83 requester, local_part, None
84 result_user_id, result_token = self.get_success(
85 self.handler.get_or_create_user(requester, local_part, None)
8486 )
8587 self.assertEquals(result_user_id, user_id)
8688 self.assertTrue(result_token is not None)
8789
88 @defer.inlineCallbacks
8990 def test_mau_limits_when_disabled(self):
9091 self.hs.config.limit_usage_by_mau = False
9192 # Ensure does not throw exception
92 yield self.handler.get_or_create_user(self.requester, 'a', "display_name")
93
94 @defer.inlineCallbacks
93 self.get_success(
94 self.handler.get_or_create_user(self.requester, 'a', "display_name")
95 )
96
9597 def test_get_or_create_user_mau_not_blocked(self):
9698 self.hs.config.limit_usage_by_mau = True
9799 self.store.count_monthly_users = Mock(
98100 return_value=defer.succeed(self.hs.config.max_mau_value - 1)
99101 )
100102 # Ensure does not throw exception
101 yield self.handler.get_or_create_user(self.requester, 'c', "User")
102
103 @defer.inlineCallbacks
103 self.get_success(self.handler.get_or_create_user(self.requester, 'c', "User"))
104
104105 def test_get_or_create_user_mau_blocked(self):
105106 self.hs.config.limit_usage_by_mau = True
106107 self.store.get_monthly_active_count = Mock(
107108 return_value=defer.succeed(self.lots_of_users)
108109 )
109 with self.assertRaises(ResourceLimitError):
110 yield self.handler.get_or_create_user(self.requester, 'b', "display_name")
110 self.get_failure(
111 self.handler.get_or_create_user(self.requester, 'b', "display_name"),
112 ResourceLimitError,
113 )
111114
112115 self.store.get_monthly_active_count = Mock(
113116 return_value=defer.succeed(self.hs.config.max_mau_value)
114117 )
115 with self.assertRaises(ResourceLimitError):
116 yield self.handler.get_or_create_user(self.requester, 'b', "display_name")
117
118 @defer.inlineCallbacks
118 self.get_failure(
119 self.handler.get_or_create_user(self.requester, 'b', "display_name"),
120 ResourceLimitError,
121 )
122
119123 def test_register_mau_blocked(self):
120124 self.hs.config.limit_usage_by_mau = True
121125 self.store.get_monthly_active_count = Mock(
122126 return_value=defer.succeed(self.lots_of_users)
123127 )
124 with self.assertRaises(ResourceLimitError):
125 yield self.handler.register(localpart="local_part")
128 self.get_failure(
129 self.handler.register(localpart="local_part"), ResourceLimitError
130 )
126131
127132 self.store.get_monthly_active_count = Mock(
128133 return_value=defer.succeed(self.hs.config.max_mau_value)
129134 )
130 with self.assertRaises(ResourceLimitError):
131 yield self.handler.register(localpart="local_part")
132
133 @defer.inlineCallbacks
135 self.get_failure(
136 self.handler.register(localpart="local_part"), ResourceLimitError
137 )
138
134139 def test_auto_create_auto_join_rooms(self):
135140 room_alias_str = "#room:test"
136141 self.hs.config.auto_join_rooms = [room_alias_str]
137 res = yield self.handler.register(localpart='jeff')
138 rooms = yield self.store.get_rooms_for_user(res[0])
142 res = self.get_success(self.handler.register(localpart='jeff'))
143 rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
139144 directory_handler = self.hs.get_handlers().directory_handler
140145 room_alias = RoomAlias.from_string(room_alias_str)
141 room_id = yield directory_handler.get_association(room_alias)
146 room_id = self.get_success(directory_handler.get_association(room_alias))
142147
143148 self.assertTrue(room_id['room_id'] in rooms)
144149 self.assertEqual(len(rooms), 1)
145150
146 @defer.inlineCallbacks
147151 def test_auto_create_auto_join_rooms_with_no_rooms(self):
148152 self.hs.config.auto_join_rooms = []
149153 frank = UserID.from_string("@frank:test")
150 res = yield self.handler.register(frank.localpart)
154 res = self.get_success(self.handler.register(frank.localpart))
151155 self.assertEqual(res[0], frank.to_string())
152 rooms = yield self.store.get_rooms_for_user(res[0])
153 self.assertEqual(len(rooms), 0)
154
155 @defer.inlineCallbacks
156 rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
157 self.assertEqual(len(rooms), 0)
158
156159 def test_auto_create_auto_join_where_room_is_another_domain(self):
157160 self.hs.config.auto_join_rooms = ["#room:another"]
158161 frank = UserID.from_string("@frank:test")
159 res = yield self.handler.register(frank.localpart)
162 res = self.get_success(self.handler.register(frank.localpart))
160163 self.assertEqual(res[0], frank.to_string())
161 rooms = yield self.store.get_rooms_for_user(res[0])
162 self.assertEqual(len(rooms), 0)
163
164 @defer.inlineCallbacks
164 rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
165 self.assertEqual(len(rooms), 0)
166
165167 def test_auto_create_auto_join_where_auto_create_is_false(self):
166168 self.hs.config.autocreate_auto_join_rooms = False
167169 room_alias_str = "#room:test"
168170 self.hs.config.auto_join_rooms = [room_alias_str]
169 res = yield self.handler.register(localpart='jeff')
170 rooms = yield self.store.get_rooms_for_user(res[0])
171 self.assertEqual(len(rooms), 0)
172
173 @defer.inlineCallbacks
171 res = self.get_success(self.handler.register(localpart='jeff'))
172 rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
173 self.assertEqual(len(rooms), 0)
174
174175 def test_auto_create_auto_join_rooms_when_support_user_exists(self):
175176 room_alias_str = "#room:test"
176177 self.hs.config.auto_join_rooms = [room_alias_str]
177178
178179 self.store.is_support_user = Mock(return_value=True)
179 res = yield self.handler.register(localpart='support')
180 rooms = yield self.store.get_rooms_for_user(res[0])
180 res = self.get_success(self.handler.register(localpart='support'))
181 rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
181182 self.assertEqual(len(rooms), 0)
182183 directory_handler = self.hs.get_handlers().directory_handler
183184 room_alias = RoomAlias.from_string(room_alias_str)
184 with self.assertRaises(SynapseError):
185 yield directory_handler.get_association(room_alias)
186
187 @defer.inlineCallbacks
185 self.get_failure(directory_handler.get_association(room_alias), SynapseError)
186
188187 def test_auto_create_auto_join_where_no_consent(self):
189 self.hs.config.user_consent_at_registration = True
190 self.hs.config.block_events_without_consent_error = "Error"
191 room_alias_str = "#room:test"
192 self.hs.config.auto_join_rooms = [room_alias_str]
193 res = yield self.handler.register(localpart='jeff')
194 yield self.handler.post_consent_actions(res[0])
195 rooms = yield self.store.get_rooms_for_user(res[0])
196 self.assertEqual(len(rooms), 0)
197
198 @defer.inlineCallbacks
188 """Test to ensure that the first user is not auto-joined to a room if
189 they have not given general consent.
190 """
191
192 # Given:-
193 # * a user must give consent,
194 # * they have not given that consent
195 # * The server is configured to auto-join to a room
196 # (and autocreate if necessary)
197
198 event_creation_handler = self.hs.get_event_creation_handler()
199 # (Messing with the internals of event_creation_handler is fragile
200 # but can't see a better way to do this. One option could be to subclass
201 # the test with custom config.)
202 event_creation_handler._block_events_without_consent_error = "Error"
203 event_creation_handler._consent_uri_builder = Mock()
204 room_alias_str = "#room:test"
205 self.hs.config.auto_join_rooms = [room_alias_str]
206
207 # When:-
208 # * the user is registered and post consent actions are called
209 res = self.get_success(self.handler.register(localpart='jeff'))
210 self.get_success(self.handler.post_consent_actions(res[0]))
211
212 # Then:-
213 # * Ensure that they have not been joined to the room
214 rooms = self.get_success(self.store.get_rooms_for_user(res[0]))
215 self.assertEqual(len(rooms), 0)
216
199217 def test_register_support_user(self):
200 res = yield self.handler.register(localpart='user', user_type=UserTypes.SUPPORT)
218 res = self.get_success(
219 self.handler.register(localpart='user', user_type=UserTypes.SUPPORT)
220 )
201221 self.assertTrue(self.store.is_support_user(res[0]))
202222
203 @defer.inlineCallbacks
204223 def test_register_not_support_user(self):
205 res = yield self.handler.register(localpart='user')
224 res = self.get_success(self.handler.register(localpart='user'))
206225 self.assertFalse(self.store.is_support_user(res[0]))
2323 from synapse.types import UserID
2424
2525 from tests import unittest
26
27 from ..utils import (
28 DeferredMockCallable,
29 MockClock,
30 MockHttpResource,
31 setup_test_homeserver,
32 )
26 from tests.utils import register_federation_servlets
27
28 # Some local users to test with
29 U_APPLE = UserID.from_string("@apple:test")
30 U_BANANA = UserID.from_string("@banana:test")
31
32 # Remote user
33 U_ONION = UserID.from_string("@onion:farm")
34
35 # Test room id
36 ROOM_ID = "a-room"
3337
3438
3539 def _expect_edu_transaction(edu_type, content, origin="test"):
4549 return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8')
4650
4751
48 class TypingNotificationsTestCase(unittest.TestCase):
49 """Tests typing notifications to rooms."""
50
51 @defer.inlineCallbacks
52 def setUp(self):
53 self.clock = MockClock()
54
55 self.mock_http_client = Mock(spec=[])
56 self.mock_http_client.put_json = DeferredMockCallable()
57
58 self.mock_federation_resource = MockHttpResource()
59
60 mock_notifier = Mock()
61 self.on_new_event = mock_notifier.on_new_event
62
63 self.auth = Mock(spec=[])
64 self.state_handler = Mock()
65
66 hs = yield setup_test_homeserver(
67 self.addCleanup,
68 "test",
69 auth=self.auth,
70 clock=self.clock,
71 datastore=Mock(
52 class TypingNotificationsTestCase(unittest.HomeserverTestCase):
53 servlets = [register_federation_servlets]
54
55 def make_homeserver(self, reactor, clock):
56 # we mock out the keyring so as to skip the authentication check on the
57 # federation API call.
58 mock_keyring = Mock(spec=["verify_json_for_server"])
59 mock_keyring.verify_json_for_server.return_value = defer.succeed(True)
60
61 # we mock out the federation client too
62 mock_federation_client = Mock(spec=["put_json"])
63 mock_federation_client.put_json.return_value = defer.succeed((200, "OK"))
64
65 hs = self.setup_test_homeserver(
66 datastore=(Mock(
7267 spec=[
7368 # Bits that Federation needs
7469 "prep_send_transaction",
8176 "get_user_directory_stream_pos",
8277 "get_current_state_deltas",
8378 ]
84 ),
85 state_handler=self.state_handler,
86 handlers=Mock(),
87 notifier=mock_notifier,
88 resource_for_client=Mock(),
89 resource_for_federation=self.mock_federation_resource,
90 http_client=self.mock_http_client,
91 keyring=Mock(),
92 )
79 )),
80 notifier=Mock(),
81 http_client=mock_federation_client,
82 keyring=mock_keyring,
83 )
84
85 return hs
86
87 def prepare(self, reactor, clock, hs):
88 # the tests assume that we are starting at unix time 1000
89 reactor.pump((1000, ))
90
91 mock_notifier = hs.get_notifier()
92 self.on_new_event = mock_notifier.on_new_event
9393
9494 self.handler = hs.get_typing_handler()
9595
108108
109109 self.datastore.get_received_txn_response = get_received_txn_response
110110
111 self.room_id = "a-room"
112
113111 self.room_members = []
114112
115113 def check_joined_room(room_id, user_id):
116114 if user_id not in [u.to_string() for u in self.room_members]:
117115 raise AuthError(401, "User is not in the room")
116 hs.get_auth().check_joined_room = check_joined_room
118117
119118 def get_joined_hosts_for_room(room_id):
120119 return set(member.domain for member in self.room_members)
123122
124123 def get_current_user_in_room(room_id):
125124 return set(str(u) for u in self.room_members)
126
127 self.state_handler.get_current_user_in_room = get_current_user_in_room
125 hs.get_state_handler().get_current_user_in_room = get_current_user_in_room
128126
129127 self.datastore.get_user_directory_stream_pos.return_value = (
130128 # we deliberately return a non-None stream pos to avoid doing an initial_spam
133131
134132 self.datastore.get_current_state_deltas.return_value = None
135133
136 self.auth.check_joined_room = check_joined_room
137
138134 self.datastore.get_to_device_stream_token = lambda: 0
139135 self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
140136 self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
141137
142 # Some local users to test with
143 self.u_apple = UserID.from_string("@apple:test")
144 self.u_banana = UserID.from_string("@banana:test")
145
146 # Remote user
147 self.u_onion = UserID.from_string("@onion:farm")
148
149 @defer.inlineCallbacks
150138 def test_started_typing_local(self):
151 self.room_members = [self.u_apple, self.u_banana]
139 self.room_members = [U_APPLE, U_BANANA]
152140
153141 self.assertEquals(self.event_source.get_current_key(), 0)
154142
155 yield self.handler.started_typing(
156 target_user=self.u_apple,
157 auth_user=self.u_apple,
158 room_id=self.room_id,
143 self.successResultOf(self.handler.started_typing(
144 target_user=U_APPLE,
145 auth_user=U_APPLE,
146 room_id=ROOM_ID,
159147 timeout=20000,
160 )
161
162 self.on_new_event.assert_has_calls(
163 [call('typing_key', 1, rooms=[self.room_id])]
148 ))
149
150 self.on_new_event.assert_has_calls(
151 [call('typing_key', 1, rooms=[ROOM_ID])]
164152 )
165153
166154 self.assertEquals(self.event_source.get_current_key(), 1)
167 events = yield self.event_source.get_new_events(
168 room_ids=[self.room_id], from_key=0
169 )
170 self.assertEquals(
171 events[0],
172 [
173 {
174 "type": "m.typing",
175 "room_id": self.room_id,
176 "content": {"user_ids": [self.u_apple.to_string()]},
177 }
178 ],
179 )
180
181 @defer.inlineCallbacks
155 events = self.event_source.get_new_events(
156 room_ids=[ROOM_ID], from_key=0
157 )
158 self.assertEquals(
159 events[0],
160 [
161 {
162 "type": "m.typing",
163 "room_id": ROOM_ID,
164 "content": {"user_ids": [U_APPLE.to_string()]},
165 }
166 ],
167 )
168
182169 def test_started_typing_remote_send(self):
183 self.room_members = [self.u_apple, self.u_onion]
184
185 put_json = self.mock_http_client.put_json
186 put_json.expect_call_and_return(
187 call(
188 "farm",
189 path="/_matrix/federation/v1/send/1000000/",
190 data=_expect_edu_transaction(
191 "m.typing",
192 content={
193 "room_id": self.room_id,
194 "user_id": self.u_apple.to_string(),
195 "typing": True,
196 },
197 ),
198 json_data_callback=ANY,
199 long_retries=True,
200 backoff_on_404=True,
170 self.room_members = [U_APPLE, U_ONION]
171
172 self.successResultOf(self.handler.started_typing(
173 target_user=U_APPLE,
174 auth_user=U_APPLE,
175 room_id=ROOM_ID,
176 timeout=20000,
177 ))
178
179 put_json = self.hs.get_http_client().put_json
180 put_json.assert_called_once_with(
181 "farm",
182 path="/_matrix/federation/v1/send/1000000",
183 data=_expect_edu_transaction(
184 "m.typing",
185 content={
186 "room_id": ROOM_ID,
187 "user_id": U_APPLE.to_string(),
188 "typing": True,
189 },
201190 ),
202 defer.succeed((200, "OK")),
203 )
204
205 yield self.handler.started_typing(
206 target_user=self.u_apple,
207 auth_user=self.u_apple,
208 room_id=self.room_id,
209 timeout=20000,
210 )
211
212 yield put_json.await_calls()
213
214 @defer.inlineCallbacks
191 json_data_callback=ANY,
192 long_retries=True,
193 backoff_on_404=True,
194 try_trailing_slash_on_400=True,
195 )
196
215197 def test_started_typing_remote_recv(self):
216 self.room_members = [self.u_apple, self.u_onion]
198 self.room_members = [U_APPLE, U_ONION]
217199
218200 self.assertEquals(self.event_source.get_current_key(), 0)
219201
220 (code, response) = yield self.mock_federation_resource.trigger(
202 (request, channel) = self.make_request(
221203 "PUT",
222 "/_matrix/federation/v1/send/1000000/",
204 "/_matrix/federation/v1/send/1000000",
223205 _make_edu_transaction_json(
224206 "m.typing",
225207 content={
226 "room_id": self.room_id,
227 "user_id": self.u_onion.to_string(),
208 "room_id": ROOM_ID,
209 "user_id": U_ONION.to_string(),
228210 "typing": True,
229211 },
230212 ),
231213 federation_auth_origin=b'farm',
232214 )
233
234 self.on_new_event.assert_has_calls(
235 [call('typing_key', 1, rooms=[self.room_id])]
215 self.render(request)
216 self.assertEqual(channel.code, 200)
217
218 self.on_new_event.assert_has_calls(
219 [call('typing_key', 1, rooms=[ROOM_ID])]
236220 )
237221
238222 self.assertEquals(self.event_source.get_current_key(), 1)
239 events = yield self.event_source.get_new_events(
240 room_ids=[self.room_id], from_key=0
241 )
242 self.assertEquals(
243 events[0],
244 [
245 {
246 "type": "m.typing",
247 "room_id": self.room_id,
248 "content": {"user_ids": [self.u_onion.to_string()]},
249 }
250 ],
251 )
252
253 @defer.inlineCallbacks
223 events = self.event_source.get_new_events(
224 room_ids=[ROOM_ID], from_key=0
225 )
226 self.assertEquals(
227 events[0],
228 [
229 {
230 "type": "m.typing",
231 "room_id": ROOM_ID,
232 "content": {"user_ids": [U_ONION.to_string()]},
233 }
234 ],
235 )
236
254237 def test_stopped_typing(self):
255 self.room_members = [self.u_apple, self.u_banana, self.u_onion]
256
257 put_json = self.mock_http_client.put_json
258 put_json.expect_call_and_return(
259 call(
260 "farm",
261 path="/_matrix/federation/v1/send/1000000/",
262 data=_expect_edu_transaction(
263 "m.typing",
264 content={
265 "room_id": self.room_id,
266 "user_id": self.u_apple.to_string(),
267 "typing": False,
268 },
269 ),
270 json_data_callback=ANY,
271 long_retries=True,
272 backoff_on_404=True,
273 ),
274 defer.succeed((200, "OK")),
275 )
238 self.room_members = [U_APPLE, U_BANANA, U_ONION]
276239
277240 # Gut-wrenching
278241 from synapse.handlers.typing import RoomMember
279242
280 member = RoomMember(self.room_id, self.u_apple.to_string())
243 member = RoomMember(ROOM_ID, U_APPLE.to_string())
281244 self.handler._member_typing_until[member] = 1002000
282 self.handler._room_typing[self.room_id] = set([self.u_apple.to_string()])
245 self.handler._room_typing[ROOM_ID] = set([U_APPLE.to_string()])
283246
284247 self.assertEquals(self.event_source.get_current_key(), 0)
285248
286 yield self.handler.stopped_typing(
287 target_user=self.u_apple, auth_user=self.u_apple, room_id=self.room_id
288 )
289
290 self.on_new_event.assert_has_calls(
291 [call('typing_key', 1, rooms=[self.room_id])]
292 )
293
294 yield put_json.await_calls()
249 self.successResultOf(self.handler.stopped_typing(
250 target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID
251 ))
252
253 self.on_new_event.assert_has_calls(
254 [call('typing_key', 1, rooms=[ROOM_ID])]
255 )
256
257 put_json = self.hs.get_http_client().put_json
258 put_json.assert_called_once_with(
259 "farm",
260 path="/_matrix/federation/v1/send/1000000",
261 data=_expect_edu_transaction(
262 "m.typing",
263 content={
264 "room_id": ROOM_ID,
265 "user_id": U_APPLE.to_string(),
266 "typing": False,
267 },
268 ),
269 json_data_callback=ANY,
270 long_retries=True,
271 backoff_on_404=True,
272 try_trailing_slash_on_400=True,
273 )
295274
296275 self.assertEquals(self.event_source.get_current_key(), 1)
297 events = yield self.event_source.get_new_events(
298 room_ids=[self.room_id], from_key=0
299 )
300 self.assertEquals(
301 events[0],
302 [
303 {
304 "type": "m.typing",
305 "room_id": self.room_id,
276 events = self.event_source.get_new_events(
277 room_ids=[ROOM_ID], from_key=0
278 )
279 self.assertEquals(
280 events[0],
281 [
282 {
283 "type": "m.typing",
284 "room_id": ROOM_ID,
306285 "content": {"user_ids": []},
307286 }
308287 ],
309288 )
310289
311 @defer.inlineCallbacks
312290 def test_typing_timeout(self):
313 self.room_members = [self.u_apple, self.u_banana]
291 self.room_members = [U_APPLE, U_BANANA]
314292
315293 self.assertEquals(self.event_source.get_current_key(), 0)
316294
317 yield self.handler.started_typing(
318 target_user=self.u_apple,
319 auth_user=self.u_apple,
320 room_id=self.room_id,
295 self.successResultOf(self.handler.started_typing(
296 target_user=U_APPLE,
297 auth_user=U_APPLE,
298 room_id=ROOM_ID,
321299 timeout=10000,
322 )
323
324 self.on_new_event.assert_has_calls(
325 [call('typing_key', 1, rooms=[self.room_id])]
300 ))
301
302 self.on_new_event.assert_has_calls(
303 [call('typing_key', 1, rooms=[ROOM_ID])]
326304 )
327305 self.on_new_event.reset_mock()
328306
329307 self.assertEquals(self.event_source.get_current_key(), 1)
330 events = yield self.event_source.get_new_events(
331 room_ids=[self.room_id], from_key=0
332 )
333 self.assertEquals(
334 events[0],
335 [
336 {
337 "type": "m.typing",
338 "room_id": self.room_id,
339 "content": {"user_ids": [self.u_apple.to_string()]},
340 }
341 ],
342 )
343
344 self.clock.advance_time(16)
345
346 self.on_new_event.assert_has_calls(
347 [call('typing_key', 2, rooms=[self.room_id])]
308 events = self.event_source.get_new_events(
309 room_ids=[ROOM_ID], from_key=0
310 )
311 self.assertEquals(
312 events[0],
313 [
314 {
315 "type": "m.typing",
316 "room_id": ROOM_ID,
317 "content": {"user_ids": [U_APPLE.to_string()]},
318 }
319 ],
320 )
321
322 self.reactor.pump([16, ])
323
324 self.on_new_event.assert_has_calls(
325 [call('typing_key', 2, rooms=[ROOM_ID])]
348326 )
349327
350328 self.assertEquals(self.event_source.get_current_key(), 2)
351 events = yield self.event_source.get_new_events(
352 room_ids=[self.room_id], from_key=1
353 )
354 self.assertEquals(
355 events[0],
356 [
357 {
358 "type": "m.typing",
359 "room_id": self.room_id,
329 events = self.event_source.get_new_events(
330 room_ids=[ROOM_ID], from_key=1
331 )
332 self.assertEquals(
333 events[0],
334 [
335 {
336 "type": "m.typing",
337 "room_id": ROOM_ID,
360338 "content": {"user_ids": []},
361339 }
362340 ],
364342
365343 # SYN-230 - see if we can still set after timeout
366344
367 yield self.handler.started_typing(
368 target_user=self.u_apple,
369 auth_user=self.u_apple,
370 room_id=self.room_id,
345 self.successResultOf(self.handler.started_typing(
346 target_user=U_APPLE,
347 auth_user=U_APPLE,
348 room_id=ROOM_ID,
371349 timeout=10000,
372 )
373
374 self.on_new_event.assert_has_calls(
375 [call('typing_key', 3, rooms=[self.room_id])]
350 ))
351
352 self.on_new_event.assert_has_calls(
353 [call('typing_key', 3, rooms=[ROOM_ID])]
376354 )
377355 self.on_new_event.reset_mock()
378356
379357 self.assertEquals(self.event_source.get_current_key(), 3)
380 events = yield self.event_source.get_new_events(
381 room_ids=[self.room_id], from_key=0
382 )
383 self.assertEquals(
384 events[0],
385 [
386 {
387 "type": "m.typing",
388 "room_id": self.room_id,
389 "content": {"user_ids": [self.u_apple.to_string()]},
390 }
391 ],
392 )
358 events = self.event_source.get_new_events(
359 room_ids=[ROOM_ID], from_key=0
360 )
361 self.assertEquals(
362 events[0],
363 [
364 {
365 "type": "m.typing",
366 "room_id": ROOM_ID,
367 "content": {"user_ids": [U_APPLE.to_string()]},
368 }
369 ],
370 )
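
The typing-handler tests above are migrated from Twisted's @defer.inlineCallbacks/yield style to the synchronous HomeserverTestCase helpers, with a fake reactor driven explicitly. A minimal sketch of the new idiom, assuming only the helpers already used above (successResultOf, pump, and the fake reactor):

    from twisted.internet import defer

    from tests import unittest


    class ExampleTestCase(unittest.HomeserverTestCase):
        def test_deferred_unwrapping(self):
            # successResultOf() replaces "result = yield d": it asserts the
            # Deferred has already fired and returns its result synchronously.
            d = defer.succeed(42)
            self.assertEqual(self.successResultOf(d), 42)

        def test_clock_is_fake(self):
            # Time only moves when the test pumps the fake reactor, which is
            # how test_typing_timeout drives the typing timeout above.
            before = self.reactor.seconds()
            self.reactor.pump([16])  # advance 16 seconds
            self.assertEqual(self.reactor.seconds(), before + 16)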
1313 # limitations under the License.
1414 from mock import Mock
1515
16 from twisted.internet import defer
17
1816 from synapse.api.constants import UserTypes
19 from synapse.handlers.user_directory import UserDirectoryHandler
17 from synapse.rest.client.v1 import admin, login, room
18 from synapse.rest.client.v2_alpha import user_directory
2019 from synapse.storage.roommember import ProfileInfo
2120
2221 from tests import unittest
23 from tests.utils import setup_test_homeserver
24
25
26 class UserDirectoryHandlers(object):
27 def __init__(self, hs):
28 self.user_directory_handler = UserDirectoryHandler(hs)
29
30
31 class UserDirectoryTestCase(unittest.TestCase):
32 """ Tests the UserDirectoryHandler. """
33
34 @defer.inlineCallbacks
35 def setUp(self):
36 hs = yield setup_test_homeserver(self.addCleanup)
22
23
24 class UserDirectoryTestCase(unittest.HomeserverTestCase):
25 """
26 Tests the UserDirectoryHandler.
27 """
28
29 servlets = [
30 login.register_servlets,
31 admin.register_servlets,
32 room.register_servlets,
33 ]
34
35 def make_homeserver(self, reactor, clock):
36
37 config = self.default_config()
38 config.update_user_directory = True
39 return self.setup_test_homeserver(config=config)
40
41 def prepare(self, reactor, clock, hs):
3742 self.store = hs.get_datastore()
38 hs.handlers = UserDirectoryHandlers(hs)
39
40 self.handler = hs.get_handlers().user_directory_handler
41
42 @defer.inlineCallbacks
43 self.handler = hs.get_user_directory_handler()
44
4345 def test_handle_local_profile_change_with_support_user(self):
4446 support_user_id = "@support:test"
45 yield self.store.register(
46 user_id=support_user_id,
47 token="123",
48 password_hash=None,
49 user_type=UserTypes.SUPPORT
50 )
51
52 yield self.handler.handle_local_profile_change(support_user_id, None)
53 profile = yield self.store.get_user_in_directory(support_user_id)
47 self.get_success(
48 self.store.register(
49 user_id=support_user_id,
50 token="123",
51 password_hash=None,
52 user_type=UserTypes.SUPPORT,
53 )
54 )
55
56 self.get_success(
57 self.handler.handle_local_profile_change(support_user_id, None)
58 )
59 profile = self.get_success(self.store.get_user_in_directory(support_user_id))
5460 self.assertTrue(profile is None)
5561 display_name = 'display_name'
5662
57 profile_info = ProfileInfo(
58 avatar_url='avatar_url',
59 display_name=display_name,
60 )
63 profile_info = ProfileInfo(avatar_url='avatar_url', display_name=display_name)
6164 regular_user_id = '@regular:test'
62 yield self.handler.handle_local_profile_change(regular_user_id, profile_info)
63 profile = yield self.store.get_user_in_directory(regular_user_id)
65 self.get_success(
66 self.handler.handle_local_profile_change(regular_user_id, profile_info)
67 )
68 profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
6469 self.assertTrue(profile['display_name'] == display_name)
6570
66 @defer.inlineCallbacks
6771 def test_handle_user_deactivated_support_user(self):
6872 s_user_id = "@support:test"
69 self.store.register(
70 user_id=s_user_id,
71 token="123",
72 password_hash=None,
73 user_type=UserTypes.SUPPORT
73 self.get_success(
74 self.store.register(
75 user_id=s_user_id,
76 token="123",
77 password_hash=None,
78 user_type=UserTypes.SUPPORT,
79 )
7480 )
7581
7682 self.store.remove_from_user_dir = Mock()
7783 self.store.remove_from_user_in_public_room = Mock()
78 yield self.handler.handle_user_deactivated(s_user_id)
84 self.get_success(self.handler.handle_user_deactivated(s_user_id))
7985 self.store.remove_from_user_dir.assert_not_called()
8086 self.store.remove_from_user_in_public_room.assert_not_called()
8187
82 @defer.inlineCallbacks
8388 def test_handle_user_deactivated_regular_user(self):
8489 r_user_id = "@regular:test"
85 self.store.register(user_id=r_user_id, token="123", password_hash=None)
90 self.get_success(
91 self.store.register(user_id=r_user_id, token="123", password_hash=None)
92 )
8693 self.store.remove_from_user_dir = Mock()
87 self.store.remove_from_user_in_public_room = Mock()
88 yield self.handler.handle_user_deactivated(r_user_id)
94 self.get_success(self.handler.handle_user_deactivated(r_user_id))
8995 self.store.remove_from_user_dir.assert_called_once_with(r_user_id)
90 self.store.remove_from_user_in_public_room.assert_called_once_with(r_user_id)
96
97 def test_private_room(self):
98 """
99 A user can be searched for only by people that are either in a public
100 room, or that share a private chat.
101 """
102 u1 = self.register_user("user1", "pass")
103 u1_token = self.login(u1, "pass")
104 u2 = self.register_user("user2", "pass")
105 u2_token = self.login(u2, "pass")
106 u3 = self.register_user("user3", "pass")
107
108 # We do not add users to the directory until they join a room.
109 s = self.get_success(self.handler.search_users(u1, "user2", 10))
110 self.assertEqual(len(s["results"]), 0)
111
112 room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
113 self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
114 self.helper.join(room, user=u2, tok=u2_token)
115
116 # Check we have populated the database correctly.
117 shares_private = self.get_users_who_share_private_rooms()
118 public_users = self.get_users_in_public_rooms()
119
120 self.assertEqual(
121 self._compress_shared(shares_private), set([(u1, u2, room), (u2, u1, room)])
122 )
123 self.assertEqual(public_users, [])
124
125 # We get one search result when searching for user2 by user1.
126 s = self.get_success(self.handler.search_users(u1, "user2", 10))
127 self.assertEqual(len(s["results"]), 1)
128
129 # We get NO search results when searching for user2 by user3.
130 s = self.get_success(self.handler.search_users(u3, "user2", 10))
131 self.assertEqual(len(s["results"]), 0)
132
133 # We get NO search results when searching for user3 by user1.
134 s = self.get_success(self.handler.search_users(u1, "user3", 10))
135 self.assertEqual(len(s["results"]), 0)
136
137 # User 2 then leaves.
138 self.helper.leave(room, user=u2, tok=u2_token)
139
140 # Check we have removed the values.
141 shares_private = self.get_users_who_share_private_rooms()
142 public_users = self.get_users_in_public_rooms()
143
144 self.assertEqual(self._compress_shared(shares_private), set())
145 self.assertEqual(public_users, [])
146
147 # User1 now gets no search results for any of the other users.
148 s = self.get_success(self.handler.search_users(u1, "user2", 10))
149 self.assertEqual(len(s["results"]), 0)
150
151 s = self.get_success(self.handler.search_users(u1, "user3", 10))
152 self.assertEqual(len(s["results"]), 0)
153
154 def _compress_shared(self, shared):
155 """
156 Compress a list of users who share rooms dicts to a list of tuples.
157 """
158 r = set()
159 for i in shared:
160 r.add((i["user_id"], i["other_user_id"], i["room_id"]))
161 return r
162
163 def get_users_in_public_rooms(self):
164 r = self.get_success(
165 self.store._simple_select_list(
166 "users_in_public_rooms", None, ("user_id", "room_id")
167 )
168 )
169 retval = []
170 for i in r:
171 retval.append((i["user_id"], i["room_id"]))
172 return retval
173
174 def get_users_who_share_private_rooms(self):
175 return self.get_success(
176 self.store._simple_select_list(
177 "users_who_share_private_rooms",
178 None,
179 ["user_id", "other_user_id", "room_id"],
180 )
181 )
182
183 def _add_background_updates(self):
184 """
185 Add the background updates we need to run.
186 """
187 # Ugh, have to reset this flag
188 self.store._all_done = False
189
190 self.get_success(
191 self.store._simple_insert(
192 "background_updates",
193 {
194 "update_name": "populate_user_directory_createtables",
195 "progress_json": "{}",
196 },
197 )
198 )
199 self.get_success(
200 self.store._simple_insert(
201 "background_updates",
202 {
203 "update_name": "populate_user_directory_process_rooms",
204 "progress_json": "{}",
205 "depends_on": "populate_user_directory_createtables",
206 },
207 )
208 )
209 self.get_success(
210 self.store._simple_insert(
211 "background_updates",
212 {
213 "update_name": "populate_user_directory_process_users",
214 "progress_json": "{}",
215 "depends_on": "populate_user_directory_process_rooms",
216 },
217 )
218 )
219 self.get_success(
220 self.store._simple_insert(
221 "background_updates",
222 {
223 "update_name": "populate_user_directory_cleanup",
224 "progress_json": "{}",
225 "depends_on": "populate_user_directory_process_users",
226 },
227 )
228 )
229
230 def test_initial(self):
231 """
232 The user directory's initial handler correctly updates the search tables.
233 """
234 u1 = self.register_user("user1", "pass")
235 u1_token = self.login(u1, "pass")
236 u2 = self.register_user("user2", "pass")
237 u2_token = self.login(u2, "pass")
238 u3 = self.register_user("user3", "pass")
239 u3_token = self.login(u3, "pass")
240
241 room = self.helper.create_room_as(u1, is_public=True, tok=u1_token)
242 self.helper.invite(room, src=u1, targ=u2, tok=u1_token)
243 self.helper.join(room, user=u2, tok=u2_token)
244
245 private_room = self.helper.create_room_as(u1, is_public=False, tok=u1_token)
246 self.helper.invite(private_room, src=u1, targ=u3, tok=u1_token)
247 self.helper.join(private_room, user=u3, tok=u3_token)
248
249 self.get_success(self.store.update_user_directory_stream_pos(None))
250 self.get_success(self.store.delete_all_from_user_dir())
251
252 shares_private = self.get_users_who_share_private_rooms()
253 public_users = self.get_users_in_public_rooms()
254
255 # Nothing updated yet
256 self.assertEqual(shares_private, [])
257 self.assertEqual(public_users, [])
258
259 # Do the initial population of the user directory via the background update
260 self._add_background_updates()
261
262 while not self.get_success(self.store.has_completed_background_updates()):
263 self.get_success(self.store.do_next_background_update(100), by=0.1)
264
265 shares_private = self.get_users_who_share_private_rooms()
266 public_users = self.get_users_in_public_rooms()
267
268 # User 1 and User 2 are in the same public room
269 self.assertEqual(set(public_users), set([(u1, room), (u2, room)]))
270
271 # User 1 and User 3 share private rooms
272 self.assertEqual(
273 self._compress_shared(shares_private),
274 set([(u1, u3, private_room), (u3, u1, private_room)]),
275 )
276
277 def test_initial_share_all_users(self):
278 """
279 search_all_users = True means that a user does not have to share a
280 private room with the searching user, or be in a public room, to be
281 visible in search results.
282 """
283 self.handler.search_all_users = True
284 self.hs.config.user_directory_search_all_users = True
285
286 u1 = self.register_user("user1", "pass")
287 self.register_user("user2", "pass")
288 u3 = self.register_user("user3", "pass")
289
290 # Wipe the user dir
291 self.get_success(self.store.update_user_directory_stream_pos(None))
292 self.get_success(self.store.delete_all_from_user_dir())
293
294 # Do the initial population of the user directory via the background update
295 self._add_background_updates()
296
297 while not self.get_success(self.store.has_completed_background_updates()):
298 self.get_success(self.store.do_next_background_update(100), by=0.1)
299
300 shares_private = self.get_users_who_share_private_rooms()
301 public_users = self.get_users_in_public_rooms()
302
303 # No users share rooms
304 self.assertEqual(public_users, [])
305 self.assertEqual(self._compress_shared(shares_private), set([]))
306
307 # Despite not sharing a room, search_all_users means we get a search
308 # result.
309 s = self.get_success(self.handler.search_users(u1, u3, 10))
310 self.assertEqual(len(s["results"]), 1)
311
312 # We can find the other two users
313 s = self.get_success(self.handler.search_users(u1, "user", 10))
314 self.assertEqual(len(s["results"]), 2)
315
316 # Registering a user and then searching for them works.
317 u4 = self.register_user("user4", "pass")
318 s = self.get_success(self.handler.search_users(u1, u4, 10))
319 self.assertEqual(len(s["results"]), 1)
320
321
322 class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
323 user_id = "@test:test"
324
325 servlets = [
326 user_directory.register_servlets,
327 room.register_servlets,
328 login.register_servlets,
329 admin.register_servlets,
330 ]
331
332 def make_homeserver(self, reactor, clock):
333 config = self.default_config()
334 config.update_user_directory = True
335 hs = self.setup_test_homeserver(config=config)
336
337 self.config = hs.config
338
339 return hs
340
341 def test_disabling_room_list(self):
342 self.config.user_directory_search_enabled = True
343
344 # First we create a room with another user so that user dir is non-empty
345 # for our user
346 self.helper.create_room_as(self.user_id)
347 u2 = self.register_user("user2", "pass")
348 room = self.helper.create_room_as(self.user_id)
349 self.helper.join(room, user=u2)
350
351 # Assert user directory is not empty
352 request, channel = self.make_request(
353 "POST",
354 b"user_directory/search",
355 b'{"search_term":"user2"}',
356 )
357 self.render(request)
358 self.assertEquals(200, channel.code, channel.result)
359 self.assertTrue(len(channel.json_body["results"]) > 0)
360
361 # Disable user directory and check search returns nothing
362 self.config.user_directory_search_enabled = False
363 request, channel = self.make_request(
364 "POST",
365 b"user_directory/search",
366 b'{"search_term":"user2"}',
367 )
368 self.render(request)
369 self.assertEquals(200, channel.code, channel.result)
370 self.assertTrue(len(channel.json_body["results"]) == 0)
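
These user-directory tests lean on HomeserverTestCase.get_success() to run a Deferred-returning call to completion on the fake reactor and unwrap its result. A rough sketch of what such a helper does, inferred from its use above rather than taken from tests/unittest.py:

    from twisted.internet import defer

    def get_success_sketch(testcase, awaitable, by=0.0):
        # Non-Deferred results pass straight through; otherwise spin the
        # fake reactor so pending callbacks run, then unwrap the result.
        if not isinstance(awaitable, defer.Deferred):
            return awaitable
        testcase.pump(by=by)
        return testcase.successResultOf(awaitable)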
267267
268268 self.assertIsInstance(f.value, TimeoutError)
269269
270 def test_client_requires_trailing_slashes(self):
271 """
272 If a connection is made to a client but the client rejects it due to
273 requiring a trailing slash, we need to retry the request with a
274 trailing slash. Workaround for Synapse <= v0.99.3, explained in #3622.
275 """
276 d = self.cl.get_json(
277 "testserv:8008", "foo/bar", try_trailing_slash_on_400=True,
278 )
279
280 # Send the request
281 self.pump()
282
283 # there should have been a call to connectTCP
284 clients = self.reactor.tcpClients
285 self.assertEqual(len(clients), 1)
286 (_host, _port, factory, _timeout, _bindAddress) = clients[0]
287
288 # complete the connection and wire it up to a fake transport
289 client = factory.buildProtocol(None)
290 conn = StringTransport()
291 client.makeConnection(conn)
292
293 # that should have made it send the request to the connection
294 self.assertRegex(conn.value(), b"^GET /foo/bar")
295
296 # Clear the original request data before sending a response
297 conn.clear()
298
299 # Send the HTTP response
300 client.dataReceived(
301 b"HTTP/1.1 400 Bad Request\r\n"
302 b"Content-Type: application/json\r\n"
303 b"Content-Length: 59\r\n"
304 b"\r\n"
305 b'{"errcode":"M_UNRECOGNIZED","error":"Unrecognized request"}'
306 )
307
308 # We should get another request with a trailing slash
309 self.assertRegex(conn.value(), b"^GET /foo/bar/")
310
311 # Send a happy response this time
312 client.dataReceived(
313 b"HTTP/1.1 200 OK\r\n"
314 b"Content-Type: application/json\r\n"
315 b"Content-Length: 2\r\n"
316 b"\r\n"
317 b'{}'
318 )
319
320 # We should get a successful response
321 r = self.successResultOf(d)
322 self.assertEqual(r, {})
323
324 def test_client_does_not_retry_on_400_plus(self):
325 """
326 Another test for trailing slashes, but this time checking that we do
327 not retry when the response is not a 400/M_UNRECOGNIZED error.
328
329 See test_client_requires_trailing_slashes() for context.
330 """
331 d = self.cl.get_json(
332 "testserv:8008", "foo/bar", try_trailing_slash_on_400=True,
333 )
334
335 # Send the request
336 self.pump()
337
338 # there should have been a call to connectTCP
339 clients = self.reactor.tcpClients
340 self.assertEqual(len(clients), 1)
341 (_host, _port, factory, _timeout, _bindAddress) = clients[0]
342
343 # complete the connection and wire it up to a fake transport
344 client = factory.buildProtocol(None)
345 conn = StringTransport()
346 client.makeConnection(conn)
347
348 # that should have made it send the request to the connection
349 self.assertRegex(conn.value(), b"^GET /foo/bar")
350
351 # Clear the original request data before sending a response
352 conn.clear()
353
354 # Send the HTTP response
355 client.dataReceived(
356 b"HTTP/1.1 404 Not Found\r\n"
357 b"Content-Type: application/json\r\n"
358 b"Content-Length: 2\r\n"
359 b"\r\n"
360 b"{}"
361 )
362
363 # We should not get another request
364 self.assertEqual(conn.value(), b"")
365
366 # We should get a 404 failure response
367 self.failureResultOf(d)
368
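
The two tests above pin down the trailing-slash workaround: retry with a "/" appended only when try_trailing_slash_on_400 is set and the server answered 400 with M_UNRECOGNIZED; any other error (such as the 404 in the second test) must fail through unchanged. A sketch of that decision, with illustrative names rather than Synapse's actual code:

    def should_retry_with_trailing_slash(status_code, errcode, try_trailing_slash_on_400):
        # Workaround for Synapse <= v0.99.3 (#3622): those servers only
        # recognised some federation paths with a trailing slash.
        return (
            try_trailing_slash_on_400
            and status_code == 400
            and errcode == "M_UNRECOGNIZED"
        )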
270369 def test_client_sends_body(self):
271370 self.cl.post_json(
272371 "testserv:8008", "foo/bar", timeout=10000,
6262 config.email_smtp_port = 20
6363 config.require_transport_security = False
6464 config.email_smtp_user = None
65 config.email_smtp_pass = None
6566 config.email_app_name = "Matrix"
6667 config.email_notif_from = "test@example.com"
68 config.email_riot_base_url = None
6769
6870 hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
6971
3030 hs = self.setup_test_homeserver(
3131 "blue",
3232 federation_client=Mock(),
33 ratelimiter=NonCallableMock(spec_set=["send_message"]),
33 ratelimiter=NonCallableMock(spec_set=["can_do_action"]),
3434 )
3535
36 hs.get_ratelimiter().send_message.return_value = (True, 0)
36 hs.get_ratelimiter().can_do_action.return_value = (True, 0)
3737
3838 return hs
3939
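
The ratelimiter mock is renamed from send_message to can_do_action here, and the same substitution recurs in the hunks below. The tests pin down only the return shape, an (allowed, time_allowed) pair; stubbing it out looks like:

    from mock import NonCallableMock

    # spec_set restricts the mock to the single method the tests exercise
    ratelimiter = NonCallableMock(spec_set=["can_do_action"])
    # (allowed, time_allowed): permit every action, with no wait required
    ratelimiter.can_do_action.return_value = (True, 0)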
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from synapse.replication.tcp.commands import ReplicateCommand
15 from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
16 from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
17
18 from tests import unittest
19 from tests.server import FakeTransport
20
21
22 class BaseStreamTestCase(unittest.HomeserverTestCase):
23 """Base class for tests of the replication streams"""
24 def prepare(self, reactor, clock, hs):
25 # build a replication server
26 server_factory = ReplicationStreamProtocolFactory(self.hs)
27 self.streamer = server_factory.streamer
28 server = server_factory.buildProtocol(None)
29
30 # build a replication client, with a dummy handler
31 self.test_handler = TestReplicationClientHandler()
32 self.client = ClientReplicationStreamProtocol(
33 "client", "test", clock, self.test_handler
34 )
35
36 # wire them together
37 self.client.makeConnection(FakeTransport(server, reactor))
38 server.makeConnection(FakeTransport(self.client, reactor))
39
40 def replicate(self):
41 """Tell the master side of replication that something has happened, and then
42 wait for the replication to occur.
43 """
44 self.streamer.on_notifier_poke()
45 self.pump(0.1)
46
47 def replicate_stream(self, stream, token="NOW"):
48 """Make the client end a REPLICATE command to set up a subscription to a stream"""
49 self.client.send_command(ReplicateCommand(stream, token))
50
51
52 class TestReplicationClientHandler(object):
53 """Drop-in for ReplicationClientHandler which just collects RDATA rows"""
54 def __init__(self):
55 self.received_rdata_rows = []
56
57 def get_streams_to_replicate(self):
58 return {}
59
60 def get_currently_syncing_users(self):
61 return []
62
63 def update_connection(self, connection):
64 pass
65
66 def finished_connecting(self):
67 pass
68
69 def on_rdata(self, stream_name, token, rows):
70 for r in rows:
71 self.received_rdata_rows.append(
72 (stream_name, token, r)
73 )
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from synapse.replication.tcp.streams import ReceiptsStreamRow
15
16 from tests.replication.tcp.streams._base import BaseStreamTestCase
17
18 USER_ID = "@feeling:blue"
19 ROOM_ID = "!room:blue"
20 EVENT_ID = "$event:blue"
21
22
23 class ReceiptsStreamTestCase(BaseStreamTestCase):
24 def test_receipt(self):
25 # make the client subscribe to the receipts stream
26 self.replicate_stream("receipts", "NOW")
27
28 # tell the master to send a new receipt
29 self.get_success(
30 self.hs.get_datastore().insert_receipt(
31 ROOM_ID, "m.read", USER_ID, [EVENT_ID], {"a": 1}
32 )
33 )
34 self.replicate()
35
36 # there should be one RDATA command
37 rdata_rows = self.test_handler.received_rdata_rows
38 self.assertEqual(1, len(rdata_rows))
39 self.assertEqual(rdata_rows[0][0], "receipts")
40 row = rdata_rows[0][2] # type: ReceiptsStreamRow
41 self.assertEqual(ROOM_ID, row.room_id)
42 self.assertEqual("m.read", row.receipt_type)
43 self.assertEqual(USER_ID, row.user_id)
44 self.assertEqual(EVENT_ID, row.event_id)
45 self.assertEqual({"a": 1}, row.data)
1919 from mock import Mock
2020
2121 from synapse.api.constants import UserTypes
22 from synapse.rest.client.v1.admin import register_servlets
22 from synapse.rest.client.v1 import admin, events, login, room
2323
2424 from tests import unittest
2525
2626
27 class VersionTestCase(unittest.HomeserverTestCase):
28
29 servlets = [
30 admin.register_servlets,
31 login.register_servlets,
32 ]
33
34 url = '/_matrix/client/r0/admin/server_version'
35
36 def test_version_string(self):
37 self.register_user("admin", "pass", admin=True)
38 self.admin_token = self.login("admin", "pass")
39
40 request, channel = self.make_request("GET", self.url,
41 access_token=self.admin_token)
42 self.render(request)
43
44 self.assertEqual(200, int(channel.result["code"]),
45 msg=channel.result["body"])
46 self.assertEqual({'server_version', 'python_version'},
47 set(channel.json_body.keys()))
48
49 def test_inaccessible_to_non_admins(self):
50 self.register_user("unprivileged-user", "pass", admin=False)
51 user_token = self.login("unprivileged-user", "pass")
52
53 request, channel = self.make_request("GET", self.url,
54 access_token=user_token)
55 self.render(request)
56
57 self.assertEqual(403, int(channel.result['code']),
58 msg=channel.result['body'])
59
60
2761 class UserRegisterTestCase(unittest.HomeserverTestCase):
2862
29 servlets = [register_servlets]
63 servlets = [admin.register_servlets]
3064
3165 def make_homeserver(self, reactor, clock):
3266
318352
319353 self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
320354 self.assertEqual('Invalid user type', channel.json_body["error"])
355
356
357 class ShutdownRoomTestCase(unittest.HomeserverTestCase):
358 servlets = [
359 admin.register_servlets,
360 login.register_servlets,
361 events.register_servlets,
362 room.register_servlets,
363 room.register_deprecated_servlets,
364 ]
365
366 def prepare(self, reactor, clock, hs):
367 self.event_creation_handler = hs.get_event_creation_handler()
368 hs.config.user_consent_version = "1"
369
370 consent_uri_builder = Mock()
371 consent_uri_builder.build_user_consent_uri.return_value = (
372 "http://example.com"
373 )
374 self.event_creation_handler._consent_uri_builder = consent_uri_builder
375
376 self.store = hs.get_datastore()
377
378 self.admin_user = self.register_user("admin", "pass", admin=True)
379 self.admin_user_tok = self.login("admin", "pass")
380
381 self.other_user = self.register_user("user", "pass")
382 self.other_user_token = self.login("user", "pass")
383
384 # Mark the admin user as having consented
385 self.get_success(
386 self.store.user_set_consent_version(self.admin_user, "1"),
387 )
388
389 def test_shutdown_room_consent(self):
390 """Test that we can shutdown rooms with local users who have not
391 yet accepted the privacy policy. This used to fail when we tried to
392 force part the user from the old room.
393 """
394 self.event_creation_handler._block_events_without_consent_error = None
395
396 room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token)
397
398 # Assert one user in room
399 users_in_room = self.get_success(
400 self.store.get_users_in_room(room_id),
401 )
402 self.assertEqual([self.other_user], users_in_room)
403
404 # Enable require consent to send events
405 self.event_creation_handler._block_events_without_consent_error = "Error"
406
407 # Assert that the user is getting consent error
408 self.helper.send(
409 room_id,
410 body="foo", tok=self.other_user_token, expect_code=403,
411 )
412
413 # Test that the admin can still send shutdown
414 url = "admin/shutdown_room/" + room_id
415 request, channel = self.make_request(
416 "POST",
417 url.encode('ascii'),
418 json.dumps({"new_room_user_id": self.admin_user}),
419 access_token=self.admin_user_tok,
420 )
421 self.render(request)
422
423 self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
424
425 # Assert there is now no longer anyone in the room
426 users_in_room = self.get_success(
427 self.store.get_users_in_room(room_id),
428 )
429 self.assertEqual([], users_in_room)
430
431 @unittest.DEBUG
432 def test_shutdown_room_block_peek(self):
433 """Test that a world_readable room can no longer be peeked into after
434 it has been shut down.
435 """
436
437 self.event_creation_handler._block_events_without_consent_error = None
438
439 room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token)
440
441 # Enable world readable
442 url = "rooms/%s/state/m.room.history_visibility" % (room_id,)
443 request, channel = self.make_request(
444 "PUT",
445 url.encode('ascii'),
446 json.dumps({"history_visibility": "world_readable"}),
447 access_token=self.other_user_token,
448 )
449 self.render(request)
450 self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
451
452 # Test that the admin can still send shutdown
453 url = "admin/shutdown_room/" + room_id
454 request, channel = self.make_request(
455 "POST",
456 url.encode('ascii'),
457 json.dumps({"new_room_user_id": self.admin_user}),
458 access_token=self.admin_user_tok,
459 )
460 self.render(request)
461
462 self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
463
464 # Assert we can no longer peek into the room
465 self._assert_peek(room_id, expect_code=403)
466
467 def _assert_peek(self, room_id, expect_code):
468 """Assert that the admin user can (or cannot) peek into the room.
469 """
470
471 url = "rooms/%s/initialSync" % (room_id,)
472 request, channel = self.make_request(
473 "GET",
474 url.encode('ascii'),
475 access_token=self.admin_user_tok,
476 )
477 self.render(request)
478 self.assertEqual(
479 expect_code, int(channel.result["code"]), msg=channel.result["body"],
480 )
481
482 url = "events?timeout=0&room_id=" + room_id
483 request, channel = self.make_request(
484 "GET",
485 url.encode('ascii'),
486 access_token=self.admin_user_tok,
487 )
488 self.render(request)
489 self.assertEqual(
490 expect_code, int(channel.result["code"]), msg=channel.result["body"],
491 )
3939 config.auto_join_rooms = []
4040
4141 hs = self.setup_test_homeserver(
42 config=config, ratelimiter=NonCallableMock(spec_set=["send_message"])
42 config=config, ratelimiter=NonCallableMock(spec_set=["can_do_action"])
4343 )
4444 self.ratelimiter = hs.get_ratelimiter()
45 self.ratelimiter.send_message.return_value = (True, 0)
45 self.ratelimiter.can_do_action.return_value = (True, 0)
4646
4747 hs.get_handlers().federation_handler = Mock()
4848
0 import json
1
2 from synapse.rest.client.v1 import admin, login
3
4 from tests import unittest
5
6 LOGIN_URL = b"/_matrix/client/r0/login"
7
8
9 class LoginRestServletTestCase(unittest.HomeserverTestCase):
10
11 servlets = [
12 admin.register_servlets,
13 login.register_servlets,
14 ]
15
16 def make_homeserver(self, reactor, clock):
17
18 self.hs = self.setup_test_homeserver()
19 self.hs.config.enable_registration = True
20 self.hs.config.registrations_require_3pid = []
21 self.hs.config.auto_join_rooms = []
22 self.hs.config.enable_registration_captcha = False
23
24 return self.hs
25
26 def test_POST_ratelimiting_per_address(self):
27 self.hs.config.rc_login_address.burst_count = 5
28 self.hs.config.rc_login_address.per_second = 0.17
29
30 # Create different users so we're sure not to be bothered by the per-user
31 # ratelimiter.
32 for i in range(0, 6):
33 self.register_user("kermit" + str(i), "monkey")
34
35 for i in range(0, 6):
36 params = {
37 "type": "m.login.password",
38 "identifier": {
39 "type": "m.id.user",
40 "user": "kermit" + str(i),
41 },
42 "password": "monkey",
43 }
44 request_data = json.dumps(params)
45 request, channel = self.make_request(b"POST", LOGIN_URL, request_data)
46 self.render(request)
47
48 if i == 5:
49 self.assertEquals(channel.result["code"], b"429", channel.result)
50 retry_after_ms = int(channel.json_body["retry_after_ms"])
51 else:
52 self.assertEquals(channel.result["code"], b"200", channel.result)
53
54 # Since we're ratelimiting at ~1 request every 6s (0.17/s), retry_after_ms
55 # should be lower than 6s.
56 self.assertTrue(retry_after_ms < 6000)
57
58 self.reactor.advance(retry_after_ms / 1000.)
59
60 params = {
61 "type": "m.login.password",
62 "identifier": {
63 "type": "m.id.user",
64 "user": "kermit" + str(i),
65 },
66 "password": "monkey",
67 }
68 request_data = json.dumps(params)
69 request, channel = self.make_request(b"POST", LOGIN_URL, params)
70 self.render(request)
71
72 self.assertEquals(channel.result["code"], b"200", channel.result)
73
74 def test_POST_ratelimiting_per_account(self):
75 self.hs.config.rc_login_account.burst_count = 5
76 self.hs.config.rc_login_account.per_second = 0.17
77
78 self.register_user("kermit", "monkey")
79
80 for i in range(0, 6):
81 params = {
82 "type": "m.login.password",
83 "identifier": {
84 "type": "m.id.user",
85 "user": "kermit",
86 },
87 "password": "monkey",
88 }
89 request_data = json.dumps(params)
90 request, channel = self.make_request(b"POST", LOGIN_URL, request_data)
91 self.render(request)
92
93 if i == 5:
94 self.assertEquals(channel.result["code"], b"429", channel.result)
95 retry_after_ms = int(channel.json_body["retry_after_ms"])
96 else:
97 self.assertEquals(channel.result["code"], b"200", channel.result)
98
99 # Since we're ratelimiting at ~1 request every 6s (0.17/s), retry_after_ms
100 # should be lower than 6s.
101 self.assertTrue(retry_after_ms < 6000)
102
103 self.reactor.advance(retry_after_ms / 1000.)
104
105 params = {
106 "type": "m.login.password",
107 "identifier": {
108 "type": "m.id.user",
109 "user": "kermit",
110 },
111 "password": "monkey",
112 }
113 request_data = json.dumps(params)
114 request, channel = self.make_request(b"POST", LOGIN_URL, params)
115 self.render(request)
116
117 self.assertEquals(channel.result["code"], b"200", channel.result)
118
119 def test_POST_ratelimiting_per_account_failed_attempts(self):
120 self.hs.config.rc_login_failed_attempts.burst_count = 5
121 self.hs.config.rc_login_failed_attempts.per_second = 0.17
122
123 self.register_user("kermit", "monkey")
124
125 for i in range(0, 6):
126 params = {
127 "type": "m.login.password",
128 "identifier": {
129 "type": "m.id.user",
130 "user": "kermit",
131 },
132 "password": "notamonkey",
133 }
134 request_data = json.dumps(params)
135 request, channel = self.make_request(b"POST", LOGIN_URL, request_data)
136 self.render(request)
137
138 if i == 5:
139 self.assertEquals(channel.result["code"], b"429", channel.result)
140 retry_after_ms = int(channel.json_body["retry_after_ms"])
141 else:
142 self.assertEquals(channel.result["code"], b"403", channel.result)
143
144 # Since we're ratelimiting at ~1 request every 6s (0.17/s), retry_after_ms
145 # should be lower than 6s.
146 self.assertTrue(retry_after_ms < 6000)
147
148 self.reactor.advance(retry_after_ms / 1000.)
149
150 params = {
151 "type": "m.login.password",
152 "identifier": {
153 "type": "m.id.user",
154 "user": "kermit",
155 },
156 "password": "notamonkey",
157 }
158 request_data = json.dumps(params)
159 request, channel = self.make_request(b"POST", LOGIN_URL, params)
160 self.render(request)
161
162 self.assertEquals(channel.result["code"], b"403", channel.result)
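
The retry_after_ms bound asserted in these login tests follows from the configured rate: with per_second = 0.17 the limiter admits roughly one request every 1/0.17 ≈ 5.9 seconds, so once the burst of 5 is spent the next slot is always less than 6 seconds away. As a quick check:

    per_second = 0.17
    burst_count = 5

    # time until the next request is admitted, once the burst is exhausted
    seconds_until_next = 1 / per_second          # ~5.88s
    assert seconds_until_next * 1000 < 6000      # matches retry_after_ms < 6000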
4040 "red",
4141 http_client=None,
4242 federation_client=Mock(),
43 ratelimiter=NonCallableMock(spec_set=["send_message"]),
43 ratelimiter=NonCallableMock(spec_set=["can_do_action"]),
4444 )
4545 self.ratelimiter = self.hs.get_ratelimiter()
46 self.ratelimiter.send_message.return_value = (True, 0)
46 self.ratelimiter.can_do_action.return_value = (True, 0)
4747
4848 self.hs.get_federation_handler = Mock(return_value=Mock())
4949
9595 # auth as user_id now
9696 self.helper.auth_user_id = self.user_id
9797
98 def test_send_message(self):
98 def test_can_do_action(self):
9999 msg_content = b'{"msgtype":"m.text","body":"hello"}'
100100
101101 seq = iter(range(100))
4141 "red",
4242 http_client=None,
4343 federation_client=Mock(),
44 ratelimiter=NonCallableMock(spec_set=["send_message"]),
44 ratelimiter=NonCallableMock(spec_set=["can_do_action"]),
4545 )
4646
4747 self.event_source = hs.get_event_sources().sources["typing"]
4848
4949 self.ratelimiter = hs.get_ratelimiter()
50 self.ratelimiter.send_message.return_value = (True, 0)
50 self.ratelimiter.can_do_action.return_value = (True, 0)
5151
5252 hs.get_handlers().federation_handler = Mock()
5353
1717
1818 import attr
1919
20 from twisted.internet import defer
21
2220 from synapse.api.constants import Membership
2321
24 from tests import unittest
2522 from tests.server import make_request, render
26
27
28 class RestTestCase(unittest.TestCase):
29 """Contains extra helper functions to quickly and clearly perform a given
30 REST action, which isn't the focus of the test.
31
32 This subclass assumes there are mock_resource and auth_user_id attributes.
33 """
34
35 def __init__(self, *args, **kwargs):
36 super(RestTestCase, self).__init__(*args, **kwargs)
37 self.mock_resource = None
38 self.auth_user_id = None
39
40 @defer.inlineCallbacks
41 def create_room_as(self, room_creator, is_public=True, tok=None):
42 temp_id = self.auth_user_id
43 self.auth_user_id = room_creator
44 path = "/createRoom"
45 content = "{}"
46 if not is_public:
47 content = '{"visibility":"private"}'
48 if tok:
49 path = path + "?access_token=%s" % tok
50 (code, response) = yield self.mock_resource.trigger("POST", path, content)
51 self.assertEquals(200, code, msg=str(response))
52 self.auth_user_id = temp_id
53 defer.returnValue(response["room_id"])
54
55 @defer.inlineCallbacks
56 def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
57 yield self.change_membership(
58 room=room,
59 src=src,
60 targ=targ,
61 tok=tok,
62 membership=Membership.INVITE,
63 expect_code=expect_code,
64 )
65
66 @defer.inlineCallbacks
67 def join(self, room=None, user=None, expect_code=200, tok=None):
68 yield self.change_membership(
69 room=room,
70 src=user,
71 targ=user,
72 tok=tok,
73 membership=Membership.JOIN,
74 expect_code=expect_code,
75 )
76
77 @defer.inlineCallbacks
78 def leave(self, room=None, user=None, expect_code=200, tok=None):
79 yield self.change_membership(
80 room=room,
81 src=user,
82 targ=user,
83 tok=tok,
84 membership=Membership.LEAVE,
85 expect_code=expect_code,
86 )
87
88 @defer.inlineCallbacks
89 def change_membership(self, room, src, targ, membership, tok=None, expect_code=200):
90 temp_id = self.auth_user_id
91 self.auth_user_id = src
92
93 path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
94 if tok:
95 path = path + "?access_token=%s" % tok
96
97 data = {"membership": membership}
98
99 (code, response) = yield self.mock_resource.trigger(
100 "PUT", path, json.dumps(data)
101 )
102 self.assertEquals(
103 expect_code,
104 code,
105 msg="Expected: %d, got: %d, resp: %r" % (expect_code, code, response),
106 )
107
108 self.auth_user_id = temp_id
109
110 @defer.inlineCallbacks
111 def register(self, user_id):
112 (code, response) = yield self.mock_resource.trigger(
113 "POST",
114 "/register",
115 json.dumps(
116 {"user": user_id, "password": "test", "type": "m.login.password"}
117 ),
118 )
119 self.assertEquals(200, code, msg=response)
120 defer.returnValue(response)
121
122 @defer.inlineCallbacks
123 def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
124 if txn_id is None:
125 txn_id = "m%s" % (str(time.time()))
126 if body is None:
127 body = "body_text_here"
128
129 path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
130 content = '{"msgtype":"m.text","body":"%s"}' % body
131 if tok:
132 path = path + "?access_token=%s" % tok
133
134 (code, response) = yield self.mock_resource.trigger("PUT", path, content)
135 self.assertEquals(expect_code, code, msg=str(response))
136
137 def assert_dict(self, required, actual):
138 """Does a partial assert of a dict.
139
140 Args:
141 required (dict): The keys and value which MUST be in 'actual'.
142 actual (dict): The test result. Extra keys will not be checked.
143 """
144 for key in required:
145 self.assertEquals(
146 required[key], actual[key], msg="%s mismatch. %s" % (key, actual)
147 )
14823
14924
15025 @attr.s
1919 self.hs.config.registrations_require_3pid = []
2020 self.hs.config.auto_join_rooms = []
2121 self.hs.config.enable_registration_captcha = False
22 self.hs.config.allow_guest_access = True
2223
2324 return self.hs
2425
2728 as_token = "i_am_an_app_service"
2829
2930 appservice = ApplicationService(
30 as_token, self.hs.config.hostname,
31 as_token, self.hs.config.server_name,
3132 id="1234",
3233 namespaces={
3334 "users": [{"regex": r"@as_user.*", "exclusive": True}],
129130
130131 self.assertEquals(channel.result["code"], b"403", channel.result)
131132 self.assertEquals(channel.json_body["error"], "Guest access is disabled")
133
134 def test_POST_ratelimiting_guest(self):
135 self.hs.config.rc_registration.burst_count = 5
136 self.hs.config.rc_registration.per_second = 0.17
137
138 for i in range(0, 6):
139 url = self.url + b"?kind=guest"
140 request, channel = self.make_request(b"POST", url, b"{}")
141 self.render(request)
142
143 if i == 5:
144 self.assertEquals(channel.result["code"], b"429", channel.result)
145 retry_after_ms = int(channel.json_body["retry_after_ms"])
146 else:
147 self.assertEquals(channel.result["code"], b"200", channel.result)
148
149 self.reactor.advance(retry_after_ms / 1000.)
150
151 request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
152 self.render(request)
153
154 self.assertEquals(channel.result["code"], b"200", channel.result)
155
156 def test_POST_ratelimiting(self):
157 self.hs.config.rc_registration.burst_count = 5
158 self.hs.config.rc_registration.per_second = 0.17
159
160 for i in range(0, 6):
161 params = {
162 "username": "kermit" + str(i),
163 "password": "monkey",
164 "device_id": "frogfone",
165 "auth": {"type": LoginType.DUMMY},
166 }
167 request_data = json.dumps(params)
168 request, channel = self.make_request(b"POST", self.url, request_data)
169 self.render(request)
170
171 if i == 5:
172 self.assertEquals(channel.result["code"], b"429", channel.result)
173 retry_after_ms = int(channel.json_body["retry_after_ms"])
174 else:
175 self.assertEquals(channel.result["code"], b"200", channel.result)
176
177 self.reactor.advance(retry_after_ms / 1000.)
178
179 request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}")
180 self.render(request)
181
182 self.assertEquals(channel.result["code"], b"200", channel.result)
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from synapse.rest.media.v1._base import get_filename_from_headers
16
17 from tests import unittest
18
19
20 class GetFileNameFromHeadersTests(unittest.TestCase):
21 # input -> expected result
22 TEST_CASES = {
23 b"inline; filename=abc.txt": u"abc.txt",
24 b'inline; filename="azerty"': u"azerty",
25 b'inline; filename="aze%20rty"': u"aze%20rty",
26 b'inline; filename="aze\"rty"': u'aze"rty',
27 b'inline; filename="azer;ty"': u"azer;ty",
28
29 b"inline; filename*=utf-8''foo%C2%A3bar": u"foo£bar",
30 }
31
32 def tests(self):
33 for hdr, expected in self.TEST_CASES.items():
34 res = get_filename_from_headers(
35 {
36 b'Content-Disposition': [hdr],
37 },
38 )
39 self.assertEqual(
40 res, expected,
41 "expected output for %s to be %s but was %s" % (
42 hdr, expected, res,
43 )
44 )
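
The last test case above exercises RFC 5987/6266 extended notation (filename*=charset''percent-encoded-bytes). The decoding rule is percent-decoding followed by a charset decode; a Python 3 sketch of the rule itself, independent of Synapse's implementation in _base.py:

    from urllib.parse import unquote

    def decode_rfc5987(value):
        # value looks like: utf-8''foo%C2%A3bar
        charset, _language, encoded = value.split("'", 2)
        return unquote(encoded, encoding=charset or "utf-8")

    assert decode_rfc5987("utf-8''foo%C2%A3bar") == u"foo\u00a3bar"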
118118
119119 server_version_string = b"1"
120120 site_tag = "test"
121
122 @property
123 def access_logger(self):
124 class FakeLogger:
125 def info(self, *args, **kwargs):
126 pass
127
128 return FakeLogger()
121 access_logger = logging.getLogger("synapse.access.http.fake")
129122
130123
131124 def make_request(
136129 access_token=None,
137130 request=SynapseRequest,
138131 shorthand=True,
132 federation_auth_origin=None,
139133 ):
140134 """
141135 Make a web request using the given method and path, feed it the
149143 a dict.
150144 shorthand: Whether to try and be helpful and prefix the given URL
151145 with the usual REST API path, if it doesn't contain it.
146 federation_auth_origin (bytes|None): if set, we will add a fake
147 Authorization header pretending to be the given server name.
152148
153149 Returns:
154 A synapse.http.site.SynapseRequest.
150 Tuple[synapse.http.site.SynapseRequest, channel]
155151 """
156152 if not isinstance(method, bytes):
157153 method = method.encode('ascii')
181177 if access_token:
182178 req.requestHeaders.addRawHeader(
183179 b"Authorization", b"Bearer " + access_token.encode('ascii')
180 )
181
182 if federation_auth_origin is not None:
183 req.requestHeaders.addRawHeader(
184 b"Authorization", b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,)
184185 )
185186
186187 if content:
287288 **kwargs
288289 )
289290
290 pool.runWithConnection = runWithConnection
291 pool.runInteraction = runInteraction
292
293291 class ThreadPool:
294292 """
295293 Threadless thread pool.
315313 return d
316314
317315 clock.threadpool = ThreadPool()
318 pool.threadpool = ThreadPool()
319 pool.running = True
316
317 if pool:
318 pool.runWithConnection = runWithConnection
319 pool.runInteraction = runInteraction
320 pool.threadpool = ThreadPool()
321 pool.running = True
320322 return d
321323
322324
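
The new federation_auth_origin parameter lets a test hit a federation servlet without real request signing, as the typing tests earlier in this changeset do. A minimal usage sketch (the empty transaction body is a placeholder, not a valid transaction):

    from tests import unittest


    class FakeOriginTestCase(unittest.HomeserverTestCase):
        def test_fake_origin(self):
            # The fake X-Matrix Authorization header makes the request appear
            # to come from the "farm" server; no signature is checked.
            request, channel = self.make_request(
                "PUT",
                "/_matrix/federation/v1/send/1000000",
                b"{}",
                federation_auth_origin=b"farm",
            )
            self.render(request)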
0 # -*- coding: utf-8 -*-
1 # Copyright 2018, 2019 New Vector Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
015 from mock import Mock
116
217 from twisted.internet import defer
823 )
924
1025 from tests import unittest
11 from tests.utils import setup_test_homeserver
12
13
14 class TestResourceLimitsServerNotices(unittest.TestCase):
15 @defer.inlineCallbacks
16 def setUp(self):
17 self.hs = yield setup_test_homeserver(self.addCleanup)
26
27
28 class TestResourceLimitsServerNotices(unittest.HomeserverTestCase):
29
30 def make_homeserver(self, reactor, clock):
31 hs_config = self.default_config("test")
32 hs_config.server_notices_mxid = "@server:test"
33
34 hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True)
35 return hs
36
37 def prepare(self, reactor, clock, hs):
1838 self.server_notices_sender = self.hs.get_server_notices_sender()
1939
2040 # relying on [1] is far from ideal, but the only case where
4969 self._rlsn._store.get_tags_for_room = Mock(return_value={})
5070 self.hs.config.admin_contact = "mailto:user@test.com"
5171
52 @defer.inlineCallbacks
5372 def test_maybe_send_server_notice_to_user_flag_off(self):
5473 """Tests cases where the flags indicate nothing to do"""
5574 # test hs disabled case
5675 self.hs.config.hs_disabled = True
5776
58 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
77 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
5978
6079 self._send_notice.assert_not_called()
6180 # Test when mau limiting disabled
6281 self.hs.config.hs_disabled = False
6382 self.hs.config.limit_usage_by_mau = False
64 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
65
66 self._send_notice.assert_not_called()
67
68 @defer.inlineCallbacks
83 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
84
85 self._send_notice.assert_not_called()
86
6987 def test_maybe_send_server_notice_to_user_remove_blocked_notice(self):
7088 """Test when user has blocked notice, but should have it removed"""
7189
7795 return_value=defer.succeed({"123": mock_event})
7896 )
7997
80 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
98 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
8199 # Would be better to check the content, but once == remove blocking event
82100 self._send_notice.assert_called_once()
83101
84 @defer.inlineCallbacks
85102 def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self):
86 """Test when user has blocked notice, but notice ought to be there (NOOP)"""
103 """
104 Test when user has blocked notice, but notice ought to be there (NOOP)
105 """
87106 self._rlsn._auth.check_auth_blocking = Mock(
88107 side_effect=ResourceLimitError(403, 'foo')
89108 )
94113 self._rlsn._store.get_events = Mock(
95114 return_value=defer.succeed({"123": mock_event})
96115 )
97 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
98
99 self._send_notice.assert_not_called()
100
101 @defer.inlineCallbacks
116 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
117
118 self._send_notice.assert_not_called()
119
102120 def test_maybe_send_server_notice_to_user_add_blocked_notice(self):
103 """Test when user does not have blocked notice, but should have one"""
121 """
122 Test when user does not have blocked notice, but should have one
123 """
104124
105125 self._rlsn._auth.check_auth_blocking = Mock(
106126 side_effect=ResourceLimitError(403, 'foo')
107127 )
108 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
128 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
109129
110130 # Would be better to check the contents, but two calls mean the blocking event was set
111131 self.assertEqual(self._send_notice.call_count, 2)
112132
113 @defer.inlineCallbacks
114133 def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self):
115 """Test when user does not have blocked notice, nor should they (NOOP)"""
116
134 """
135 Test when user does not have blocked notice, nor should they (NOOP)
136 """
117137 self._rlsn._auth.check_auth_blocking = Mock()
118138
119 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
120
121 self._send_notice.assert_not_called()
122
123 @defer.inlineCallbacks
139 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
140
141 self._send_notice.assert_not_called()
142
124143 def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self):
125
126 """Test when user is not part of the MAU cohort - this should not ever
144 """
145 Test when the user is not part of the MAU cohort - this should never
127146 happen - but ...
128147 """
129
130148 self._rlsn._auth.check_auth_blocking = Mock()
131149 self._rlsn._store.user_last_seen_monthly_active = Mock(
132150 return_value=defer.succeed(None)
133151 )
134 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
135
136 self._send_notice.assert_not_called()
137
138
139 class TestResourceLimitsServerNoticesWithRealRooms(unittest.TestCase):
140 @defer.inlineCallbacks
141 def setUp(self):
142 self.hs = yield setup_test_homeserver(self.addCleanup)
152 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
153
154 self._send_notice.assert_not_called()
155
156
157 class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase):
158 def prepare(self, reactor, clock, hs):
143159 self.store = self.hs.get_datastore()
144160 self.server_notices_sender = self.hs.get_server_notices_sender()
145161 self.server_notices_manager = self.hs.get_server_notices_manager()
164180
165181 self.hs.config.admin_contact = "mailto:user@test.com"
166182
167 @defer.inlineCallbacks
168183 def test_server_notice_only_sent_once(self):
169184 self.store.get_monthly_active_count = Mock(return_value=1000)
170185
171186 self.store.user_last_seen_monthly_active = Mock(return_value=1000)
172187
173188 # Call the function multiple times to ensure we only send the notice once
174 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
175 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
176 yield self._rlsn.maybe_send_server_notice_to_user(self.user_id)
189 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
190 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
191 self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id))
177192
178193 # Now let's fetch the most recent messages in the server notices room and
179194 # check that there is only one server notice
180 room_id = yield self.server_notices_manager.get_notice_room_for_user(
181 self.user_id
182 )
183
184 token = yield self.event_source.get_current_token()
185 events, _ = yield self.store.get_recent_events_for_room(
186 room_id, limit=100, end_token=token.room_key
195 room_id = self.get_success(
196 self.server_notices_manager.get_notice_room_for_user(self.user_id)
197 )
198
199 token = self.get_success(self.event_source.get_current_token())
200 events, _ = self.get_success(
201 self.store.get_recent_events_for_room(
202 room_id, limit=100, end_token=token.room_key
203 )
187204 )
188205
189206 count = 0
1515 from twisted.internet import defer
1616
1717 from synapse.storage import UserDirectoryStore
18 from synapse.storage.roommember import ProfileInfo
1918
2019 from tests import unittest
2120 from tests.utils import setup_test_homeserver
3332
3433 # alice and bob are both in !room:id. bobby is not but shares
3534 # a homeserver with alice.
36 yield self.store.add_profiles_to_user_dir(
37 "!room:id",
38 {
39 ALICE: ProfileInfo(None, "alice"),
40 BOB: ProfileInfo(None, "bob"),
41 BOBBY: ProfileInfo(None, "bobby"),
42 },
43 )
44 yield self.store.add_users_to_public_room("!room:id", [ALICE, BOB])
45 yield self.store.add_users_who_share_room(
46 "!room:id", False, ((ALICE, BOB), (BOB, ALICE))
35 yield self.store.update_profile_in_user_dir(ALICE, "alice", None)
36 yield self.store.update_profile_in_user_dir(BOB, "bob", None)
37 yield self.store.update_profile_in_user_dir(BOBBY, "bobby", None)
38 yield self.store.add_users_in_public_rooms(
39 "!room:id", (ALICE, BOB)
4740 )
4841
4942 @defer.inlineCallbacks
1616
1717 import json
1818
19 from mock import Mock, NonCallableMock
19 from mock import Mock
2020
2121 from synapse.api.constants import LoginType
2222 from synapse.api.errors import Codes, HttpResponseException, SynapseError
3535 "red",
3636 http_client=None,
3737 federation_client=Mock(),
38 ratelimiter=NonCallableMock(spec_set=["send_message"]),
3938 )
4039
4140 self.store = self.hs.get_datastore()
261261 access_token=None,
262262 request=SynapseRequest,
263263 shorthand=True,
264 federation_auth_origin=None,
264265 ):
265266 """
266267 Create a SynapseRequest at the path using the method and containing the
274275 a dict.
275276 shorthand: Whether to try and be helpful and prefix the given URL
276277 with the usual REST API path, if it doesn't contain it.
278 federation_auth_origin (bytes|None): if not None, we will add a fake
279 Authorization header pretending to be the given server name.
277280
278281 Returns:
279 A synapse.http.site.SynapseRequest.
282 Tuple[synapse.http.site.SynapseRequest, channel]
280283 """
281284 if isinstance(content, dict):
282285 content = json.dumps(content).encode('utf8')
283286
284287 return make_request(
285 self.reactor, method, path, content, access_token, request, shorthand
288 self.reactor, method, path, content, access_token, request, shorthand,
289 federation_auth_origin,
286290 )
287291
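    # A hedged usage sketch for the new federation_auth_origin parameter (the
    # request path here is illustrative): passing a server name as bytes makes
    # make_request attach a fake federation Authorization header, so servlets
    # behind federation auth can be exercised without real signing keys.
    #
    #     request, channel = self.make_request(
    #         "GET",
    #         "/_matrix/federation/v1/query/profile?user_id=@user:test",
    #         federation_auth_origin=b"other.example.com",
    #     )
    #     self.render(request)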
288292 def render(self, request):
309313 """
310314 kwargs = dict(kwargs)
311315 kwargs.update(self._hs_args)
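        # fall back to the stock test config unless the caller supplied one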
316 if "config" not in kwargs:
317 config = self.default_config()
318 kwargs["config"] = config
312319 hs = setup_test_homeserver(self.addCleanup, *args, **kwargs)
313320 stor = hs.get_datastore()
314321
325332 """
326333 self.reactor.pump([by] * 100)
327334
328 def get_success(self, d):
335 def get_success(self, d, by=0.0):
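        # plain values pass straight through; Deferreds are pumped on the fake
        # reactor (in `by`-second steps) before extracting the result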
336 if not isinstance(d, Deferred):
337 return d
338 self.pump(by=by)
339 return self.successResultOf(d)
340
341 def get_failure(self, d, exc):
342 """
343 Run a Deferred and get a Failure from it. The failure must be of the type `exc`.
344 """
329345 if not isinstance(d, Deferred):
330346 return d
331347 self.pump()
332 return self.successResultOf(d)
348 return self.failureResultOf(d, exc)
333349
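    # A hedged usage sketch for get_failure, mirroring the
    # ResourceLimitError(403, ...) pattern from the server-notices tests above;
    # the exact call is illustrative and assumes auth blocking is configured to
    # trip:
    #
    #     failure = self.get_failure(
    #         self.hs.get_auth().check_auth_blocking(),
    #         ResourceLimitError,
    #     )
    #     self.assertEqual(failure.value.code, 403)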
334350 def register_user(self, username, password, admin=False):
335351 """
00 # -*- coding: utf-8 -*-
11 # Copyright 2014-2016 OpenMarket Ltd
2 # Copyright 2018-2019 New Vector Ltd
23 #
34 # Licensed under the Apache License, Version 2.0 (the "License");
45 # you may not use this file except in compliance with the License.
2728
2829 from synapse.api.constants import EventTypes, RoomVersions
2930 from synapse.api.errors import CodeMessageException, cs_error
30 from synapse.config.server import ServerConfig
31 from synapse.federation.transport import server
31 from synapse.config.homeserver import HomeServerConfig
32 from synapse.federation.transport import server as federation_server
3233 from synapse.http.server import HttpServer
3334 from synapse.server import HomeServer
3435 from synapse.storage import DataStore
4243 from synapse.util.ratelimitutils import FederationRateLimiter
4344
4445 # set this to True to run the tests against postgres instead of sqlite.
46 #
47 # When running under postgres, we first create a base database with the name
48 # POSTGRES_BASE_DB and update it to the current schema. Then, for each test case, we
49 # create another unique database, using the base database as a template.
4550 USE_POSTGRES_FOR_TESTS = os.environ.get("SYNAPSE_POSTGRES", False)
4651 LEAVE_DB = os.environ.get("SYNAPSE_LEAVE_DB", False)
47 POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", "postgres")
52 POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", None)
53 POSTGRES_HOST = os.environ.get("SYNAPSE_POSTGRES_HOST", None)
54 POSTGRES_PASSWORD = os.environ.get("SYNAPSE_POSTGRES_PASSWORD", None)
4855 POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),)
4956
57 # the dbname we will connect to in order to create the base database.
58 POSTGRES_DBNAME_FOR_INITIAL_CREATE = "postgres"
59
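# A hedged sketch of the per-test-case cloning described above (the exact
# statement used later in this module may differ): PostgreSQL's TEMPLATE
# clause copies the fully-migrated base schema in one step instead of
# re-running every migration for each test.
#
#     cur.execute(
#         "CREATE DATABASE %s WITH TEMPLATE %s;" % (test_db, POSTGRES_BASE_DB)
#     )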
5060
5161 def setupdb():
52
5362 # If we're using PostgreSQL, set up the db once
5463 if USE_POSTGRES_FOR_TESTS:
55 pgconfig = {
56 "name": "psycopg2",
57 "args": {
58 "database": POSTGRES_BASE_DB,
59 "user": POSTGRES_USER,
60 "cp_min": 1,
61 "cp_max": 5,
62 },
63 }
64 config = Mock()
65 config.password_providers = []
66 config.database_config = pgconfig
67 db_engine = create_engine(pgconfig)
68 db_conn = db_engine.module.connect(user=POSTGRES_USER)
64 # create a PostgresEngine
65 db_engine = create_engine({"name": "psycopg2", "args": {}})
66
67 # connect to postgres to create the base database.
68 db_conn = db_engine.module.connect(
69 user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD,
70 dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
71 )
6972 db_conn.autocommit = True
7073 cur = db_conn.cursor()
7174 cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
7578
7679 # Set up in the db
7780 db_conn = db_engine.module.connect(
78 database=POSTGRES_BASE_DB, user=POSTGRES_USER
81 database=POSTGRES_BASE_DB,
82 user=POSTGRES_USER,
83 host=POSTGRES_HOST,
84 password=POSTGRES_PASSWORD,
7985 )
8086 cur = db_conn.cursor()
8187 _get_or_create_schema_state(cur, db_engine)
8591 db_conn.close()
8692
8793 def _cleanup():
88 db_conn = db_engine.module.connect(user=POSTGRES_USER)
94 db_conn = db_engine.module.connect(
95 user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD,
96 dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE,
97 )
8998 db_conn.autocommit = True
9099 cur = db_conn.cursor()
91100 cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
99108 """
100109 Create a reasonable test config.
101110 """
102 config = Mock()
103 config.signing_key = [MockKey()]
111 config_dict = {
112 "server_name": name,
113 "media_store_path": "media",
114 "uploads_path": "uploads",
115
116 # the test signing key is just an arbitrary ed25519 key to keep the config
117 # parser happy
118 "signing_key": "ed25519 a_lPym qvioDNmfExFBRPgdTU+wtFYKq4JfwFRv7sYVgWvmgJg",
119 }
120
121 config = HomeServerConfig()
122 config.parse_config_dict(config_dict)
123
124 # TODO: move this stuff into config_dict or get rid of it
104125 config.event_cache_size = 1
105126 config.enable_registration = True
127 config.enable_registration_captcha = False
106128 config.macaroon_secret_key = "not even a little secret"
107129 config.expire_access_token = False
108 config.server_name = name
109130 config.trusted_third_party_id_servers = []
110131 config.room_invite_state_types = []
111132 config.password_providers = []
138159 config.admin_contact = None
139160 config.rc_messages_per_second = 10000
140161 config.rc_message_burst_count = 10000
162 config.rc_registration.per_second = 10000
163 config.rc_registration.burst_count = 10000
164 config.rc_login_address.per_second = 10000
165 config.rc_login_address.burst_count = 10000
166 config.rc_login_account.per_second = 10000
167 config.rc_login_account.burst_count = 10000
168 config.rc_login_failed_attempts.per_second = 10000
169 config.rc_login_failed_attempts.burst_count = 10000
141170 config.saml2_enabled = False
142171 config.public_baseurl = None
143172 config.default_identity_server = None
173 config.key_refresh_interval = 24 * 60 * 60 * 1000
174 config.old_signing_keys = {}
175 config.tls_fingerprints = []
144176
145177 config.use_frozen_dicts = False
146178
151183 # disable user directory updates, because they get done in the
152184 # background, which upsets the test runner.
153185 config.update_user_directory = False
154
155 def is_threepid_reserved(threepid):
156 return ServerConfig.is_threepid_reserved(
157 config.mau_limits_reserved_threepids, threepid
158 )
159
160 config.is_threepid_reserved.side_effect = is_threepid_reserved
161186
162187 return config
163188
185210 Args:
186211 cleanup_func : The function used to register a cleanup routine for
187212 after the test.
213
214 Calling this method directly is deprecated: you should instead derive from
215 HomeserverTestCase.
188216 """
189217 if reactor is None:
190218 from twisted.internet import reactor
202230
203231 config.database_config = {
204232 "name": "psycopg2",
205 "args": {"database": test_db, "cp_min": 1, "cp_max": 5},
233 "args": {
234 "database": test_db,
235 "host": POSTGRES_HOST,
236 "password": POSTGRES_PASSWORD,
237 "user": POSTGRES_USER,
238 "cp_min": 1,
239 "cp_max": 5,
240 },
206241 }
207242 else:
208243 config.database_config = {
216251 # the template database we generate in setupdb()
217252 if datastore is None and isinstance(db_engine, PostgresEngine):
218253 db_conn = db_engine.module.connect(
219 database=POSTGRES_BASE_DB, user=POSTGRES_USER
254 database=POSTGRES_BASE_DB,
255 user=POSTGRES_USER,
256 host=POSTGRES_HOST,
257 password=POSTGRES_PASSWORD,
220258 )
221259 db_conn.autocommit = True
222260 cur = db_conn.cursor()
239277 db_config=config.database_config,
240278 version_string="Synapse/tests",
241279 database_engine=db_engine,
242 room_list_handler=object(),
243280 tls_server_context_factory=Mock(),
244281 tls_client_options_factory=Mock(),
245282 reactor=reactor,
266303
267304 # Drop the test database
268305 db_conn = db_engine.module.connect(
269 database=POSTGRES_BASE_DB, user=POSTGRES_USER
306 database=POSTGRES_BASE_DB,
307 user=POSTGRES_USER,
308 host=POSTGRES_HOST,
309 password=POSTGRES_PASSWORD,
270310 )
271311 db_conn.autocommit = True
272312 cur = db_conn.cursor()
297337 cleanup_func(cleanup)
298338
299339 hs.setup()
340 if homeserverToUse.__name__ == "TestHomeServer":
341 hs.setup_master()
300342 else:
301343 hs = homeserverToUse(
302344 name,
305347 config=config,
306348 version_string="Synapse/tests",
307349 database_engine=db_engine,
308 room_list_handler=object(),
309350 tls_server_context_factory=Mock(),
310351 tls_client_options_factory=Mock(),
311352 reactor=reactor,
323364
324365 fed = kargs.get("resource_for_federation", None)
325366 if fed:
326 server.register_servlets(
327 hs,
328 resource=fed,
329 authenticator=server.Authenticator(hs),
330 ratelimiter=FederationRateLimiter(
331 hs.get_clock(),
332 window_size=hs.config.federation_rc_window_size,
333 sleep_limit=hs.config.federation_rc_sleep_limit,
334 sleep_msec=hs.config.federation_rc_sleep_delay,
335 reject_limit=hs.config.federation_rc_reject_limit,
336 concurrent_requests=hs.config.federation_rc_concurrent,
337 ),
338 )
367 register_federation_servlets(hs, fed)
339368
340369 defer.returnValue(hs)
370
371
372 def register_federation_servlets(hs, resource):
373 federation_server.register_servlets(
374 hs,
375 resource=resource,
376 authenticator=federation_server.Authenticator(hs),
377 ratelimiter=FederationRateLimiter(
378 hs.get_clock(),
379 window_size=hs.config.federation_rc_window_size,
380 sleep_limit=hs.config.federation_rc_sleep_limit,
381 sleep_msec=hs.config.federation_rc_sleep_delay,
382 reject_limit=hs.config.federation_rc_reject_limit,
383 concurrent_requests=hs.config.federation_rc_concurrent,
384 ),
385 )
341386
342387
343388 def get_mock_call_args(pattern_func, mock_func):
456501 def verify(self, message, sig):
457502 assert sig == b"\x9a\x87$"
458503
504 def encode(self):
505 return b"<fake_encoded_key>"
506
459507
460508 class MockClock(object):
461509 now = 1000
485533 return t
486534
487535 def looping_call(self, function, interval):
488 self.loopers.append([function, interval / 1000., self.now])
536 self.loopers.append([function, interval / 1000.0, self.now])
489537
490538 def cancel_call_later(self, timer, ignore_errs=False):
491539 if timer[2]:
521569 looped[2] = self.now
522570
523571 def advance_time_msec(self, ms):
524 self.advance_time(ms / 1000.)
572 self.advance_time(ms / 1000.0)
525573
526574 def time_bound_deferred(self, d, *args, **kwargs):
527575 # We don't bother timing things out for now.
630678 "sender": creator_id,
631679 "room_id": room_id,
632680 "content": {},
633 }
681 },
634682 )
635683
636684 event, context = yield event_creation_handler.create_new_client_event(builder)
8181
8282 mock
8383 lxml
84 coverage
85
8486 commands =
8587 /usr/bin/find "{toxinidir}" -name '*.pyc' -delete
8688 # Turn all greater-than pins into equals so we test the oldest versions of
8789 # our direct dependencies, but pin pyopenssl to 17.0, which can work against
8890 # an OpenSSL 1.1-compiled cryptography (as older ones don't compile on Travis).
8991 /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs pip install'
92
93 # Add this so that coverage will run on subprocesses
94 /bin/sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py'
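# (coverage.process_startup() is coverage.py's documented hook for measuring
# subprocesses; it only starts recording when the COVERAGE_PROCESS_START
# environment variable points at a coverage config file, so the sitecustomize
# line above is a no-op unless that variable is set.)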
95
9096 # Install Synapse itself. This won't update any libraries.
9197 pip install -e .
92 {envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
98
99 {envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
100
93101
94102 [testenv:packaging]
95103 skip_install=True
117125 python -m towncrier.check --compare-with=origin/develop
118126 basepython = python3.6
119127
128 [testenv:check-sampleconfig]
129 commands = {toxinidir}/scripts-dev/generate_sample_config --check
130
120131 [testenv:codecov]
121132 skip_install = True
122133 deps =