Imported Upstream version 0.33.5
Richard van der Hoff
8 | 8 | - store_artifacts: |
9 | 9 | path: ~/project/logs |
10 | 10 | destination: logs |
11 | - store_test_results: | |
12 | path: logs | |
11 | 13 | sytestpy2postgres: |
12 | 14 | machine: true |
13 | 15 | steps: |
17 | 19 | - store_artifacts: |
18 | 20 | path: ~/project/logs |
19 | 21 | destination: logs |
22 | - store_test_results: | |
23 | path: logs | |
24 | sytestpy2merged: | |
25 | machine: true | |
26 | steps: | |
27 | - checkout | |
28 | - run: bash .circleci/merge_base_branch.sh | |
29 | - run: docker pull matrixdotorg/sytest-synapsepy2 | |
30 | - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2 | |
31 | - store_artifacts: | |
32 | path: ~/project/logs | |
33 | destination: logs | |
34 | - store_test_results: | |
35 | path: logs | |
36 | ||
37 | sytestpy2postgresmerged: | |
38 | machine: true | |
39 | steps: | |
40 | - checkout | |
41 | - run: bash .circleci/merge_base_branch.sh | |
42 | - run: docker pull matrixdotorg/sytest-synapsepy2 | |
43 | - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2 | |
44 | - store_artifacts: | |
45 | path: ~/project/logs | |
46 | destination: logs | |
47 | - store_test_results: | |
48 | path: logs | |
49 | ||
20 | 50 | sytestpy3: |
21 | 51 | machine: true |
22 | 52 | steps: |
23 | 53 | - checkout |
24 | 54 | - run: docker pull matrixdotorg/sytest-synapsepy3 |
25 | - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3 | |
55 | - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 | |
26 | 56 | - store_artifacts: |
27 | 57 | path: ~/project/logs |
28 | 58 | destination: logs |
59 | - store_test_results: | |
60 | path: logs | |
29 | 61 | sytestpy3postgres: |
30 | 62 | machine: true |
31 | 63 | steps: |
35 | 67 | - store_artifacts: |
36 | 68 | path: ~/project/logs |
37 | 69 | destination: logs |
70 | - store_test_results: | |
71 | path: logs | |
72 | sytestpy3merged: | |
73 | machine: true | |
74 | steps: | |
75 | - checkout | |
76 | - run: bash .circleci/merge_base_branch.sh | |
77 | - run: docker pull matrixdotorg/sytest-synapsepy3 | |
78 | - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3 | |
79 | - store_artifacts: | |
80 | path: ~/project/logs | |
81 | destination: logs | |
82 | - store_test_results: | |
83 | path: logs | |
84 | sytestpy3postgresmerged: | |
85 | machine: true | |
86 | steps: | |
87 | - checkout | |
88 | - run: bash .circleci/merge_base_branch.sh | |
89 | - run: docker pull matrixdotorg/sytest-synapsepy3 | |
90 | - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3 | |
91 | - store_artifacts: | |
92 | path: ~/project/logs | |
93 | destination: logs | |
94 | - store_test_results: | |
95 | path: logs | |
38 | 96 | |
39 | 97 | workflows: |
40 | 98 | version: 2 |
42 | 100 | jobs: |
43 | 101 | - sytestpy2 |
44 | 102 | - sytestpy2postgres |
45 | # Currently broken while the Python 3 port is incomplete | |
46 | # - sytestpy3 | |
47 | # - sytestpy3postgres | |
103 | - sytestpy3 | |
104 | - sytestpy3postgres | |
105 | - sytestpy2merged: | |
106 | filters: | |
107 | branches: | |
108 | ignore: /develop|master/ | |
109 | - sytestpy2postgresmerged: | |
110 | filters: | |
111 | branches: | |
112 | ignore: /develop|master/ | |
113 | - sytestpy3merged: | |
114 | filters: | |
115 | branches: | |
116 | ignore: /develop|master/ | |
117 | - sytestpy3postgresmerged: | |
118 | filters: | |
119 | branches: | |
120 | ignore: /develop|master/ |
0 | #!/usr/bin/env bash | |
1 | ||
2 | set -e | |
3 | ||
4 | # CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful. | |
5 | # In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL. | |
6 | echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV | |
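# For example (illustrative URL): CIRCLE_PULL_REQUEST=https://github.com/matrix-org/synapse/pull/3704
# gives CIRCLE_PR_NUMBER=3704, since "##*/" strips everything up to and including the last "/".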
7 | source $BASH_ENV | |
8 | ||
9 | if [[ -z "${CIRCLE_PR_NUMBER}" ]] | |
10 | then | |
11 | echo "Can't figure out what the PR number is!" | |
12 | exit 1 | |
13 | fi | |
14 | ||
15 | # Get the base branch ref, using the GitHub API | |
16 | GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'` | |
17 | ||
18 | # Show what we are before | |
19 | git show -s | |
20 | ||
21 | # Set up a username so git can do the merge | |
22 | git config --global user.email bot@matrix.org | |
23 | git config --global user.name "A robot" | |
24 | ||
25 | # Fetch and merge. If it doesn't work, it will raise due to set -e. | |
26 | git fetch -u origin $GITBASE | |
27 | git merge --no-edit origin/$GITBASE | |
28 | ||
29 | # Show what we are after. | |
30 | git show -s | |
43 | 43 | build/ |
44 | 44 | venv/ |
45 | 45 | venv*/ |
46 | *venv/ | |
46 | 47 | |
47 | 48 | localhost-800*/ |
48 | 49 | static/client/register/register_config.js |
6 | 6 | before_script: |
7 | 7 | - git remote set-branches --add origin develop |
8 | 8 | - git fetch origin develop |
9 | ||
10 | services: | |
11 | - postgresql | |
12 | 9 | |
13 | 10 | matrix: |
14 | 11 | fast_finish: true |
24 | 21 | |
25 | 22 | - python: 2.7 |
26 | 23 | env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" |
24 | services: | |
25 | - postgresql | |
26 | ||
27 | - python: 3.5 | |
28 | env: TOX_ENV=py35 | |
27 | 29 | |
28 | 30 | - python: 3.6 |
29 | 31 | env: TOX_ENV=py36 |
0 | Synapse 0.33.5 (2018-09-24) | |
1 | =========================== | |
2 | ||
3 | No significant changes. | |
4 | ||
5 | ||
6 | Synapse 0.33.5rc1 (2018-09-17) | |
7 | ============================== | |
8 | ||
9 | Features | |
10 | -------- | |
11 | ||
12 | - Python 3.5 and 3.6 support is now in beta. ([\#3576](https://github.com/matrix-org/synapse/issues/3576)) | |
13 | - Implement `event_format` filter param in `/sync` ([\#3790](https://github.com/matrix-org/synapse/issues/3790)) | |
14 | - Add synapse_admin_mau:registered_reserved_users metric to expose number of real reserved users ([\#3846](https://github.com/matrix-org/synapse/issues/3846)) | |
15 | ||
16 | ||
17 | Bugfixes | |
18 | -------- | |
19 | ||
20 | - Remove connection ID for replication prometheus metrics, as it creates a large number of new series. ([\#3788](https://github.com/matrix-org/synapse/issues/3788)) | |
21 | - Guest users should not be part of the MAU total ([\#3800](https://github.com/matrix-org/synapse/issues/3800)) | |
22 | - Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804)) | |
23 | - Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810)) | |
24 | - Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824)) | |
25 | - Fix VoIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835)) | |
26 | - Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841)) | |
27 | - Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845)) | |
28 | - Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851)) | |
29 | - Fix handling of redacted events from federation ([\#3859](https://github.com/matrix-org/synapse/issues/3859)) | |
30 | - ([\#3874](https://github.com/matrix-org/synapse/issues/3874)) | |
31 | - Mitigate outbound federation randomly becoming wedged ([\#3875](https://github.com/matrix-org/synapse/issues/3875)) | |
32 | ||
33 | ||
34 | Internal Changes | |
35 | ---------------- | |
36 | ||
37 | - CircleCI tests now run on the potential merge of a PR. ([\#3704](https://github.com/matrix-org/synapse/issues/3704)) | |
38 | - http/ is now ported to Python 3. ([\#3771](https://github.com/matrix-org/synapse/issues/3771)) | |
39 | - Improve human-readable error messages for threepid registration/account update ([\#3789](https://github.com/matrix-org/synapse/issues/3789)) | |
40 | - Make /sync slightly faster by avoiding needless copies ([\#3795](https://github.com/matrix-org/synapse/issues/3795)) | |
41 | - handlers/ is now ported to Python 3. ([\#3803](https://github.com/matrix-org/synapse/issues/3803)) | |
42 | - Limit the number of PDUs/EDUs per federation transaction ([\#3805](https://github.com/matrix-org/synapse/issues/3805)) | |
43 | - Only start postgres instance for postgres tests on Travis CI ([\#3806](https://github.com/matrix-org/synapse/issues/3806)) | |
44 | - tests/ is now ported to Python 3. ([\#3808](https://github.com/matrix-org/synapse/issues/3808)) | |
45 | - crypto/ is now ported to Python 3. ([\#3822](https://github.com/matrix-org/synapse/issues/3822)) | |
46 | - rest/ is now ported to Python 3. ([\#3823](https://github.com/matrix-org/synapse/issues/3823)) | |
47 | - Add some logging for the keyring queue ([\#3826](https://github.com/matrix-org/synapse/issues/3826)) | |
48 | - Speed up lazy loading by 2-3x ([\#3827](https://github.com/matrix-org/synapse/issues/3827)) | |
49 | - Improve the Dockerfile to remove build requirements after building, reducing the image size. ([\#3834](https://github.com/matrix-org/synapse/issues/3834)) | |
50 | - Disable lazy loading for incremental syncs for now ([\#3840](https://github.com/matrix-org/synapse/issues/3840)) | |
51 | - federation/ is now ported to Python 3. ([\#3847](https://github.com/matrix-org/synapse/issues/3847)) | |
52 | - Log when we retry outbound requests ([\#3853](https://github.com/matrix-org/synapse/issues/3853)) | |
53 | - Removed some excess logging messages. ([\#3855](https://github.com/matrix-org/synapse/issues/3855)) | |
54 | - Speed up purge history for rooms that have been previously purged ([\#3856](https://github.com/matrix-org/synapse/issues/3856)) | |
55 | - Refactor some HTTP timeout code. ([\#3857](https://github.com/matrix-org/synapse/issues/3857)) | |
56 | - Fix running merged builds on CircleCI ([\#3858](https://github.com/matrix-org/synapse/issues/3858)) | |
57 | - Fix typo in replication stream exception. ([\#3860](https://github.com/matrix-org/synapse/issues/3860)) | |
58 | - Add in-flight real-time metrics for Measure blocks ([\#3871](https://github.com/matrix-org/synapse/issues/3871)) | |
59 | - Disable buffering and automatic retrying in treq requests to prevent timeouts. ([\#3872](https://github.com/matrix-org/synapse/issues/3872)) | |
60 | - Mention jemalloc in the README ([\#3877](https://github.com/matrix-org/synapse/issues/3877)) | |
61 | - Remove unmaintained "nuke-room-from-db.sh" script ([\#3888](https://github.com/matrix-org/synapse/issues/3888)) | |
62 | ||
63 | ||
0 | 64 | Synapse 0.33.4 (2018-09-07) |
1 | 65 | =========================== |
2 | 66 |
741 | 741 | } |
742 | 742 | } |
743 | 743 | |
744 | and an example Apache configuration may look like:: | |
745 | ||
746 | <VirtualHost *:443> | |
747 | SSLEngine on | |
748 | ServerName matrix.example.com | |
749 | ||
750 | <Location /_matrix> | |
751 | ProxyPass http://127.0.0.1:8008/_matrix nocanon | |
752 | ProxyPassReverse http://127.0.0.1:8008/_matrix | |
753 | </Location> | |
754 | </VirtualHost> | |
755 | ||
744 | 756 | You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true`` |
745 | 757 | for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are |
746 | 758 | recorded correctly. |
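For reference, a minimal sketch of those settings (assuming the standard
``listeners`` section of ``homeserver.yaml``; check the sample config for
your version) might look like::

    listeners:
      - port: 8008
        type: http
        bind_addresses: ['127.0.0.1']
        x_forwarded: true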
950 | 962 | in memory-constrained environments, or increased if performance starts to
951 | 963 | degrade. |
952 | 964 | |
965 | Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant | |
966 | improvement in overall memory use, and especially in terms of giving back | |
967 | RAM to the OS. To use it, set the LD_PRELOAD environment variable to the | |
968 | library path when launching Synapse. On Debian, this can be done by | |
969 | installing the ``libjemalloc1`` package and adding this line to | |
970 | ``/etc/default/matrix-synapse``:: | |
971 | ||
972 | LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 | |
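A quick way to confirm the library was actually preloaded (a suggested
check, not from the original docs) is to inspect the running process's
maps, substituting the real PID::

    grep jemalloc /proc/<synapse_pid>/maps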
953 | 973 | |
954 | 974 | .. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys |
0 | 0 | FROM docker.io/python:2-alpine3.8 |
1 | 1 | |
2 | RUN apk add --no-cache --virtual .nacl_deps \ | |
2 | COPY . /synapse | |
3 | ||
4 | RUN apk add --no-cache --virtual .build_deps \ | |
3 | 5 | build-base \ |
4 | 6 | libffi-dev \ |
5 | 7 | libjpeg-turbo-dev \ |
7 | 9 | libxslt-dev \ |
8 | 10 | linux-headers \ |
9 | 11 | postgresql-dev \ |
10 | su-exec \ | |
11 | zlib-dev | |
12 | ||
13 | COPY . /synapse | |
14 | ||
15 | # A wheel cache may be provided in ./cache for faster build | |
16 | RUN cd /synapse \ | |
12 | zlib-dev \ | |
13 | && cd /synapse \ | |
14 | && apk add --no-cache --virtual .runtime_deps \ | |
15 | libffi \ | |
16 | libjpeg-turbo \ | |
17 | libressl \ | |
18 | libxslt \ | |
19 | libpq \ | |
20 | zlib \ | |
21 | su-exec \ | |
17 | 22 | && pip install --upgrade \ |
18 | 23 | lxml \ |
19 | 24 | pip \ |
25 | 30 | && rm -rf \ |
26 | 31 | setup.cfg \ |
27 | 32 | setup.py \ |
28 | synapse | |
29 | ||
33 | synapse \ | |
34 | && apk del .build_deps | |
35 | ||
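Note that the ``apk del .build_deps`` shrinks the image only because the
install and the cleanup share a single RUN layer (layers are additive); a
minimal sketch of the pattern, using ``lxml`` as a stand-in package, with
runtime libraries kept separately as the diff above does:

    RUN apk add --no-cache --virtual .build_deps build-base libxml2-dev libxslt-dev \
     && pip install lxml \
     && apk del .build_deps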
30 | 36 | VOLUME ["/data"] |
31 | 37 | |
32 | 38 | EXPOSE 8008/tcp 8448/tcp |
0 | #!/bin/bash | |
1 | ||
2 | ## CAUTION: | |
3 | ## This script will remove (hopefully) all trace of the given room ID from | |
4 | ## your homeserver.db | |
5 | ||
6 | ## Do not run it lightly. | |
7 | ||
8 | set -e | |
9 | ||
10 | if [ "$1" == "-h" ] || [ "$1" == "" ]; then | |
11 | echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run" | |
12 | echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db" | |
13 | echo "or" | |
14 | echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse" | |
15 | exit | |
16 | fi | |
17 | ||
18 | ROOMID="$1" | |
19 | ||
20 | cat <<EOF | |
21 | DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID'; | |
22 | DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID'; | |
23 | DELETE FROM event_edges WHERE room_id = '$ROOMID'; | |
24 | DELETE FROM room_depth WHERE room_id = '$ROOMID'; | |
25 | DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID'; | |
26 | DELETE FROM events WHERE room_id = '$ROOMID'; | |
27 | DELETE FROM event_json WHERE room_id = '$ROOMID'; | |
28 | DELETE FROM state_events WHERE room_id = '$ROOMID'; | |
29 | DELETE FROM current_state_events WHERE room_id = '$ROOMID'; | |
30 | DELETE FROM room_memberships WHERE room_id = '$ROOMID'; | |
31 | DELETE FROM feedback WHERE room_id = '$ROOMID'; | |
32 | DELETE FROM topics WHERE room_id = '$ROOMID'; | |
33 | DELETE FROM room_names WHERE room_id = '$ROOMID'; | |
34 | DELETE FROM rooms WHERE room_id = '$ROOMID'; | |
35 | DELETE FROM room_hosts WHERE room_id = '$ROOMID'; | |
36 | DELETE FROM room_aliases WHERE room_id = '$ROOMID'; | |
37 | DELETE FROM state_groups WHERE room_id = '$ROOMID'; | |
38 | DELETE FROM state_groups_state WHERE room_id = '$ROOMID'; | |
39 | DELETE FROM receipts_graph WHERE room_id = '$ROOMID'; | |
40 | DELETE FROM receipts_linearized WHERE room_id = '$ROOMID'; | |
41 | DELETE FROM event_search WHERE room_id = '$ROOMID'; | |
42 | DELETE FROM guest_access WHERE room_id = '$ROOMID'; | |
43 | DELETE FROM history_visibility WHERE room_id = '$ROOMID'; | |
44 | DELETE FROM room_tags WHERE room_id = '$ROOMID'; | |
45 | DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID'; | |
46 | DELETE FROM room_account_data WHERE room_id = '$ROOMID'; | |
47 | DELETE FROM event_push_actions WHERE room_id = '$ROOMID'; | |
48 | DELETE FROM local_invites WHERE room_id = '$ROOMID'; | |
49 | DELETE FROM pusher_throttle WHERE room_id = '$ROOMID'; | |
50 | DELETE FROM event_reports WHERE room_id = '$ROOMID'; | |
51 | DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID'; | |
52 | DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID'; | |
53 | DELETE FROM event_auth WHERE room_id = '$ROOMID'; | |
54 | DELETE FROM appservice_room_list WHERE room_id = '$ROOMID'; | |
55 | VACUUM; | |
56 | EOF |
16 | 16 | [pep8] |
17 | 17 | max-line-length = 90 |
18 | 18 | # W503 requires that binary operators be at the end, not start, of lines. Erik |
19 | # doesn't like it. E203 is contrary to PEP8. | |
20 | ignore = W503,E203 | |
19 | # doesn't like it. E203 is contrary to PEP8. E731 is silly. | |
20 | ignore = W503,E203,E731 | |
21 | 21 | |
22 | 22 | [flake8] |
23 | 23 | # note that flake8 inherits the "ignore" settings from "pep8" (because it uses |
24 | 24 | # pep8 to do those checks), but not the "max-line-length" setting |
25 | 25 | max-line-length = 90 |
26 | ignore=W503,E203,E731 | |
26 | 27 | |
27 | 28 | [isort] |
28 | 29 | line_length = 89 |
16 | 16 | """ This is a reference implementation of a Matrix home server. |
17 | 17 | """ |
18 | 18 | |
19 | __version__ = "0.33.4" | |
19 | try: | |
20 | from twisted.internet import protocol | |
21 | from twisted.internet.protocol import Factory | |
22 | from twisted.names.dns import DNSDatagramProtocol | |
23 | protocol.Factory.noisy = False | |
24 | Factory.noisy = False | |
25 | DNSDatagramProtocol.noisy = False | |
26 | except ImportError: | |
27 | pass | |
28 | ||
29 | __version__ = "0.33.5" |
250 | 250 | "include_leave", False |
251 | 251 | ) |
252 | 252 | self.event_fields = filter_json.get("event_fields", []) |
253 | self.event_format = filter_json.get("event_format", "client") | |
253 | 254 | |
254 | 255 | def __repr__(self): |
255 | 256 | return "<FilterCollection %s>" % (json.dumps(self._filter_json),) |
306 | 306 | # Gauges to expose monthly active user control metrics |
307 | 307 | current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") |
308 | 308 | max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") |
309 | registered_reserved_users_mau_gauge = Gauge( | |
310 | "synapse_admin_mau:registered_reserved_users", | |
311 | "Registered users with reserved threepids" | |
312 | ) | |
309 | 313 | |
310 | 314 | |
311 | 315 | def setup(config_options): |
530 | 534 | |
531 | 535 | @defer.inlineCallbacks |
532 | 536 | def generate_monthly_active_users(): |
533 | count = 0 | |
537 | current_mau_count = 0 | |
538 | reserved_count = 0 | |
539 | store = hs.get_datastore() | |
534 | 540 | if hs.config.limit_usage_by_mau: |
535 | count = yield hs.get_datastore().get_monthly_active_count() | |
536 | current_mau_gauge.set(float(count)) | |
541 | current_mau_count = yield store.get_monthly_active_count() | |
542 | reserved_count = yield store.get_registered_reserved_users_count() | |
543 | current_mau_gauge.set(float(current_mau_count)) | |
544 | registered_reserved_users_mau_gauge.set(float(reserved_count)) | |
537 | 545 | max_mau_gauge.set(float(hs.config.max_mau_value)) |
538 | 546 | |
539 | 547 | hs.get_datastore().initialise_reserved_users( |
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
14 | 14 | import logging |
15 | import urllib | |
15 | ||
16 | from six.moves import urllib | |
16 | 17 | |
17 | 18 | from prometheus_client import Counter |
18 | 19 | |
97 | 98 | def query_user(self, service, user_id): |
98 | 99 | if service.url is None: |
99 | 100 | defer.returnValue(False) |
100 | uri = service.url + ("/users/%s" % urllib.quote(user_id)) | |
101 | uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) | |
101 | 102 | response = None |
102 | 103 | try: |
103 | 104 | response = yield self.get_json(uri, { |
118 | 119 | def query_alias(self, service, alias): |
119 | 120 | if service.url is None: |
120 | 121 | defer.returnValue(False) |
121 | uri = service.url + ("/rooms/%s" % urllib.quote(alias)) | |
122 | uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) | |
122 | 123 | response = None |
123 | 124 | try: |
124 | 125 | response = yield self.get_json(uri, { |
152 | 153 | service.url, |
153 | 154 | APP_SERVICE_PREFIX, |
154 | 155 | kind, |
155 | urllib.quote(protocol) | |
156 | urllib.parse.quote(protocol) | |
156 | 157 | ) |
157 | 158 | try: |
158 | 159 | response = yield self.get_json(uri, fields) |
187 | 188 | uri = "%s%s/thirdparty/protocol/%s" % ( |
188 | 189 | service.url, |
189 | 190 | APP_SERVICE_PREFIX, |
190 | urllib.quote(protocol) | |
191 | urllib.parse.quote(protocol) | |
191 | 192 | ) |
192 | 193 | try: |
193 | 194 | info = yield self.get_json(uri, {}) |
227 | 228 | txn_id = str(txn_id) |
228 | 229 | |
229 | 230 | uri = service.url + ("/transactions/%s" % |
230 | urllib.quote(txn_id)) | |
231 | urllib.parse.quote(txn_id)) | |
231 | 232 | try: |
232 | 233 | yield self.put_json( |
233 | 234 | uri=uri, |
20 | 20 | from .database import DatabaseConfig |
21 | 21 | from .emailconfig import EmailConfig |
22 | 22 | from .groups import GroupsConfig |
23 | from .jwt import JWTConfig | |
23 | from .jwt_config import JWTConfig | |
24 | 24 | from .key import KeyConfig |
25 | 25 | from .logger import LoggingConfig |
26 | 26 | from .metrics import MetricsConfig |
0 | # -*- coding: utf-8 -*- | |
1 | # Copyright 2015 Niklas Riekenbrauck | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | from ._base import Config, ConfigError | |
16 | ||
17 | MISSING_JWT = ( | |
18 | """Missing jwt library. This is required for jwt login. | |
19 | ||
20 | Install by running: | |
21 | pip install pyjwt | |
22 | """ | |
23 | ) | |
24 | ||
25 | ||
26 | class JWTConfig(Config): | |
27 | def read_config(self, config): | |
28 | jwt_config = config.get("jwt_config", None) | |
29 | if jwt_config: | |
30 | self.jwt_enabled = jwt_config.get("enabled", False) | |
31 | self.jwt_secret = jwt_config["secret"] | |
32 | self.jwt_algorithm = jwt_config["algorithm"] | |
33 | ||
34 | try: | |
35 | import jwt | |
36 | jwt # To stop unused lint. | |
37 | except ImportError: | |
38 | raise ConfigError(MISSING_JWT) | |
39 | else: | |
40 | self.jwt_enabled = False | |
41 | self.jwt_secret = None | |
42 | self.jwt_algorithm = None | |
43 | ||
44 | def default_config(self, **kwargs): | |
45 | return """\ | |
46 | # The JWT needs to contain a globally unique "sub" (subject) claim. | |
47 | # | |
48 | # jwt_config: | |
49 | # enabled: true | |
50 | # secret: "a secret" | |
51 | # algorithm: "HS256" | |
52 | """ |
0 | # -*- coding: utf-8 -*- | |
1 | # Copyright 2015 Niklas Riekenbrauck | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | from ._base import Config, ConfigError | |
16 | ||
17 | MISSING_JWT = ( | |
18 | """Missing jwt library. This is required for jwt login. | |
19 | ||
20 | Install by running: | |
21 | pip install pyjwt | |
22 | """ | |
23 | ) | |
24 | ||
25 | ||
26 | class JWTConfig(Config): | |
27 | def read_config(self, config): | |
28 | jwt_config = config.get("jwt_config", None) | |
29 | if jwt_config: | |
30 | self.jwt_enabled = jwt_config.get("enabled", False) | |
31 | self.jwt_secret = jwt_config["secret"] | |
32 | self.jwt_algorithm = jwt_config["algorithm"] | |
33 | ||
34 | try: | |
35 | import jwt | |
36 | jwt # To stop unused lint. | |
37 | except ImportError: | |
38 | raise ConfigError(MISSING_JWT) | |
39 | else: | |
40 | self.jwt_enabled = False | |
41 | self.jwt_secret = None | |
42 | self.jwt_algorithm = None | |
43 | ||
44 | def default_config(self, **kwargs): | |
45 | return """\ | |
46 | # The JWT needs to contain a globally unique "sub" (subject) claim. | |
47 | # | |
48 | # jwt_config: | |
49 | # enabled: true | |
50 | # secret: "a secret" | |
51 | # algorithm: "HS256" | |
52 | """ |
226 | 226 | # |
227 | 227 | # However this may not be too much of a problem if we are just writing to a file. |
228 | 228 | observer = STDLibLogObserver() |
229 | ||
230 | def _log(event): | |
231 | ||
232 | if "log_text" in event: | |
233 | if event["log_text"].startswith("DNSDatagramProtocol starting on "): | |
234 | return | |
235 | ||
236 | if event["log_text"].startswith("(UDP Port "): | |
237 | return | |
238 | ||
239 | if event["log_text"].startswith("Timing out client"): | |
240 | return | |
241 | ||
242 | return observer(event) | |
243 | ||
229 | 244 | globalLogBeginner.beginLoggingTo( |
230 | [observer], | |
245 | [_log], | |
231 | 246 | redirectStandardIO=not config.no_redirect_stdio, |
232 | 247 | ) |
122 | 122 | |
123 | 123 | def get_options(self, host): |
124 | 124 | return ClientTLSOptions( |
125 | host.decode('utf-8'), | |
125 | host, | |
126 | 126 | CertificateOptions(verify=False).getContext() |
127 | 127 | ) |
49 | 49 | defer.returnValue((server_response, server_certificate)) |
50 | 50 | except SynapseKeyClientError as e: |
51 | 51 | logger.warn("Error getting key for %r: %s", server_name, e) |
52 | if e.status.startswith("4"): | |
52 | if e.status.startswith(b"4"): | |
53 | 53 | # Don't retry for 4xx responses. |
54 | 54 | raise IOError("Cannot get key for %r" % server_name) |
55 | 55 | except (ConnectError, DomainError) as e: |
80 | 80 | def connectionMade(self): |
81 | 81 | self._peer = self.transport.getPeer() |
82 | 82 | logger.debug("Connected to %s", self._peer) |
83 | ||
84 | if not isinstance(self.path, bytes): | |
85 | self.path = self.path.encode('ascii') | |
86 | ||
87 | if not isinstance(self.host, bytes): | |
88 | self.host = self.host.encode('ascii') | |
83 | 89 | |
84 | 90 | self.sendCommand(b"GET", self.path) |
85 | 91 | if self.host: |
15 | 15 | |
16 | 16 | import hashlib |
17 | 17 | import logging |
18 | import urllib | |
19 | 18 | from collections import namedtuple |
19 | ||
20 | from six.moves import urllib | |
20 | 21 | |
21 | 22 | from signedjson.key import ( |
22 | 23 | decode_verify_key_bytes, |
39 | 40 | from synapse.crypto.keyclient import fetch_server_key |
40 | 41 | from synapse.util import logcontext, unwrapFirstError |
41 | 42 | from synapse.util.logcontext import ( |
43 | LoggingContext, | |
42 | 44 | PreserveLoggingContext, |
43 | 45 | preserve_fn, |
44 | 46 | run_in_background, |
215 | 217 | servers have completed. Follows the synapse rules of logcontext |
216 | 218 | preservation. |
217 | 219 | """ |
220 | loop_count = 1 | |
218 | 221 | while True: |
219 | 222 | wait_on = [ |
220 | self.key_downloads[server_name] | |
223 | (server_name, self.key_downloads[server_name]) | |
221 | 224 | for server_name in server_names |
222 | 225 | if server_name in self.key_downloads |
223 | 226 | ] |
224 | if wait_on: | |
225 | with PreserveLoggingContext(): | |
226 | yield defer.DeferredList(wait_on) | |
227 | else: | |
227 | if not wait_on: | |
228 | 228 | break |
229 | logger.info( | |
230 | "Waiting for existing lookups for %s to complete [loop %i]", | |
231 | [w[0] for w in wait_on], loop_count, | |
232 | ) | |
233 | with PreserveLoggingContext(): | |
234 | yield defer.DeferredList((w[1] for w in wait_on)) | |
235 | ||
236 | loop_count += 1 | |
237 | ||
238 | ctx = LoggingContext.current_context() | |
229 | 239 | |
230 | 240 | def rm(r, server_name_): |
231 | self.key_downloads.pop(server_name_, None) | |
241 | with PreserveLoggingContext(ctx): | |
242 | logger.debug("Releasing key lookup lock on %s", server_name_) | |
243 | self.key_downloads.pop(server_name_, None) | |
232 | 244 | return r |
233 | 245 | |
234 | 246 | for server_name, deferred in server_to_deferred.items(): |
247 | logger.debug("Got key lookup lock on %s", server_name) | |
235 | 248 | self.key_downloads[server_name] = deferred |
236 | 249 | deferred.addBoth(rm, server_name) |
237 | 250 | |
431 | 444 | # an incoming request. |
432 | 445 | query_response = yield self.client.post_json( |
433 | 446 | destination=perspective_name, |
434 | path=b"/_matrix/key/v2/query", | |
447 | path="/_matrix/key/v2/query", | |
435 | 448 | data={ |
436 | 449 | u"server_keys": { |
437 | 450 | server_name: { |
512 | 525 | |
513 | 526 | (response, tls_certificate) = yield fetch_server_key( |
514 | 527 | server_name, self.hs.tls_client_options_factory, |
515 | path=(b"/_matrix/key/v2/server/%s" % ( | |
516 | urllib.quote(requested_key_id), | |
528 | path=("/_matrix/key/v2/server/%s" % ( | |
529 | urllib.parse.quote(requested_key_id), | |
517 | 530 | )).encode("ascii"), |
518 | 531 | ) |
519 | 532 |
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
14 | 14 | |
15 | import six | |
16 | ||
15 | 17 | from synapse.util.caches import intern_dict |
16 | 18 | from synapse.util.frozenutils import freeze |
17 | 19 | |
146 | 148 | def items(self): |
147 | 149 | return list(self._event_dict.items()) |
148 | 150 | |
151 | def keys(self): | |
152 | return six.iterkeys(self._event_dict) | |
153 | ||
149 | 154 | |
150 | 155 | class FrozenEvent(EventBase): |
151 | 156 | def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None): |
142 | 142 | def callback(_, pdu): |
143 | 143 | with logcontext.PreserveLoggingContext(ctx): |
144 | 144 | if not check_event_content_hash(pdu): |
145 | logger.warn( | |
146 | "Event content has been tampered, redacting %s: %s", | |
147 | pdu.event_id, pdu.get_pdu_json() | |
148 | ) | |
149 | return prune_event(pdu) | |
145 | # let's try to distinguish between failures because the event was | |
146 | # redacted (which are somewhat expected) vs actual ball-tampering | |
147 | # incidents. | |
148 | # | |
149 | # This is just a heuristic, so we just assume that if the keys are | |
150 | # about the same between the redacted and received events, then the | |
151 | # received event was probably a redacted copy (but we then use our | |
152 | # *actual* redacted copy to be on the safe side.) | |
153 | redacted_event = prune_event(pdu) | |
154 | if ( | |
155 | set(redacted_event.keys()) == set(pdu.keys()) and | |
156 | set(six.iterkeys(redacted_event.content)) | |
157 | == set(six.iterkeys(pdu.content)) | |
158 | ): | |
159 | logger.info( | |
160 | "Event %s seems to have been redacted; using our redacted " | |
161 | "copy", | |
162 | pdu.event_id, | |
163 | ) | |
164 | else: | |
165 | logger.warning( | |
166 | "Event %s content has been tampered, redacting", | |
167 | pdu.event_id, pdu.get_pdu_json(), | |
168 | ) | |
169 | return redacted_event | |
150 | 170 | |
151 | 171 | if self.spam_checker.check_event_for_spam(pdu): |
152 | 172 | logger.warn( |
161 | 181 | failure.trap(SynapseError) |
162 | 182 | with logcontext.PreserveLoggingContext(ctx): |
163 | 183 | logger.warn( |
164 | "Signature check failed for %s", | |
165 | pdu.event_id, | |
184 | "Signature check failed for %s: %s", | |
185 | pdu.event_id, failure.getErrorMessage(), | |
166 | 186 | ) |
167 | 187 | return failure |
168 | 188 |
270 | 270 | event_id, destination, e, |
271 | 271 | ) |
272 | 272 | except NotRetryingDestination as e: |
273 | logger.info(e.message) | |
273 | logger.info(str(e)) | |
274 | 274 | continue |
275 | 275 | except FederationDeniedError as e: |
276 | logger.info(e.message) | |
276 | logger.info(str(e)) | |
277 | 277 | continue |
278 | 278 | except Exception as e: |
279 | 279 | pdu_attempts[destination] = now |
509 | 509 | else: |
510 | 510 | logger.warn( |
511 | 511 | "Failed to %s via %s: %i %s", |
512 | description, destination, e.code, e.message, | |
512 | description, destination, e.code, e.args[0], | |
513 | 513 | ) |
514 | 514 | except Exception: |
515 | 515 | logger.warn( |
874 | 874 | except Exception as e: |
875 | 875 | logger.exception( |
876 | 876 | "Failed to send_third_party_invite via %s: %s", |
877 | destination, e.message | |
877 | destination, str(e) | |
878 | 878 | ) |
879 | 879 | |
880 | 880 | raise RuntimeError("Failed to send to any server.") |
837 | 837 | ) |
838 | 838 | |
839 | 839 | return self._send_edu( |
840 | edu_type=edu_type, | |
841 | origin=origin, | |
842 | content=content, | |
840 | edu_type=edu_type, | |
841 | origin=origin, | |
842 | content=content, | |
843 | 843 | ) |
844 | 844 | |
845 | 845 | def on_query(self, query_type, args): |
850 | 850 | return handler(args) |
851 | 851 | |
852 | 852 | return self._get_query_client( |
853 | query_type=query_type, | |
854 | args=args, | |
855 | ) | |
853 | query_type=query_type, | |
854 | args=args, | |
855 | ) |
462 | 462 | # pending_transactions flag. |
463 | 463 | |
464 | 464 | pending_pdus = self.pending_pdus_by_dest.pop(destination, []) |
465 | ||
466 | # We can only include at most 50 PDUs per transactions | |
467 | pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:] | |
468 | if leftover_pdus: | |
469 | self.pending_pdus_by_dest[destination] = leftover_pdus | |
470 | ||
465 | 471 | pending_edus = self.pending_edus_by_dest.pop(destination, []) |
472 | ||
473 | # We can only include at most 100 EDUs per transactions | |
474 | pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:] | |
475 | if leftover_edus: | |
476 | self.pending_edus_by_dest[destination] = leftover_edus | |
477 | ||
466 | 478 | pending_presence = self.pending_presence_by_dest.pop(destination, {}) |
467 | 479 | |
468 | 480 | pending_edus.extend( |
14 | 14 | # limitations under the License. |
15 | 15 | |
16 | 16 | import logging |
17 | import urllib | |
17 | ||
18 | from six.moves import urllib | |
18 | 19 | |
19 | 20 | from twisted.internet import defer |
20 | 21 | |
950 | 951 | Returns: |
951 | 952 | str |
952 | 953 | """ |
953 | return prefix + path % tuple(urllib.quote(arg, "") for arg in args) | |
954 | return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) |
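# Illustrative only, assuming PREFIX = "/_matrix/federation/v1" as in the
# federation transport module:
#   _create_path(PREFIX, "/event/%s", "!abc:example.com")
#   -> "/_matrix/federation/v1/event/%21abc%3Aexample.com"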
89 | 89 | @defer.inlineCallbacks |
90 | 90 | def authenticate_request(self, request, content): |
91 | 91 | json_request = { |
92 | "method": request.method, | |
93 | "uri": request.uri, | |
92 | "method": request.method.decode('ascii'), | |
93 | "uri": request.uri.decode('ascii'), | |
94 | 94 | "destination": self.server_name, |
95 | 95 | "signatures": {}, |
96 | 96 | } |
251 | 251 | by the callback method. None if the request has already been handled. |
252 | 252 | """ |
253 | 253 | content = None |
254 | if request.method in ["PUT", "POST"]: | |
254 | if request.method in [b"PUT", b"POST"]: | |
255 | 255 | # TODO: Handle other method types? other content types? |
256 | 256 | content = parse_json_object_from_request(request) |
257 | 257 | |
385 | 385 | return self.handler.on_context_state_request( |
386 | 386 | origin, |
387 | 387 | context, |
388 | query.get("event_id", [None])[0], | |
388 | parse_string_from_args(query, "event_id", None), | |
389 | 389 | ) |
390 | 390 | |
391 | 391 | |
396 | 396 | return self.handler.on_state_ids_request( |
397 | 397 | origin, |
398 | 398 | room_id, |
399 | query.get("event_id", [None])[0], | |
399 | parse_string_from_args(query, "event_id", None), | |
400 | 400 | ) |
401 | 401 | |
402 | 402 | |
404 | 404 | PATH = "/backfill/(?P<context>[^/]*)/" |
405 | 405 | |
406 | 406 | def on_GET(self, origin, content, query, context): |
407 | versions = query["v"] | |
408 | limits = query["limit"] | |
409 | ||
410 | if not limits: | |
407 | versions = [x.decode('ascii') for x in query[b"v"]] | |
408 | limit = parse_integer_from_args(query, "limit", None) | |
409 | ||
410 | if not limit: | |
411 | 411 | return defer.succeed((400, {"error": "Did not include limit param"})) |
412 | ||
413 | limit = int(limits[-1]) | |
414 | 412 | |
415 | 413 | return self.handler.on_backfill_request(origin, context, versions, limit) |
416 | 414 | |
422 | 420 | def on_GET(self, origin, content, query, query_type): |
423 | 421 | return self.handler.on_query_request( |
424 | 422 | query_type, |
425 | {k: v[0].decode("utf-8") for k, v in query.items()} | |
423 | {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()} | |
426 | 424 | ) |
427 | 425 | |
428 | 426 | |
629 | 627 | |
630 | 628 | @defer.inlineCallbacks |
631 | 629 | def on_GET(self, origin, content, query): |
632 | token = query.get("access_token", [None])[0] | |
630 | token = query.get(b"access_token", [None])[0] | |
633 | 631 | if token is None: |
634 | 632 | defer.returnValue((401, { |
635 | 633 | "errcode": "M_MISSING_TOKEN", "error": "Access Token required" |
636 | 634 | })) |
637 | 635 | return |
638 | 636 | |
639 | user_id = yield self.handler.on_openid_userinfo(token) | |
637 | user_id = yield self.handler.on_openid_userinfo(token.decode('ascii')) | |
640 | 638 | |
641 | 639 | if user_id is None: |
642 | 640 | defer.returnValue((401, { |
894 | 894 | |
895 | 895 | Args: |
896 | 896 | password (unicode): Password to hash. |
897 | stored_hash (unicode): Expected hash value. | |
897 | stored_hash (bytes): Expected hash value. | |
898 | 898 | |
899 | 899 | Returns: |
900 | 900 | Deferred(bool): Whether self.hash(password) == stored_hash. |
901 | 901 | """ |
902 | ||
903 | 902 | def _do_validate_hash(): |
904 | 903 | # Normalise the Unicode in the password |
905 | 904 | pw = unicodedata.normalize("NFKC", password) |
906 | 905 | |
907 | 906 | return bcrypt.checkpw( |
908 | 907 | pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"), |
909 | stored_hash.encode('utf8') | |
908 | stored_hash | |
910 | 909 | ) |
911 | 910 | |
912 | 911 | if stored_hash: |
912 | if not isinstance(stored_hash, bytes): | |
913 | stored_hash = stored_hash.encode('ascii') | |
914 | ||
913 | 915 | return make_deferred_yieldable( |
914 | 916 | threads.deferToThreadPool( |
915 | 917 | self.hs.get_reactor(), |
329 | 329 | (algorithm, key_id, ex_json, key) |
330 | 330 | ) |
331 | 331 | else: |
332 | new_keys.append((algorithm, key_id, encode_canonical_json(key))) | |
332 | new_keys.append(( | |
333 | algorithm, key_id, encode_canonical_json(key).decode('ascii'))) | |
333 | 334 | |
334 | 335 | yield self.store.add_e2e_one_time_keys( |
335 | 336 | user_id, device_id, time_now, new_keys |
357 | 358 | # Note that some Exceptions (notably twisted's ResponseFailed etc) don't |
358 | 359 | # give a string for e.message, which json then fails to serialize. |
359 | 360 | return { |
360 | "status": 503, "message": str(e.message), | |
361 | "status": 503, "message": str(e), | |
361 | 362 | } |
362 | 363 | |
363 | 364 |
593 | 593 | |
594 | 594 | required_auth = set( |
595 | 595 | a_id |
596 | for event in events + state_events.values() + auth_events.values() | |
596 | for event in events + list(state_events.values()) + list(auth_events.values()) | |
597 | 597 | for a_id, _ in event.auth_events |
598 | 598 | ) |
599 | 599 | auth_events.update({ |
801 | 801 | ) |
802 | 802 | continue |
803 | 803 | except NotRetryingDestination as e: |
804 | logger.info(e.message) | |
804 | logger.info(str(e)) | |
805 | 805 | continue |
806 | 806 | except FederationDeniedError as e: |
807 | 807 | logger.info(e) |
1357 | 1357 | ) |
1358 | 1358 | |
1359 | 1359 | if state_groups: |
1360 | _, state = state_groups.items().pop() | |
1360 | _, state = list(state_groups.items()).pop() | |
1361 | 1361 | results = state |
1362 | 1362 | |
1363 | 1363 | if event.is_state(): |
268 | 268 | |
269 | 269 | if state_ids: |
270 | 270 | state = yield self.store.get_events(list(state_ids.values())) |
271 | ||
272 | if state: | |
273 | state = yield filter_events_for_client( | |
274 | self.store, | |
275 | user_id, | |
276 | state.values(), | |
277 | is_peeking=(member_event_id is None), | |
278 | ) | |
271 | state = state.values() | |
279 | 272 | |
280 | 273 | time_now = self.clock.time_msec() |
281 | 274 |
161 | 161 | # Filter out rooms that we don't want to return |
162 | 162 | rooms_to_scan = [ |
163 | 163 | r for r in sorted_rooms |
164 | if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0 | |
164 | if r not in newly_unpublished and rooms_to_num_joined[r] > 0 | |
165 | 165 | ] |
166 | 166 | |
167 | 167 | total_room_count = len(rooms_to_scan) |
53 | 53 | batch_token = None |
54 | 54 | if batch: |
55 | 55 | try: |
56 | b = decode_base64(batch) | |
56 | b = decode_base64(batch).decode('ascii') | |
57 | 57 | batch_group, batch_group_key, batch_token = b.split("\n") |
58 | 58 | |
59 | 59 | assert batch_group is not None |
257 | 257 | # it returns more from the same group (if applicable) rather |
258 | 258 | # than reverting to searching all results again. |
259 | 259 | if batch_group and batch_group_key: |
260 | global_next_batch = encode_base64("%s\n%s\n%s" % ( | |
260 | global_next_batch = encode_base64(("%s\n%s\n%s" % ( | |
261 | 261 | batch_group, batch_group_key, pagination_token |
262 | )) | |
262 | )).encode('ascii')) | |
263 | 263 | else: |
264 | global_next_batch = encode_base64("%s\n%s\n%s" % ( | |
264 | global_next_batch = encode_base64(("%s\n%s\n%s" % ( | |
265 | 265 | "all", "", pagination_token |
266 | )) | |
266 | )).encode('ascii')) | |
267 | 267 | |
268 | 268 | for room_id, group in room_groups.items(): |
269 | group["next_batch"] = encode_base64("%s\n%s\n%s" % ( | |
269 | group["next_batch"] = encode_base64(("%s\n%s\n%s" % ( | |
270 | 270 | "room_id", room_id, pagination_token |
271 | )) | |
271 | )).encode('ascii')) | |
272 | 272 | |
273 | 273 | allowed_events.extend(room_events) |
274 | 274 |
23 | 23 | |
24 | 24 | from synapse.api.constants import EventTypes, Membership |
25 | 25 | from synapse.push.clientformat import format_push_rules_for_user |
26 | from synapse.storage.roommember import MemberSummary | |
26 | 27 | from synapse.types import RoomStreamToken |
27 | 28 | from synapse.util.async_helpers import concurrently_execute |
28 | 29 | from synapse.util.caches.expiringcache import ExpiringCache |
524 | 525 | A deferred dict describing the room summary |
525 | 526 | """ |
526 | 527 | |
528 | # FIXME: we could/should get this from room_stats when matthew/stats lands | |
529 | ||
527 | 530 | # FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305 |
528 | 531 | last_events, _ = yield self.store.get_recent_event_ids_for_room( |
529 | 532 | room_id, end_token=now_token.room_key, limit=1, |
536 | 539 | last_event = last_events[-1] |
537 | 540 | state_ids = yield self.store.get_state_ids_for_event( |
538 | 541 | last_event.event_id, [ |
539 | (EventTypes.Member, None), | |
540 | 542 | (EventTypes.Name, ''), |
541 | 543 | (EventTypes.CanonicalAlias, ''), |
542 | 544 | ] |
543 | 545 | ) |
544 | 546 | |
545 | member_ids = { | |
546 | state_key: event_id | |
547 | for (t, state_key), event_id in state_ids.iteritems() | |
548 | if t == EventTypes.Member | |
549 | } | |
547 | # this is heavily cached, thus: fast. | |
548 | details = yield self.store.get_room_summary(room_id) | |
549 | ||
550 | 550 | name_id = state_ids.get((EventTypes.Name, '')) |
551 | 551 | canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, '')) |
552 | 552 | |
553 | 553 | summary = {} |
554 | ||
555 | # FIXME: it feels very heavy to load up every single membership event | |
556 | # just to calculate the counts. | |
557 | member_events = yield self.store.get_events(member_ids.values()) | |
558 | ||
559 | joined_user_ids = [] | |
560 | invited_user_ids = [] | |
561 | ||
562 | for ev in member_events.values(): | |
563 | if ev.content.get("membership") == Membership.JOIN: | |
564 | joined_user_ids.append(ev.state_key) | |
565 | elif ev.content.get("membership") == Membership.INVITE: | |
566 | invited_user_ids.append(ev.state_key) | |
554 | empty_ms = MemberSummary([], 0) | |
567 | 555 | |
568 | 556 | # TODO: only send these when they change. |
569 | summary["m.joined_member_count"] = len(joined_user_ids) | |
570 | summary["m.invited_member_count"] = len(invited_user_ids) | |
571 | ||
572 | if name_id or canonical_alias_id: | |
573 | defer.returnValue(summary) | |
574 | ||
575 | # FIXME: order by stream ordering, not alphabetic | |
576 | ||
557 | summary["m.joined_member_count"] = ( | |
558 | details.get(Membership.JOIN, empty_ms).count | |
559 | ) | |
560 | summary["m.invited_member_count"] = ( | |
561 | details.get(Membership.INVITE, empty_ms).count | |
562 | ) | |
563 | ||
564 | # if the room has a name or canonical_alias set, we can skip | |
565 | # calculating heroes. we assume that if the event has contents, it'll | |
566 | # be a valid name or canonical_alias - i.e. we're checking that they | |
567 | # haven't been "deleted" by blatting {} over the top. | |
568 | if name_id: | |
569 | name = yield self.store.get_event(name_id, allow_none=False) | |
570 | if name and name.content: | |
571 | defer.returnValue(summary) | |
572 | ||
573 | if canonical_alias_id: | |
574 | canonical_alias = yield self.store.get_event( | |
575 | canonical_alias_id, allow_none=False, | |
576 | ) | |
577 | if canonical_alias and canonical_alias.content: | |
578 | defer.returnValue(summary) | |
579 | ||
580 | joined_user_ids = [ | |
581 | r[0] for r in details.get(Membership.JOIN, empty_ms).members | |
582 | ] | |
583 | invited_user_ids = [ | |
584 | r[0] for r in details.get(Membership.INVITE, empty_ms).members | |
585 | ] | |
586 | gone_user_ids = ( | |
587 | [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] + | |
588 | [r[0] for r in details.get(Membership.BAN, empty_ms).members] | |
589 | ) | |
590 | ||
591 | # FIXME: only build up a member_ids list for our heroes | |
592 | member_ids = {} | |
593 | for membership in ( | |
594 | Membership.JOIN, | |
595 | Membership.INVITE, | |
596 | Membership.LEAVE, | |
597 | Membership.BAN | |
598 | ): | |
599 | for user_id, event_id in details.get(membership, empty_ms).members: | |
600 | member_ids[user_id] = event_id | |
601 | ||
602 | # FIXME: order by stream ordering rather than as returned by SQL | |
577 | 603 | me = sync_config.user.to_string() |
578 | 604 | if (joined_user_ids or invited_user_ids): |
579 | 605 | summary['m.heroes'] = sorted( |
585 | 611 | )[0:5] |
586 | 612 | else: |
587 | 613 | summary['m.heroes'] = sorted( |
588 | [user_id for user_id in member_ids.keys() if user_id != me] | |
614 | [ | |
615 | user_id | |
616 | for user_id in gone_user_ids | |
617 | if user_id != me | |
618 | ] | |
589 | 619 | )[0:5] |
590 | 620 | |
591 | 621 | if not sync_config.filter_collection.lazy_load_members(): |
718 | 748 | lazy_load_members=lazy_load_members, |
719 | 749 | ) |
720 | 750 | elif batch.limited: |
751 | state_at_timeline_start = yield self.store.get_state_ids_for_event( | |
752 | batch.events[0].event_id, types=types, | |
753 | filtered_types=filtered_types, | |
754 | ) | |
755 | ||
756 | # for now, we disable LL for gappy syncs - see | |
757 | # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 | |
758 | # N.B. this slows down incr syncs as we are now processing way | |
759 | # more state in the server than if we were LLing. | |
760 | # | |
761 | # We still have to filter timeline_start to LL entries (above) in order | |
762 | # for _calculate_state's LL logic to work, as we have to include LL | |
763 | # members for timeline senders in case they weren't loaded in the initial | |
764 | # sync. We do this (counterintuitively) by filtering timeline_start | |
765 | # members to just be ones which were timeline senders, which then ensures | |
766 | # all of the rest get included in the state block (if we need to know | |
767 | # about them). | |
768 | types = None | |
769 | filtered_types = None | |
770 | ||
721 | 771 | state_at_previous_sync = yield self.get_state_at( |
722 | 772 | room_id, stream_position=since_token, types=types, |
723 | 773 | filtered_types=filtered_types, |
725 | 775 | |
726 | 776 | current_state_ids = yield self.store.get_state_ids_for_event( |
727 | 777 | batch.events[-1].event_id, types=types, |
728 | filtered_types=filtered_types, | |
729 | ) | |
730 | ||
731 | state_at_timeline_start = yield self.store.get_state_ids_for_event( | |
732 | batch.events[0].event_id, types=types, | |
733 | 778 | filtered_types=filtered_types, |
734 | 779 | ) |
735 | 780 | |
738 | 783 | timeline_start=state_at_timeline_start, |
739 | 784 | previous=state_at_previous_sync, |
740 | 785 | current=current_state_ids, |
786 | # we have to include LL members in case LL initial sync missed them | |
741 | 787 | lazy_load_members=lazy_load_members, |
742 | 788 | ) |
743 | 789 | else: |
744 | 790 | state_ids = {} |
745 | 791 | if lazy_load_members: |
746 | 792 | if types: |
747 | # We're returning an incremental sync, with no "gap" since | |
748 | # the previous sync, so normally there would be no state to return | |
793 | # We're returning an incremental sync, with no | |
794 | # "gap" since the previous sync, so normally there would be | |
795 | # no state to return. | |
749 | 796 | # But we're lazy-loading, so the client might need some more |
750 | 797 | # member events to understand the events in this timeline. |
751 | 798 | # So we fish out all the member events corresponding to the |
773 | 820 | logger.debug("filtering state from %r...", state_ids) |
774 | 821 | state_ids = { |
775 | 822 | t: event_id |
776 | for t, event_id in state_ids.iteritems() | |
823 | for t, event_id in iteritems(state_ids) | |
777 | 824 | if cache.get(t[1]) != event_id |
778 | 825 | } |
779 | 826 | logger.debug("...to %r", state_ids) |
1574 | 1621 | newly_joined_room=newly_joined, |
1575 | 1622 | ) |
1576 | 1623 | |
1624 | # When we join the room (or the client requests full_state), we should | |
1625 | # send down any existing tags. Usually the user won't have tags in a | |
1626 | # newly joined room, unless either a) they've joined before or b) the | |
1627 | # tag was added by synapse e.g. for server notice rooms. | |
1628 | if full_state: | |
1629 | user_id = sync_result_builder.sync_config.user.to_string() | |
1630 | tags = yield self.store.get_tags_for_room(user_id, room_id) | |
1631 | ||
1632 | # If there aren't any tags, don't send the empty tags list down | |
1633 | # sync | |
1634 | if not tags: | |
1635 | tags = None | |
1636 | ||
1577 | 1637 | account_data_events = [] |
1578 | 1638 | if tags is not None: |
1579 | 1639 | account_data_events.append({ |
1602 | 1662 | ) |
1603 | 1663 | |
1604 | 1664 | summary = {} |
1665 | ||
1666 | # we include a summary in room responses when we're lazy loading | |
1667 | # members (as the client otherwise doesn't have enough info to form | |
1668 | # the name itself). | |
1605 | 1669 | if ( |
1606 | 1670 | sync_config.filter_collection.lazy_load_members() and |
1607 | 1671 | ( |
1672 | # we recalculate the summary: | |
1673 | # if there are membership changes in the timeline, or | |
1674 | # if membership has changed during a gappy sync, or | |
1675 | # if this is an initial sync. | |
1608 | 1676 | any(ev.type == EventTypes.Member for ev in batch.events) or |
1677 | ( | |
1678 | # XXX: this may include false positives in the form of LL | |
1679 | # members which have snuck into state | |
1680 | batch.limited and | |
1681 | any(t == EventTypes.Member for (t, k) in state) | |
1682 | ) or | |
1609 | 1683 | since_token is None |
1610 | 1684 | ) |
1611 | 1685 | ): |
1635 | 1709 | unread_notifications["highlight_count"] = notifs["highlight_count"] |
1636 | 1710 | |
1637 | 1711 | sync_result_builder.joined.append(room_sync) |
1712 | ||
1713 | if batch.limited and since_token: | |
1714 | user_id = sync_result_builder.sync_config.user.to_string() | |
1715 | logger.info( | |
1716 | "Incremental gappy sync of %s for user %s with %d state events" % ( | |
1717 | room_id, | |
1718 | user_id, | |
1719 | len(state), | |
1720 | ) | |
1721 | ) | |
1638 | 1722 | elif room_builder.rtype == "archived": |
1639 | 1723 | room_sync = ArchivedSyncResult( |
1640 | 1724 | room_id=room_id, |
1728 | 1812 | event_id_to_key = { |
1729 | 1813 | e: key |
1730 | 1814 | for key, e in itertools.chain( |
1731 | timeline_contains.items(), | |
1732 | previous.items(), | |
1733 | timeline_start.items(), | |
1734 | current.items(), | |
1815 | iteritems(timeline_contains), | |
1816 | iteritems(previous), | |
1817 | iteritems(timeline_start), | |
1818 | iteritems(current), | |
1735 | 1819 | ) |
1736 | 1820 | } |
1737 | 1821 | |
1738 | c_ids = set(e for e in current.values()) | |
1739 | ts_ids = set(e for e in timeline_start.values()) | |
1740 | p_ids = set(e for e in previous.values()) | |
1741 | tc_ids = set(e for e in timeline_contains.values()) | |
1822 | c_ids = set(e for e in itervalues(current)) | |
1823 | ts_ids = set(e for e in itervalues(timeline_start)) | |
1824 | p_ids = set(e for e in itervalues(previous)) | |
1825 | tc_ids = set(e for e in itervalues(timeline_contains)) | |
1742 | 1826 | |
1743 | 1827 | # If we are lazyloading room members, we explicitly add the membership events |
1744 | 1828 | # for the senders in the timeline into the state block returned by /sync, |
1752 | 1836 | |
1753 | 1837 | if lazy_load_members: |
1754 | 1838 | p_ids.difference_update( |
1755 | e for t, e in timeline_start.iteritems() | |
1839 | e for t, e in iteritems(timeline_start) | |
1756 | 1840 | if t[0] == EventTypes.Member |
1757 | 1841 | ) |
1758 | 1842 |
37 | 37 | return value |
38 | 38 | |
39 | 39 | |
40 | ACCESS_TOKEN_RE = re.compile(br'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') | |
40 | ACCESS_TOKEN_RE = re.compile(r'(\?.*access(_|%5[Ff])token=)[^&]*(.*)$') | |
41 | 41 | |
42 | 42 | |
43 | 43 | def redact_uri(uri): |
44 | 44 | """Strips access tokens from the uri replaces with <redacted>""" |
45 | 45 | return ACCESS_TOKEN_RE.sub( |
46 | br'\1<redacted>\3', | |
46 | r'\1<redacted>\3', | |
47 | 47 | uri |
48 | 48 | ) |
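# Illustrative values: redact_uri("/sync?access_token=abc123&since=s1")
# -> "/sync?access_token=<redacted>&since=s1"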
12 | 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | 13 | # See the License for the specific language governing permissions and |
14 | 14 | # limitations under the License. |
15 | ||
15 | 16 | import logging |
16 | import urllib | |
17 | ||
18 | from six import StringIO | |
19 | ||
17 | ||
18 | from six import text_type | |
19 | from six.moves import urllib | |
20 | ||
21 | import treq | |
20 | 22 | from canonicaljson import encode_canonical_json, json |
21 | 23 | from prometheus_client import Counter |
22 | 24 | |
23 | 25 | from OpenSSL import SSL |
24 | 26 | from OpenSSL.SSL import VERIFY_NONE |
25 | from twisted.internet import defer, protocol, reactor, ssl, task | |
27 | from twisted.internet import defer, protocol, reactor, ssl | |
26 | 28 | from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS |
27 | 29 | from twisted.web._newclient import ResponseDone |
28 | 30 | from twisted.web.client import ( |
29 | 31 | Agent, |
30 | 32 | BrowserLikeRedirectAgent, |
31 | 33 | ContentDecoderAgent, |
32 | FileBodyProducer as TwistedFileBodyProducer, | |
33 | 34 | GzipDecoder, |
34 | 35 | HTTPConnectionPool, |
35 | 36 | PartialDownloadError, |
82 | 83 | if hs.config.user_agent_suffix: |
83 | 84 | self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix,) |
84 | 85 | |
85 | @defer.inlineCallbacks | |
86 | def request(self, method, uri, *args, **kwargs): | |
86 | self.user_agent = self.user_agent.encode('ascii') | |
87 | ||
88 | @defer.inlineCallbacks | |
89 | def request(self, method, uri, data=b'', headers=None): | |
87 | 90 | # A small wrapper around self.agent.request() so we can easily attach |
88 | 91 | # counters to it |
89 | 92 | outgoing_requests_counter.labels(method).inc() |
92 | 95 | logger.info("Sending request %s %s", method, redact_uri(uri)) |
93 | 96 | |
94 | 97 | try: |
95 | request_deferred = self.agent.request( | |
96 | method, uri, *args, **kwargs | |
98 | request_deferred = treq.request( | |
99 | method, uri, agent=self.agent, data=data, headers=headers | |
97 | 100 | ) |
98 | 101 | add_timeout_to_deferred( |
99 | 102 | request_deferred, 60, self.hs.get_reactor(), |
111 | 114 | incoming_responses_counter.labels(method, "ERR").inc() |
112 | 115 | logger.info( |
113 | 116 | "Error sending request to %s %s: %s %s", |
114 | method, redact_uri(uri), type(e).__name__, e.message | |
117 | method, redact_uri(uri), type(e).__name__, e.args[0] | |
115 | 118 | ) |
116 | 119 | raise |
117 | 120 | |
136 | 139 | # TODO: Do we ever want to log message contents? |
137 | 140 | logger.debug("post_urlencoded_get_json args: %s", args) |
138 | 141 | |
139 | query_bytes = urllib.urlencode(encode_urlencode_args(args), True) | |
142 | query_bytes = urllib.parse.urlencode( | |
143 | encode_urlencode_args(args), True).encode("utf8") | |
140 | 144 | |
141 | 145 | actual_headers = { |
142 | 146 | b"Content-Type": [b"application/x-www-form-urlencoded"], |
147 | 151 | |
148 | 152 | response = yield self.request( |
149 | 153 | "POST", |
150 | uri.encode("ascii"), | |
154 | uri, | |
151 | 155 | headers=Headers(actual_headers), |
152 | bodyProducer=FileBodyProducer(StringIO(query_bytes)) | |
153 | ) | |
154 | ||
155 | body = yield make_deferred_yieldable(readBody(response)) | |
156 | data=query_bytes | |
157 | ) | |
156 | 158 | |
157 | 159 | if 200 <= response.code < 300: |
158 | defer.returnValue(json.loads(body)) | |
160 | body = yield make_deferred_yieldable(treq.json_content(response)) | |
161 | defer.returnValue(body) | |
159 | 162 | else: |
160 | 163 | raise HttpResponseException(response.code, response.phrase, body) |
161 | 164 | |
190 | 193 | |
191 | 194 | response = yield self.request( |
192 | 195 | "POST", |
193 | uri.encode("ascii"), | |
196 | uri, | |
194 | 197 | headers=Headers(actual_headers), |
195 | bodyProducer=FileBodyProducer(StringIO(json_str)) | |
198 | data=json_str | |
196 | 199 | ) |
197 | 200 | |
198 | 201 | body = yield make_deferred_yieldable(readBody(response)) |
247 | 250 | ValueError: if the response was not JSON |
248 | 251 | """ |
249 | 252 | if len(args): |
250 | query_bytes = urllib.urlencode(args, True) | |
253 | query_bytes = urllib.parse.urlencode(args, True) | |
251 | 254 | uri = "%s?%s" % (uri, query_bytes) |
252 | 255 | |
253 | 256 | json_str = encode_canonical_json(json_body) |
261 | 264 | |
262 | 265 | response = yield self.request( |
263 | 266 | "PUT", |
264 | uri.encode("ascii"), | |
267 | uri, | |
265 | 268 | headers=Headers(actual_headers), |
266 | bodyProducer=FileBodyProducer(StringIO(json_str)) | |
269 | data=json_str | |
267 | 270 | ) |
268 | 271 | |
269 | 272 | body = yield make_deferred_yieldable(readBody(response)) |
292 | 295 | HttpResponseException on a non-2xx HTTP response. |
293 | 296 | """ |
294 | 297 | if len(args): |
295 | query_bytes = urllib.urlencode(args, True) | |
298 | query_bytes = urllib.parse.urlencode(args, True) | |
296 | 299 | uri = "%s?%s" % (uri, query_bytes) |
297 | 300 | |
298 | 301 | actual_headers = { |
303 | 306 | |
304 | 307 | response = yield self.request( |
305 | 308 | "GET", |
306 | uri.encode("ascii"), | |
309 | uri, | |
307 | 310 | headers=Headers(actual_headers), |
308 | 311 | ) |
309 | 312 | |
338 | 341 | |
339 | 342 | response = yield self.request( |
340 | 343 | "GET", |
341 | url.encode("ascii"), | |
344 | url, | |
342 | 345 | headers=Headers(actual_headers), |
343 | 346 | ) |
344 | 347 | |
345 | 348 | resp_headers = dict(response.headers.getAllRawHeaders()) |
346 | 349 | |
347 | if 'Content-Length' in resp_headers and resp_headers['Content-Length'] > max_size: | |
350 | if (b'Content-Length' in resp_headers and | |
351 | int(resp_headers[b'Content-Length']) > max_size): | |
348 | 352 | logger.warn("Requested URL is too large > %r bytes" % (self.max_size,)) |
349 | 353 | raise SynapseError( |
350 | 354 | 502, |
377 | 381 | ) |
378 | 382 | |
379 | 383 | defer.returnValue( |
380 | (length, resp_headers, response.request.absoluteURI, response.code), | |
384 | ( | |
385 | length, | |
386 | resp_headers, | |
387 | response.request.absoluteURI.decode('ascii'), | |
388 | response.code, | |
389 | ), | |
381 | 390 | ) |
382 | 391 | |
383 | 392 | |
433 | 442 | |
434 | 443 | @defer.inlineCallbacks |
435 | 444 | def post_urlencoded_get_raw(self, url, args={}): |
436 | query_bytes = urllib.urlencode(encode_urlencode_args(args), True) | |
445 | query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True) | |
437 | 446 | |
438 | 447 | response = yield self.request( |
439 | 448 | "POST", |
440 | url.encode("ascii"), | |
441 | bodyProducer=FileBodyProducer(StringIO(query_bytes)), | |
449 | url, | |
450 | data=query_bytes, | |
442 | 451 | headers=Headers({ |
443 | 452 | b"Content-Type": [b"application/x-www-form-urlencoded"], |
444 | 453 | b"User-Agent": [self.user_agent], |
462 | 471 | def endpointForURI(self, uri): |
463 | 472 | logger.info("Getting endpoint for %s", uri.toBytes()) |
464 | 473 | |
465 | if uri.scheme == "http": | |
474 | if uri.scheme == b"http": | |
466 | 475 | endpoint_factory = HostnameEndpoint |
467 | elif uri.scheme == "https": | |
476 | elif uri.scheme == b"https": | |
468 | 477 | tlsCreator = self.policyForHTTPS.creatorForNetloc(uri.host, uri.port) |
469 | 478 | |
470 | 479 | def endpoint_factory(reactor, host, port, **kw): |
509 | 518 | |
510 | 519 | |
511 | 520 | def encode_urlencode_arg(arg): |
512 | if isinstance(arg, unicode): | |
521 | if isinstance(arg, text_type): | |
513 | 522 | return arg.encode('utf-8') |
514 | 523 | elif isinstance(arg, list): |
515 | 524 | return [encode_urlencode_arg(i) for i in arg] |
541 | 550 | |
542 | 551 | def creatorForNetloc(self, hostname, port): |
543 | 552 | return self |
544 | ||
545 | ||
546 | class FileBodyProducer(TwistedFileBodyProducer): | |
547 | """Workaround for https://twistedmatrix.com/trac/ticket/8473 | |
548 | ||
549 | We override the pauseProducing and resumeProducing methods in twisted's | |
550 | FileBodyProducer so that they do not raise exceptions if the task has | |
551 | already completed. | |
552 | """ | |
553 | ||
554 | def pauseProducing(self): | |
555 | try: | |
556 | super(FileBodyProducer, self).pauseProducing() | |
557 | except task.TaskDone: | |
558 | # task has already completed | |
559 | pass | |
560 | ||
561 | def resumeProducing(self): | |
562 | try: | |
563 | super(FileBodyProducer, self).resumeProducing() | |
564 | except task.NotPaused: | |
565 | # task was not paused (probably because it had already completed) | |
566 | pass |
16 | 16 | import logging |
17 | 17 | import random |
18 | 18 | import sys |
19 | import urllib | |
20 | ||
21 | from six import string_types | |
22 | from six.moves.urllib import parse as urlparse | |
23 | ||
24 | from canonicaljson import encode_canonical_json, json | |
19 | ||
20 | from six import PY3, string_types | |
21 | from six.moves import urllib | |
22 | ||
23 | import treq | |
24 | from canonicaljson import encode_canonical_json | |
25 | 25 | from prometheus_client import Counter |
26 | 26 | from signedjson.sign import sign_json |
27 | 27 | |
28 | from twisted.internet import defer, protocol, reactor | |
28 | from twisted.internet import defer, protocol | |
29 | 29 | from twisted.internet.error import DNSLookupError |
30 | 30 | from twisted.web._newclient import ResponseDone |
31 | from twisted.web.client import Agent, HTTPConnectionPool, readBody | |
31 | from twisted.web.client import Agent, HTTPConnectionPool | |
32 | 32 | from twisted.web.http_headers import Headers |
33 | 33 | |
34 | 34 | import synapse.metrics |
39 | 39 | HttpResponseException, |
40 | 40 | SynapseError, |
41 | 41 | ) |
42 | from synapse.http import cancelled_to_request_timed_out_error | |
43 | 42 | from synapse.http.endpoint import matrix_federation_endpoint |
44 | 43 | from synapse.util import logcontext |
45 | from synapse.util.async_helpers import add_timeout_to_deferred | |
44 | from synapse.util.async_helpers import timeout_no_seriously | |
46 | 45 | from synapse.util.logcontext import make_deferred_yieldable |
46 | from synapse.util.metrics import Measure | |
47 | 47 | |
48 | 48 | logger = logging.getLogger(__name__) |
49 | 49 | outbound_logger = logging.getLogger("synapse.http.outbound") |
57 | 57 | MAX_LONG_RETRIES = 10 |
58 | 58 | MAX_SHORT_RETRIES = 3 |
59 | 59 | |
60 | if PY3: | |
61 | MAXINT = sys.maxsize | |
62 | else: | |
63 | MAXINT = sys.maxint | |
64 | ||
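`sys.maxint` is gone on Python 3 (where ints are unbounded), so the shim above substitutes `sys.maxsize` as an arbitrary large wrap-around point for the transaction-id counter. A sketch of how the counter below uses it:

    import sys

    MAXINT = sys.maxsize  # stands in for Python 2's sys.maxint

    _next_id = 1

    def next_txn_id(method):
        global _next_id
        txn_id = "%s-O-%s" % (method, _next_id)
        _next_id = (_next_id + 1) % (MAXINT - 1)
        return txn_id

    assert next_txn_id("PUT") == "PUT-O-1"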
60 | 65 | |
61 | 66 | class MatrixFederationEndpointFactory(object): |
62 | 67 | def __init__(self, hs): |
68 | self.reactor = hs.get_reactor() | |
63 | 69 | self.tls_client_options_factory = hs.tls_client_options_factory |
64 | 70 | |
65 | 71 | def endpointForURI(self, uri): |
66 | destination = uri.netloc | |
72 | destination = uri.netloc.decode('ascii') | |
67 | 73 | |
68 | 74 | return matrix_federation_endpoint( |
69 | reactor, destination, timeout=10, | |
75 | self.reactor, destination, timeout=10, | |
70 | 76 | tls_client_options_factory=self.tls_client_options_factory |
71 | 77 | ) |
72 | 78 | |
84 | 90 | self.hs = hs |
85 | 91 | self.signing_key = hs.config.signing_key[0] |
86 | 92 | self.server_name = hs.hostname |
93 | reactor = hs.get_reactor() | |
87 | 94 | pool = HTTPConnectionPool(reactor) |
95 | pool.retryAutomatically = False | |
88 | 96 | pool.maxPersistentPerHost = 5 |
89 | 97 | pool.cachedConnectionTimeout = 2 * 60 |
90 | 98 | self.agent = Agent.usingEndpointFactory( |
92 | 100 | ) |
93 | 101 | self.clock = hs.get_clock() |
94 | 102 | self._store = hs.get_datastore() |
95 | self.version_string = hs.version_string | |
103 | self.version_string = hs.version_string.encode('ascii') | |
96 | 104 | self._next_id = 1 |
105 | self.default_timeout = 60 | |
97 | 106 | |
98 | 107 | def _create_url(self, destination, path_bytes, param_bytes, query_bytes): |
99 | return urlparse.urlunparse( | |
100 | ("matrix", destination, path_bytes, param_bytes, query_bytes, "") | |
108 | return urllib.parse.urlunparse( | |
109 | (b"matrix", destination, path_bytes, param_bytes, query_bytes, b"") | |
101 | 110 | ) |
102 | 111 | |
103 | 112 | @defer.inlineCallbacks |
104 | 113 | def _request(self, destination, method, path, |
105 | body_callback, headers_dict={}, param_bytes=b"", | |
106 | query_bytes=b"", retry_on_dns_fail=True, | |
114 | json=None, json_callback=None, | |
115 | param_bytes=b"", | |
116 | query=None, retry_on_dns_fail=True, | |
107 | 117 | timeout=None, long_retries=False, |
108 | 118 | ignore_backoff=False, |
109 | 119 | backoff_on_404=False): |
110 | """ Creates and sends a request to the given server | |
120 | """ | |
121 | Creates and sends a request to the given server. | |
122 | ||
111 | 123 | Args: |
112 | 124 | destination (str): The remote server to send the HTTP request to. |
113 | 125 | method (str): HTTP method |
114 | 126 | path (str): The HTTP path |
127 | json (dict or None): JSON to send in the body. | |
128 | json_callback (func or None): A callback to generate the JSON. | |
129 | query (dict or None): Query arguments. | |
115 | 130 | ignore_backoff (bool): true to ignore the historical backoff data |
116 | 131 | and try the request anyway. |
117 | 132 | backoff_on_404 (bool): Back off if we get a 404 |
131 | 146 | (May also fail with plenty of other Exceptions for things like DNS |
132 | 147 | failures, connection failures, SSL failures.) |
133 | 148 | """ |
149 | if timeout: | |
150 | _sec_timeout = timeout / 1000 | |
151 | else: | |
152 | _sec_timeout = self.default_timeout | |
153 | ||
134 | 154 | if ( |
135 | 155 | self.hs.config.federation_domain_whitelist is not None and |
136 | 156 | destination not in self.hs.config.federation_domain_whitelist |
145 | 165 | ignore_backoff=ignore_backoff, |
146 | 166 | ) |
147 | 167 | |
148 | destination = destination.encode("ascii") | |
168 | headers_dict = {} | |
149 | 169 | path_bytes = path.encode("ascii") |
170 | if query: | |
171 | query_bytes = encode_query_args(query) | |
172 | else: | |
173 | query_bytes = b"" | |
174 | ||
175 | headers_dict = { | |
176 | "User-Agent": [self.version_string], | |
177 | "Host": [destination], | |
178 | } | |
179 | ||
150 | 180 | with limiter: |
151 | headers_dict[b"User-Agent"] = [self.version_string] | |
152 | headers_dict[b"Host"] = [destination] | |
153 | ||
154 | url_bytes = self._create_url( | |
155 | destination, path_bytes, param_bytes, query_bytes | |
156 | ) | |
181 | url = self._create_url( | |
182 | destination.encode("ascii"), path_bytes, param_bytes, query_bytes | |
183 | ).decode('ascii') | |
157 | 184 | |
158 | 185 | txn_id = "%s-O-%s" % (method, self._next_id) |
159 | self._next_id = (self._next_id + 1) % (sys.maxint - 1) | |
160 | ||
161 | outbound_logger.info( | |
162 | "{%s} [%s] Sending request: %s %s", | |
163 | txn_id, destination, method, url_bytes | |
164 | ) | |
186 | self._next_id = (self._next_id + 1) % (MAXINT - 1) | |
165 | 187 | |
166 | 188 | # XXX: Would be much nicer to retry only at the transaction-layer |
167 | 189 | # (once we have reliable transactions in place) |
170 | 192 | else: |
171 | 193 | retries_left = MAX_SHORT_RETRIES |
172 | 194 | |
173 | http_url_bytes = urlparse.urlunparse( | |
174 | ("", "", path_bytes, param_bytes, query_bytes, "") | |
175 | ) | |
195 | http_url = urllib.parse.urlunparse( | |
196 | (b"", b"", path_bytes, param_bytes, query_bytes, b"") | |
197 | ).decode('ascii') | |
176 | 198 | |
177 | 199 | log_result = None |
178 | try: | |
179 | while True: | |
180 | producer = None | |
181 | if body_callback: | |
182 | producer = body_callback(method, http_url_bytes, headers_dict) | |
183 | ||
184 | try: | |
185 | request_deferred = self.agent.request( | |
186 | method, | |
187 | url_bytes, | |
188 | Headers(headers_dict), | |
189 | producer | |
200 | while True: | |
201 | try: | |
202 | if json_callback: | |
203 | json = json_callback() | |
204 | ||
205 | if json: | |
206 | data = encode_canonical_json(json) | |
207 | headers_dict["Content-Type"] = ["application/json"] | |
208 | self.sign_request( | |
209 | destination, method, http_url, headers_dict, json | |
190 | 210 | ) |
191 | add_timeout_to_deferred( | |
192 | request_deferred, | |
193 | timeout / 1000. if timeout else 60, | |
194 | self.hs.get_reactor(), | |
195 | cancelled_to_request_timed_out_error, | |
196 | ) | |
211 | else: | |
212 | data = None | |
213 | self.sign_request(destination, method, http_url, headers_dict) | |
214 | ||
215 | outbound_logger.info( | |
216 | "{%s} [%s] Sending request: %s %s", | |
217 | txn_id, destination, method, url | |
218 | ) | |
219 | ||
220 | request_deferred = treq.request( | |
221 | method, | |
222 | url, | |
223 | headers=Headers(headers_dict), | |
224 | data=data, | |
225 | agent=self.agent, | |
226 | reactor=self.hs.get_reactor(), | |
227 | unbuffered=True | |
228 | ) | |
229 | request_deferred.addTimeout(_sec_timeout, self.hs.get_reactor()) | |
230 | ||
231 | # Sometimes the timeout above doesn't work, so let's hack in yet | |
232 | # another layer of timeouts, in the vain hope that at some point | |
233 | # the world will make sense and this really, really, really | |
234 | # should work. | |
235 | request_deferred = timeout_no_seriously( | |
236 | request_deferred, | |
237 | timeout=_sec_timeout * 2, | |
238 | reactor=self.hs.get_reactor(), | |
239 | ) | |
240 | ||
241 | with Measure(self.clock, "outbound_request"): | |
197 | 242 | response = yield make_deferred_yieldable( |
198 | 243 | request_deferred, |
199 | 244 | ) |
200 | 245 | |
201 | log_result = "%d %s" % (response.code, response.phrase,) | |
202 | break | |
203 | except Exception as e: | |
204 | if not retry_on_dns_fail and isinstance(e, DNSLookupError): | |
205 | logger.warn( | |
206 | "DNS Lookup failed to %s with %s", | |
207 | destination, | |
208 | e | |
209 | ) | |
210 | log_result = "DNS Lookup failed to %s with %s" % ( | |
211 | destination, e | |
212 | ) | |
213 | raise | |
214 | ||
246 | log_result = "%d %s" % (response.code, response.phrase,) | |
247 | break | |
248 | except Exception as e: | |
249 | if not retry_on_dns_fail and isinstance(e, DNSLookupError): | |
215 | 250 | logger.warn( |
216 | "{%s} Sending request failed to %s: %s %s: %s", | |
251 | "DNS Lookup failed to %s with %s", | |
252 | destination, | |
253 | e | |
254 | ) | |
255 | log_result = "DNS Lookup failed to %s with %s" % ( | |
256 | destination, e | |
257 | ) | |
258 | raise | |
259 | ||
260 | logger.warn( | |
261 | "{%s} Sending request failed to %s: %s %s: %s", | |
262 | txn_id, | |
263 | destination, | |
264 | method, | |
265 | url, | |
266 | _flatten_response_never_received(e), | |
267 | ) | |
268 | ||
269 | log_result = _flatten_response_never_received(e) | |
270 | ||
271 | if retries_left and not timeout: | |
272 | if long_retries: | |
273 | delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) | |
274 | delay = min(delay, 60) | |
275 | delay *= random.uniform(0.8, 1.4) | |
276 | else: | |
277 | delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) | |
278 | delay = min(delay, 2) | |
279 | delay *= random.uniform(0.8, 1.4) | |
280 | ||
281 | logger.debug( | |
282 | "{%s} Waiting %s before sending to %s...", | |
217 | 283 | txn_id, |
218 | destination, | |
219 | method, | |
220 | url_bytes, | |
221 | _flatten_response_never_received(e), | |
284 | delay, | |
285 | destination | |
222 | 286 | ) |
223 | 287 | |
224 | log_result = _flatten_response_never_received(e) | |
225 | ||
226 | if retries_left and not timeout: | |
227 | if long_retries: | |
228 | delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) | |
229 | delay = min(delay, 60) | |
230 | delay *= random.uniform(0.8, 1.4) | |
231 | else: | |
232 | delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) | |
233 | delay = min(delay, 2) | |
234 | delay *= random.uniform(0.8, 1.4) | |
235 | ||
236 | yield self.clock.sleep(delay) | |
237 | retries_left -= 1 | |
238 | else: | |
239 | raise | |
240 | finally: | |
241 | outbound_logger.info( | |
242 | "{%s} [%s] Result: %s", | |
243 | txn_id, | |
244 | destination, | |
245 | log_result, | |
246 | ) | |
288 | yield self.clock.sleep(delay) | |
289 | retries_left -= 1 | |
290 | else: | |
291 | raise | |
292 | finally: | |
293 | outbound_logger.info( | |
294 | "{%s} [%s] Result: %s", | |
295 | txn_id, | |
296 | destination, | |
297 | log_result, | |
298 | ) | |
247 | 299 | |
248 | 300 | if 200 <= response.code < 300: |
249 | 301 | pass |
251 | 303 | # :'( |
252 | 304 | # Update transactions table? |
253 | 305 | with logcontext.PreserveLoggingContext(): |
254 | body = yield readBody(response) | |
306 | d = treq.content(response) | |
307 | d.addTimeout(_sec_timeout, self.hs.get_reactor()) | |
308 | body = yield make_deferred_yieldable(d) | |
255 | 309 | raise HttpResponseException( |
256 | 310 | response.code, response.phrase, body |
257 | 311 | ) |
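The retry arithmetic in the loop above is unchanged by this refactor: short retries back off 0.5s, 1s, 2s (capped at 2s) and long retries 4s, 16s, then capped at 60s, each jittered by a uniform 0.8-1.4 factor. A standalone sketch of the schedule:

    import random

    MAX_LONG_RETRIES = 10
    MAX_SHORT_RETRIES = 3

    def retry_delay(retries_left, long_retries):
        if long_retries:
            delay = min(4 ** (MAX_LONG_RETRIES + 1 - retries_left), 60)
        else:
            delay = min(0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left), 2)
        return delay * random.uniform(0.8, 1.4)

    # retries_left counts down from MAX_SHORT_RETRIES: base delays 0.5, 1, 2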
296 | 350 | auth_headers = [] |
297 | 351 | |
298 | 352 | for key, sig in request["signatures"][self.server_name].items(): |
299 | auth_headers.append(bytes( | |
353 | auth_headers.append(( | |
300 | 354 | "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % ( |
301 | 355 | self.server_name, key, sig, |
302 | ) | |
303 | )) | |
356 | )).encode('ascii') | |
357 | ) | |
304 | 358 | |
305 | 359 | headers_dict[b"Authorization"] = auth_headers |
306 | 360 | |
346 | 400 | """ |
347 | 401 | |
348 | 402 | if not json_data_callback: |
349 | def json_data_callback(): | |
350 | return data | |
351 | ||
352 | def body_callback(method, url_bytes, headers_dict): | |
353 | json_data = json_data_callback() | |
354 | self.sign_request( | |
355 | destination, method, url_bytes, headers_dict, json_data | |
356 | ) | |
357 | producer = _JsonProducer(json_data) | |
358 | return producer | |
403 | json_data_callback = lambda: data | |
359 | 404 | |
360 | 405 | response = yield self._request( |
361 | 406 | destination, |
362 | 407 | "PUT", |
363 | 408 | path, |
364 | body_callback=body_callback, | |
365 | headers_dict={"Content-Type": ["application/json"]}, | |
366 | query_bytes=encode_query_args(args), | |
409 | json_callback=json_data_callback, | |
410 | query=args, | |
367 | 411 | long_retries=long_retries, |
368 | 412 | timeout=timeout, |
369 | 413 | ignore_backoff=ignore_backoff, |
375 | 419 | check_content_type_is_json(response.headers) |
376 | 420 | |
377 | 421 | with logcontext.PreserveLoggingContext(): |
378 | body = yield readBody(response) | |
379 | defer.returnValue(json.loads(body)) | |
422 | d = treq.json_content(response) | |
423 | d.addTimeout(self.default_timeout, self.hs.get_reactor()) | |
424 | body = yield make_deferred_yieldable(d) | |
425 | defer.returnValue(body) | |
380 | 426 | |
381 | 427 | @defer.inlineCallbacks |
382 | 428 | def post_json(self, destination, path, data={}, long_retries=False, |
409 | 455 | Fails with ``FederationDeniedError`` if this destination |
410 | 456 | is not on our federation whitelist |
411 | 457 | """ |
412 | ||
413 | def body_callback(method, url_bytes, headers_dict): | |
414 | self.sign_request( | |
415 | destination, method, url_bytes, headers_dict, data | |
416 | ) | |
417 | return _JsonProducer(data) | |
418 | ||
419 | 458 | response = yield self._request( |
420 | 459 | destination, |
421 | 460 | "POST", |
422 | 461 | path, |
423 | query_bytes=encode_query_args(args), | |
424 | body_callback=body_callback, | |
425 | headers_dict={"Content-Type": ["application/json"]}, | |
462 | query=args, | |
463 | json=data, | |
426 | 464 | long_retries=long_retries, |
427 | 465 | timeout=timeout, |
428 | 466 | ignore_backoff=ignore_backoff, |
433 | 471 | check_content_type_is_json(response.headers) |
434 | 472 | |
435 | 473 | with logcontext.PreserveLoggingContext(): |
436 | body = yield readBody(response) | |
437 | ||
438 | defer.returnValue(json.loads(body)) | |
474 | d = treq.json_content(response) | |
475 | if timeout: | |
476 | _sec_timeout = timeout / 1000 | |
477 | else: | |
478 | _sec_timeout = self.default_timeout | |
479 | ||
480 | d.addTimeout(_sec_timeout, self.hs.get_reactor()) | |
481 | body = yield make_deferred_yieldable(d) | |
482 | ||
483 | defer.returnValue(body) | |
439 | 484 | |
440 | 485 | @defer.inlineCallbacks |
441 | 486 | def get_json(self, destination, path, args=None, retry_on_dns_fail=True, |
470 | 515 | |
471 | 516 | logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) |
472 | 517 | |
473 | def body_callback(method, url_bytes, headers_dict): | |
474 | self.sign_request(destination, method, url_bytes, headers_dict) | |
475 | return None | |
476 | ||
477 | 518 | response = yield self._request( |
478 | 519 | destination, |
479 | 520 | "GET", |
480 | 521 | path, |
481 | query_bytes=encode_query_args(args), | |
482 | body_callback=body_callback, | |
522 | query=args, | |
483 | 523 | retry_on_dns_fail=retry_on_dns_fail, |
484 | 524 | timeout=timeout, |
485 | 525 | ignore_backoff=ignore_backoff, |
490 | 530 | check_content_type_is_json(response.headers) |
491 | 531 | |
492 | 532 | with logcontext.PreserveLoggingContext(): |
493 | body = yield readBody(response) | |
494 | ||
495 | defer.returnValue(json.loads(body)) | |
533 | d = treq.json_content(response) | |
534 | d.addTimeout(self.default_timeout, self.hs.get_reactor()) | |
535 | body = yield make_deferred_yieldable(d) | |
536 | ||
537 | defer.returnValue(body) | |
496 | 538 | |
497 | 539 | @defer.inlineCallbacks |
498 | 540 | def delete_json(self, destination, path, long_retries=False, |
522 | 564 | Fails with ``FederationDeniedError`` if this destination |
523 | 565 | is not on our federation whitelist |
524 | 566 | """ |
525 | ||
526 | 567 | response = yield self._request( |
527 | 568 | destination, |
528 | 569 | "DELETE", |
529 | 570 | path, |
530 | query_bytes=encode_query_args(args), | |
531 | headers_dict={"Content-Type": ["application/json"]}, | |
571 | query=args, | |
532 | 572 | long_retries=long_retries, |
533 | 573 | timeout=timeout, |
534 | 574 | ignore_backoff=ignore_backoff, |
539 | 579 | check_content_type_is_json(response.headers) |
540 | 580 | |
541 | 581 | with logcontext.PreserveLoggingContext(): |
542 | body = yield readBody(response) | |
543 | ||
544 | defer.returnValue(json.loads(body)) | |
582 | d = treq.json_content(response) | |
583 | d.addTimeout(self.default_timeout, self.hs.get_reactor()) | |
584 | body = yield make_deferred_yieldable(d) | |
585 | ||
586 | defer.returnValue(body) | |
545 | 587 | |
546 | 588 | @defer.inlineCallbacks |
547 | 589 | def get_file(self, destination, path, output_stream, args={}, |
568 | 610 | Fails with ``FederationDeniedError`` if this destination |
569 | 611 | is not on our federation whitelist |
570 | 612 | """ |
571 | ||
572 | encoded_args = {} | |
573 | for k, vs in args.items(): | |
574 | if isinstance(vs, string_types): | |
575 | vs = [vs] | |
576 | encoded_args[k] = [v.encode("UTF-8") for v in vs] | |
577 | ||
578 | query_bytes = urllib.urlencode(encoded_args, True) | |
579 | logger.debug("Query bytes: %s Retry DNS: %s", query_bytes, retry_on_dns_fail) | |
580 | ||
581 | def body_callback(method, url_bytes, headers_dict): | |
582 | self.sign_request(destination, method, url_bytes, headers_dict) | |
583 | return None | |
584 | ||
585 | 613 | response = yield self._request( |
586 | 614 | destination, |
587 | 615 | "GET", |
588 | 616 | path, |
589 | query_bytes=query_bytes, | |
590 | body_callback=body_callback, | |
617 | query=args, | |
591 | 618 | retry_on_dns_fail=retry_on_dns_fail, |
592 | 619 | ignore_backoff=ignore_backoff, |
593 | 620 | ) |
596 | 623 | |
597 | 624 | try: |
598 | 625 | with logcontext.PreserveLoggingContext(): |
599 | length = yield _readBodyToFile( | |
600 | response, output_stream, max_size | |
601 | ) | |
626 | d = _readBodyToFile(response, output_stream, max_size) | |
627 | d.addTimeout(self.default_timeout, self.hs.get_reactor()) | |
628 | length = yield make_deferred_yieldable(d) | |
602 | 629 | except Exception: |
603 | 630 | logger.exception("Failed to download body") |
604 | 631 | raise |
638 | 665 | return d |
639 | 666 | |
640 | 667 | |
641 | class _JsonProducer(object): | |
642 | """ Used by the twisted http client to create the HTTP body from json | |
643 | """ | |
644 | def __init__(self, jsn): | |
645 | self.reset(jsn) | |
646 | ||
647 | def reset(self, jsn): | |
648 | self.body = encode_canonical_json(jsn) | |
649 | self.length = len(self.body) | |
650 | ||
651 | def startProducing(self, consumer): | |
652 | consumer.write(self.body) | |
653 | return defer.succeed(None) | |
654 | ||
655 | def pauseProducing(self): | |
656 | pass | |
657 | ||
658 | def stopProducing(self): | |
659 | pass | |
660 | ||
661 | def resumeProducing(self): | |
662 | pass | |
663 | ||
664 | ||
665 | 668 | def _flatten_response_never_received(e): |
666 | 669 | if hasattr(e, "reasons"): |
667 | 670 | reasons = ", ".join( |
692 | 695 | "No Content-Type header" |
693 | 696 | ) |
694 | 697 | |
695 | c_type = c_type[0] # only the first header | |
698 | c_type = c_type[0].decode('ascii') # only the first header | |
696 | 699 | val, options = cgi.parse_header(c_type) |
697 | 700 | if val != "application/json": |
698 | 701 | raise RuntimeError( |
710 | 713 | vs = [vs] |
711 | 714 | encoded_args[k] = [v.encode("UTF-8") for v in vs] |
712 | 715 | |
713 | query_bytes = urllib.urlencode(encoded_args, True) | |
714 | ||
715 | return query_bytes | |
716 | query_bytes = urllib.parse.urlencode(encoded_args, True) | |
717 | ||
718 | return query_bytes.encode('utf8') |
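`encode_query_args` now UTF-8-encodes each value, urlencodes, and returns bytes, matching the byte-wise `urlunparse` in `_create_url`. A runnable sketch (the args dict is illustrative):

    from six import string_types
    from six.moves import urllib

    def encode_query_args(args):
        encoded_args = {}
        for k, vs in args.items():
            if isinstance(vs, string_types):
                vs = [vs]
            encoded_args[k] = [v.encode("UTF-8") for v in vs]
        # the second argument (doseq) expands lists into repeated key=value pairs
        return urllib.parse.urlencode(encoded_args, True).encode('utf8')

    assert encode_query_args({"limit": "10"}) == b'limit=10'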
84 | 84 | return "%s-%i" % (self.method, self.request_seq) |
85 | 85 | |
86 | 86 | def get_redacted_uri(self): |
87 | return redact_uri(self.uri) | |
87 | uri = self.uri | |
88 | if isinstance(uri, bytes): | |
89 | uri = self.uri.decode('ascii') | |
90 | return redact_uri(uri) | |
88 | 91 | |
89 | 92 | def get_user_agent(self): |
90 | 93 | return self.requestHeaders.getRawHeaders(b"User-Agent", [None])[-1] |
203 | 206 | self.start_time = time.time() |
204 | 207 | self.request_metrics = RequestMetrics() |
205 | 208 | self.request_metrics.start( |
206 | self.start_time, name=servlet_name, method=self.method, | |
209 | self.start_time, name=servlet_name, method=self.method.decode('ascii'), | |
207 | 210 | ) |
208 | 211 | |
209 | 212 | self.site.access_logger.info( |
210 | 213 | "%s - %s - Received request: %s %s", |
211 | 214 | self.getClientIP(), |
212 | 215 | self.site.site_tag, |
213 | self.method, | |
216 | self.method.decode('ascii'), | |
214 | 217 | self.get_redacted_uri() |
215 | 218 | ) |
216 | 219 |
17 | 17 | import logging |
18 | 18 | import os |
19 | 19 | import platform |
20 | import threading | |
20 | 21 | import time |
22 | ||
23 | import six | |
21 | 24 | |
22 | 25 | import attr |
23 | 26 | from prometheus_client import Counter, Gauge, Histogram |
67 | 70 | return |
68 | 71 | |
69 | 72 | if isinstance(calls, dict): |
70 | for k, v in calls.items(): | |
73 | for k, v in six.iteritems(calls): | |
71 | 74 | g.add_metric(k, v) |
72 | 75 | else: |
73 | 76 | g.add_metric([], calls) |
78 | 81 | self._register() |
79 | 82 | |
80 | 83 | def _register(self): |
84 | if self.name in all_gauges.keys(): | |
85 | logger.warning("%s already registered, reregistering" % (self.name,)) | |
86 | REGISTRY.unregister(all_gauges.pop(self.name)) | |
87 | ||
88 | REGISTRY.register(self) | |
89 | all_gauges[self.name] = self | |
90 | ||
91 | ||
92 | class InFlightGauge(object): | |
93 | """Tracks number of things (e.g. requests, Measure blocks, etc) in flight | |
94 | at any given time. | |
95 | ||
96 | Each InFlightGauge will create a metric called `<name>_total` that counts | |
97 | the number of in flight blocks, as well as a metrics for each item in the | |
98 | given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the | |
99 | callbacks. | |
100 | ||
101 | Args: | |
102 | name (str) | |
103 | desc (str) | |
104 | labels (list[str]) | |
105 | sub_metrics (list[str]): A list of sub metrics that the callbacks | |
106 | will update. | |
107 | """ | |
108 | ||
109 | def __init__(self, name, desc, labels, sub_metrics): | |
110 | self.name = name | |
111 | self.desc = desc | |
112 | self.labels = labels | |
113 | self.sub_metrics = sub_metrics | |
114 | ||
115 | # Create a class which has the sub_metrics values as attributes, which | |
116 | # default to 0 on initialization. Used to pass to registered callbacks. | |
117 | self._metrics_class = attr.make_class( | |
118 | "_MetricsEntry", | |
119 | attrs={x: attr.ib(0) for x in sub_metrics}, | |
120 | slots=True, | |
121 | ) | |
122 | ||
123 | # Counts number of in flight blocks for a given set of label values | |
124 | self._registrations = {} | |
125 | ||
126 | # Protects access to _registrations | |
127 | self._lock = threading.Lock() | |
128 | ||
129 | self._register_with_collector() | |
130 | ||
131 | def register(self, key, callback): | |
132 | """Registers that we've entered a new block with labels `key`. | |
133 | ||
134 | `callback` gets called each time the metrics are collected. The same | |
135 | value must also be given to `unregister`. | |
136 | ||
137 | `callback` gets called with an object that has an attribute per | |
138 | sub_metric, which should be updated with the necessary values. Note that | |
139 | the metrics object is shared between all callbacks registered with the | |
140 | same key. | |
141 | ||
142 | Note that `callback` may be called on a separate thread. | |
143 | """ | |
144 | with self._lock: | |
145 | self._registrations.setdefault(key, set()).add(callback) | |
146 | ||
147 | def unregister(self, key, callback): | |
148 | """Registers that we've exited a block with labels `key`. | |
149 | """ | |
150 | ||
151 | with self._lock: | |
152 | self._registrations.setdefault(key, set()).discard(callback) | |
153 | ||
154 | def collect(self): | |
155 | """Called by prometheus client when it reads metrics. | |
156 | ||
157 | Note: may be called by a separate thread. | |
158 | """ | |
159 | in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels) | |
160 | ||
161 | metrics_by_key = {} | |
162 | ||
163 | # We copy so that we don't mutate the list while iterating | |
164 | with self._lock: | |
165 | keys = list(self._registrations) | |
166 | ||
167 | for key in keys: | |
168 | with self._lock: | |
169 | callbacks = set(self._registrations[key]) | |
170 | ||
171 | in_flight.add_metric(key, len(callbacks)) | |
172 | ||
173 | metrics = self._metrics_class() | |
174 | metrics_by_key[key] = metrics | |
175 | for callback in callbacks: | |
176 | callback(metrics) | |
177 | ||
178 | yield in_flight | |
179 | ||
180 | for name in self.sub_metrics: | |
181 | gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels) | |
182 | for key, metrics in six.iteritems(metrics_by_key): | |
183 | gauge.add_metric(key, getattr(metrics, name)) | |
184 | yield gauge | |
185 | ||
186 | def _register_with_collector(self): | |
81 | 187 | if self.name in all_gauges.keys(): |
82 | 188 | logger.warning("%s already registered, reregistering" % (self.name,)) |
83 | 189 | REGISTRY.unregister(all_gauges.pop(self.name)) |
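To make the new class concrete, a hypothetical usage sketch (all names invented; assumes `InFlightGauge` above is importable): register a callback when entering a block, update the sub-metric attributes inside it, and unregister on exit.

    in_flight_requests = InFlightGauge(
        "synapse_example_in_flight", "example gauge",
        labels=["method"],
        sub_metrics=["total_time"],
    )

    def on_collect(metrics):
        # called at scrape time with a _MetricsEntry whose attributes
        # default to 0; accumulate this block's contribution
        metrics.total_time += 1.23

    key = ("GET",)
    in_flight_requests.register(key, on_collect)
    # ... do the in-flight work ...
    in_flight_requests.unregister(key, on_collect)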
14 | 14 | # limitations under the License. |
15 | 15 | import logging |
16 | 16 | |
17 | import six | |
18 | ||
17 | 19 | from prometheus_client import Counter |
18 | 20 | |
19 | 21 | from twisted.internet import defer |
24 | 26 | from synapse.util.metrics import Measure |
25 | 27 | |
26 | 28 | from . import push_rule_evaluator, push_tools |
29 | ||
30 | if six.PY3: | |
31 | long = int | |
27 | 32 | |
28 | 33 | logger = logging.getLogger(__name__) |
29 | 34 | |
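The `long = int` alias is the usual Python 3 shim: the separate `long` type was folded into `int`, so aliasing keeps any remaining `long(...)` casts working on both versions. An illustrative sketch (the variable is invented):

    import six

    if six.PY3:
        long = int  # Python 3 unified int and long

    max_stream_ordering = long(0)  # existing long(...) casts still work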
95 | 100 | |
96 | 101 | @defer.inlineCallbacks |
97 | 102 | def on_new_notifications(self, min_stream_ordering, max_stream_ordering): |
98 | self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) | |
103 | self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering or 0) | |
99 | 104 | yield self._process() |
100 | 105 | |
101 | 106 | @defer.inlineCallbacks |
16 | 16 | import email.utils |
17 | 17 | import logging |
18 | 18 | import time |
19 | import urllib | |
20 | 19 | from email.mime.multipart import MIMEMultipart |
21 | 20 | from email.mime.text import MIMEText |
21 | ||
22 | from six.moves import urllib | |
22 | 23 | |
23 | 24 | import bleach |
24 | 25 | import jinja2 |
473 | 474 | # XXX: make r0 once API is stable |
474 | 475 | return "%s_matrix/client/unstable/pushers/remove?%s" % ( |
475 | 476 | self.hs.config.public_baseurl, |
476 | urllib.urlencode(params), | |
477 | urllib.parse.urlencode(params), | |
477 | 478 | ) |
478 | 479 | |
479 | 480 | |
560 | 561 | return "%s_matrix/media/v1/thumbnail/%s?%s%s" % ( |
561 | 562 | config.public_baseurl, |
562 | 563 | serverAndMediaId, |
563 | urllib.urlencode(params), | |
564 | urllib.parse.urlencode(params), | |
564 | 565 | fragment or "", |
565 | 566 | ) |
566 | 567 |
39 | 39 | "pynacl>=1.2.1": ["nacl>=1.2.1", "nacl.bindings"], |
40 | 40 | "service_identity>=1.0.0": ["service_identity>=1.0.0"], |
41 | 41 | "Twisted>=17.1.0": ["twisted>=17.1.0"], |
42 | "treq>=15.1": ["treq>=15.1"], | |
42 | 43 | |
43 | # We use crypto.get_elliptic_curve which is only supported in >=0.15 | |
44 | "pyopenssl>=0.15": ["OpenSSL>=0.15"], | |
44 | # Twisted has required pyopenssl 16.0 since about Twisted 16.6. | |
45 | "pyopenssl>=16.0.0": ["OpenSSL>=16.0.0"], | |
45 | 46 | |
46 | 47 | "pyyaml": ["yaml"], |
47 | 48 | "pyasn1": ["pyasn1"], |
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
14 | 14 | |
15 | import six | |
16 | ||
15 | 17 | from synapse.storage import DataStore |
16 | 18 | from synapse.storage.end_to_end_keys import EndToEndKeyStore |
17 | 19 | from synapse.util.caches.stream_change_cache import StreamChangeCache |
18 | 20 | |
19 | 21 | from ._base import BaseSlavedStore |
20 | 22 | from ._slaved_id_tracker import SlavedIdTracker |
23 | ||
24 | ||
25 | def __func__(inp): | |
26 | if six.PY3: | |
27 | return inp | |
28 | else: | |
29 | return inp.__func__ | |
21 | 30 | |
22 | 31 | |
23 | 32 | class SlavedDeviceStore(BaseSlavedStore): |
37 | 46 | "DeviceListFederationStreamChangeCache", device_list_max, |
38 | 47 | ) |
39 | 48 | |
40 | get_device_stream_token = DataStore.get_device_stream_token.__func__ | |
41 | get_user_whose_devices_changed = DataStore.get_user_whose_devices_changed.__func__ | |
42 | get_devices_by_remote = DataStore.get_devices_by_remote.__func__ | |
43 | _get_devices_by_remote_txn = DataStore._get_devices_by_remote_txn.__func__ | |
44 | _get_e2e_device_keys_txn = DataStore._get_e2e_device_keys_txn.__func__ | |
45 | mark_as_sent_devices_by_remote = DataStore.mark_as_sent_devices_by_remote.__func__ | |
49 | get_device_stream_token = __func__(DataStore.get_device_stream_token) | |
50 | get_user_whose_devices_changed = __func__(DataStore.get_user_whose_devices_changed) | |
51 | get_devices_by_remote = __func__(DataStore.get_devices_by_remote) | |
52 | _get_devices_by_remote_txn = __func__(DataStore._get_devices_by_remote_txn) | |
53 | _get_e2e_device_keys_txn = __func__(DataStore._get_e2e_device_keys_txn) | |
54 | mark_as_sent_devices_by_remote = __func__(DataStore.mark_as_sent_devices_by_remote) | |
46 | 55 | _mark_as_sent_devices_by_remote_txn = ( |
47 | DataStore._mark_as_sent_devices_by_remote_txn.__func__ | |
56 | __func__(DataStore._mark_as_sent_devices_by_remote_txn) | |
48 | 57 | ) |
49 | 58 | count_e2e_one_time_keys = EndToEndKeyStore.__dict__["count_e2e_one_time_keys"] |
50 | 59 |
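The `__func__` helper covers a Python 2/3 difference: on Python 2, `DataStore.get_device_stream_token` is an unbound method whose raw function sits at `.__func__`, while on Python 3 it is already a plain function. A minimal sketch of the same borrowing trick, with invented classes:

    import six

    def __func__(inp):
        if six.PY3:
            return inp
        return inp.__func__

    class Base(object):
        def method(self):
            return "hi"

    class Slave(object):
        # borrow Base.method without inheriting from Base
        method = __func__(Base.method)

    assert Slave().method() == "hi"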
589 | 589 | pending_commands = LaterGauge( |
590 | 590 | "synapse_replication_tcp_protocol_pending_commands", |
591 | 591 | "", |
592 | ["name", "conn_id"], | |
592 | ["name"], | |
593 | 593 | lambda: { |
594 | (p.name, p.conn_id): len(p.pending_commands) for p in connected_connections | |
594 | (p.name,): len(p.pending_commands) for p in connected_connections | |
595 | 595 | }, |
596 | 596 | ) |
597 | 597 | |
606 | 606 | transport_send_buffer = LaterGauge( |
607 | 607 | "synapse_replication_tcp_protocol_transport_send_buffer", |
608 | 608 | "", |
609 | ["name", "conn_id"], | |
609 | ["name"], | |
610 | 610 | lambda: { |
611 | (p.name, p.conn_id): transport_buffer_size(p) for p in connected_connections | |
611 | (p.name,): transport_buffer_size(p) for p in connected_connections | |
612 | 612 | }, |
613 | 613 | ) |
614 | 614 | |
631 | 631 | tcp_transport_kernel_send_buffer = LaterGauge( |
632 | 632 | "synapse_replication_tcp_protocol_transport_kernel_send_buffer", |
633 | 633 | "", |
634 | ["name", "conn_id"], | |
634 | ["name"], | |
635 | 635 | lambda: { |
636 | (p.name, p.conn_id): transport_kernel_read_buffer_size(p, False) | |
636 | (p.name,): transport_kernel_read_buffer_size(p, False) | |
637 | 637 | for p in connected_connections |
638 | 638 | }, |
639 | 639 | ) |
642 | 642 | tcp_transport_kernel_read_buffer = LaterGauge( |
643 | 643 | "synapse_replication_tcp_protocol_transport_kernel_read_buffer", |
644 | 644 | "", |
645 | ["name", "conn_id"], | |
645 | ["name"], | |
646 | 646 | lambda: { |
647 | (p.name, p.conn_id): transport_kernel_read_buffer_size(p, True) | |
647 | (p.name,): transport_kernel_read_buffer_size(p, True) | |
648 | 648 | for p in connected_connections |
649 | 649 | }, |
650 | 650 | ) |
653 | 653 | tcp_inbound_commands = LaterGauge( |
654 | 654 | "synapse_replication_tcp_protocol_inbound_commands", |
655 | 655 | "", |
656 | ["command", "name", "conn_id"], | |
656 | ["command", "name"], | |
657 | 657 | lambda: { |
658 | (k[0], p.name, p.conn_id): count | |
658 | (k[0], p.name,): count | |
659 | 659 | for p in connected_connections |
660 | 660 | for k, count in iteritems(p.inbound_commands_counter) |
661 | 661 | }, |
664 | 664 | tcp_outbound_commands = LaterGauge( |
665 | 665 | "synapse_replication_tcp_protocol_outbound_commands", |
666 | 666 | "", |
667 | ["command", "name", "conn_id"], | |
667 | ["command", "name"], | |
668 | 668 | lambda: { |
669 | (k[0], p.name, p.conn_id): count | |
669 | (k[0], p.name,): count | |
670 | 670 | for p in connected_connections |
671 | 671 | for k, count in iteritems(p.outbound_commands_counter) |
672 | 672 | }, |
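Dropping `conn_id` from the label lists means each gauge is keyed by the connection name alone, so the callbacks now emit one sample per `(name,)` tuple. A runnable sketch of the callback's return shape, with stand-in connection objects:

    class Conn(object):
        def __init__(self, name, pending):
            self.name = name
            self.pending_commands = pending

    connected_connections = [Conn("master", [1, 2]), Conn("client", [])]

    # a LaterGauge callback maps label tuples to values:
    sample = {(p.name,): len(p.pending_commands) for p in connected_connections}
    assert sample == {("master",): 2, ("client",): 0}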
195 | 195 | ) |
196 | 196 | |
197 | 197 | if len(rows) >= MAX_EVENTS_BEHIND: |
198 | raise Exception("stream %s has fallen behined" % (self.NAME)) | |
198 | raise Exception("stream %s has fallen behind" % (self.NAME)) | |
199 | 199 | else: |
200 | 200 | rows = yield self.update_function( |
201 | 201 | from_token, current_token, |
100 | 100 | |
101 | 101 | nonce = self.hs.get_secrets().token_hex(64) |
102 | 102 | self.nonces[nonce] = int(self.reactor.seconds()) |
103 | return (200, {"nonce": nonce.encode('ascii')}) | |
103 | return (200, {"nonce": nonce}) | |
104 | 104 | |
105 | 105 | @defer.inlineCallbacks |
106 | 106 | def on_POST(self, request): |
163 | 163 | key=self.hs.config.registration_shared_secret.encode(), |
164 | 164 | digestmod=hashlib.sha1, |
165 | 165 | ) |
166 | want_mac.update(nonce) | |
166 | want_mac.update(nonce.encode('utf8')) | |
167 | 167 | want_mac.update(b"\x00") |
168 | 168 | want_mac.update(username) |
169 | 169 | want_mac.update(b"\x00") |
172 | 172 | want_mac.update(b"admin" if admin else b"notadmin") |
173 | 173 | want_mac = want_mac.hexdigest() |
174 | 174 | |
175 | if not hmac.compare_digest(want_mac, got_mac.encode('ascii')): | |
175 | if not hmac.compare_digest( | |
176 | want_mac.encode('ascii'), | |
177 | got_mac.encode('ascii') | |
178 | ): | |
176 | 179 | raise SynapseError(403, "HMAC incorrect") |
177 | 180 | |
178 | 181 | # Reuse the parts of RegisterRestServlet to reduce code duplication |
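The server-side MAC above is what a shared-secret registration client must reproduce: an HMAC-SHA1 over the NUL-separated nonce, username, password, and admin flag, compared as ascii-encoded hexdigests. A sketch of the client side (values invented; the hunk elides the password-hashing lines, which are assumed to follow the same pattern as the username):

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, username, password, admin=False):
        mac = hmac.new(key=shared_secret.encode(), digestmod=hashlib.sha1)
        mac.update(nonce.encode('utf8'))
        mac.update(b"\x00")
        mac.update(username)
        mac.update(b"\x00")
        mac.update(password)
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()

    # e.g. registration_mac("secret", "abc123", b"alice", b"wonderland")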
44 | 44 | is_guest = requester.is_guest |
45 | 45 | room_id = None |
46 | 46 | if is_guest: |
47 | if "room_id" not in request.args: | |
47 | if b"room_id" not in request.args: | |
48 | 48 | raise SynapseError(400, "Guest users must specify room_id param") |
49 | if "room_id" in request.args: | |
50 | room_id = request.args["room_id"][0] | |
49 | if b"room_id" in request.args: | |
50 | room_id = request.args[b"room_id"][0].decode('ascii') | |
51 | 51 | |
52 | 52 | pagin_config = PaginationConfig.from_request(request) |
53 | 53 | timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS |
54 | if "timeout" in request.args: | |
54 | if b"timeout" in request.args: | |
55 | 55 | try: |
56 | timeout = int(request.args["timeout"][0]) | |
56 | timeout = int(request.args[b"timeout"][0]) | |
57 | 57 | except ValueError: |
58 | 58 | raise SynapseError(400, "timeout must be in milliseconds.") |
59 | 59 | |
60 | as_client_event = "raw" not in request.args | |
60 | as_client_event = b"raw" not in request.args | |
61 | 61 | |
62 | 62 | chunk = yield self.event_stream_handler.get_stream( |
63 | 63 | requester.user.to_string(), |
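On Python 3, Twisted's `request.args` maps byte-string keys to lists of byte-string values, hence the `b"..."` keys and the explicit `.decode('ascii')` above. A sketch of the lookup pattern, with a plain dict standing in for a real Request:

    args = {b"room_id": [b"!abc:example.com"], b"timeout": [b"30000"]}

    room_id = None
    if b"room_id" in args:
        room_id = args[b"room_id"][0].decode('ascii')

    timeout = 30000
    if b"timeout" in args:
        timeout = int(args[b"timeout"][0])  # int() accepts ascii digit bytes

    assert room_id == "!abc:example.com" and timeout == 30000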
31 | 31 | @defer.inlineCallbacks |
32 | 32 | def on_GET(self, request): |
33 | 33 | requester = yield self.auth.get_user_by_req(request) |
34 | as_client_event = "raw" not in request.args | |
34 | as_client_event = b"raw" not in request.args | |
35 | 35 | pagination_config = PaginationConfig.from_request(request) |
36 | 36 | include_archived = parse_boolean(request, "archived", default=False) |
37 | 37 | content = yield self.initial_sync_handler.snapshot_all_rooms( |
13 | 13 | # limitations under the License. |
14 | 14 | |
15 | 15 | import logging |
16 | import urllib | |
17 | 16 | import xml.etree.ElementTree as ET |
18 | 17 | |
19 | from six.moves.urllib import parse as urlparse | |
18 | from six.moves import urllib | |
20 | 19 | |
21 | 20 | from canonicaljson import json |
22 | 21 | from saml2 import BINDING_HTTP_POST, config |
133 | 132 | LoginRestServlet.SAML2_TYPE): |
134 | 133 | relay_state = "" |
135 | 134 | if "relay_state" in login_submission: |
136 | relay_state = "&RelayState=" + urllib.quote( | |
135 | relay_state = "&RelayState=" + urllib.parse.quote( | |
137 | 136 | login_submission["relay_state"]) |
138 | 137 | result = { |
139 | 138 | "uri": "%s%s" % (self.idp_redirect_url, relay_state) |
365 | 364 | (user_id, token) = yield handler.register_saml2(username) |
366 | 365 | # Forward to the RelayState callback along with ava |
367 | 366 | if 'RelayState' in request.args: |
368 | request.redirect(urllib.unquote( | |
367 | request.redirect(urllib.parse.unquote( | |
369 | 368 | request.args['RelayState'][0]) + |
370 | 369 | '?status=authenticated&access_token=' + |
371 | 370 | token + '&user_id=' + user_id + '&ava=' + |
376 | 375 | "user_id": user_id, "token": token, |
377 | 376 | "ava": saml2_auth.ava})) |
378 | 377 | elif 'RelayState' in request.args: |
379 | request.redirect(urllib.unquote( | |
378 | request.redirect(urllib.parse.unquote( | |
380 | 379 | request.args['RelayState'][0]) + |
381 | 380 | '?status=not_authenticated') |
382 | 381 | finish_request(request) |
389 | 388 | |
390 | 389 | def __init__(self, hs): |
391 | 390 | super(CasRedirectServlet, self).__init__(hs) |
392 | self.cas_server_url = hs.config.cas_server_url | |
393 | self.cas_service_url = hs.config.cas_service_url | |
391 | self.cas_server_url = hs.config.cas_server_url.encode('ascii') | |
392 | self.cas_service_url = hs.config.cas_service_url.encode('ascii') | |
394 | 393 | |
395 | 394 | def on_GET(self, request): |
396 | 395 | args = request.args |
397 | if "redirectUrl" not in args: | |
396 | if b"redirectUrl" not in args: | |
398 | 397 | return (400, "Redirect URL not specified for CAS auth") |
399 | client_redirect_url_param = urllib.urlencode({ | |
400 | "redirectUrl": args["redirectUrl"][0] | |
401 | }) | |
402 | hs_redirect_url = self.cas_service_url + "/_matrix/client/api/v1/login/cas/ticket" | |
403 | service_param = urllib.urlencode({ | |
404 | "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param) | |
405 | }) | |
406 | request.redirect("%s/login?%s" % (self.cas_server_url, service_param)) | |
398 | client_redirect_url_param = urllib.parse.urlencode({ | |
399 | b"redirectUrl": args[b"redirectUrl"][0] | |
400 | }).encode('ascii') | |
401 | hs_redirect_url = (self.cas_service_url + | |
402 | b"/_matrix/client/api/v1/login/cas/ticket") | |
403 | service_param = urllib.parse.urlencode({ | |
404 | b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param) | |
405 | }).encode('ascii') | |
406 | request.redirect(b"%s/login?%s" % (self.cas_server_url, service_param)) | |
407 | 407 | finish_request(request) |
408 | 408 | |
409 | 409 | |
421 | 421 | |
422 | 422 | @defer.inlineCallbacks |
423 | 423 | def on_GET(self, request): |
424 | client_redirect_url = request.args["redirectUrl"][0] | |
424 | client_redirect_url = request.args[b"redirectUrl"][0] | |
425 | 425 | http_client = self.hs.get_simple_http_client() |
426 | 426 | uri = self.cas_server_url + "/proxyValidate" |
427 | 427 | args = { |
428 | "ticket": request.args["ticket"], | |
428 | "ticket": request.args[b"ticket"][0].decode('ascii'), | |
429 | 429 | "service": self.cas_service_url |
430 | 430 | } |
431 | 431 | try: |
470 | 470 | finish_request(request) |
471 | 471 | |
472 | 472 | def add_login_token_to_redirect_url(self, url, token): |
473 | url_parts = list(urlparse.urlparse(url)) | |
474 | query = dict(urlparse.parse_qsl(url_parts[4])) | |
473 | url_parts = list(urllib.parse.urlparse(url)) | |
474 | query = dict(urllib.parse.parse_qsl(url_parts[4])) | |
475 | 475 | query.update({"loginToken": token}) |
476 | url_parts[4] = urllib.urlencode(query) | |
477 | return urlparse.urlunparse(url_parts) | |
476 | url_parts[4] = urllib.parse.urlencode(query).encode('ascii') | |
477 | return urllib.parse.urlunparse(url_parts) | |
478 | 478 | |
479 | 479 | def parse_cas_response(self, cas_response_body): |
480 | 480 | user = None |
45 | 45 | try: |
46 | 46 | priority_class = _priority_class_from_spec(spec) |
47 | 47 | except InvalidRuleException as e: |
48 | raise SynapseError(400, e.message) | |
48 | raise SynapseError(400, str(e)) | |
49 | 49 | |
50 | 50 | requester = yield self.auth.get_user_by_req(request) |
51 | 51 | |
72 | 72 | content, |
73 | 73 | ) |
74 | 74 | except InvalidRuleException as e: |
75 | raise SynapseError(400, e.message) | |
75 | raise SynapseError(400, str(e)) | |
76 | 76 | |
77 | 77 | before = parse_string(request, "before") |
78 | 78 | if before: |
94 | 94 | ) |
95 | 95 | self.notify_user(user_id) |
96 | 96 | except InconsistentRuleException as e: |
97 | raise SynapseError(400, e.message) | |
97 | raise SynapseError(400, str(e)) | |
98 | 98 | except RuleNotFoundException as e: |
99 | raise SynapseError(400, e.message) | |
99 | raise SynapseError(400, str(e)) | |
100 | 100 | |
101 | 101 | defer.returnValue((200, {})) |
102 | 102 | |
141 | 141 | PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR |
142 | 142 | ) |
143 | 143 | |
144 | if path[0] == '': | |
144 | if path[0] == b'': | |
145 | 145 | defer.returnValue((200, rules)) |
146 | elif path[0] == 'global': | |
147 | path = path[1:] | |
146 | elif path[0] == b'global': | |
147 | path = [x.decode('ascii') for x in path[1:]] | |
148 | 148 | result = _filter_ruleset_with_path(rules['global'], path) |
149 | 149 | defer.returnValue((200, result)) |
150 | 150 | else: |
191 | 191 | def _rule_spec_from_path(path): |
192 | 192 | if len(path) < 2: |
193 | 193 | raise UnrecognizedRequestError() |
194 | if path[0] != 'pushrules': | |
195 | raise UnrecognizedRequestError() | |
196 | ||
197 | scope = path[1] | |
194 | if path[0] != b'pushrules': | |
195 | raise UnrecognizedRequestError() | |
196 | ||
197 | scope = path[1].decode('ascii') | |
198 | 198 | path = path[2:] |
199 | 199 | if scope != 'global': |
200 | 200 | raise UnrecognizedRequestError() |
202 | 202 | if len(path) == 0: |
203 | 203 | raise UnrecognizedRequestError() |
204 | 204 | |
205 | template = path[0] | |
205 | template = path[0].decode('ascii') | |
206 | 206 | path = path[1:] |
207 | 207 | |
208 | 208 | if len(path) == 0 or len(path[0]) == 0: |
209 | 209 | raise UnrecognizedRequestError() |
210 | 210 | |
211 | rule_id = path[0] | |
211 | rule_id = path[0].decode('ascii') | |
212 | 212 | |
213 | 213 | spec = { |
214 | 214 | 'scope': scope, |
219 | 219 | path = path[1:] |
220 | 220 | |
221 | 221 | if len(path) > 0 and len(path[0]) > 0: |
222 | spec['attr'] = path[0] | |
222 | spec['attr'] = path[0].decode('ascii') | |
223 | 223 | |
224 | 224 | return spec |
225 | 225 |
58 | 58 | ] |
59 | 59 | |
60 | 60 | for p in pushers: |
61 | for k, v in p.items(): | |
61 | for k, v in list(p.items()): | |
62 | 62 | if k not in allowed_keys: |
63 | 63 | del p[k] |
64 | 64 | |
125 | 125 | profile_tag=content.get('profile_tag', ""), |
126 | 126 | ) |
127 | 127 | except PusherConfigException as pce: |
128 | raise SynapseError(400, "Config Error: " + pce.message, | |
128 | raise SynapseError(400, "Config Error: " + str(pce), | |
129 | 129 | errcode=Codes.MISSING_PARAM) |
130 | 130 | |
131 | 131 | self.notifier.on_new_replication_data() |
206 | 206 | "sender": requester.user.to_string(), |
207 | 207 | } |
208 | 208 | |
209 | if 'ts' in request.args and requester.app_service: | |
209 | if b'ts' in request.args and requester.app_service: | |
210 | 210 | event_dict['origin_server_ts'] = parse_integer(request, "ts", 0) |
211 | 211 | |
212 | 212 | event = yield self.event_creation_hander.create_and_send_nonmember_event( |
254 | 254 | if RoomID.is_valid(room_identifier): |
255 | 255 | room_id = room_identifier |
256 | 256 | try: |
257 | remote_room_hosts = request.args["server_name"] | |
257 | remote_room_hosts = [ | |
258 | x.decode('ascii') for x in request.args[b"server_name"] | |
259 | ] | |
258 | 260 | except Exception: |
259 | 261 | remote_room_hosts = None |
260 | 262 | elif RoomAlias.is_valid(room_identifier): |
460 | 462 | pagination_config = PaginationConfig.from_request( |
461 | 463 | request, default_limit=10, |
462 | 464 | ) |
463 | as_client_event = "raw" not in request.args | |
464 | filter_bytes = parse_string(request, "filter") | |
465 | as_client_event = b"raw" not in request.args | |
466 | filter_bytes = parse_string(request, b"filter", encoding=None) | |
465 | 467 | if filter_bytes: |
466 | filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") | |
468 | filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) | |
467 | 469 | event_filter = Filter(json.loads(filter_json)) |
468 | 470 | else: |
469 | 471 | event_filter = None |
559 | 561 | # picking the API shape for symmetry with /messages |
560 | 562 | filter_bytes = parse_string(request, "filter") |
561 | 563 | if filter_bytes: |
562 | filter_json = urlparse.unquote(filter_bytes).decode("UTF-8") | |
564 | filter_json = urlparse.unquote(filter_bytes) | |
563 | 565 | event_filter = Filter(json.loads(filter_json)) |
564 | 566 | else: |
565 | 567 | event_filter = None |
41 | 41 | expiry = (self.hs.get_clock().time_msec() + userLifetime) / 1000 |
42 | 42 | username = "%d:%s" % (expiry, requester.user.to_string()) |
43 | 43 | |
44 | mac = hmac.new(turnSecret, msg=username, digestmod=hashlib.sha1) | |
44 | mac = hmac.new( | |
45 | turnSecret.encode(), | |
46 | msg=username.encode(), | |
47 | digestmod=hashlib.sha1 | |
48 | ) | |
45 | 49 | # We need to use standard padded base64 encoding here |
46 | 50 | # encode_base64 because we need to add the standard padding to get the |
47 | 51 | # same result as the TURN server. |
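The TURN change applies the same bytes discipline to `hmac.new`, which on Python 3 rejects text keys and messages. Together with the standard padded base64 noted in the comment, credential generation looks roughly like this (secret and username invented):

    import base64
    import hashlib
    import hmac

    turnSecret = "shared-turn-secret"
    username = "1530000000:@alice:example.com"

    mac = hmac.new(turnSecret.encode(), msg=username.encode(), digestmod=hashlib.sha1)
    # standard padded base64, so the result matches what the TURN server computes
    password = base64.b64encode(mac.digest()).decode('ascii')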
52 | 52 | |
53 | 53 | if not check_3pid_allowed(self.hs, "email", body['email']): |
54 | 54 | raise SynapseError( |
55 | 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, | |
55 | 403, | |
56 | "Your email domain is not authorized on this server", | |
57 | Codes.THREEPID_DENIED, | |
56 | 58 | ) |
57 | 59 | |
58 | 60 | existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( |
88 | 90 | |
89 | 91 | if not check_3pid_allowed(self.hs, "msisdn", msisdn): |
90 | 92 | raise SynapseError( |
91 | 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, | |
93 | 403, | |
94 | "Account phone numbers are not authorized on this server", | |
95 | Codes.THREEPID_DENIED, | |
92 | 96 | ) |
93 | 97 | |
94 | 98 | existingUid = yield self.datastore.get_user_id_by_threepid( |
240 | 244 | |
241 | 245 | if not check_3pid_allowed(self.hs, "email", body['email']): |
242 | 246 | raise SynapseError( |
243 | 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, | |
247 | 403, | |
248 | "Your email domain is not authorized on this server", | |
249 | Codes.THREEPID_DENIED, | |
244 | 250 | ) |
245 | 251 | |
246 | 252 | existingUid = yield self.datastore.get_user_id_by_threepid( |
275 | 281 | |
276 | 282 | if not check_3pid_allowed(self.hs, "msisdn", msisdn): |
277 | 283 | raise SynapseError( |
278 | 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, | |
284 | 403, | |
285 | "Account phone numbers are not authorized on this server", | |
286 | Codes.THREEPID_DENIED, | |
279 | 287 | ) |
280 | 288 | |
281 | 289 | existingUid = yield self.datastore.get_user_id_by_threepid( |
74 | 74 | |
75 | 75 | if not check_3pid_allowed(self.hs, "email", body['email']): |
76 | 76 | raise SynapseError( |
77 | 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, | |
77 | 403, | |
78 | "Your email domain is not authorized to register on this server", | |
79 | Codes.THREEPID_DENIED, | |
78 | 80 | ) |
79 | 81 | |
80 | 82 | existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( |
114 | 116 | |
115 | 117 | if not check_3pid_allowed(self.hs, "msisdn", msisdn): |
116 | 118 | raise SynapseError( |
117 | 403, "Third party identifier is not allowed", Codes.THREEPID_DENIED, | |
119 | 403, | |
120 | "Phone numbers are not authorized to register on this server", | |
121 | Codes.THREEPID_DENIED, | |
118 | 122 | ) |
119 | 123 | |
120 | 124 | existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( |
372 | 376 | |
373 | 377 | if not check_3pid_allowed(self.hs, medium, address): |
374 | 378 | raise SynapseError( |
375 | 403, "Third party identifier is not allowed", | |
379 | 403, | |
380 | "Third party identifiers (email/phone numbers)" + | |
381 | " are not authorized on this server", | |
376 | 382 | Codes.THREEPID_DENIED, |
377 | 383 | ) |
378 | 384 |
24 | 24 | from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection |
25 | 25 | from synapse.events.utils import ( |
26 | 26 | format_event_for_client_v2_without_room_id, |
27 | format_event_raw, | |
27 | 28 | serialize_event, |
28 | 29 | ) |
29 | 30 | from synapse.handlers.presence import format_user_presence_state |
87 | 88 | |
88 | 89 | @defer.inlineCallbacks |
89 | 90 | def on_GET(self, request): |
90 | if "from" in request.args: | |
91 | if b"from" in request.args: | |
91 | 92 | # /events used to use 'from', but /sync uses 'since'. |
92 | 93 | # Lets be helpful and whine if we see a 'from'. |
93 | 94 | raise SynapseError( |
174 | 175 | |
175 | 176 | @staticmethod |
176 | 177 | def encode_response(time_now, sync_result, access_token_id, filter): |
178 | if filter.event_format == 'client': | |
179 | event_formatter = format_event_for_client_v2_without_room_id | |
180 | elif filter.event_format == 'federation': | |
181 | event_formatter = format_event_raw | |
182 | else: | |
183 | raise Exception("Unknown event format %s" % (filter.event_format, )) | |
184 | ||
177 | 185 | joined = SyncRestServlet.encode_joined( |
178 | sync_result.joined, time_now, access_token_id, filter.event_fields | |
186 | sync_result.joined, time_now, access_token_id, | |
187 | filter.event_fields, | |
188 | event_formatter, | |
179 | 189 | ) |
180 | 190 | |
181 | 191 | invited = SyncRestServlet.encode_invited( |
182 | 192 | sync_result.invited, time_now, access_token_id, |
193 | event_formatter, | |
183 | 194 | ) |
184 | 195 | |
185 | 196 | archived = SyncRestServlet.encode_archived( |
186 | 197 | sync_result.archived, time_now, access_token_id, |
187 | 198 | filter.event_fields, |
199 | event_formatter, | |
188 | 200 | ) |
189 | 201 | |
190 | 202 | return { |
227 | 239 | } |
228 | 240 | |
229 | 241 | @staticmethod |
230 | def encode_joined(rooms, time_now, token_id, event_fields): | |
242 | def encode_joined(rooms, time_now, token_id, event_fields, event_formatter): | |
231 | 243 | """ |
232 | 244 | Encode the joined rooms in a sync result |
233 | 245 | |
239 | 251 | token_id(int): ID of the user's auth token - used for namespacing |
240 | 252 | of transaction IDs |
241 | 253 | event_fields(list<str>): List of event fields to include. If empty, |
242 | all fields will be returned. | |
254 | all fields will be returned. | |
255 | event_formatter (func[dict]): function to convert from federation format | |
256 | to client format | |
243 | 257 | Returns: |
244 | 258 | dict[str, dict[str, object]]: the joined rooms list, in our |
245 | 259 | response format |
247 | 261 | joined = {} |
248 | 262 | for room in rooms: |
249 | 263 | joined[room.room_id] = SyncRestServlet.encode_room( |
250 | room, time_now, token_id, only_fields=event_fields | |
264 | room, time_now, token_id, joined=True, only_fields=event_fields, | |
265 | event_formatter=event_formatter, | |
251 | 266 | ) |
252 | 267 | |
253 | 268 | return joined |
254 | 269 | |
255 | 270 | @staticmethod |
256 | def encode_invited(rooms, time_now, token_id): | |
271 | def encode_invited(rooms, time_now, token_id, event_formatter): | |
257 | 272 | """ |
258 | 273 | Encode the invited rooms in a sync result |
259 | 274 | |
263 | 278 | time_now(int): current time - used as a baseline for age |
264 | 279 | calculations |
265 | 280 | token_id(int): ID of the user's auth token - used for namespacing |
266 | of transaction IDs | |
281 | of transaction IDs | |
282 | event_formatter (func[dict]): function to convert from federation format | |
283 | to client format | |
267 | 284 | |
268 | 285 | Returns: |
269 | 286 | dict[str, dict[str, object]]: the invited rooms list, in our |
273 | 290 | for room in rooms: |
274 | 291 | invite = serialize_event( |
275 | 292 | room.invite, time_now, token_id=token_id, |
276 | event_format=format_event_for_client_v2_without_room_id, | |
293 | event_format=event_formatter, | |
277 | 294 | is_invite=True, |
278 | 295 | ) |
279 | 296 | unsigned = dict(invite.get("unsigned", {})) |
287 | 304 | return invited |
288 | 305 | |
289 | 306 | @staticmethod |
290 | def encode_archived(rooms, time_now, token_id, event_fields): | |
307 | def encode_archived(rooms, time_now, token_id, event_fields, event_formatter): | |
291 | 308 | """ |
292 | 309 | Encode the archived rooms in a sync result |
293 | 310 | |
299 | 316 | token_id(int): ID of the user's auth token - used for namespacing |
300 | 317 | of transaction IDs |
301 | 318 | event_fields(list<str>): List of event fields to include. If empty, |
302 | all fields will be returned. | |
319 | all fields will be returned. | |
320 | event_formatter (func[dict]): function to convert from federation format | |
321 | to client format | |
303 | 322 | Returns: |
304 | 323 | dict[str, dict[str, object]]: The invited rooms list, in our |
305 | 324 | response format |
307 | 326 | joined = {} |
308 | 327 | for room in rooms: |
309 | 328 | joined[room.room_id] = SyncRestServlet.encode_room( |
310 | room, time_now, token_id, joined=False, only_fields=event_fields | |
329 | room, time_now, token_id, joined=False, | |
330 | only_fields=event_fields, | |
331 | event_formatter=event_formatter, | |
311 | 332 | ) |
312 | 333 | |
313 | 334 | return joined |
314 | 335 | |
315 | 336 | @staticmethod |
316 | def encode_room(room, time_now, token_id, joined=True, only_fields=None): | |
337 | def encode_room( | |
338 | room, time_now, token_id, joined, | |
339 | only_fields, event_formatter, | |
340 | ): | |
317 | 341 | """ |
318 | 342 | Args: |
319 | 343 | room (JoinedSyncResult|ArchivedSyncResult): sync result for a |
325 | 349 | joined (bool): True if the user is joined to this room - will mean |
326 | 350 | we handle ephemeral events |
327 | 351 | only_fields(list<str>): Optional. The list of event fields to include. |
352 | event_formatter (func[dict]): function to convert from federation format | |
353 | to client format | |
328 | 354 | Returns: |
329 | 355 | dict[str, object]: the room, encoded in our response format |
330 | 356 | """ |
331 | 357 | def serialize(event): |
332 | # TODO(mjark): Respect formatting requirements in the filter. | |
333 | 358 | return serialize_event( |
334 | 359 | event, time_now, token_id=token_id, |
335 | event_format=format_event_for_client_v2_without_room_id, | |
360 | event_format=event_formatter, | |
336 | 361 | only_event_fields=only_fields, |
337 | 362 | ) |
338 | 363 |
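
The sync hunks above thread an event_formatter callable through encode_joined/encode_invited/encode_archived/encode_room instead of hard-coding format_event_for_client_v2_without_room_id, with the formatter chosen from the filter's event_format. A self-contained sketch of that dispatch (the formatter bodies here are illustrative stand-ins, not the real serializers):

    def format_event_for_client_v2_without_room_id(event):
        # client format: strip fields clients already know from context
        event = dict(event)
        event.pop("room_id", None)
        return event

    def format_event_raw(event):
        # federation format: hand the event back untouched
        return event

    def choose_event_formatter(event_format):
        if event_format == 'client':
            return format_event_for_client_v2_without_room_id
        elif event_format == 'federation':
            return format_event_raw
        else:
            raise Exception("Unknown event format %s" % (event_format,))

    fmt = choose_event_formatter('federation')
    assert "room_id" in fmt({"room_id": "!abc:example.com", "type": "m.room.message"})
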
78 | 78 | yield self.auth.get_user_by_req(request, allow_guest=True) |
79 | 79 | |
80 | 80 | fields = request.args |
81 | fields.pop("access_token", None) | |
81 | fields.pop(b"access_token", None) | |
82 | 82 | |
83 | 83 | results = yield self.appservice_handler.query_3pe( |
84 | 84 | ThirdPartyEntityKind.USER, protocol, fields |
101 | 101 | yield self.auth.get_user_by_req(request, allow_guest=True) |
102 | 102 | |
103 | 103 | fields = request.args |
104 | fields.pop("access_token", None) | |
104 | fields.pop(b"access_token", None) | |
105 | 105 | |
106 | 106 | results = yield self.appservice_handler.query_3pe( |
107 | 107 | ThirdPartyEntityKind.LOCATION, protocol, fields |
87 | 87 | ) |
88 | 88 | |
89 | 89 | def getChild(self, name, request): |
90 | if name == '': | |
90 | if name == b'': | |
91 | 91 | return self |
21 | 21 | class KeyApiV2Resource(Resource): |
22 | 22 | def __init__(self, hs): |
23 | 23 | Resource.__init__(self) |
24 | self.putChild("server", LocalKey(hs)) | |
25 | self.putChild("query", RemoteKey(hs)) | |
24 | self.putChild(b"server", LocalKey(hs)) | |
25 | self.putChild(b"query", RemoteKey(hs)) |
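
These b"..." changes are part of the Python 3 port: Twisted decodes neither query strings nor URL paths, so request.args is keyed by bytes and Resource.putChild expects a bytes path segment. A small illustrative resource (hypothetical, not Synapse code):

    from twisted.web.resource import Resource

    class Ping(Resource):
        isLeaf = True

        def render_GET(self, request):
            # on Python 3, request.args maps bytes keys to lists of bytes values
            token = request.args.get(b"access_token", [b""])[0]
            return b"pong " + token

    root = Resource()
    # path segments arrive as bytes during traversal, so the child's
    # name must be bytes too -- a str name would simply never match
    root.putChild(b"ping", Ping())
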
102 | 102 | def async_render_GET(self, request): |
103 | 103 | if len(request.postpath) == 1: |
104 | 104 | server, = request.postpath |
105 | query = {server: {}} | |
105 | query = {server.decode('ascii'): {}} | |
106 | 106 | elif len(request.postpath) == 2: |
107 | 107 | server, key_id = request.postpath |
108 | 108 | minimum_valid_until_ts = parse_integer( |
111 | 111 | arguments = {} |
112 | 112 | if minimum_valid_until_ts is not None: |
113 | 113 | arguments["minimum_valid_until_ts"] = minimum_valid_until_ts |
114 | query = {server: {key_id: arguments}} | |
114 | query = {server.decode('ascii'): {key_id.decode('ascii'): arguments}} | |
115 | 115 | else: |
116 | 116 | raise SynapseError( |
117 | 117 | 404, "Not found %r" % request.postpath, Codes.NOT_FOUND |
118 | 118 | ) |
119 | ||
119 | 120 | yield self.query_keys(request, query, query_remote_on_cache_miss=True) |
120 | 121 | |
121 | 122 | def render_POST(self, request): |
134 | 135 | @defer.inlineCallbacks |
135 | 136 | def query_keys(self, request, query, query_remote_on_cache_miss=False): |
136 | 137 | logger.info("Handling query for keys %r", query) |
138 | ||
137 | 139 | store_queries = [] |
138 | 140 | for server_name, key_ids in query.items(): |
139 | 141 | if ( |
55 | 55 | # servers. |
56 | 56 | |
57 | 57 | # TODO: A little crude here, we could do this better. |
58 | filename = request.path.split('/')[-1] | |
58 | filename = request.path.decode('ascii').split('/')[-1] | |
59 | 59 | # be paranoid |
60 | 60 | filename = re.sub("[^0-9A-z.-_]", "", filename) |
61 | 61 | |
77 | 77 | # select private. don't bother setting Expires as all our matrix |
78 | 78 | # clients are smart enough to be happy with Cache-Control (right?) |
79 | 79 | request.setHeader( |
80 | "Cache-Control", "public,max-age=86400,s-maxage=86400" | |
80 | b"Cache-Control", b"public,max-age=86400,s-maxage=86400" | |
81 | 81 | ) |
82 | 82 | |
83 | 83 | d = FileSender().beginFileTransfer(f, request) |
14 | 14 | |
15 | 15 | import logging |
16 | 16 | import os |
17 | import urllib | |
18 | 17 | |
19 | from six.moves.urllib import parse as urlparse | |
18 | from six.moves import urllib | |
20 | 19 | |
21 | 20 | from twisted.internet import defer |
22 | 21 | from twisted.protocols.basic import FileSender |
34 | 33 | # This allows users to append e.g. /test.png to the URL. Useful for |
35 | 34 | # clients that parse the URL to see content type. |
36 | 35 | server_name, media_id = request.postpath[:2] |
36 | ||
37 | if isinstance(server_name, bytes): | |
38 | server_name = server_name.decode('utf-8') | |
39 | media_id = media_id.decode('utf8') | |
40 | ||
37 | 41 | file_name = None |
38 | 42 | if len(request.postpath) > 2: |
39 | 43 | try: |
40 | file_name = urlparse.unquote(request.postpath[-1]).decode("utf-8") | |
44 | file_name = urllib.parse.unquote(request.postpath[-1].decode("utf-8")) | |
41 | 45 | except UnicodeDecodeError: |
42 | 46 | pass |
43 | 47 | return server_name, media_id, file_name |
92 | 96 | file_size (int): Size in bytes of the media, if known. |
93 | 97 | upload_name (str): The name of the requested file, if any. |
94 | 98 | """ |
99 | def _quote(x): | |
100 | return urllib.parse.quote(x.encode("utf-8")) | |
101 | ||
95 | 102 | request.setHeader(b"Content-Type", media_type.encode("UTF-8")) |
96 | 103 | if upload_name: |
97 | 104 | if is_ascii(upload_name): |
98 | request.setHeader( | |
99 | b"Content-Disposition", | |
100 | b"inline; filename=%s" % ( | |
101 | urllib.quote(upload_name.encode("utf-8")), | |
102 | ), | |
103 | ) | |
105 | disposition = ("inline; filename=%s" % (_quote(upload_name),)).encode("ascii") | |
104 | 106 | else: |
105 | request.setHeader( | |
106 | b"Content-Disposition", | |
107 | b"inline; filename*=utf-8''%s" % ( | |
108 | urllib.quote(upload_name.encode("utf-8")), | |
109 | ), | |
110 | ) | |
107 | disposition = ( | |
108 | "inline; filename*=utf-8''%s" % (_quote(upload_name),)).encode("ascii") | |
109 | ||
110 | request.setHeader(b"Content-Disposition", disposition) | |
111 | 111 | |
112 | 112 | # cache for at least a day. |
113 | 113 | # XXX: we might want to turn this off for data we don't want to |
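
The rewritten branch above produces a bytes Content-Disposition header either way: the plain filename= form when the name is ASCII, or the RFC 5987 filename*=utf-8''... form otherwise, with the name percent-encoded in both cases. A condensed sketch of the same logic (using a try/except in place of the codebase's is_ascii helper):

    from six.moves import urllib

    def content_disposition(upload_name):
        quoted = urllib.parse.quote(upload_name.encode("utf-8"))
        try:
            upload_name.encode("ascii")
            # the plain form is fine for ASCII names
            return ("inline; filename=%s" % (quoted,)).encode("ascii")
        except UnicodeEncodeError:
            # RFC 5987 extended form for everything else
            return ("inline; filename*=utf-8''%s" % (quoted,)).encode("ascii")

    assert content_disposition(u"cat.png") == b"inline; filename=cat.png"
    assert content_disposition(u"caf\u00e9.png") == b"inline; filename*=utf-8''caf%C3%A9.png"
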
46 | 46 | def _async_render_GET(self, request): |
47 | 47 | set_cors_headers(request) |
48 | 48 | request.setHeader( |
49 | "Content-Security-Policy", | |
50 | "default-src 'none';" | |
51 | " script-src 'none';" | |
52 | " plugin-types application/pdf;" | |
53 | " style-src 'unsafe-inline';" | |
54 | " object-src 'self';" | |
49 | b"Content-Security-Policy", | |
50 | b"default-src 'none';" | |
51 | b" script-src 'none';" | |
52 | b" plugin-types application/pdf;" | |
53 | b" style-src 'unsafe-inline';" | |
54 | b" object-src 'self';" | |
55 | 55 | ) |
56 | 56 | server_name, media_id, name = parse_media_id(request) |
57 | 57 | if server_name == self.server_name: |
19 | 19 | import os |
20 | 20 | import shutil |
21 | 21 | |
22 | from six import iteritems | |
22 | from six import PY3, iteritems | |
23 | 23 | from six.moves.urllib import parse as urlparse |
24 | 24 | |
25 | 25 | import twisted.internet.error |
396 | 396 | |
397 | 397 | yield finish() |
398 | 398 | |
399 | media_type = headers["Content-Type"][0] | |
399 | media_type = headers[b"Content-Type"][0].decode('ascii') | |
400 | 400 | |
401 | 401 | time_now_ms = self.clock.time_msec() |
402 | 402 | |
403 | content_disposition = headers.get("Content-Disposition", None) | |
403 | content_disposition = headers.get(b"Content-Disposition", None) | |
404 | 404 | if content_disposition: |
405 | _, params = cgi.parse_header(content_disposition[0],) | |
405 | _, params = cgi.parse_header(content_disposition[0].decode('ascii'),) | |
406 | 406 | upload_name = None |
407 | 407 | |
408 | 408 | # First check if there is a valid UTF-8 filename |
418 | 418 | upload_name = upload_name_ascii |
419 | 419 | |
420 | 420 | if upload_name: |
421 | upload_name = urlparse.unquote(upload_name) | |
421 | if PY3: | |
422 | upload_name = urlparse.unquote(upload_name) | |
423 | else: | |
424 | upload_name = urlparse.unquote(upload_name.encode('ascii')) | |
422 | 425 | try: |
423 | upload_name = upload_name.decode("utf-8") | |
426 | if isinstance(upload_name, bytes): | |
427 | upload_name = upload_name.decode("utf-8") | |
424 | 428 | except UnicodeDecodeError: |
425 | 429 | upload_name = None |
426 | 430 | else: |
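
On the download side, the headers Twisted hands back are bytes-keyed with bytes values, so the Content-Disposition value is decoded before cgi.parse_header splits it. A minimal sketch of the extraction (the real code goes on to percent-decode filename* and validate UTF-8):

    import cgi

    def extract_upload_name(headers):
        content_disposition = headers.get(b"Content-Disposition", None)
        if not content_disposition:
            return None
        _, params = cgi.parse_header(content_disposition[0].decode('ascii'))
        # prefer the RFC 5987 filename*, falling back to plain filename
        return params.get("filename*") or params.get("filename")

    hdrs = {b"Content-Disposition": [b"inline; filename=cat.png"]}
    assert extract_upload_name(hdrs) == "cat.png"
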
754 | 758 | Resource.__init__(self) |
755 | 759 | |
756 | 760 | media_repo = hs.get_media_repository() |
757 | self.putChild("upload", UploadResource(hs, media_repo)) | |
758 | self.putChild("download", DownloadResource(hs, media_repo)) | |
759 | self.putChild("thumbnail", ThumbnailResource( | |
761 | ||
762 | self.putChild(b"upload", UploadResource(hs, media_repo)) | |
763 | self.putChild(b"download", DownloadResource(hs, media_repo)) | |
764 | self.putChild(b"thumbnail", ThumbnailResource( | |
760 | 765 | hs, media_repo, media_repo.media_storage, |
761 | 766 | )) |
762 | self.putChild("identicon", IdenticonResource()) | |
767 | self.putChild(b"identicon", IdenticonResource()) | |
763 | 768 | if hs.config.url_preview_enabled: |
764 | self.putChild("preview_url", PreviewUrlResource( | |
769 | self.putChild(b"preview_url", PreviewUrlResource( | |
765 | 770 | hs, media_repo, media_repo.media_storage, |
766 | 771 | )) |
767 | self.putChild("config", MediaConfigResource(hs)) | |
772 | self.putChild(b"config", MediaConfigResource(hs)) |
260 | 260 | |
261 | 261 | logger.debug("Calculated OG for %s as %s" % (url, og)) |
262 | 262 | |
263 | jsonog = json.dumps(og) | |
263 | jsonog = json.dumps(og).encode('utf8') | |
264 | 264 | |
265 | 265 | # store OG in history-aware DB cache |
266 | 266 | yield self.store.store_url_cache( |
300 | 300 | logger.warn("Error downloading %s: %r", url, e) |
301 | 301 | raise SynapseError( |
302 | 302 | 500, "Failed to download content: %s" % ( |
303 | traceback.format_exception_only(sys.exc_type, e), | |
303 | traceback.format_exception_only(sys.exc_info()[0], e), | |
304 | 304 | ), |
305 | 305 | Codes.UNKNOWN, |
306 | 306 | ) |
307 | 307 | yield finish() |
308 | 308 | |
309 | 309 | try: |
310 | if "Content-Type" in headers: | |
311 | media_type = headers["Content-Type"][0] | |
310 | if b"Content-Type" in headers: | |
311 | media_type = headers[b"Content-Type"][0].decode('ascii') | |
312 | 312 | else: |
313 | 313 | media_type = "application/octet-stream" |
314 | 314 | time_now_ms = self.clock.time_msec() |
315 | 315 | |
316 | content_disposition = headers.get("Content-Disposition", None) | |
316 | content_disposition = headers.get(b"Content-Disposition", None) | |
317 | 317 | if content_disposition: |
318 | 318 | _, params = cgi.parse_header(content_disposition[0],) |
319 | 319 | download_name = None |
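
The sys.exc_type attribute was removed in Python 3, hence the switch to sys.exc_info()[0], which returns the (type, value, traceback) triple on both major versions:

    import sys
    import traceback

    try:
        raise ValueError("boom")
    except ValueError as e:
        etype = sys.exc_info()[0]  # portable; sys.exc_type is Python 2 only
        assert traceback.format_exception_only(etype, e) == ["ValueError: boom\n"]
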
929 | 929 | ) |
930 | 930 | |
931 | 931 | self._invalidate_cache_and_stream( |
932 | txn, self.get_room_summary, (room_id,) | |
933 | ) | |
934 | ||
935 | self._invalidate_cache_and_stream( | |
932 | 936 | txn, self.get_current_state_ids, (room_id,) |
933 | 937 | ) |
934 | 938 | |
1885 | 1889 | ")" |
1886 | 1890 | ) |
1887 | 1891 | |
1888 | # create an index on should_delete because later we'll be looking for | |
1889 | # the should_delete / shouldn't_delete subsets | |
1890 | txn.execute( | |
1891 | "CREATE INDEX events_to_purge_should_delete" | |
1892 | " ON events_to_purge(should_delete)", | |
1893 | ) | |
1894 | ||
1895 | # We do joins against events_to_purge for e.g. calculating state | |
1896 | # groups to purge, etc., so lets make an index. | |
1897 | txn.execute( | |
1898 | "CREATE INDEX events_to_purge_id" | |
1899 | " ON events_to_purge(event_id)", | |
1900 | ) | |
1901 | ||
1902 | 1892 | # First ensure that we're not about to delete all the forward extremities
1903 | 1893 | txn.execute( |
1904 | 1894 | "SELECT e.event_id, e.depth FROM events as e " |
1925 | 1915 | should_delete_params = () |
1926 | 1916 | if not delete_local_events: |
1927 | 1917 | should_delete_expr += " AND event_id NOT LIKE ?" |
1928 | should_delete_params += ("%:" + self.hs.hostname, ) | |
1918 | ||
1919 | # We include the parameter twice since we use the expression twice | |
1920 | should_delete_params += ( | |
1921 | "%:" + self.hs.hostname, | |
1922 | "%:" + self.hs.hostname, | |
1923 | ) | |
1929 | 1924 | |
1930 | 1925 | should_delete_params += (room_id, token.topological) |
1931 | 1926 | |
1927 | # Note that we insert events that are outliers and aren't going to be | |
1928 | # deleted, as nothing will happen to them. | |
1932 | 1929 | txn.execute( |
1933 | 1930 | "INSERT INTO events_to_purge" |
1934 | 1931 | " SELECT event_id, %s" |
1935 | 1932 | " FROM events AS e LEFT JOIN state_events USING (event_id)" |
1936 | " WHERE e.room_id = ? AND topological_ordering < ?" % ( | |
1933 | " WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?" | |
1934 | % ( | |
1935 | should_delete_expr, | |
1937 | 1936 | should_delete_expr, |
1938 | 1937 | ), |
1939 | 1938 | should_delete_params, |
1940 | 1939 | ) |
1940 | ||
1941 | # We create the indices *after* insertion as that's a lot faster. | |
1942 | ||
1943 | # create an index on should_delete because later we'll be looking for | |
1944 | # the should_delete / shouldn't_delete subsets | |
1945 | txn.execute( | |
1946 | "CREATE INDEX events_to_purge_should_delete" | |
1947 | " ON events_to_purge(should_delete)", | |
1948 | ) | |
1949 | ||
1950 | # We do joins against events_to_purge for e.g. calculating state | |
1951 | # groups to purge, etc., so let's make an index. |
1952 | txn.execute( | |
1953 | "CREATE INDEX events_to_purge_id" | |
1954 | " ON events_to_purge(event_id)", | |
1955 | ) | |
1956 | ||
1941 | 1957 | txn.execute( |
1942 | 1958 | "SELECT event_id, should_delete FROM events_to_purge" |
1943 | 1959 | ) |
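
Two things change in this purge hunk: outliers that won't be deleted are now inserted too (so later joins against events_to_purge see them), and the two CREATE INDEX statements move to after the bulk INSERT, since building an index once over a populated table is much cheaper than maintaining it on every inserted row. The ordering in miniature, against an in-memory SQLite database with hypothetical data:

    import sqlite3

    txn = sqlite3.connect(":memory:").cursor()
    txn.execute(
        "CREATE TEMPORARY TABLE events_to_purge"
        " (event_id TEXT, should_delete BOOLEAN)"
    )

    # bulk-insert first, while the table has no indices to maintain...
    txn.executemany(
        "INSERT INTO events_to_purge VALUES (?, ?)",
        [("$event%d" % i, i % 2) for i in range(1000)],
    )

    # ...then build each index in a single pass over the finished table
    txn.execute(
        "CREATE INDEX events_to_purge_should_delete"
        " ON events_to_purge(should_delete)"
    )
    txn.execute(
        "CREATE INDEX events_to_purge_id ON events_to_purge(event_id)"
    )
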
133 | 133 | """ |
134 | 134 | key_id = "%s:%s" % (verify_key.alg, verify_key.version) |
135 | 135 | |
136 | # XXX fix this to not need a lock (#3819) | |
136 | 137 | def _txn(txn): |
137 | 138 | self._simple_upsert_txn( |
138 | 139 | txn, |
146 | 146 | return self.runInteraction("count_users", _count_users) |
147 | 147 | |
148 | 148 | @defer.inlineCallbacks |
149 | def get_registered_reserved_users_count(self): | |
150 | """Of the reserved threepids defined in config, how many are associated | |
151 | with registered users? | |
152 | ||
153 | Returns: | |
154 | Deferred[int]: Number of real reserved users |
155 | """ | |
156 | count = 0 | |
157 | for tp in self.hs.config.mau_limits_reserved_threepids: | |
158 | user_id = yield self.hs.get_datastore().get_user_id_by_threepid( | |
159 | tp["medium"], tp["address"] | |
160 | ) | |
161 | if user_id: | |
162 | count = count + 1 | |
163 | defer.returnValue(count) | |
164 | ||
165 | @defer.inlineCallbacks | |
149 | 166 | def upsert_monthly_active_user(self, user_id): |
150 | 167 | """ |
151 | 168 | Updates or inserts monthly active user member |
198 | 215 | Args: |
199 | 216 | user_id(str): the user_id to query |
200 | 217 | """ |
218 | ||
201 | 219 | if self.hs.config.limit_usage_by_mau: |
220 | # Trial users and guests should not be included as part of MAU group | |
221 | is_guest = yield self.is_guest(user_id) | |
222 | if is_guest: | |
223 | return | |
202 | 224 | is_trial = yield self.is_trial_user(user_id) |
203 | 225 | if is_trial: |
204 | # we don't track trial users in the MAU table. | |
205 | 226 | return |
206 | 227 | |
207 | 228 | last_seen_timestamp = yield self.user_last_seen_monthly_active(user_id) |
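
Together with the new get_registered_reserved_users_count, this hunk tightens who counts towards the monthly-active-user limit: guests are now skipped just like trial users, before anything touches the MAU table. A runnable sketch of the gating order (FakeStore's methods are stand-ins for the real datastore):

    from twisted.internet import defer

    class FakeStore(object):
        def is_guest(self, user_id):
            return defer.succeed(user_id.startswith("@guest"))

        def is_trial_user(self, user_id):
            return defer.succeed(False)

        def upsert_monthly_active_user(self, user_id):
            raise AssertionError("guests must never reach the MAU table")

    @defer.inlineCallbacks
    def populate_monthly_active_users(store, user_id):
        # guests and trial users are excluded from the MAU group
        is_guest = yield store.is_guest(user_id)
        if is_guest:
            return
        is_trial = yield store.is_trial_user(user_id)
        if is_trial:
            return
        yield store.upsert_monthly_active_user(user_id)

    populate_monthly_active_users(FakeStore(), "@guest123:example.com")
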
50 | 50 | "ProfileInfo", ("avatar_url", "display_name") |
51 | 51 | ) |
52 | 52 | |
53 | # "members" points to a truncated list of (user_id, event_id) tuples for users of | |
54 | # a given membership type, suitable for use in calculating heroes for a room. | |
55 | # "count" points to the total number of users of a given membership type. |
56 | MemberSummary = namedtuple( | |
57 | "MemberSummary", ("members", "count") | |
58 | ) | |
53 | 59 | |
54 | 60 | _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" |
55 | 61 | |
80 | 86 | txn.execute(sql, (room_id, Membership.JOIN,)) |
81 | 87 | return [to_ascii(r[0]) for r in txn] |
82 | 88 | return self.runInteraction("get_users_in_room", f) |
89 | ||
90 | @cached(max_entries=100000) | |
91 | def get_room_summary(self, room_id): | |
92 | """ Get the details of a room roughly suitable for use by the room | |
93 | summary extension to /sync. Useful when lazy loading room members. | |
94 | Args: | |
95 | room_id (str): The room ID to query | |
96 | Returns: | |
97 | Deferred[dict[str, MemberSummary]]: |
98 | dict of membership states, pointing to a MemberSummary named tuple. | |
99 | """ | |
100 | ||
101 | def _get_room_summary_txn(txn): | |
102 | # first get counts. | |
103 | # We do this all in one transaction to keep the cache small. | |
104 | # FIXME: get rid of this when we have room_stats | |
105 | sql = """ | |
106 | SELECT count(*), m.membership FROM room_memberships as m | |
107 | INNER JOIN current_state_events as c | |
108 | ON m.event_id = c.event_id | |
109 | AND m.room_id = c.room_id | |
110 | AND m.user_id = c.state_key | |
111 | WHERE c.type = 'm.room.member' AND c.room_id = ? | |
112 | GROUP BY m.membership | |
113 | """ | |
114 | ||
115 | txn.execute(sql, (room_id,)) | |
116 | res = {} | |
117 | for count, membership in txn: | |
118 | summary = res.setdefault(to_ascii(membership), MemberSummary([], count)) | |
119 | ||
120 | # we order by membership and then fairly arbitrarily by event_id so | |
121 | # heroes are consistent | |
122 | sql = """ | |
123 | SELECT m.user_id, m.membership, m.event_id | |
124 | FROM room_memberships as m | |
125 | INNER JOIN current_state_events as c | |
126 | ON m.event_id = c.event_id | |
127 | AND m.room_id = c.room_id | |
128 | AND m.user_id = c.state_key | |
129 | WHERE c.type = 'm.room.member' AND c.room_id = ? | |
130 | ORDER BY | |
131 | CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, | |
132 | m.event_id ASC | |
133 | LIMIT ? | |
134 | """ | |
135 | ||
136 | # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. | |
137 | txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) | |
138 | for user_id, membership, event_id in txn: | |
139 | summary = res[to_ascii(membership)] | |
140 | # we will always have a summary for this membership type at this | |
141 | # point given the summary currently contains the counts. | |
142 | members = summary.members | |
143 | members.append((to_ascii(user_id), to_ascii(event_id))) | |
144 | ||
145 | return res | |
146 | ||
147 | return self.runInteraction("get_room_summary", _get_room_summary_txn) | |
83 | 148 | |
84 | 149 | @cached() |
85 | 150 | def get_invited_rooms_for_user(self, user_id): |
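
get_room_summary exists so /sync can name a lazy-loaded room from a handful of "hero" members without shipping the whole member list: per membership it caches a count plus up to six (user_id, event_id) pairs, six being five heroes plus one spare in case the requester is among them. A sketch of how a caller might pick heroes from the summary (the selection rules here are illustrative, not the exact sync-handler logic):

    from collections import namedtuple

    MemberSummary = namedtuple("MemberSummary", ("members", "count"))

    def pick_heroes(summary, requester, limit=5):
        heroes = []
        for membership in ("join", "invite"):
            members = summary.get(membership, MemberSummary([], 0)).members
            for user_id, _event_id in members:
                if user_id == requester:
                    continue  # the spare sixth row covers this case
                heroes.append(user_id)
                if len(heroes) == limit:
                    return heroes
        return heroes

    summary = {"join": MemberSummary([("@a:x", "$1"), ("@me:x", "$2"), ("@b:x", "$3")], 3)}
    assert pick_heroes(summary, "@me:x") == ["@a:x", "@b:x"]
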
437 | 437 | value.trap(CancelledError) |
438 | 438 | raise DeferredTimeoutError(timeout, "Deferred") |
439 | 439 | return value |
440 | ||
441 | ||
442 | def timeout_no_seriously(deferred, timeout, reactor): | |
443 | """The built-in Twisted deferred addTimeout (and the method above) |
444 | completely fails to time things out under some unknown circumstances. |
445 | ||
446 | Let's try a different way of timing things out and maybe that will make |
447 | things work?! |
448 | ||
449 | TODO: Kill this with fire. | |
450 | """ | |
451 | ||
452 | new_d = defer.Deferred() | |
453 | ||
454 | timed_out = [False] | |
455 | ||
456 | def time_it_out(): | |
457 | timed_out[0] = True | |
458 | ||
459 | if not new_d.called: | |
460 | new_d.errback(DeferredTimeoutError(timeout, "Deferred")) | |
461 | ||
462 | deferred.cancel() | |
463 | ||
464 | delayed_call = reactor.callLater(timeout, time_it_out) | |
465 | ||
466 | def convert_cancelled(value): | |
467 | if timed_out[0]: | |
468 | return _cancelled_to_timed_out_error(value, timeout) | |
469 | return value | |
470 | ||
471 | deferred.addBoth(convert_cancelled) | |
472 | ||
473 | def cancel_timeout(result): | |
474 | # stop the pending call to cancel the deferred if it's been fired | |
475 | if delayed_call.active(): | |
476 | delayed_call.cancel() | |
477 | return result | |
478 | ||
479 | deferred.addBoth(cancel_timeout) | |
480 | ||
481 | def success_cb(val): | |
482 | if not new_d.called: | |
483 | new_d.callback(val) | |
484 | ||
485 | def failure_cb(val): | |
486 | if not new_d.called: | |
487 | new_d.errback(val) | |
488 | ||
489 | deferred.addCallbacks(success_cb, failure_cb) | |
490 | ||
491 | return new_d |
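
A quick usage sketch of the helper above, with Twisted's test clock standing in for the real reactor; DeferredTimeoutError is the exception class defined earlier in this module:

    from twisted.internet import defer, task

    clock = task.Clock()
    never_fires = defer.Deferred()
    guarded = timeout_no_seriously(never_fires, timeout=1.0, reactor=clock)

    failures = []
    guarded.addErrback(lambda f: failures.append(f.value))

    clock.advance(1.5)  # fires the delayed call, errbacking `guarded`
    assert isinstance(failures[0], DeferredTimeoutError)
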
18 | 18 | from twisted.cred import checkers, portal |
19 | 19 | |
20 | 20 | PUBLIC_KEY = ( |
21 | "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az" | |
22 | "64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJS" | |
23 | "kbh/C+BR3utDS555mV" | |
21 | "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHhGATaW4KhE23+7nrH4jFx3yLq9OjaEs5" | |
22 | "XALqeK+7385NlLja3DE/DO9mGhnd9+bAy39EKT3sTV6+WXQ4yD0TvEEyUEMtjWkSEm6U32+C" | |
23 | "DaS3TW/vPBUMeJQwq+Ydcif1UlnpXrDDTamD0AU9VaEvHq+3HAkipqn0TGpKON6aqk4vauDx" | |
24 | "oXSsV5TXBVrxP/y7HpMOpU4GUWsaaacBTKKNnUaQB4UflvydaPJUuwdaCUJGTMjbhWrjVfK+" | |
25 | "jslseSPxU6XvrkZMyCr4znxvuDxjMk1RGIdO7v+rbBMLEgqtSMNqJbYeVCnj2CFgc3fcTcld" | |
26 | "X2uOJDrJb/WRlHulthCh" | |
24 | 27 | ) |
25 | 28 | |
26 | 29 | PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY----- |
27 | MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW | |
28 | 4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw | |
29 | vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb | |
30 | Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1 | |
31 | xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8 | |
32 | PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2 | |
33 | gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu | |
34 | DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML | |
35 | pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP | |
36 | EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg== | |
30 | MIIEpQIBAAKCAQEAx4RgE2luCoRNt/u56x+Ixcd8i6vTo2hLOVwC6nivu9/OTZS4 | |
31 | 2twxPwzvZhoZ3ffmwMt/RCk97E1evll0OMg9E7xBMlBDLY1pEhJulN9vgg2kt01v | |
32 | 7zwVDHiUMKvmHXIn9VJZ6V6ww02pg9AFPVWhLx6vtxwJIqap9ExqSjjemqpOL2rg | |
33 | 8aF0rFeU1wVa8T/8ux6TDqVOBlFrGmmnAUyijZ1GkAeFH5b8nWjyVLsHWglCRkzI | |
34 | 24Vq41Xyvo7JbHkj8VOl765GTMgq+M58b7g8YzJNURiHTu7/q2wTCxIKrUjDaiW2 | |
35 | HlQp49ghYHN33E3JXV9rjiQ6yW/1kZR7pbYQoQIDAQABAoIBAQC8KJ0q8Wzzwh5B | |
36 | esa1dQHZ8+4DEsL/Amae66VcVwD0X3cCN1W2IZ7X5W0Ij2kBqr8V51RYhcR+S+Ek | |
37 | BtzSiBUBvbKGrqcMGKaUgomDIMzai99hd0gvCCyZnEW1OQhFkNkaRNXCfqiZJ27M | |
38 | fqvSUiU2eOwh9fCvmxoA6Of8o3FbzcJ+1GMcobWRllDtLmj6lgVbDzuA+0jC5daB | |
39 | 9Tj1pBzu3wn3ufxiS+gBnJ+7NcXH3E73lqCcPa2ufbZ1haxfiGCnRIhFXuQDgxFX | |
40 | vKdEfDgtvas6r1ahGbc+b/q8E8fZT7cABuIU4yfOORK+MhpyWbvoyyzuVGKj3PKt | |
41 | KSPJu5CZAoGBAOkoJfAVyYteqKcmGTanGqQnAY43CaYf6GdSPX/jg+JmKZg0zqMC | |
42 | jWZUtPb93i+jnOInbrnuHOiHAxI8wmhEPed28H2lC/LU8PzlqFkZXKFZ4vLOhhRB | |
43 | /HeHCFIDosPFlohWi3b+GAjD7sXgnIuGmnXWe2ea/TS3yersifDEoKKjAoGBANsQ | |
44 | gJX2cJv1c3jhdgcs8vAt5zIOKcCLTOr/QPmVf/kxjNgndswcKHwsxE/voTO9q+TF | |
45 | v/6yCSTxAdjuKz1oIYWgi/dZo82bBKWxNRpgrGviU3/zwxiHlyIXUhzQu78q3VS/ | |
46 | 7S1XVbc7qMV++XkYKHPVD+nVG/gGzFxumX7MLXfrAoGBAJit9cn2OnjNj9uFE1W6 | |
47 | r7N254ndeLAUjPe73xH0RtTm2a4WRopwjW/JYIetTuYbWgyujc+robqTTuuOZjAp | |
48 | H/CG7o0Ym251CypQqaFO/l2aowclPp/dZhpPjp9GSjuxFBZLtiBB3DNBOwbRQzIK | |
49 | /vLTdRQvZkgzYkI4i0vjNt3JAoGBANP8HSKBLymMlShlrSx2b8TB9tc2Y2riohVJ | |
50 | 2ttqs0M2kt/dGJWdrgOz4mikL+983Olt/0P9juHDoxEEMK2kpcPEv40lnmBpYU7h | |
51 | s8yJvnBLvJe2EJYdJ8AipyAhUX1FgpbvfxmASP8eaUxsegeXvBWTGWojAoS6N2o+ | |
52 | 0KSl+l3vAoGAFqm0gO9f/Q1Se60YQd4l2PZeMnJFv0slpgHHUwegmd6wJhOD7zJ1 | |
53 | CkZcXwiv7Nog7AI9qKJEUXLjoqL+vJskBzSOqU3tcd670YQMi1aXSXJqYE202K7o | |
54 | EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs= | |
37 | 55 | -----END RSA PRIVATE KEY-----""" |
38 | 56 | |
39 | 57 |
19 | 19 | |
20 | 20 | from twisted.internet import defer |
21 | 21 | |
22 | from synapse.metrics import InFlightGauge | |
22 | 23 | from synapse.util.logcontext import LoggingContext |
23 | 24 | |
24 | 25 | logger = logging.getLogger(__name__) |
43 | 44 | # seconds spent waiting for a db connection, in this block |
44 | 45 | block_db_sched_duration = Counter( |
45 | 46 | "synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]) |
47 | ||
48 | # Tracks the number of blocks currently active | |
49 | in_flight = InFlightGauge( | |
50 | "synapse_util_metrics_block_in_flight", "", | |
51 | labels=["block_name"], | |
52 | sub_metrics=["real_time_max", "real_time_sum"], | |
53 | ) | |
46 | 54 | |
47 | 55 | |
48 | 56 | def measure_func(name): |
81 | 89 | |
82 | 90 | self.start_usage = self.start_context.get_resource_usage() |
83 | 91 | |
92 | in_flight.register((self.name,), self._update_in_flight) | |
93 | ||
84 | 94 | def __exit__(self, exc_type, exc_val, exc_tb): |
85 | 95 | if isinstance(exc_type, Exception) or not self.start_context: |
86 | 96 | return |
97 | ||
98 | in_flight.unregister((self.name,), self._update_in_flight) | |
87 | 99 | |
88 | 100 | duration = self.clock.time() - self.start |
89 | 101 | |
119 | 131 | |
120 | 132 | if self.created_context: |
121 | 133 | self.start_context.__exit__(exc_type, exc_val, exc_tb) |
134 | ||
135 | def _update_in_flight(self, metrics): | |
136 | """Gets called when processing in flight metrics | |
137 | """ | |
138 | duration = self.clock.time() - self.start | |
139 | ||
140 | metrics.real_time_max = max(metrics.real_time_max, duration) | |
141 | metrics.real_time_sum += duration | |
142 | ||
143 | # TODO: Add other in flight metrics. |
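
The Measure block now registers itself with an InFlightGauge on entry and unregisters on exit, so a Prometheus scrape that lands mid-block still sees it: at collection time every registered callback folds the block's running time into real_time_max / real_time_sum. A toy stand-in for the mechanism (synapse.metrics.InFlightGauge itself does the Prometheus wiring):

    import time

    class ToyInFlightGauge(object):
        def __init__(self):
            self._callbacks = {}

        def register(self, key, callback):
            self._callbacks.setdefault(key, set()).add(callback)

        def unregister(self, key, callback):
            self._callbacks.get(key, set()).discard(callback)

        def collect(self, metrics):
            # invoke every still-registered callback against one metrics object
            for callbacks in self._callbacks.values():
                for cb in list(callbacks):
                    cb(metrics)
            return metrics

    class BlockMetrics(object):
        real_time_max = 0.0
        real_time_sum = 0.0

    in_flight = ToyInFlightGauge()

    class Measure(object):
        def __init__(self, name):
            self.name = name

        def __enter__(self):
            self.start = time.time()
            in_flight.register((self.name,), self._update_in_flight)

        def __exit__(self, exc_type, exc_val, exc_tb):
            in_flight.unregister((self.name,), self._update_in_flight)

        def _update_in_flight(self, metrics):
            duration = time.time() - self.start
            metrics.real_time_max = max(metrics.real_time_max, duration)
            metrics.real_time_sum += duration

    with Measure("db_txn"):
        scraped = in_flight.collect(BlockMetrics())  # scrape lands mid-block
    assert scraped.real_time_max >= 0.0
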
470 | 470 | def test_reserved_threepid(self): |
471 | 471 | self.hs.config.limit_usage_by_mau = True |
472 | 472 | self.hs.config.max_mau_value = 1 |
473 | self.store.get_monthly_active_count = lambda: defer.succeed(2) | |
473 | 474 | threepid = {'medium': 'email', 'address': 'reserved@server.com'} |
474 | 475 | unknown_threepid = {'medium': 'email', 'address': 'unreserved@server.com'} |
475 | 476 | self.hs.config.mau_limits_reserved_threepids = [threepid] |
46 | 46 | self.assertEqual(len(self.reactor.tcpServers), 1) |
47 | 47 | site = self.reactor.tcpServers[0][1] |
48 | 48 | self.resource = ( |
49 | site.resource.children["_matrix"].children["client"].children["r0"] | |
49 | site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] | |
50 | 50 | ) |
51 | 51 | |
52 | 52 | request, channel = self.make_request("PUT", "presence/a/status") |
76 | 76 | self.assertEqual(len(self.reactor.tcpServers), 1) |
77 | 77 | site = self.reactor.tcpServers[0][1] |
78 | 78 | self.resource = ( |
79 | site.resource.children["_matrix"].children["client"].children["r0"] | |
79 | site.resource.children[b"_matrix"].children[b"client"].children[b"r0"] | |
80 | 80 | ) |
81 | 81 | |
82 | 82 | request, channel = self.make_request("PUT", "presence/a/status") |
42 | 42 | |
43 | 43 | |
44 | 44 | def _make_edu_transaction_json(edu_type, content): |
45 | return json.dumps(_expect_edu_transaction(edu_type, content)).encode( | |
46 | 'utf8' | |
47 | ) | |
45 | return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8') | |
48 | 46 | |
49 | 47 | |
50 | 48 | class TypingNotificationsTestCase(unittest.TestCase): |
0 | # -*- coding: utf-8 -*- | |
1 | # Copyright 2018 New Vector Ltd | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | from mock import Mock | |
16 | ||
17 | from twisted.internet.defer import TimeoutError | |
18 | from twisted.internet.error import ConnectingCancelledError, DNSLookupError | |
19 | from twisted.web.client import ResponseNeverReceived | |
20 | ||
21 | from synapse.http.matrixfederationclient import MatrixFederationHttpClient | |
22 | ||
23 | from tests.unittest import HomeserverTestCase | |
24 | ||
25 | ||
26 | class FederationClientTests(HomeserverTestCase): | |
27 | def make_homeserver(self, reactor, clock): | |
28 | ||
29 | hs = self.setup_test_homeserver(reactor=reactor, clock=clock) | |
30 | hs.tls_client_options_factory = None | |
31 | return hs | |
32 | ||
33 | def prepare(self, reactor, clock, homeserver): | |
34 | ||
35 | self.cl = MatrixFederationHttpClient(self.hs) | |
36 | self.reactor.lookups["testserv"] = "1.2.3.4" | |
37 | ||
38 | def test_dns_error(self): | |
39 | """ | |
40 | If the DNS lookup returns an error, it will bubble up. |
41 | """ | |
42 | d = self.cl._request("testserv2:8008", "GET", "foo/bar", timeout=10000) | |
43 | self.pump() | |
44 | ||
45 | f = self.failureResultOf(d) | |
46 | self.assertIsInstance(f.value, DNSLookupError) | |
47 | ||
48 | def test_client_never_connect(self): | |
49 | """ | |
50 | If the HTTP request is not connected and is timed out, it'll give a | |
51 | ConnectingCancelledError. | |
52 | """ | |
53 | d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) | |
54 | ||
55 | self.pump() | |
56 | ||
57 | # Nothing happened yet | |
58 | self.assertFalse(d.called) | |
59 | ||
60 | # Make sure treq is trying to connect | |
61 | clients = self.reactor.tcpClients | |
62 | self.assertEqual(len(clients), 1) | |
63 | self.assertEqual(clients[0][0], '1.2.3.4') | |
64 | self.assertEqual(clients[0][1], 8008) | |
65 | ||
66 | # Deferred is still without a result | |
67 | self.assertFalse(d.called) | |
68 | ||
69 | # Push by enough to time it out | |
70 | self.reactor.advance(10.5) | |
71 | f = self.failureResultOf(d) | |
72 | ||
73 | self.assertIsInstance(f.value, ConnectingCancelledError) | |
74 | ||
75 | def test_client_connect_no_response(self): | |
76 | """ | |
77 | If the HTTP request is connected, but gets no response before being | |
78 | timed out, it'll give a ResponseNeverReceived. | |
79 | """ | |
80 | d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) | |
81 | ||
82 | self.pump() | |
83 | ||
84 | # Nothing happened yet | |
85 | self.assertFalse(d.called) | |
86 | ||
87 | # Make sure treq is trying to connect | |
88 | clients = self.reactor.tcpClients | |
89 | self.assertEqual(len(clients), 1) | |
90 | self.assertEqual(clients[0][0], '1.2.3.4') | |
91 | self.assertEqual(clients[0][1], 8008) | |
92 | ||
93 | conn = Mock() | |
94 | client = clients[0][2].buildProtocol(None) | |
95 | client.makeConnection(conn) | |
96 | ||
97 | # Deferred is still without a result | |
98 | self.assertFalse(d.called) | |
99 | ||
100 | # Push by enough to time it out | |
101 | self.reactor.advance(10.5) | |
102 | f = self.failureResultOf(d) | |
103 | ||
104 | self.assertIsInstance(f.value, ResponseNeverReceived) | |
105 | ||
106 | def test_client_gets_headers(self): | |
107 | """ | |
108 | Once the client gets the headers, _request returns successfully. | |
109 | """ | |
110 | d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000) | |
111 | ||
112 | self.pump() | |
113 | ||
114 | conn = Mock() | |
115 | clients = self.reactor.tcpClients | |
116 | client = clients[0][2].buildProtocol(None) | |
117 | client.makeConnection(conn) | |
118 | ||
119 | # Deferred does not have a result | |
120 | self.assertFalse(d.called) | |
121 | ||
122 | # Send it the HTTP response | |
123 | client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n") | |
124 | ||
125 | # We should get a successful response | |
126 | r = self.successResultOf(d) | |
127 | self.assertEqual(r.code, 200) | |
128 | ||
129 | def test_client_headers_no_body(self): | |
130 | """ | |
131 | If the HTTP request is connected and gets the headers, but no body |
132 | arrives before it is timed out, it'll give a TimeoutError. |
133 | """ | |
134 | d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000) | |
135 | ||
136 | self.pump() | |
137 | ||
138 | conn = Mock() | |
139 | clients = self.reactor.tcpClients | |
140 | client = clients[0][2].buildProtocol(None) | |
141 | client.makeConnection(conn) | |
142 | ||
143 | # Deferred does not have a result | |
144 | self.assertFalse(d.called) | |
145 | ||
146 | # Send it the HTTP response | |
147 | client.dataReceived( | |
148 | (b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n" | |
149 | b"Server: Fake\r\n\r\n") | |
150 | ) | |
151 | ||
152 | # Push by enough to time it out | |
153 | self.reactor.advance(10.5) | |
154 | f = self.failureResultOf(d) | |
155 | ||
156 | self.assertIsInstance(f.value, TimeoutError) |
21 | 21 | |
22 | 22 | from twisted.internet import defer |
23 | 23 | |
24 | import synapse.rest.client.v1.room | |
25 | 24 | from synapse.api.constants import Membership |
26 | from synapse.http.server import JsonResource | |
27 | from synapse.types import UserID | |
28 | from synapse.util import Clock | |
25 | from synapse.rest.client.v1 import room | |
29 | 26 | |
30 | 27 | from tests import unittest |
31 | from tests.server import ( | |
32 | ThreadedMemoryReactorClock, | |
33 | make_request, | |
34 | render, | |
35 | setup_test_homeserver, | |
36 | ) | |
37 | ||
38 | from .utils import RestHelper | |
39 | 28 | |
40 | 29 | PATH_PREFIX = b"/_matrix/client/api/v1" |
41 | 30 | |
42 | 31 | |
43 | class RoomBase(unittest.TestCase): | |
32 | class RoomBase(unittest.HomeserverTestCase): | |
44 | 33 | rmcreator_id = None |
45 | 34 | |
46 | def setUp(self): | |
47 | ||
48 | self.clock = ThreadedMemoryReactorClock() | |
49 | self.hs_clock = Clock(self.clock) | |
50 | ||
51 | self.hs = setup_test_homeserver( | |
52 | self.addCleanup, | |
35 | servlets = [room.register_servlets, room.register_deprecated_servlets] | |
36 | ||
37 | def make_homeserver(self, reactor, clock): | |
38 | ||
39 | self.hs = self.setup_test_homeserver( | |
53 | 40 | "red", |
54 | 41 | http_client=None, |
55 | clock=self.hs_clock, | |
56 | reactor=self.clock, | |
57 | 42 | federation_client=Mock(), |
58 | 43 | ratelimiter=NonCallableMock(spec_set=["send_message"]), |
59 | 44 | ) |
62 | 47 | |
63 | 48 | self.hs.get_federation_handler = Mock(return_value=Mock()) |
64 | 49 | |
65 | def get_user_by_access_token(token=None, allow_guest=False): | |
66 | return { | |
67 | "user": UserID.from_string(self.helper.auth_user_id), | |
68 | "token_id": 1, | |
69 | "is_guest": False, | |
70 | } | |
71 | ||
72 | def get_user_by_req(request, allow_guest=False, rights="access"): | |
73 | return synapse.types.create_requester( | |
74 | UserID.from_string(self.helper.auth_user_id), 1, False, None | |
75 | ) | |
76 | ||
77 | self.hs.get_auth().get_user_by_req = get_user_by_req | |
78 | self.hs.get_auth().get_user_by_access_token = get_user_by_access_token | |
79 | self.hs.get_auth().get_access_token_from_request = Mock(return_value=b"1234") | |
80 | ||
81 | 50 | def _insert_client_ip(*args, **kwargs): |
82 | 51 | return defer.succeed(None) |
83 | 52 | |
84 | 53 | self.hs.get_datastore().insert_client_ip = _insert_client_ip |
85 | 54 | |
86 | self.resource = JsonResource(self.hs) | |
87 | synapse.rest.client.v1.room.register_servlets(self.hs, self.resource) | |
88 | synapse.rest.client.v1.room.register_deprecated_servlets(self.hs, self.resource) | |
89 | self.helper = RestHelper(self.hs, self.resource, self.user_id) | |
55 | return self.hs | |
90 | 56 | |
91 | 57 | |
92 | 58 | class RoomPermissionsTestCase(RoomBase): |
93 | 59 | """ Tests room permissions. """ |
94 | 60 | |
95 | user_id = b"@sid1:red" | |
96 | rmcreator_id = b"@notme:red" | |
97 | ||
98 | def setUp(self): | |
99 | ||
100 | super(RoomPermissionsTestCase, self).setUp() | |
61 | user_id = "@sid1:red" | |
62 | rmcreator_id = "@notme:red" | |
63 | ||
64 | def prepare(self, reactor, clock, hs): | |
101 | 65 | |
102 | 66 | self.helper.auth_user_id = self.rmcreator_id |
103 | 67 | # create some rooms under the name rmcreator_id |
113 | 77 | self.created_rmid_msg_path = ( |
114 | 78 | "rooms/%s/send/m.room.message/a1" % (self.created_rmid) |
115 | 79 | ).encode('ascii') |
116 | request, channel = make_request( | |
117 | b"PUT", | |
118 | self.created_rmid_msg_path, | |
119 | b'{"msgtype":"m.text","body":"test msg"}', | |
120 | ) | |
121 | render(request, self.resource, self.clock) | |
122 | self.assertEquals(channel.result["code"], b"200", channel.result) | |
80 | request, channel = self.make_request( | |
81 | "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}' | |
82 | ) | |
83 | self.render(request) | |
84 | self.assertEquals(200, channel.code, channel.result) | |
123 | 85 | |
124 | 86 | # set topic for public room |
125 | request, channel = make_request( | |
126 | b"PUT", | |
87 | request, channel = self.make_request( | |
88 | "PUT", | |
127 | 89 | ("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode('ascii'), |
128 | 90 | b'{"topic":"Public Room Topic"}', |
129 | 91 | ) |
130 | render(request, self.resource, self.clock) | |
131 | self.assertEquals(channel.result["code"], b"200", channel.result) | |
92 | self.render(request) | |
93 | self.assertEquals(200, channel.code, channel.result) | |
132 | 94 | |
133 | 95 | # auth as user_id now |
134 | 96 | self.helper.auth_user_id = self.user_id |
139 | 101 | seq = iter(range(100)) |
140 | 102 | |
141 | 103 | def send_msg_path(): |
142 | return b"/rooms/%s/send/m.room.message/mid%s" % ( | |
104 | return "/rooms/%s/send/m.room.message/mid%s" % ( | |
143 | 105 | self.created_rmid, |
144 | str(next(seq)).encode('ascii'), | |
106 | str(next(seq)), | |
145 | 107 | ) |
146 | 108 | |
147 | 109 | # send message in uncreated room, expect 403 |
148 | request, channel = make_request( | |
149 | b"PUT", | |
150 | b"/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), | |
110 | request, channel = self.make_request( | |
111 | "PUT", | |
112 | "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), | |
151 | 113 | msg_content, |
152 | 114 | ) |
153 | render(request, self.resource, self.clock) | |
154 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
115 | self.render(request) | |
116 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
155 | 117 | |
156 | 118 | # send message in created room not joined (no state), expect 403 |
157 | request, channel = make_request(b"PUT", send_msg_path(), msg_content) | |
158 | render(request, self.resource, self.clock) | |
159 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
119 | request, channel = self.make_request("PUT", send_msg_path(), msg_content) | |
120 | self.render(request) | |
121 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
160 | 122 | |
161 | 123 | # send message in created room and invited, expect 403 |
162 | 124 | self.helper.invite( |
163 | 125 | room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id |
164 | 126 | ) |
165 | request, channel = make_request(b"PUT", send_msg_path(), msg_content) | |
166 | render(request, self.resource, self.clock) | |
167 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
127 | request, channel = self.make_request("PUT", send_msg_path(), msg_content) | |
128 | self.render(request) | |
129 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
168 | 130 | |
169 | 131 | # send message in created room and joined, expect 200 |
170 | 132 | self.helper.join(room=self.created_rmid, user=self.user_id) |
171 | request, channel = make_request(b"PUT", send_msg_path(), msg_content) | |
172 | render(request, self.resource, self.clock) | |
173 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
133 | request, channel = self.make_request("PUT", send_msg_path(), msg_content) | |
134 | self.render(request) | |
135 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
174 | 136 | |
175 | 137 | # send message in created room and left, expect 403 |
176 | 138 | self.helper.leave(room=self.created_rmid, user=self.user_id) |
177 | request, channel = make_request(b"PUT", send_msg_path(), msg_content) | |
178 | render(request, self.resource, self.clock) | |
179 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
139 | request, channel = self.make_request("PUT", send_msg_path(), msg_content) | |
140 | self.render(request) | |
141 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
180 | 142 | |
181 | 143 | def test_topic_perms(self): |
182 | 144 | topic_content = b'{"topic":"My Topic Name"}' |
183 | topic_path = b"/rooms/%s/state/m.room.topic" % self.created_rmid | |
145 | topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid | |
184 | 146 | |
185 | 147 | # set/get topic in uncreated room, expect 403 |
186 | request, channel = make_request( | |
187 | b"PUT", b"/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content | |
188 | ) | |
189 | render(request, self.resource, self.clock) | |
190 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
191 | request, channel = make_request( | |
192 | b"GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid | |
193 | ) | |
194 | render(request, self.resource, self.clock) | |
195 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
148 | request, channel = self.make_request( | |
149 | "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content | |
150 | ) | |
151 | self.render(request) | |
152 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
153 | request, channel = self.make_request( | |
154 | "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid | |
155 | ) | |
156 | self.render(request) | |
157 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
196 | 158 | |
197 | 159 | # set/get topic in created PRIVATE room not joined, expect 403 |
198 | request, channel = make_request(b"PUT", topic_path, topic_content) | |
199 | render(request, self.resource, self.clock) | |
200 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
201 | request, channel = make_request(b"GET", topic_path) | |
202 | render(request, self.resource, self.clock) | |
203 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
160 | request, channel = self.make_request("PUT", topic_path, topic_content) | |
161 | self.render(request) | |
162 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
163 | request, channel = self.make_request("GET", topic_path) | |
164 | self.render(request) | |
165 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
204 | 166 | |
205 | 167 | # set topic in created PRIVATE room and invited, expect 403 |
206 | 168 | self.helper.invite( |
207 | 169 | room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id |
208 | 170 | ) |
209 | request, channel = make_request(b"PUT", topic_path, topic_content) | |
210 | render(request, self.resource, self.clock) | |
211 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
171 | request, channel = self.make_request("PUT", topic_path, topic_content) | |
172 | self.render(request) | |
173 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
212 | 174 | |
213 | 175 | # get topic in created PRIVATE room and invited, expect 403 |
214 | request, channel = make_request(b"GET", topic_path) | |
215 | render(request, self.resource, self.clock) | |
216 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
176 | request, channel = self.make_request("GET", topic_path) | |
177 | self.render(request) | |
178 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
217 | 179 | |
218 | 180 | # set/get topic in created PRIVATE room and joined, expect 200 |
219 | 181 | self.helper.join(room=self.created_rmid, user=self.user_id) |
220 | 182 | |
221 | 183 | # Only room ops can set topic by default |
222 | 184 | self.helper.auth_user_id = self.rmcreator_id |
223 | request, channel = make_request(b"PUT", topic_path, topic_content) | |
224 | render(request, self.resource, self.clock) | |
225 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
185 | request, channel = self.make_request("PUT", topic_path, topic_content) | |
186 | self.render(request) | |
187 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
226 | 188 | self.helper.auth_user_id = self.user_id |
227 | 189 | |
228 | request, channel = make_request(b"GET", topic_path) | |
229 | render(request, self.resource, self.clock) | |
230 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
231 | self.assert_dict(json.loads(topic_content), channel.json_body) | |
190 | request, channel = self.make_request("GET", topic_path) | |
191 | self.render(request) | |
192 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
193 | self.assert_dict(json.loads(topic_content.decode('utf8')), channel.json_body) | |
232 | 194 | |
233 | 195 | # set/get topic in created PRIVATE room and left, expect 403 |
234 | 196 | self.helper.leave(room=self.created_rmid, user=self.user_id) |
235 | request, channel = make_request(b"PUT", topic_path, topic_content) | |
236 | render(request, self.resource, self.clock) | |
237 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
238 | request, channel = make_request(b"GET", topic_path) | |
239 | render(request, self.resource, self.clock) | |
240 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
197 | request, channel = self.make_request("PUT", topic_path, topic_content) | |
198 | self.render(request) | |
199 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
200 | request, channel = self.make_request("GET", topic_path) | |
201 | self.render(request) | |
202 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
241 | 203 | |
242 | 204 | # get topic in PUBLIC room, not joined, expect 403 |
243 | request, channel = make_request( | |
244 | b"GET", b"/rooms/%s/state/m.room.topic" % self.created_public_rmid | |
245 | ) | |
246 | render(request, self.resource, self.clock) | |
247 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
205 | request, channel = self.make_request( | |
206 | "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid | |
207 | ) | |
208 | self.render(request) | |
209 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
248 | 210 | |
249 | 211 | # set topic in PUBLIC room, not joined, expect 403 |
250 | request, channel = make_request( | |
251 | b"PUT", | |
252 | b"/rooms/%s/state/m.room.topic" % self.created_public_rmid, | |
212 | request, channel = self.make_request( | |
213 | "PUT", | |
214 | "/rooms/%s/state/m.room.topic" % self.created_public_rmid, | |
253 | 215 | topic_content, |
254 | 216 | ) |
255 | render(request, self.resource, self.clock) | |
256 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
217 | self.render(request) | |
218 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
257 | 219 | |
258 | 220 | def _test_get_membership(self, room=None, members=[], expect_code=None): |
259 | 221 | for member in members: |
260 | path = b"/rooms/%s/state/m.room.member/%s" % (room, member) | |
261 | request, channel = make_request(b"GET", path) | |
262 | render(request, self.resource, self.clock) | |
263 | self.assertEquals(expect_code, int(channel.result["code"])) | |
222 | path = "/rooms/%s/state/m.room.member/%s" % (room, member) | |
223 | request, channel = self.make_request("GET", path) | |
224 | self.render(request) | |
225 | self.assertEquals(expect_code, channel.code) | |
264 | 226 | |
265 | 227 | def test_membership_basic_room_perms(self): |
266 | 228 | # === room does not exist === |
427 | 389 | class RoomsMemberListTestCase(RoomBase): |
428 | 390 | """ Tests /rooms/$room_id/members/list REST events.""" |
429 | 391 | |
430 | user_id = b"@sid1:red" | |
392 | user_id = "@sid1:red" | |
431 | 393 | |
432 | 394 | def test_get_member_list(self): |
433 | 395 | room_id = self.helper.create_room_as(self.user_id) |
434 | request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id) | |
435 | render(request, self.resource, self.clock) | |
436 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
396 | request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) | |
397 | self.render(request) | |
398 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
437 | 399 | |
438 | 400 | def test_get_member_list_no_room(self): |
439 | request, channel = make_request(b"GET", b"/rooms/roomdoesnotexist/members") | |
440 | render(request, self.resource, self.clock) | |
441 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
401 | request, channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") | |
402 | self.render(request) | |
403 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
442 | 404 | |
443 | 405 | def test_get_member_list_no_permission(self): |
444 | room_id = self.helper.create_room_as(b"@some_other_guy:red") | |
445 | request, channel = make_request(b"GET", b"/rooms/%s/members" % room_id) | |
446 | render(request, self.resource, self.clock) | |
447 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
406 | room_id = self.helper.create_room_as("@some_other_guy:red") | |
407 | request, channel = self.make_request("GET", "/rooms/%s/members" % room_id) | |
408 | self.render(request) | |
409 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
448 | 410 | |
449 | 411 | def test_get_member_list_mixed_memberships(self): |
450 | room_creator = b"@some_other_guy:red" | |
412 | room_creator = "@some_other_guy:red" | |
451 | 413 | room_id = self.helper.create_room_as(room_creator) |
452 | room_path = b"/rooms/%s/members" % room_id | |
414 | room_path = "/rooms/%s/members" % room_id | |
453 | 415 | self.helper.invite(room=room_id, src=room_creator, targ=self.user_id) |
454 | 416 | # can't see list if you're just invited. |
455 | request, channel = make_request(b"GET", room_path) | |
456 | render(request, self.resource, self.clock) | |
457 | self.assertEquals(403, int(channel.result["code"]), msg=channel.result["body"]) | |
417 | request, channel = self.make_request("GET", room_path) | |
418 | self.render(request) | |
419 | self.assertEquals(403, channel.code, msg=channel.result["body"]) | |
458 | 420 | |
459 | 421 | self.helper.join(room=room_id, user=self.user_id) |
460 | 422 | # can see list now joined |
461 | request, channel = make_request(b"GET", room_path) | |
462 | render(request, self.resource, self.clock) | |
463 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
423 | request, channel = self.make_request("GET", room_path) | |
424 | self.render(request) | |
425 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
464 | 426 | |
465 | 427 | self.helper.leave(room=room_id, user=self.user_id) |
466 | 428 | # can see old list once left |
467 | request, channel = make_request(b"GET", room_path) | |
468 | render(request, self.resource, self.clock) | |
469 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
429 | request, channel = self.make_request("GET", room_path) | |
430 | self.render(request) | |
431 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
470 | 432 | |
471 | 433 | |
472 | 434 | class RoomsCreateTestCase(RoomBase): |
473 | 435 | """ Tests /rooms and /rooms/$room_id REST events. """ |
474 | 436 | |
475 | user_id = b"@sid1:red" | |
437 | user_id = "@sid1:red" | |
476 | 438 | |
477 | 439 | def test_post_room_no_keys(self): |
478 | 440 | # POST with no config keys, expect new room id |
479 | request, channel = make_request(b"POST", b"/createRoom", b"{}") | |
480 | ||
481 | render(request, self.resource, self.clock) | |
482 | self.assertEquals(200, int(channel.result["code"]), channel.result) | |
441 | request, channel = self.make_request("POST", "/createRoom", "{}") | |
442 | ||
443 | self.render(request) | |
444 | self.assertEquals(200, channel.code, channel.result) | |
483 | 445 | self.assertTrue("room_id" in channel.json_body) |
484 | 446 | |
485 | 447 | def test_post_room_visibility_key(self): |
486 | 448 | # POST with visibility config key, expect new room id |
487 | request, channel = make_request( | |
488 | b"POST", b"/createRoom", b'{"visibility":"private"}' | |
489 | ) | |
490 | render(request, self.resource, self.clock) | |
491 | self.assertEquals(200, int(channel.result["code"])) | |
449 | request, channel = self.make_request( | |
450 | "POST", "/createRoom", b'{"visibility":"private"}' | |
451 | ) | |
452 | self.render(request) | |
453 | self.assertEquals(200, channel.code) | |
492 | 454 | self.assertTrue("room_id" in channel.json_body) |
493 | 455 | |
494 | 456 | def test_post_room_custom_key(self): |
495 | 457 | # POST with custom config keys, expect new room id |
496 | request, channel = make_request(b"POST", b"/createRoom", b'{"custom":"stuff"}') | |
497 | render(request, self.resource, self.clock) | |
498 | self.assertEquals(200, int(channel.result["code"])) | |
458 | request, channel = self.make_request( | |
459 | "POST", "/createRoom", b'{"custom":"stuff"}' | |
460 | ) | |
461 | self.render(request) | |
462 | self.assertEquals(200, channel.code) | |
499 | 463 | self.assertTrue("room_id" in channel.json_body) |
500 | 464 | |
501 | 465 | def test_post_room_known_and_unknown_keys(self): |
502 | 466 | # POST with custom + known config keys, expect new room id |
503 | request, channel = make_request( | |
504 | b"POST", b"/createRoom", b'{"visibility":"private","custom":"things"}' | |
505 | ) | |
506 | render(request, self.resource, self.clock) | |
507 | self.assertEquals(200, int(channel.result["code"])) | |
467 | request, channel = self.make_request( | |
468 | "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' | |
469 | ) | |
470 | self.render(request) | |
471 | self.assertEquals(200, channel.code) | |
508 | 472 | self.assertTrue("room_id" in channel.json_body) |
509 | 473 | |
510 | 474 | def test_post_room_invalid_content(self): |
511 | 475 | # POST with invalid content / paths, expect 400 |
512 | request, channel = make_request(b"POST", b"/createRoom", b'{"visibili') | |
513 | render(request, self.resource, self.clock) | |
514 | self.assertEquals(400, int(channel.result["code"])) | |
515 | ||
516 | request, channel = make_request(b"POST", b"/createRoom", b'["hello"]') | |
517 | render(request, self.resource, self.clock) | |
518 | self.assertEquals(400, int(channel.result["code"])) | |
476 | request, channel = self.make_request("POST", "/createRoom", b'{"visibili') | |
477 | self.render(request) | |
478 | self.assertEquals(400, channel.code) | |
479 | ||
480 | request, channel = self.make_request("POST", "/createRoom", b'["hello"]') | |
481 | self.render(request) | |
482 | self.assertEquals(400, channel.code) | |
519 | 483 | |
520 | 484 | |
521 | 485 | class RoomTopicTestCase(RoomBase): |
522 | 486 | """ Tests /rooms/$room_id/topic REST events. """ |
523 | 487 | |
524 | user_id = b"@sid1:red" | |
525 | ||
526 | def setUp(self): | |
527 | ||
528 | super(RoomTopicTestCase, self).setUp() | |
529 | ||
488 | user_id = "@sid1:red" | |
489 | ||
490 | def prepare(self, reactor, clock, hs): | |
530 | 491 | # create the room |
531 | 492 | self.room_id = self.helper.create_room_as(self.user_id) |
532 | self.path = b"/rooms/%s/state/m.room.topic" % (self.room_id,) | |
493 | self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,) | |
533 | 494 | |
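Note the lifecycle change here as well: the `setUp`/`super(...).setUp()` boilerplate becomes a `prepare(self, reactor, clock, hs)` hook. A hedged sketch of the pattern (the hook signature is taken from the diff; the rest is illustrative):

```python
class ExampleTopicTestCase(RoomBase):
    user_id = "@sid1:red"

    def prepare(self, reactor, clock, hs):
        # prepare() runs once the test homeserver has been built, so
        # fixtures that need a live hs (room creation, state paths)
        # belong here, and there is no super(...).setUp() call to forget.
        self.room_id = self.helper.create_room_as(self.user_id)
        self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,)
```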
534 | 495 | def test_invalid_puts(self): |
535 | 496 | # missing keys or invalid json |
536 | request, channel = make_request(b"PUT", self.path, '{}') | |
537 | render(request, self.resource, self.clock) | |
538 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
539 | ||
540 | request, channel = make_request(b"PUT", self.path, '{"_name":"bob"}') | |
541 | render(request, self.resource, self.clock) | |
542 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
543 | ||
544 | request, channel = make_request(b"PUT", self.path, '{"nao') | |
545 | render(request, self.resource, self.clock) | |
546 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
547 | ||
548 | request, channel = make_request( | |
549 | b"PUT", self.path, '[{"_name":"bob"},{"_name":"jill"}]' | |
550 | ) | |
551 | render(request, self.resource, self.clock) | |
552 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
553 | ||
554 | request, channel = make_request(b"PUT", self.path, 'text only') | |
555 | render(request, self.resource, self.clock) | |
556 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
557 | ||
558 | request, channel = make_request(b"PUT", self.path, '') | |
559 | render(request, self.resource, self.clock) | |
560 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
497 | request, channel = self.make_request("PUT", self.path, '{}') | |
498 | self.render(request) | |
499 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
500 | ||
501 | request, channel = self.make_request("PUT", self.path, '{"_name":"bo"}') | |
502 | self.render(request) | |
503 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
504 | ||
505 | request, channel = self.make_request("PUT", self.path, '{"nao') | |
506 | self.render(request) | |
507 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
508 | ||
509 | request, channel = self.make_request( | |
510 | "PUT", self.path, '[{"_name":"bo"},{"_name":"jill"}]' | |
511 | ) | |
512 | self.render(request) | |
513 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
514 | ||
515 | request, channel = self.make_request("PUT", self.path, 'text only') | |
516 | self.render(request) | |
517 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
518 | ||
519 | request, channel = self.make_request("PUT", self.path, '') | |
520 | self.render(request) | |
521 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
561 | 522 | |
562 | 523 | # valid key, wrong type |
563 | 524 | content = '{"topic":["Topic name"]}' |
564 | request, channel = make_request(b"PUT", self.path, content) | |
565 | render(request, self.resource, self.clock) | |
566 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
525 | request, channel = self.make_request("PUT", self.path, content) | |
526 | self.render(request) | |
527 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
567 | 528 | |
568 | 529 | def test_rooms_topic(self): |
569 | 530 | # nothing should be there |
570 | request, channel = make_request(b"GET", self.path) | |
571 | render(request, self.resource, self.clock) | |
572 | self.assertEquals(404, int(channel.result["code"]), msg=channel.result["body"]) | |
531 | request, channel = self.make_request("GET", self.path) | |
532 | self.render(request) | |
533 | self.assertEquals(404, channel.code, msg=channel.result["body"]) | |
573 | 534 | |
574 | 535 | # valid put |
575 | 536 | content = '{"topic":"Topic name"}' |
576 | request, channel = make_request(b"PUT", self.path, content) | |
577 | render(request, self.resource, self.clock) | |
578 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
537 | request, channel = self.make_request("PUT", self.path, content) | |
538 | self.render(request) | |
539 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
579 | 540 | |
580 | 541 | # valid get |
581 | request, channel = make_request(b"GET", self.path) | |
582 | render(request, self.resource, self.clock) | |
583 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
542 | request, channel = self.make_request("GET", self.path) | |
543 | self.render(request) | |
544 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
584 | 545 | self.assert_dict(json.loads(content), channel.json_body) |
585 | 546 | |
586 | 547 | def test_rooms_topic_with_extra_keys(self): |
587 | 548 | # valid put with extra keys |
588 | 549 | content = '{"topic":"Seasons","subtopic":"Summer"}' |
589 | request, channel = make_request(b"PUT", self.path, content) | |
590 | render(request, self.resource, self.clock) | |
591 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
550 | request, channel = self.make_request("PUT", self.path, content) | |
551 | self.render(request) | |
552 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
592 | 553 | |
593 | 554 | # valid get |
594 | request, channel = make_request(b"GET", self.path) | |
595 | render(request, self.resource, self.clock) | |
596 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
555 | request, channel = self.make_request("GET", self.path) | |
556 | self.render(request) | |
557 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
597 | 558 | self.assert_dict(json.loads(content), channel.json_body) |
598 | 559 | |
599 | 560 | |
600 | 561 | class RoomMemberStateTestCase(RoomBase): |
601 | 562 | """ Tests /rooms/$room_id/members/$user_id/state REST events. """ |
602 | 563 | |
603 | user_id = b"@sid1:red" | |
604 | ||
605 | def setUp(self): | |
606 | ||
607 | super(RoomMemberStateTestCase, self).setUp() | |
564 | user_id = "@sid1:red" | |
565 | ||
566 | def prepare(self, reactor, clock, hs): | |
608 | 567 | self.room_id = self.helper.create_room_as(self.user_id) |
609 | ||
610 | def tearDown(self): | |
611 | pass | |
612 | 568 | |
613 | 569 | def test_invalid_puts(self): |
614 | 570 | path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) |
615 | 571 | # missing keys or invalid json |
616 | request, channel = make_request(b"PUT", path, '{}') | |
617 | render(request, self.resource, self.clock) | |
618 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
619 | ||
620 | request, channel = make_request(b"PUT", path, '{"_name":"bob"}') | |
621 | render(request, self.resource, self.clock) | |
622 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
623 | ||
624 | request, channel = make_request(b"PUT", path, '{"nao') | |
625 | render(request, self.resource, self.clock) | |
626 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
627 | ||
628 | request, channel = make_request( | |
629 | b"PUT", path, b'[{"_name":"bob"},{"_name":"jill"}]' | |
630 | ) | |
631 | render(request, self.resource, self.clock) | |
632 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
633 | ||
634 | request, channel = make_request(b"PUT", path, 'text only') | |
635 | render(request, self.resource, self.clock) | |
636 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
637 | ||
638 | request, channel = make_request(b"PUT", path, '') | |
639 | render(request, self.resource, self.clock) | |
640 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
572 | request, channel = self.make_request("PUT", path, '{}') | |
573 | self.render(request) | |
574 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
575 | ||
576 | request, channel = self.make_request("PUT", path, '{"_name":"bo"}') | |
577 | self.render(request) | |
578 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
579 | ||
580 | request, channel = self.make_request("PUT", path, '{"nao') | |
581 | self.render(request) | |
582 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
583 | ||
584 | request, channel = self.make_request( | |
585 | "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]' | |
586 | ) | |
587 | self.render(request) | |
588 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
589 | ||
590 | request, channel = self.make_request("PUT", path, 'text only') | |
591 | self.render(request) | |
592 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
593 | ||
594 | request, channel = self.make_request("PUT", path, '') | |
595 | self.render(request) | |
596 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
641 | 597 | |
642 | 598 | # valid keys, wrong types |
643 | 599 | content = '{"membership":["%s","%s","%s"]}' % ( |
645 | 601 | Membership.JOIN, |
646 | 602 | Membership.LEAVE, |
647 | 603 | ) |
648 | request, channel = make_request(b"PUT", path, content.encode('ascii')) | |
649 | render(request, self.resource, self.clock) | |
650 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
604 | request, channel = self.make_request("PUT", path, content.encode('ascii')) | |
605 | self.render(request) | |
606 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
651 | 607 | |
652 | 608 | def test_rooms_members_self(self): |
653 | 609 | path = "/rooms/%s/state/m.room.member/%s" % ( |
657 | 613 | |
658 | 614 | # valid join message (NOOP since we made the room) |
659 | 615 | content = '{"membership":"%s"}' % Membership.JOIN |
660 | request, channel = make_request(b"PUT", path, content.encode('ascii')) | |
661 | render(request, self.resource, self.clock) | |
662 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
663 | ||
664 | request, channel = make_request(b"GET", path, None) | |
665 | render(request, self.resource, self.clock) | |
666 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
616 | request, channel = self.make_request("PUT", path, content.encode('ascii')) | |
617 | self.render(request) | |
618 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
619 | ||
620 | request, channel = self.make_request("GET", path, None) | |
621 | self.render(request) | |
622 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
667 | 623 | |
668 | 624 | expected_response = {"membership": Membership.JOIN} |
669 | 625 | self.assertEquals(expected_response, channel.json_body) |
677 | 633 | |
678 | 634 | # valid invite message |
679 | 635 | content = '{"membership":"%s"}' % Membership.INVITE |
680 | request, channel = make_request(b"PUT", path, content) | |
681 | render(request, self.resource, self.clock) | |
682 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
683 | ||
684 | request, channel = make_request(b"GET", path, None) | |
685 | render(request, self.resource, self.clock) | |
686 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
636 | request, channel = self.make_request("PUT", path, content) | |
637 | self.render(request) | |
638 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
639 | ||
640 | request, channel = self.make_request("GET", path, None) | |
641 | self.render(request) | |
642 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
687 | 643 | self.assertEquals(json.loads(content), channel.json_body) |
688 | 644 | |
689 | 645 | def test_rooms_members_other_custom_keys(self): |
698 | 654 | Membership.INVITE, |
699 | 655 | "Join us!", |
700 | 656 | ) |
701 | request, channel = make_request(b"PUT", path, content) | |
702 | render(request, self.resource, self.clock) | |
703 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
704 | ||
705 | request, channel = make_request(b"GET", path, None) | |
706 | render(request, self.resource, self.clock) | |
707 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
657 | request, channel = self.make_request("PUT", path, content) | |
658 | self.render(request) | |
659 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
660 | ||
661 | request, channel = self.make_request("GET", path, None) | |
662 | self.render(request) | |
663 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
708 | 664 | self.assertEquals(json.loads(content), channel.json_body) |
709 | 665 | |
710 | 666 | |
713 | 669 | |
714 | 670 | user_id = "@sid1:red" |
715 | 671 | |
716 | def setUp(self): | |
717 | super(RoomMessagesTestCase, self).setUp() | |
718 | ||
672 | def prepare(self, reactor, clock, hs): | |
719 | 673 | self.room_id = self.helper.create_room_as(self.user_id) |
720 | 674 | |
721 | 675 | def test_invalid_puts(self): |
722 | 676 | path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) |
723 | 677 | # missing keys or invalid json |
724 | request, channel = make_request(b"PUT", path, '{}') | |
725 | render(request, self.resource, self.clock) | |
726 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
727 | ||
728 | request, channel = make_request(b"PUT", path, '{"_name":"bob"}') | |
729 | render(request, self.resource, self.clock) | |
730 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
731 | ||
732 | request, channel = make_request(b"PUT", path, '{"nao') | |
733 | render(request, self.resource, self.clock) | |
734 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
735 | ||
736 | request, channel = make_request( | |
737 | b"PUT", path, '[{"_name":"bob"},{"_name":"jill"}]' | |
738 | ) | |
739 | render(request, self.resource, self.clock) | |
740 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
741 | ||
742 | request, channel = make_request(b"PUT", path, 'text only') | |
743 | render(request, self.resource, self.clock) | |
744 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
745 | ||
746 | request, channel = make_request(b"PUT", path, '') | |
747 | render(request, self.resource, self.clock) | |
748 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
678 | request, channel = self.make_request("PUT", path, b'{}') | |
679 | self.render(request) | |
680 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
681 | ||
682 | request, channel = self.make_request("PUT", path, b'{"_name":"bo"}') | |
683 | self.render(request) | |
684 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
685 | ||
686 | request, channel = self.make_request("PUT", path, b'{"nao') | |
687 | self.render(request) | |
688 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
689 | ||
690 | request, channel = self.make_request( | |
691 | "PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]' | |
692 | ) | |
693 | self.render(request) | |
694 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
695 | ||
696 | request, channel = self.make_request("PUT", path, b'text only') | |
697 | self.render(request) | |
698 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
699 | ||
700 | request, channel = self.make_request("PUT", path, b'') | |
701 | self.render(request) | |
702 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
749 | 703 | |
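In this class the request bodies also switch from str to bytes literals (`b'{}'`, `b'text only'`, and so on), presumably so the payload that reaches the transport is unambiguous under Python 3. A small sketch of the distinction being made (the helper below is hypothetical, not part of the test utilities):

```python
def as_body(content):
    # Hypothetical normaliser: on Python 3 a str body would need an
    # explicit encode before hitting the wire; bytes pass through as-is.
    if isinstance(content, str):
        return content.encode("utf8")
    return content

assert as_body('{}') == as_body(b'{}') == b'{}'
```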
750 | 704 | def test_rooms_messages_sent(self): |
751 | 705 | path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) |
752 | 706 | |
753 | content = '{"body":"test","msgtype":{"type":"a"}}' | |
754 | request, channel = make_request(b"PUT", path, content) | |
755 | render(request, self.resource, self.clock) | |
756 | self.assertEquals(400, int(channel.result["code"]), msg=channel.result["body"]) | |
707 | content = b'{"body":"test","msgtype":{"type":"a"}}' | |
708 | request, channel = self.make_request("PUT", path, content) | |
709 | self.render(request) | |
710 | self.assertEquals(400, channel.code, msg=channel.result["body"]) | |
757 | 711 | |
758 | 712 | # custom message types |
759 | content = '{"body":"test","msgtype":"test.custom.text"}' | |
760 | request, channel = make_request(b"PUT", path, content) | |
761 | render(request, self.resource, self.clock) | |
762 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
713 | content = b'{"body":"test","msgtype":"test.custom.text"}' | |
714 | request, channel = self.make_request("PUT", path, content) | |
715 | self.render(request) | |
716 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
763 | 717 | |
764 | 718 | # m.text message type |
765 | 719 | path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id)) |
766 | content = '{"body":"test2","msgtype":"m.text"}' | |
767 | request, channel = make_request(b"PUT", path, content) | |
768 | render(request, self.resource, self.clock) | |
769 | self.assertEquals(200, int(channel.result["code"]), msg=channel.result["body"]) | |
720 | content = b'{"body":"test2","msgtype":"m.text"}' | |
721 | request, channel = self.make_request("PUT", path, content) | |
722 | self.render(request) | |
723 | self.assertEquals(200, channel.code, msg=channel.result["body"]) | |
770 | 724 | |
771 | 725 | |
772 | 726 | class RoomInitialSyncTestCase(RoomBase): |
774 | 728 | |
775 | 729 | user_id = "@sid1:red" |
776 | 730 | |
777 | def setUp(self): | |
778 | super(RoomInitialSyncTestCase, self).setUp() | |
779 | ||
731 | def prepare(self, reactor, clock, hs): | |
780 | 732 | # create the room |
781 | 733 | self.room_id = self.helper.create_room_as(self.user_id) |
782 | 734 | |
783 | 735 | def test_initial_sync(self): |
784 | request, channel = make_request(b"GET", "/rooms/%s/initialSync" % self.room_id) | |
785 | render(request, self.resource, self.clock) | |
786 | self.assertEquals(200, int(channel.result["code"])) | |
736 | request, channel = self.make_request( | |
737 | "GET", "/rooms/%s/initialSync" % self.room_id | |
738 | ) | |
739 | self.render(request) | |
740 | self.assertEquals(200, channel.code) | |
787 | 741 | |
788 | 742 | self.assertEquals(self.room_id, channel.json_body["room_id"]) |
789 | 743 | self.assertEquals("join", channel.json_body["membership"]) |
818 | 772 | |
819 | 773 | user_id = "@sid1:red" |
820 | 774 | |
821 | def setUp(self): | |
822 | super(RoomMessageListTestCase, self).setUp() | |
775 | def prepare(self, reactor, clock, hs): | |
823 | 776 | self.room_id = self.helper.create_room_as(self.user_id) |
824 | 777 | |
825 | 778 | def test_topo_token_is_accepted(self): |
826 | 779 | token = "t1-0_0_0_0_0_0_0_0_0" |
827 | request, channel = make_request( | |
828 | b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) | |
829 | ) | |
830 | render(request, self.resource, self.clock) | |
831 | self.assertEquals(200, int(channel.result["code"])) | |
780 | request, channel = self.make_request( | |
781 | "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) | |
782 | ) | |
783 | self.render(request) | |
784 | self.assertEquals(200, channel.code) | |
832 | 785 | self.assertTrue("start" in channel.json_body) |
833 | 786 | self.assertEquals(token, channel.json_body['start']) |
834 | 787 | self.assertTrue("chunk" in channel.json_body) |
836 | 789 | |
837 | 790 | def test_stream_token_is_accepted_for_fwd_pagination(self):
838 | 791 | token = "s0_0_0_0_0_0_0_0_0" |
839 | request, channel = make_request( | |
840 | b"GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) | |
841 | ) | |
842 | render(request, self.resource, self.clock) | |
843 | self.assertEquals(200, int(channel.result["code"])) | |
792 | request, channel = self.make_request( | |
793 | "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) | |
794 | ) | |
795 | self.render(request) | |
796 | self.assertEquals(200, channel.code) | |
844 | 797 | self.assertTrue("start" in channel.json_body) |
845 | 798 | self.assertEquals(token, channel.json_body['start']) |
846 | 799 | self.assertTrue("chunk" in channel.json_body) |
61 | 61 | self.assertEqual(channel.code, 200) |
62 | 62 | self.assertTrue( |
63 | 63 | set( |
64 | [ | |
65 | "next_batch", | |
66 | "rooms", | |
67 | "account_data", | |
68 | "to_device", | |
69 | "device_lists", | |
70 | ] | |
64 | ["next_batch", "rooms", "account_data", "to_device", "device_lists"] | |
71 | 65 | ).issubset(set(channel.json_body.keys())) |
72 | 66 | ) |
3 | 3 | from six import text_type |
4 | 4 | |
5 | 5 | import attr |
6 | ||
7 | from twisted.internet import address, threads | |
6 | from zope.interface import implementer | |
7 | ||
8 | from twisted.internet import address, threads, udp | |
9 | from twisted.internet._resolver import HostResolution | |
10 | from twisted.internet.address import IPv4Address | |
8 | 11 | from twisted.internet.defer import Deferred |
12 | from twisted.internet.error import DNSLookupError | |
13 | from twisted.internet.interfaces import IReactorPluggableNameResolver | |
9 | 14 | from twisted.python.failure import Failure |
10 | 15 | from twisted.test.proto_helpers import MemoryReactorClock |
11 | 16 | |
64 | 69 | def getPeer(self): |
65 | 70 | # We give an address so that getClientIP returns a non null entry, |
66 | 71 | # causing us to record the MAU |
67 | return address.IPv4Address(b"TCP", "127.0.0.1", 3423) | |
72 | return address.IPv4Address("TCP", "127.0.0.1", 3423) | |
68 | 73 | |
69 | 74 | def getHost(self): |
70 | 75 | return None |
153 | 158 | wait_until_result(clock, request) |
154 | 159 | |
155 | 160 | |
161 | @implementer(IReactorPluggableNameResolver) | |
156 | 162 | class ThreadedMemoryReactorClock(MemoryReactorClock): |
157 | 163 | """ |
158 | 164 | A MemoryReactorClock that supports callFromThread. |
159 | 165 | """ |
166 | ||
167 | def __init__(self): | |
168 | self._udp = [] | |
169 | self.lookups = {} | |
170 | ||
171 | class Resolver(object): | |
172 | def resolveHostName( | |
173 | _self, | |
174 | resolutionReceiver, | |
175 | hostName, | |
176 | portNumber=0, | |
177 | addressTypes=None, | |
178 | transportSemantics='TCP', | |
179 | ): | |
180 | ||
181 | resolution = HostResolution(hostName) | |
182 | resolutionReceiver.resolutionBegan(resolution) | |
183 | if hostName not in self.lookups: | |
184 | raise DNSLookupError("OH NO") | |
185 | ||
186 | resolutionReceiver.addressResolved( | |
187 | IPv4Address('TCP', self.lookups[hostName], portNumber) | |
188 | ) | |
189 | resolutionReceiver.resolutionComplete() | |
190 | return resolution | |
191 | ||
192 | self.nameResolver = Resolver() | |
193 | super(ThreadedMemoryReactorClock, self).__init__() | |
194 | ||
195 | def listenUDP(self, port, protocol, interface='', maxPacketSize=8196): | |
196 | p = udp.Port(port, protocol, interface, maxPacketSize, self) | |
197 | p.startListening() | |
198 | self._udp.append(p) | |
199 | return p | |
160 | 200 | |
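The reactor now advertises `IReactorPluggableNameResolver`, backed by the plain `lookups` dict above; any name a test resolves has to be seeded first, and unknown names fail hard. A short usage sketch (the host and address values are made up):

```python
reactor = ThreadedMemoryReactorClock()
# Seed the resolver before any connection attempt; unseeded names
# raise DNSLookupError("OH NO"), per resolveHostName above.
reactor.lookups["testserv"] = "1.2.3.4"
```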
161 | 201 | def callFromThread(self, callback, *args, **kwargs): |
162 | 202 | """ |
79 | 79 | |
80 | 80 | self._rlsn._auth.check_auth_blocking = Mock() |
81 | 81 | mock_event = Mock( |
82 | type=EventTypes.Message, | |
83 | content={"msgtype": ServerNoticeMsgType}, | |
84 | ) | |
85 | self._rlsn._store.get_events = Mock(return_value=defer.succeed( | |
86 | {"123": mock_event} | |
87 | )) | |
82 | type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} | |
83 | ) | |
84 | self._rlsn._store.get_events = Mock( | |
85 | return_value=defer.succeed({"123": mock_event}) | |
86 | ) | |
88 | 87 | |
89 | 88 | yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) |
90 | 89 | # Would be better to check the content; a single call means the blocking event was removed
98 | 97 | ) |
99 | 98 | |
100 | 99 | mock_event = Mock( |
101 | type=EventTypes.Message, | |
102 | content={"msgtype": ServerNoticeMsgType}, | |
103 | ) | |
104 | self._rlsn._store.get_events = Mock(return_value=defer.succeed( | |
105 | {"123": mock_event} | |
106 | )) | |
100 | type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} | |
101 | ) | |
102 | self._rlsn._store.get_events = Mock( | |
103 | return_value=defer.succeed({"123": mock_event}) | |
104 | ) | |
107 | 105 | yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) |
108 | 106 | |
109 | 107 | self._send_notice.assert_not_called() |
176 | 174 | |
177 | 175 | @defer.inlineCallbacks |
178 | 176 | def test_server_notice_only_sent_once(self): |
179 | self.store.get_monthly_active_count = Mock( | |
180 | return_value=1000, | |
181 | ) | |
182 | ||
183 | self.store.user_last_seen_monthly_active = Mock( | |
184 | return_value=1000, | |
185 | ) | |
177 | self.store.get_monthly_active_count = Mock(return_value=1000) | |
178 | ||
179 | self.store.user_last_seen_monthly_active = Mock(return_value=1000) | |
186 | 180 | |
187 | 181 | # Call the function multiple times to ensure we only send the notice once |
188 | 182 | yield self._rlsn.maybe_send_server_notice_to_user(self.user_id) |
192 | 186 | # Now let's get the last load of messages in the service notice room and
193 | 187 | # check that there is only one server notice |
194 | 188 | room_id = yield self.server_notices_manager.get_notice_room_for_user( |
195 | self.user_id, | |
189 | self.user_id | |
196 | 190 | ) |
197 | 191 | |
198 | 192 | token = yield self.event_source.get_current_token() |
199 | 193 | events, _ = yield self.store.get_recent_events_for_room( |
200 | room_id, limit=100, end_token=token.room_key, | |
194 | room_id, limit=100, end_token=token.room_key | |
201 | 195 | ) |
202 | 196 | |
203 | 197 | count = 0 |
100 | 100 | self.hs.config.limit_usage_by_mau = True |
101 | 101 | self.hs.config.max_mau_value = 50 |
102 | 102 | user_id = "@user:server" |
103 | yield self.store.register(user_id=user_id, token="123", password_hash=None) | |
103 | 104 | |
104 | 105 | active = yield self.store.user_last_seen_monthly_active(user_id) |
105 | 106 | self.assertFalse(active) |
107 | 108 | yield self.store.insert_client_ip( |
108 | 109 | user_id, "access_token", "ip", "user_agent", "device_id" |
109 | 110 | ) |
110 | yield self.store.insert_client_ip( | |
111 | user_id, "access_token", "ip", "user_agent", "device_id" | |
112 | ) | |
113 | 111 | active = yield self.store.user_last_seen_monthly_active(user_id) |
114 | 112 | self.assertTrue(active) |
11 | 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
14 | from mock import Mock | |
15 | ||
16 | from twisted.internet import defer | |
14 | 17 | |
15 | 18 | from tests.unittest import HomeserverTestCase |
16 | 19 | |
22 | 25 | |
23 | 26 | hs = self.setup_test_homeserver() |
24 | 27 | self.store = hs.get_datastore() |
25 | ||
28 | hs.config.limit_usage_by_mau = True | |
29 | hs.config.max_mau_value = 50 | |
26 | 30 | # Advance the clock a bit |
27 | 31 | reactor.advance(FORTY_DAYS) |
28 | 32 | |
72 | 76 | active_count = self.store.get_monthly_active_count() |
73 | 77 | self.assertEquals(self.get_success(active_count), user_num) |
74 | 78 | |
75 | # Test that regalar users are removed from the db | |
79 | # Test that regular users are removed from the db | |
76 | 80 | ru_count = 2 |
77 | 81 | self.store.upsert_monthly_active_user("@ru1:server") |
78 | 82 | self.store.upsert_monthly_active_user("@ru2:server") |
138 | 142 | |
139 | 143 | count = self.store.get_monthly_active_count() |
140 | 144 | self.assertEquals(self.get_success(count), 0) |
145 | ||
146 | def test_populate_monthly_users_is_guest(self): | |
147 | # Test that guest users are not added to the MAU list |
148 | user_id = "user_id" | |
149 | self.store.register( | |
150 | user_id=user_id, token="123", password_hash=None, make_guest=True | |
151 | ) | |
152 | self.store.upsert_monthly_active_user = Mock() | |
153 | self.store.populate_monthly_active_users(user_id) | |
154 | self.pump() | |
155 | self.store.upsert_monthly_active_user.assert_not_called() | |
156 | ||
157 | def test_populate_monthly_users_should_update(self): | |
158 | self.store.upsert_monthly_active_user = Mock() | |
159 | ||
160 | self.store.is_trial_user = Mock( | |
161 | return_value=defer.succeed(False) | |
162 | ) | |
163 | ||
164 | self.store.user_last_seen_monthly_active = Mock( | |
165 | return_value=defer.succeed(None) | |
166 | ) | |
167 | self.store.populate_monthly_active_users('user_id') | |
168 | self.pump() | |
169 | self.store.upsert_monthly_active_user.assert_called_once() | |
170 | ||
171 | def test_populate_monthly_users_should_not_update(self): | |
172 | self.store.upsert_monthly_active_user = Mock() | |
173 | ||
174 | self.store.is_trial_user = Mock( | |
175 | return_value=defer.succeed(False) | |
176 | ) | |
177 | self.store.user_last_seen_monthly_active = Mock( | |
178 | return_value=defer.succeed( | |
179 | self.hs.get_clock().time_msec() | |
180 | ) | |
181 | ) | |
182 | self.store.populate_monthly_active_users('user_id') | |
183 | self.pump() | |
184 | self.store.upsert_monthly_active_user.assert_not_called() | |
185 | ||
186 | def test_get_reserved_real_user_account(self): | |
187 | # Test no reserved users, or reserved threepids | |
188 | count = self.store.get_registered_reserved_users_count() | |
189 | self.assertEquals(self.get_success(count), 0) | |
190 | # Test reserved users but no registered users | |
191 | ||
192 | user1 = '@user1:example.com' | |
193 | user2 = '@user2:example.com' | |
194 | user1_email = 'user1@example.com' | |
195 | user2_email = 'user2@example.com' | |
196 | threepids = [ | |
197 | {'medium': 'email', 'address': user1_email}, | |
198 | {'medium': 'email', 'address': user2_email}, | |
199 | ] | |
200 | self.hs.config.mau_limits_reserved_threepids = threepids | |
201 | self.store.initialise_reserved_users(threepids) | |
202 | self.pump() | |
203 | count = self.store.get_registered_reserved_users_count() | |
204 | self.assertEquals(self.get_success(count), 0) | |
205 | ||
206 | # Test reserved registered users |
207 | self.store.register(user_id=user1, token="123", password_hash=None) | |
208 | self.store.register(user_id=user2, token="456", password_hash=None) | |
209 | self.pump() | |
210 | ||
211 | now = int(self.hs.get_clock().time_msec()) | |
212 | self.store.user_add_threepid(user1, "email", user1_email, now, now) | |
213 | self.store.user_add_threepid(user2, "email", user2_email, now, now) | |
214 | count = self.store.get_registered_reserved_users_count() | |
215 | self.assertEquals(self.get_success(count), len(threepids)) |
184 | 184 | |
185 | 185 | # test _get_some_state_from_cache correctly filters out members with types=[] |
186 | 186 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
187 | self.store._state_group_cache, | |
188 | group, [], filtered_types=[EventTypes.Member] | |
187 | self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] | |
189 | 188 | ) |
190 | 189 | |
191 | 190 | self.assertEqual(is_all, True) |
199 | 198 | |
200 | 199 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
201 | 200 | self.store._state_group_members_cache, |
202 | group, [], filtered_types=[EventTypes.Member] | |
203 | ) | |
204 | ||
205 | self.assertEqual(is_all, True) | |
206 | self.assertDictEqual( | |
207 | {}, | |
208 | state_dict, | |
209 | ) | |
201 | group, | |
202 | [], | |
203 | filtered_types=[EventTypes.Member], | |
204 | ) | |
205 | ||
206 | self.assertEqual(is_all, True) | |
207 | self.assertDictEqual({}, state_dict) | |
210 | 208 | |
211 | 209 | # test _get_some_state_from_cache correctly filters in members with wildcard types |
212 | 210 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
213 | 211 | self.store._state_group_cache, |
214 | group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] | |
212 | group, | |
213 | [(EventTypes.Member, None)], | |
214 | filtered_types=[EventTypes.Member], | |
215 | 215 | ) |
216 | 216 | |
217 | 217 | self.assertEqual(is_all, True) |
225 | 225 | |
226 | 226 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
227 | 227 | self.store._state_group_members_cache, |
228 | group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] | |
228 | group, | |
229 | [(EventTypes.Member, None)], | |
230 | filtered_types=[EventTypes.Member], | |
229 | 231 | ) |
230 | 232 | |
231 | 233 | self.assertEqual(is_all, True) |
263 | 265 | ) |
264 | 266 | |
265 | 267 | self.assertEqual(is_all, True) |
266 | self.assertDictEqual( | |
267 | { | |
268 | (e5.type, e5.state_key): e5.event_id, | |
269 | }, | |
270 | state_dict, | |
271 | ) | |
268 | self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) | |
272 | 269 | |
273 | 270 | # test _get_some_state_from_cache correctly filters in members with specific types |
274 | 271 | # and no filtered_types |
275 | 272 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
276 | 273 | self.store._state_group_members_cache, |
277 | group, [(EventTypes.Member, e5.state_key)], filtered_types=None | |
274 | group, | |
275 | [(EventTypes.Member, e5.state_key)], | |
276 | filtered_types=None, | |
278 | 277 | ) |
279 | 278 | |
280 | 279 | self.assertEqual(is_all, True) |
304 | 303 | key=group, |
305 | 304 | value=state_dict_ids, |
306 | 305 | # list fetched keys so it knows it's partial |
307 | fetched_keys=( | |
308 | (e1.type, e1.state_key), | |
309 | ), | |
306 | fetched_keys=((e1.type, e1.state_key),), | |
310 | 307 | ) |
311 | 308 | |
312 | 309 | (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get( |
314 | 311 | ) |
315 | 312 | |
316 | 313 | self.assertEqual(is_all, False) |
317 | self.assertEqual( | |
318 | known_absent, | |
319 | set( | |
320 | [ | |
321 | (e1.type, e1.state_key), | |
322 | ] | |
323 | ), | |
324 | ) | |
325 | self.assertDictEqual( | |
326 | state_dict_ids, | |
327 | { | |
328 | (e1.type, e1.state_key): e1.event_id, | |
329 | }, | |
330 | ) | |
314 | self.assertEqual(known_absent, set([(e1.type, e1.state_key)])) | |
315 | self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id}) | |
331 | 316 | |
332 | 317 | ############################################ |
333 | 318 | # test that things work with a partial cache |
335 | 320 | # test _get_some_state_from_cache correctly filters out members with types=[] |
336 | 321 | room_id = self.room.to_string() |
337 | 322 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
338 | self.store._state_group_cache, | |
339 | group, [], filtered_types=[EventTypes.Member] | |
323 | self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] | |
340 | 324 | ) |
341 | 325 | |
342 | 326 | self.assertEqual(is_all, False) |
345 | 329 | room_id = self.room.to_string() |
346 | 330 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
347 | 331 | self.store._state_group_members_cache, |
348 | group, [], filtered_types=[EventTypes.Member] | |
332 | group, | |
333 | [], | |
334 | filtered_types=[EventTypes.Member], | |
349 | 335 | ) |
350 | 336 | |
351 | 337 | self.assertEqual(is_all, True) |
354 | 340 | # test _get_some_state_from_cache correctly filters in members wildcard types |
355 | 341 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
356 | 342 | self.store._state_group_cache, |
357 | group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] | |
343 | group, | |
344 | [(EventTypes.Member, None)], | |
345 | filtered_types=[EventTypes.Member], | |
358 | 346 | ) |
359 | 347 | |
360 | 348 | self.assertEqual(is_all, False) |
361 | self.assertDictEqual( | |
362 | { | |
363 | (e1.type, e1.state_key): e1.event_id, | |
364 | }, | |
365 | state_dict, | |
366 | ) | |
367 | ||
368 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( | |
369 | self.store._state_group_members_cache, | |
370 | group, [(EventTypes.Member, None)], filtered_types=[EventTypes.Member] | |
349 | self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) | |
350 | ||
351 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( | |
352 | self.store._state_group_members_cache, | |
353 | group, | |
354 | [(EventTypes.Member, None)], | |
355 | filtered_types=[EventTypes.Member], | |
371 | 356 | ) |
372 | 357 | |
373 | 358 | self.assertEqual(is_all, True) |
388 | 373 | ) |
389 | 374 | |
390 | 375 | self.assertEqual(is_all, False) |
391 | self.assertDictEqual( | |
392 | { | |
393 | (e1.type, e1.state_key): e1.event_id, | |
394 | }, | |
395 | state_dict, | |
396 | ) | |
397 | ||
398 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( | |
399 | self.store._state_group_members_cache, | |
400 | group, | |
401 | [(EventTypes.Member, e5.state_key)], | |
402 | filtered_types=[EventTypes.Member], | |
403 | ) | |
404 | ||
405 | self.assertEqual(is_all, True) | |
406 | self.assertDictEqual( | |
407 | { | |
408 | (e5.type, e5.state_key): e5.event_id, | |
409 | }, | |
410 | state_dict, | |
411 | ) | |
376 | self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) | |
377 | ||
378 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( | |
379 | self.store._state_group_members_cache, | |
380 | group, | |
381 | [(EventTypes.Member, e5.state_key)], | |
382 | filtered_types=[EventTypes.Member], | |
383 | ) | |
384 | ||
385 | self.assertEqual(is_all, True) | |
386 | self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) | |
412 | 387 | |
413 | 388 | # test _get_some_state_from_cache correctly filters in members with specific types |
414 | 389 | # and no filtered_types |
415 | 390 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
416 | 391 | self.store._state_group_cache, |
417 | group, [(EventTypes.Member, e5.state_key)], filtered_types=None | |
392 | group, | |
393 | [(EventTypes.Member, e5.state_key)], | |
394 | filtered_types=None, | |
418 | 395 | ) |
419 | 396 | |
420 | 397 | self.assertEqual(is_all, False) |
422 | 399 | |
423 | 400 | (state_dict, is_all) = yield self.store._get_some_state_from_cache( |
424 | 401 | self.store._state_group_members_cache, |
425 | group, [(EventTypes.Member, e5.state_key)], filtered_types=None | |
426 | ) | |
427 | ||
428 | self.assertEqual(is_all, True) | |
429 | self.assertDictEqual( | |
430 | { | |
431 | (e5.type, e5.state_key): e5.event_id, | |
432 | }, | |
433 | state_dict, | |
434 | ) | |
402 | group, | |
403 | [(EventTypes.Member, e5.state_key)], | |
404 | filtered_types=None, | |
405 | ) | |
406 | ||
407 | self.assertEqual(is_all, True) | |
408 | self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) |
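Taken together, these assertions fix the contract of `_get_some_state_from_cache`: entries whose type appears in `filtered_types` are only returned when explicitly requested (a `None` state_key acting as a wildcard), everything else is returned unconditionally, and `is_all` reports whether the consulted cache group could answer exhaustively. A rough model of that filtering, inferred from the test cases rather than taken from the implementation:

```python
def filter_state_sketch(state_dict, types, filtered_types):
    # Sketch of the semantics the assertions above pin down.
    def requested(typ, state_key):
        # (type, None) in `types` acts as a wildcard over state_keys.
        return any(t == typ and (k is None or k == state_key) for t, k in types)

    return {
        (typ, key): event_id
        for (typ, key), event_id in state_dict.items()
        if requested(typ, key)
        or (filtered_types is not None and typ not in filtered_types)
    }
```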
184 | 184 | self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) |
185 | 185 | |
186 | 186 | def create_user(self, localpart): |
187 | request_data = json.dumps({ | |
188 | "username": localpart, | |
189 | "password": "monkey", | |
190 | "auth": {"type": LoginType.DUMMY}, | |
191 | }) | |
192 | ||
193 | request, channel = make_request(b"POST", b"/register", request_data) | |
187 | request_data = json.dumps( | |
188 | { | |
189 | "username": localpart, | |
190 | "password": "monkey", | |
191 | "auth": {"type": LoginType.DUMMY}, | |
192 | } | |
193 | ) | |
194 | ||
195 | request, channel = make_request("POST", "/register", request_data) | |
194 | 196 | render(request, self.resource, self.reactor) |
195 | 197 | |
196 | if channel.result["code"] != b"200": | |
198 | if channel.code != 200: | |
197 | 199 | raise HttpResponseException( |
198 | int(channel.result["code"]), | |
199 | channel.result["reason"], | |
200 | channel.result["body"], | |
200 | channel.code, channel.result["reason"], channel.result["body"] | |
201 | 201 | ).to_synapse_error() |
202 | 202 | |
203 | 203 | access_token = channel.json_body["access_token"] |
205 | 205 | return access_token |
206 | 206 | |
207 | 207 | def do_sync_for_user(self, token): |
208 | request, channel = make_request(b"GET", b"/sync", access_token=token) | |
208 | request, channel = make_request( | |
209 | "GET", "/sync", access_token=token.encode('ascii') | |
210 | ) | |
209 | 211 | render(request, self.resource, self.reactor) |
210 | 212 | |
211 | if channel.result["code"] != b"200": | |
213 | if channel.code != 200: | |
212 | 214 | raise HttpResponseException( |
213 | int(channel.result["code"]), | |
214 | channel.result["reason"], | |
215 | channel.result["body"], | |
215 | channel.code, channel.result["reason"], channel.result["body"] | |
216 | 216 | ).to_synapse_error() |
0 | # -*- coding: utf-8 -*- | |
1 | # Copyright 2018 New Vector Ltd | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | ||
16 | from synapse.metrics import InFlightGauge | |
17 | ||
18 | from tests import unittest | |
19 | ||
20 | ||
21 | class TestInFlightGauge(unittest.TestCase): |
22 | def test_basic(self): | |
23 | gauge = InFlightGauge( | |
24 | "test1", "", | |
25 | labels=["test_label"], | |
26 | sub_metrics=["foo", "bar"], | |
27 | ) | |
28 | ||
29 | def handle1(metrics): | |
30 | metrics.foo += 2 | |
31 | metrics.bar = max(metrics.bar, 5) | |
32 | ||
33 | def handle2(metrics): | |
34 | metrics.foo += 3 | |
35 | metrics.bar = max(metrics.bar, 7) | |
36 | ||
37 | gauge.register(("key1",), handle1) | |
38 | ||
39 | self.assert_dict({ | |
40 | "test1_total": {("key1",): 1}, | |
41 | "test1_foo": {("key1",): 2}, | |
42 | "test1_bar": {("key1",): 5}, | |
43 | }, self.get_metrics_from_gauge(gauge)) | |
44 | ||
45 | gauge.unregister(("key1",), handle1) | |
46 | ||
47 | self.assert_dict({ | |
48 | "test1_total": {("key1",): 0}, | |
49 | "test1_foo": {("key1",): 0}, | |
50 | "test1_bar": {("key1",): 0}, | |
51 | }, self.get_metrics_from_gauge(gauge)) | |
52 | ||
53 | gauge.register(("key1",), handle1) | |
54 | gauge.register(("key2",), handle2) | |
55 | ||
56 | self.assert_dict({ | |
57 | "test1_total": {("key1",): 1, ("key2",): 1}, | |
58 | "test1_foo": {("key1",): 2, ("key2",): 3}, | |
59 | "test1_bar": {("key1",): 5, ("key2",): 7}, | |
60 | }, self.get_metrics_from_gauge(gauge)) | |
61 | ||
62 | gauge.unregister(("key2",), handle2) | |
63 | gauge.register(("key1",), handle2) | |
64 | ||
65 | self.assert_dict({ | |
66 | "test1_total": {("key1",): 2, ("key2",): 0}, | |
67 | "test1_foo": {("key1",): 5, ("key2",): 0}, | |
68 | "test1_bar": {("key1",): 7, ("key2",): 0}, | |
69 | }, self.get_metrics_from_gauge(gauge)) | |
70 | ||
71 | def get_metrics_from_gauge(self, gauge): | |
72 | results = {} | |
73 | ||
74 | for r in gauge.collect(): | |
75 | results[r.name] = { | |
76 | tuple(labels[x] for x in gauge.labels): value | |
77 | for _, labels, value in r.samples | |
78 | } | |
79 | ||
80 | return results |
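For context, the gauge under test tracks a set of in-flight callbacks per label key: `register` adds one, `unregister` removes it, and collecting folds every still-registered callback into the named `sub_metrics` plus a `_total` count. A minimal usage sketch restricted to the API the test exercises (the metric name and label values here are made up):

```python
from synapse.metrics import InFlightGauge

gauge = InFlightGauge(
    "pending_requests", "", labels=["method"], sub_metrics=["foo"]
)

def measure(metrics):
    # Invoked for each registered key when the gauge is collected;
    # mutate the sub-metric counters in place.
    metrics.foo += 1

gauge.register(("GET",), measure)    # work started
# gauge.collect() now reports pending_requests_total == 1 for ("GET",)
gauge.unregister(("GET",), measure)  # work finished
```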
179 | 179 | graph = Graph( |
180 | 180 | nodes={ |
181 | 181 | "START": DictObj( |
182 | type=EventTypes.Create, state_key="", content={}, depth=1, | |
182 | type=EventTypes.Create, state_key="", content={}, depth=1 | |
183 | 183 | ), |
184 | 184 | "A": DictObj(type=EventTypes.Message, depth=2), |
185 | 185 | "B": DictObj(type=EventTypes.Message, depth=3), |
99 | 99 | |
100 | 100 | @defer.inlineCallbacks |
101 | 101 | def setup_test_homeserver( |
102 | cleanup_func, name="test", datastore=None, config=None, reactor=None, | |
103 | homeserverToUse=TestHomeServer, **kargs | |
102 | cleanup_func, | |
103 | name="test", | |
104 | datastore=None, | |
105 | config=None, | |
106 | reactor=None, | |
107 | homeserverToUse=TestHomeServer, | |
108 | **kargs | |
104 | 109 | ): |
105 | 110 | """ |
106 | 111 | Setup a homeserver suitable for running tests against. Keyword arguments |
146 | 151 | config.hs_disabled_message = "" |
147 | 152 | config.hs_disabled_limit_type = "" |
148 | 153 | config.max_mau_value = 50 |
154 | config.mau_trial_days = 0 | |
149 | 155 | config.mau_limits_reserved_threepids = [] |
150 | 156 | config.admin_contact = None |
151 | 157 | config.rc_messages_per_second = 10000 |
321 | 327 | @patch('twisted.web.http.Request') |
322 | 328 | @defer.inlineCallbacks |
323 | 329 | def trigger( |
324 | self, http_method, path, content, mock_request, | |
325 | federation_auth_origin=None, | |
330 | self, http_method, path, content, mock_request, federation_auth_origin=None | |
326 | 331 | ): |
327 | 332 | """ Fire an HTTP event. |
328 | 333 | |
355 | 360 | headers = {} |
356 | 361 | if federation_auth_origin is not None: |
357 | 362 | headers[b"Authorization"] = [ |
358 | b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin, ) | |
363 | b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,) | |
359 | 364 | ] |
360 | 365 | mock_request.requestHeaders.getRawHeaders = mock_getRawHeaders(headers) |
361 | 366 | |
575 | 580 | event_builder_factory = hs.get_event_builder_factory() |
576 | 581 | event_creation_handler = hs.get_event_creation_handler() |
577 | 582 | |
578 | builder = event_builder_factory.new({ | |
579 | "type": EventTypes.Create, | |
580 | "state_key": "", | |
581 | "sender": creator_id, | |
582 | "room_id": room_id, | |
583 | "content": {}, | |
584 | }) | |
585 | ||
586 | event, context = yield event_creation_handler.create_new_client_event( | |
587 | builder | |
583 | builder = event_builder_factory.new( | |
584 | { | |
585 | "type": EventTypes.Create, | |
586 | "state_key": "", | |
587 | "sender": creator_id, | |
588 | "room_id": room_id, | |
589 | "content": {}, | |
590 | } | |
588 | 591 | ) |
589 | 592 | |
593 | event, context = yield event_creation_handler.create_new_client_event(builder) | |
594 | ||
590 | 595 | yield store.persist_event(event, context) |
63 | 63 | {[base]setenv} |
64 | 64 | SYNAPSE_POSTGRES = 1 |
65 | 65 | |
66 | [testenv:py35] | |
67 | usedevelop=true | |
68 | ||
66 | 69 | [testenv:py36] |
67 | 70 | usedevelop=true |
68 | commands = | |
69 | /usr/bin/find "{toxinidir}" -name '*.pyc' -delete | |
70 | coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \ | |
71 | "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests/config \ | |
72 | tests/api/test_filtering.py \ | |
73 | tests/api/test_ratelimiting.py \ | |
74 | tests/appservice \ | |
75 | tests/crypto \ | |
76 | tests/events \ | |
77 | tests/handlers/test_appservice.py \ | |
78 | tests/handlers/test_auth.py \ | |
79 | tests/handlers/test_device.py \ | |
80 | tests/handlers/test_directory.py \ | |
81 | tests/handlers/test_e2e_keys.py \ | |
82 | tests/handlers/test_presence.py \ | |
83 | tests/handlers/test_profile.py \ | |
84 | tests/handlers/test_register.py \ | |
85 | tests/replication/slave/storage/test_account_data.py \ | |
86 | tests/replication/slave/storage/test_receipts.py \ | |
87 | tests/storage/test_appservice.py \ | |
88 | tests/storage/test_background_update.py \ | |
89 | tests/storage/test_base.py \ | |
90 | tests/storage/test__base.py \ | |
91 | tests/storage/test_client_ips.py \ | |
92 | tests/storage/test_devices.py \ | |
93 | tests/storage/test_end_to_end_keys.py \ | |
94 | tests/storage/test_event_push_actions.py \ | |
95 | tests/storage/test_keys.py \ | |
96 | tests/storage/test_presence.py \ | |
97 | tests/storage/test_profile.py \ | |
98 | tests/storage/test_registration.py \ | |
99 | tests/storage/test_room.py \ | |
100 | tests/storage/test_user_directory.py \ | |
101 | tests/test_distributor.py \ | |
102 | tests/test_dns.py \ | |
103 | tests/test_preview.py \ | |
104 | tests/test_test_utils.py \ | |
105 | tests/test_types.py \ | |
106 | tests/util} \ | |
107 | {env:TOXSUFFIX:} | |
108 | {env:DUMP_COVERAGE_COMMAND:coverage report -m} | |
109 | 71 | |
110 | 72 | [testenv:packaging] |
111 | 73 | deps = |