Merge tag 'upstream/0.18.0' into debian
Upstream version 0.18.0
Erik Johnston
7 years ago
0 | Changes in synapse v0.18.0 (2016-09-19) | |
1 | ======================================= | |
2 | ||
3 | The release includes major changes to the state storage database schemas, which | |
4 | significantly reduce database size. Synapse will attempt to upgrade the current | |
5 | data in the background. Servers with large SQLite database may experience | |
6 | degradation of performance while this upgrade is in progress, therefore you may | |
7 | want to consider migrating to using Postgres before upgrading very large SQLite | |
8 | databases. | |
9 | ||
10 | ||
11 | Changes: | |
12 | ||
13 | * Make public room search case insensitive (PR #1127) | |
14 | ||
15 | ||
16 | Bug fixes: | |
17 | ||
18 | * Fix and clean up publicRooms pagination (PR #1129) | |
19 | ||
20 | ||
21 | Changes in synapse v0.18.0-rc1 (2016-09-16) | |
22 | =========================================== | |
23 | ||
24 | Features: | |
25 | ||
26 | * Add ``only=highlight`` on ``/notifications`` (PR #1081) | |
27 | * Add server param to /publicRooms (PR #1082) | |
28 | * Allow clients to ask for the whole of a single state event (PR #1094) | |
29 | * Add is_direct param to /createRoom (PR #1108) | |
30 | * Add pagination support to publicRooms (PR #1121) | |
31 | * Add very basic filter API to /publicRooms (PR #1126) | |
32 | * Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104, | |
33 | #1111) | |
34 | ||
35 | ||
36 | Changes: | |
37 | ||
38 | * Move to storing state_groups_state as deltas, greatly reducing DB size (PR | |
39 | #1065) | |
40 | * Reduce amount of state pulled out of the DB during common requests (PR #1069) | |
41 | * Allow PDF to be rendered from media repo (PR #1071) | |
42 | * Reindex state_groups_state after pruning (PR #1085) | |
43 | * Clobber EDUs in send queue (PR #1095) | |
44 | * Conform better to the CAS protocol specification (PR #1100) | |
45 | * Limit how often we ask for keys from dead servers (PR #1114) | |
46 | ||
47 | ||
48 | Bug fixes: | |
49 | ||
50 | * Fix /notifications API when used with ``from`` param (PR #1080) | |
51 | * Fix backfill when cannot find an event. (PR #1107) | |
52 | ||
53 | ||
0 | 54 | Changes in synapse v0.17.3 (2016-09-09) |
1 | 55 | ======================================= |
2 | 56 |
41 | 41 | * synapse.app.appservice - handles output traffic to Application Services |
42 | 42 | * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API) |
43 | 43 | * synapse.app.media_repository - handles the media repository. |
44 | * synapse.app.client_reader - handles client API endpoints like /publicRooms | |
44 | 45 | |
45 | 46 | Each worker configuration file inherits the configuration of the main homeserver |
46 | 47 | configuration file. You can then override configuration specific to that worker, |
19 | 19 | --pusher \ |
20 | 20 | --synchrotron \ |
21 | 21 | --federation-reader \ |
22 | --client-reader \ | |
23 | --appservice \ |
15 | 15 | """ This is a reference implementation of a Matrix home server. |
16 | 16 | """ |
17 | 17 | |
18 | __version__ = "0.17.3" | |
18 | __version__ = "0.18.0" |
582 | 582 | """ |
583 | 583 | # Can optionally look elsewhere in the request (e.g. headers) |
584 | 584 | try: |
585 | user_id = yield self._get_appservice_user_id(request.args) | |
585 | user_id = yield self._get_appservice_user_id(request) | |
586 | 586 | if user_id: |
587 | 587 | request.authenticated_entity = user_id |
588 | 588 | defer.returnValue(synapse.types.create_requester(user_id)) |
589 | 589 | |
590 | access_token = request.args["access_token"][0] | |
590 | access_token = get_access_token_from_request( | |
591 | request, self.TOKEN_NOT_FOUND_HTTP_STATUS | |
592 | ) | |
593 | ||
591 | 594 | user_info = yield self.get_user_by_access_token(access_token, rights) |
592 | 595 | user = user_info["user"] |
593 | 596 | token_id = user_info["token_id"] |
628 | 631 | ) |
629 | 632 | |
630 | 633 | @defer.inlineCallbacks |
631 | def _get_appservice_user_id(self, request_args): | |
634 | def _get_appservice_user_id(self, request): | |
632 | 635 | app_service = yield self.store.get_app_service_by_token( |
633 | request_args["access_token"][0] | |
636 | get_access_token_from_request( | |
637 | request, self.TOKEN_NOT_FOUND_HTTP_STATUS | |
638 | ) | |
634 | 639 | ) |
635 | 640 | if app_service is None: |
636 | 641 | defer.returnValue(None) |
637 | 642 | |
638 | if "user_id" not in request_args: | |
643 | if "user_id" not in request.args: | |
639 | 644 | defer.returnValue(app_service.sender) |
640 | 645 | |
641 | user_id = request_args["user_id"][0] | |
646 | user_id = request.args["user_id"][0] | |
642 | 647 | if app_service.sender == user_id: |
643 | 648 | defer.returnValue(app_service.sender) |
644 | 649 | |
832 | 837 | @defer.inlineCallbacks |
833 | 838 | def get_appservice_by_req(self, request): |
834 | 839 | try: |
835 | token = request.args["access_token"][0] | |
840 | token = get_access_token_from_request( | |
841 | request, self.TOKEN_NOT_FOUND_HTTP_STATUS | |
842 | ) | |
836 | 843 | service = yield self.store.get_app_service_by_token(token) |
837 | 844 | if not service: |
838 | 845 | logger.warn("Unrecognised appservice access token: %s" % (token,)) |
1141 | 1148 | "This server requires you to be a moderator in the room to" |
1142 | 1149 | " edit its room list entry" |
1143 | 1150 | ) |
1151 | ||
1152 | ||
def has_access_token(request):
    """Report whether the request carries an access_token query parameter.

    Args:
        request: The http request.

    Returns:
        bool: True if an access_token was given, False otherwise.
    """
    return bool(request.args.get("access_token"))
1161 | ||
1162 | ||
def get_access_token_from_request(request, token_not_found_http_status=401):
    """Extracts the access_token from the request.

    Args:
        request: The http request.
        token_not_found_http_status(int): The HTTP status code to set in the
            AuthError if the token isn't found. This is used in some of the
            legacy APIs to change the status code to 403 from the default of
            401 since some of the old clients depended on auth errors returning
            403.
    Returns:
        str: The access_token
    Raises:
        AuthError: If there isn't an access_token in the request.
    """
    tokens = request.args.get("access_token")
    if tokens:
        # Query parameter values are lists; the first entry is the token.
        return tokens[0]

    raise AuthError(
        token_not_found_http_status,
        "Missing access token.",
        errcode=Codes.MISSING_TOKEN
    )
186 | 186 | def start(): |
187 | 187 | ps.replicate() |
188 | 188 | ps.get_datastore().start_profiling() |
189 | ps.get_state_handler().start_caching() | |
189 | 190 | |
190 | 191 | reactor.callWhenRunning(start) |
191 | 192 |
0 | #!/usr/bin/env python | |
1 | # -*- coding: utf-8 -*- | |
2 | # Copyright 2016 OpenMarket Ltd | |
3 | # | |
4 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
5 | # you may not use this file except in compliance with the License. | |
6 | # You may obtain a copy of the License at | |
7 | # | |
8 | # http://www.apache.org/licenses/LICENSE-2.0 | |
9 | # | |
10 | # Unless required by applicable law or agreed to in writing, software | |
11 | # distributed under the License is distributed on an "AS IS" BASIS, | |
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
13 | # See the License for the specific language governing permissions and | |
14 | # limitations under the License. | |
15 | ||
16 | import synapse | |
17 | ||
18 | from synapse.config._base import ConfigError | |
19 | from synapse.config.homeserver import HomeServerConfig | |
20 | from synapse.config.logger import setup_logging | |
21 | from synapse.http.site import SynapseSite | |
22 | from synapse.http.server import JsonResource | |
23 | from synapse.metrics.resource import MetricsResource, METRICS_PREFIX | |
24 | from synapse.replication.slave.storage._base import BaseSlavedStore | |
25 | from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore | |
26 | from synapse.replication.slave.storage.events import SlavedEventStore | |
27 | from synapse.replication.slave.storage.keys import SlavedKeyStore | |
28 | from synapse.replication.slave.storage.room import RoomStore | |
29 | from synapse.replication.slave.storage.directory import DirectoryStore | |
30 | from synapse.replication.slave.storage.registration import SlavedRegistrationStore | |
31 | from synapse.rest.client.v1.room import PublicRoomListRestServlet | |
32 | from synapse.server import HomeServer | |
33 | from synapse.storage.client_ips import ClientIpStore | |
34 | from synapse.storage.engines import create_engine | |
35 | from synapse.util.async import sleep | |
36 | from synapse.util.httpresourcetree import create_resource_tree | |
37 | from synapse.util.logcontext import LoggingContext | |
38 | from synapse.util.manhole import manhole | |
39 | from synapse.util.rlimit import change_resource_limit | |
40 | from synapse.util.versionstring import get_version_string | |
41 | from synapse.crypto import context_factory | |
42 | ||
43 | ||
44 | from twisted.internet import reactor, defer | |
45 | from twisted.web.resource import Resource | |
46 | ||
47 | from daemonize import Daemonize | |
48 | ||
49 | import sys | |
50 | import logging | |
51 | import gc | |
52 | ||
53 | logger = logging.getLogger("synapse.app.client_reader") | |
54 | ||
55 | ||
class ClientReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
    ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
    """Datastore for the client_reader worker: composes the slaved store
    mixins this worker needs, with no behaviour of its own.
    """
    pass
67 | ||
68 | ||
class ClientReaderServer(HomeServer):
    """Worker homeserver serving read-only client API endpoints (currently
    the public room list), backed by a slaved datastore that is kept up to
    date by long-polling the master's replication URL.
    """

    def get_db_conn(self, run_new_connection=True):
        """Open a raw database connection.

        Args:
            run_new_connection(bool): Whether to run the database engine's
                on_new_connection hook on the fresh connection.

        Returns:
            A new DB-API connection object.
        """
        # Any param beginning with cp_ is a parameter for adbapi, and should
        # not be passed to the database engine.
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)

        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn

    def setup(self):
        """Create the slaved datastore for this worker."""
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")

    def _listen_http(self, listener_config):
        """Start one HTTP listener as described by ``listener_config``.

        Supports the "metrics" and "client" resources; the client resource
        only registers the public room list servlet.
        """
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    resource = JsonResource(self, canonical_json=False)
                    PublicRoomListRestServlet(self).register(resource)
                    # The same servlet answers on every client API prefix.
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })

        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse client reader now listening on port %d", port)

    def start_listening(self, listeners):
        """Start all configured listeners ("http" and "manhole" types)."""
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])

    @defer.inlineCallbacks
    def replicate(self):
        """Long-poll the master's replication URL forever, feeding each
        result into the slaved datastore. Waits 5s and retries on error.
        """
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url

        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except Exception:
                # Deliberately broad (best-effort retry loop), but not a bare
                # except: that would also swallow SystemExit/KeyboardInterrupt
                # and prevent clean shutdown.
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
153 | ||
def start(config_options):
    """Entry point for the client_reader worker.

    Loads configuration, constructs and wires up the ClientReaderServer,
    then runs the twisted reactor, optionally as a daemon.

    Args:
        config_options(list[str]): Command line arguments (sys.argv[1:]).
    """
    try:
        config = HomeServerConfig.load_config(
            "Synapse client reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)

    # This entry point only serves the client_reader worker app.
    assert config.worker_app == "synapse.app.client_reader"

    setup_logging(config.worker_log_config, config.worker_log_file)

    database_engine = create_engine(config.database_config)
    tls_server_context_factory = context_factory.ServerContextFactory(config)

    server = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )

    server.setup()
    server.get_handlers()
    server.start_listening(config.worker_listeners)

    def on_reactor_running():
        # Kick off caching, profiling and replication once the reactor is up.
        server.get_state_handler().start_caching()
        server.get_datastore().start_profiling()
        server.replicate()

    reactor.callWhenRunning(on_reactor_running)

    def run():
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()

    if not config.worker_daemonize:
        run()
    else:
        daemon = Daemonize(
            app="synapse-client-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
211 | ||
212 | ||
if __name__ == '__main__':
    # Script entry point: run the worker with the command-line arguments.
    with LoggingContext("main"):
        start(sys.argv[1:])
181 | 181 | reactor.run() |
182 | 182 | |
183 | 183 | def start(): |
184 | ss.get_state_handler().start_caching() | |
184 | 185 | ss.get_datastore().start_profiling() |
185 | 186 | ss.replicate() |
186 | 187 |
187 | 187 | reactor.run() |
188 | 188 | |
189 | 189 | def start(): |
190 | ss.get_state_handler().start_caching() | |
190 | 191 | ss.get_datastore().start_profiling() |
191 | 192 | ss.replicate() |
192 | 193 |
275 | 275 | ps.replicate() |
276 | 276 | ps.get_pusherpool().start() |
277 | 277 | ps.get_datastore().start_profiling() |
278 | ps.get_state_handler().start_caching() | |
278 | 279 | |
279 | 280 | reactor.callWhenRunning(start) |
280 | 281 |
241 | 241 | self._room_typing = {} |
242 | 242 | |
243 | 243 | def stream_positions(self): |
244 | # We must update this typing token from the response of the previous | |
245 | # sync. In particular, the stream id may "reset" back to zero/a low | |
246 | # value which we *must* use for the next replication request. | |
244 | 247 | return {"typing": self._latest_room_serial} |
245 | 248 | |
246 | 249 | def process_replication(self, result): |
461 | 464 | def start(): |
462 | 465 | ss.get_datastore().start_profiling() |
463 | 466 | ss.replicate() |
467 | ss.get_state_handler().start_caching() | |
464 | 468 | |
465 | 469 | reactor.callWhenRunning(start) |
466 | 470 |
31 | 31 | APP_SERVICE_PREFIX = "/_matrix/app/unstable" |
32 | 32 | |
33 | 33 | |
34 | def _is_valid_3pe_metadata(info): | |
35 | if "instances" not in info: | |
36 | return False | |
37 | if not isinstance(info["instances"], list): | |
38 | return False | |
39 | return True | |
40 | ||
41 | ||
34 | 42 | def _is_valid_3pe_result(r, field): |
35 | 43 | if not isinstance(r, dict): |
36 | 44 | return False |
161 | 169 | urllib.quote(protocol) |
162 | 170 | ) |
163 | 171 | try: |
164 | defer.returnValue((yield self.get_json(uri, {}))) | |
172 | info = yield self.get_json(uri, {}) | |
173 | ||
174 | if not _is_valid_3pe_metadata(info): | |
175 | logger.warning("query_3pe_protocol to %s did not return a" | |
176 | " valid result", uri) | |
177 | defer.returnValue(None) | |
178 | ||
179 | defer.returnValue(info) | |
165 | 180 | except Exception as ex: |
166 | 181 | logger.warning("query_3pe_protocol to %s threw exception %s", |
167 | 182 | uri, ex) |
168 | defer.returnValue({}) | |
183 | defer.returnValue(None) | |
169 | 184 | |
170 | 185 | key = (service.id, protocol) |
171 | 186 | return self.protocol_meta_cache.get(key) or ( |
28 | 28 | self.user_agent_suffix = config.get("user_agent_suffix") |
29 | 29 | self.use_frozen_dicts = config.get("use_frozen_dicts", False) |
30 | 30 | self.public_baseurl = config.get("public_baseurl") |
31 | self.secondary_directory_servers = config.get("secondary_directory_servers", []) | |
32 | 31 | |
33 | 32 | if self.public_baseurl is not None: |
34 | 33 | if self.public_baseurl[-1] != '/': |
141 | 140 | # The GC threshold parameters to pass to `gc.set_threshold`, if defined |
142 | 141 | # gc_thresholds: [700, 10, 10] |
143 | 142 | |
144 | # A list of other Home Servers to fetch the public room directory from | |
145 | # and include in the public room directory of this home server | |
146 | # This is a temporary stopgap solution to populate new server with a | |
147 | # list of rooms until there exists a good solution of a decentralized | |
148 | # room directory. | |
149 | # secondary_directory_servers: | |
150 | # - matrix.org | |
151 | ||
152 | 143 | # List of ports that Synapse should listen on, their purpose and their |
153 | 144 | # configuration. |
154 | 145 | listeners: |
14 | 14 | |
15 | 15 | |
16 | 16 | class EventContext(object): |
17 | __slots__ = [ | |
18 | "current_state_ids", | |
19 | "prev_state_ids", | |
20 | "state_group", | |
21 | "rejected", | |
22 | "push_actions", | |
23 | "prev_group", | |
24 | "delta_ids", | |
25 | "prev_state_events", | |
26 | ] | |
27 | ||
17 | 28 | def __init__(self): |
29 | # The current state including the current event | |
18 | 30 | self.current_state_ids = None |
31 | # The current state excluding the current event | |
19 | 32 | self.prev_state_ids = None |
20 | 33 | self.state_group = None |
34 | ||
21 | 35 | self.rejected = False |
22 | 36 | self.push_actions = [] |
37 | ||
38 | # A previously persisted state group and a delta between that | |
39 | # and this state. | |
40 | self.prev_group = None | |
41 | self.delta_ids = None | |
42 | ||
43 | self.prev_state_events = None |
23 | 23 | CodeMessageException, HttpResponseException, SynapseError, |
24 | 24 | ) |
25 | 25 | from synapse.util import unwrapFirstError |
26 | from synapse.util.async import concurrently_execute | |
27 | 26 | from synapse.util.caches.expiringcache import ExpiringCache |
28 | 27 | from synapse.util.logutils import log_function |
29 | 28 | from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred |
121 | 120 | pdu.event_id |
122 | 121 | ) |
123 | 122 | |
124 | @log_function | |
125 | def send_edu(self, destination, edu_type, content): | |
123 | def send_presence(self, destination, states): | |
124 | if destination != self.server_name: | |
125 | self._transaction_queue.enqueue_presence(destination, states) | |
126 | ||
127 | @log_function | |
128 | def send_edu(self, destination, edu_type, content, key=None): | |
126 | 129 | edu = Edu( |
127 | 130 | origin=self.server_name, |
128 | 131 | destination=destination, |
133 | 136 | sent_edus_counter.inc() |
134 | 137 | |
135 | 138 | # TODO, add errback, etc. |
136 | self._transaction_queue.enqueue_edu(edu) | |
139 | self._transaction_queue.enqueue_edu(edu, key=key) | |
137 | 140 | return defer.succeed(None) |
141 | ||
142 | @log_function | |
143 | def send_device_messages(self, destination): | |
144 | """Sends the device messages in the local database to the remote | |
145 | destination""" | |
146 | self._transaction_queue.enqueue_device_messages(destination) | |
138 | 147 | |
139 | 148 | @log_function |
140 | 149 | def send_failure(self, failure, destination): |
165 | 174 | ) |
166 | 175 | |
167 | 176 | @log_function |
168 | def query_client_keys(self, destination, content): | |
177 | def query_client_keys(self, destination, content, timeout): | |
169 | 178 | """Query device keys for a device hosted on a remote server. |
170 | 179 | |
171 | 180 | Args: |
177 | 186 | response |
178 | 187 | """ |
179 | 188 | sent_queries_counter.inc("client_device_keys") |
180 | return self.transport_layer.query_client_keys(destination, content) | |
181 | ||
182 | @log_function | |
183 | def claim_client_keys(self, destination, content): | |
189 | return self.transport_layer.query_client_keys( | |
190 | destination, content, timeout | |
191 | ) | |
192 | ||
193 | @log_function | |
194 | def claim_client_keys(self, destination, content, timeout): | |
184 | 195 | """Claims one-time keys for a device hosted on a remote server. |
185 | 196 | |
186 | 197 | Args: |
192 | 203 | response |
193 | 204 | """ |
194 | 205 | sent_queries_counter.inc("client_one_time_keys") |
195 | return self.transport_layer.claim_client_keys(destination, content) | |
206 | return self.transport_layer.claim_client_keys( | |
207 | destination, content, timeout | |
208 | ) | |
196 | 209 | |
197 | 210 | @defer.inlineCallbacks |
198 | 211 | @log_function |
470 | 483 | defer.DeferredList(deferreds, consumeErrors=True) |
471 | 484 | ) |
472 | 485 | for success, result in res: |
473 | if success: | |
486 | if success and result: | |
474 | 487 | signed_events.append(result) |
475 | 488 | batch.discard(result.event_id) |
476 | 489 | |
704 | 717 | |
705 | 718 | raise RuntimeError("Failed to send to any server.") |
706 | 719 | |
707 | @defer.inlineCallbacks | |
708 | def get_public_rooms(self, destinations): | |
709 | results_by_server = {} | |
710 | ||
711 | @defer.inlineCallbacks | |
712 | def _get_result(s): | |
713 | if s == self.server_name: | |
714 | defer.returnValue() | |
715 | ||
716 | try: | |
717 | result = yield self.transport_layer.get_public_rooms(s) | |
718 | results_by_server[s] = result | |
719 | except: | |
720 | logger.exception("Error getting room list from server %r", s) | |
721 | ||
722 | yield concurrently_execute(_get_result, destinations, 3) | |
723 | ||
724 | defer.returnValue(results_by_server) | |
720 | def get_public_rooms(self, destination, limit=None, since_token=None, | |
721 | search_filter=None): | |
722 | if destination == self.server_name: | |
723 | return | |
724 | ||
725 | return self.transport_layer.get_public_rooms( | |
726 | destination, limit, since_token, search_filter | |
727 | ) | |
725 | 728 | |
726 | 729 | @defer.inlineCallbacks |
727 | 730 | def query_auth(self, destination, room_id, event_id, local_auth): |
187 | 187 | except SynapseError as e: |
188 | 188 | logger.info("Failed to handle edu %r: %r", edu_type, e) |
189 | 189 | except Exception as e: |
190 | logger.exception("Failed to handle edu %r", edu_type, e) | |
190 | logger.exception("Failed to handle edu %r", edu_type) | |
191 | 191 | else: |
192 | 192 | logger.warn("Received EDU of type %s with no handler", edu_type) |
193 | 193 |
16 | 16 | from twisted.internet import defer |
17 | 17 | |
18 | 18 | from .persistence import TransactionActions |
19 | from .units import Transaction | |
19 | from .units import Transaction, Edu | |
20 | 20 | |
21 | 21 | from synapse.api.errors import HttpResponseException |
22 | 22 | from synapse.util.async import run_on_reactor |
25 | 25 | get_retry_limiter, NotRetryingDestination, |
26 | 26 | ) |
27 | 27 | from synapse.util.metrics import measure_func |
28 | from synapse.handlers.presence import format_user_presence_state | |
28 | 29 | import synapse.metrics |
29 | 30 | |
30 | 31 | import logging |
68 | 69 | # destination -> list of tuple(edu, deferred) |
69 | 70 | self.pending_edus_by_dest = edus = {} |
70 | 71 | |
72 | # Presence needs to be separate as we send single aggregate EDUs | |
73 | self.pending_presence_by_dest = presence = {} | |
74 | self.pending_edus_keyed_by_dest = edus_keyed = {} | |
75 | ||
71 | 76 | metrics.register_callback( |
72 | 77 | "pending_pdus", |
73 | 78 | lambda: sum(map(len, pdus.values())), |
74 | 79 | ) |
75 | 80 | metrics.register_callback( |
76 | 81 | "pending_edus", |
77 | lambda: sum(map(len, edus.values())), | |
82 | lambda: ( | |
83 | sum(map(len, edus.values())) | |
84 | + sum(map(len, presence.values())) | |
85 | + sum(map(len, edus_keyed.values())) | |
86 | ), | |
78 | 87 | ) |
79 | 88 | |
80 | 89 | # destination -> list of tuple(failure, deferred) |
81 | 90 | self.pending_failures_by_dest = {} |
91 | ||
92 | self.last_device_stream_id_by_dest = {} | |
82 | 93 | |
83 | 94 | # HACK to get unique tx id |
84 | 95 | self._next_txn_id = int(self.clock.time_msec()) |
127 | 138 | self._attempt_new_transaction, destination |
128 | 139 | ) |
129 | 140 | |
130 | def enqueue_edu(self, edu): | |
    def enqueue_presence(self, destination, states):
        """Record the latest presence state per user to send to *destination*
        and poke the transmission loop.

        Later states for the same user_id overwrite earlier pending ones, so
        only the most recent presence per user is kept for sending.
        """
        self.pending_presence_by_dest.setdefault(destination, {}).update({
            state.user_id: state for state in states
        })

        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )
149 | ||
150 | def enqueue_edu(self, edu, key=None): | |
131 | 151 | destination = edu.destination |
132 | 152 | |
133 | 153 | if not self.can_send_to(destination): |
134 | 154 | return |
135 | 155 | |
136 | self.pending_edus_by_dest.setdefault(destination, []).append(edu) | |
156 | if key: | |
157 | self.pending_edus_keyed_by_dest.setdefault( | |
158 | destination, {} | |
159 | )[(edu.edu_type, key)] = edu | |
160 | else: | |
161 | self.pending_edus_by_dest.setdefault(destination, []).append(edu) | |
137 | 162 | |
138 | 163 | preserve_context_over_fn( |
139 | 164 | self._attempt_new_transaction, destination |
154 | 179 | self._attempt_new_transaction, destination |
155 | 180 | ) |
156 | 181 | |
    def enqueue_device_messages(self, destination):
        """Poke a new transaction to *destination* so pending
        direct-to-device messages get picked up and sent.

        No-op when the destination is ourselves ("localhost" or our own
        server name) or one we may not send to.
        """
        if destination == self.server_name or destination == "localhost":
            return

        if not self.can_send_to(destination):
            return

        # The messages themselves are pulled from the store when the
        # transaction is built; nothing needs queueing here.
        preserve_context_over_fn(
            self._attempt_new_transaction, destination
        )
192 | ||
157 | 193 | @defer.inlineCallbacks |
158 | 194 | def _attempt_new_transaction(self, destination): |
159 | yield run_on_reactor() | |
160 | while True: | |
161 | # list of (pending_pdu, deferred, order) | |
162 | if destination in self.pending_transactions: | |
163 | # XXX: pending_transactions can get stuck on by a never-ending | |
164 | # request at which point pending_pdus_by_dest just keeps growing. | |
165 | # we need application-layer timeouts of some flavour of these | |
166 | # requests | |
167 | logger.debug( | |
168 | "TX [%s] Transaction already in progress", | |
169 | destination | |
170 | ) | |
171 | return | |
172 | ||
173 | pending_pdus = self.pending_pdus_by_dest.pop(destination, []) | |
174 | pending_edus = self.pending_edus_by_dest.pop(destination, []) | |
175 | pending_failures = self.pending_failures_by_dest.pop(destination, []) | |
176 | ||
177 | if pending_pdus: | |
178 | logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d", | |
179 | destination, len(pending_pdus)) | |
180 | ||
181 | if not pending_pdus and not pending_edus and not pending_failures: | |
182 | logger.debug("TX [%s] Nothing to send", destination) | |
183 | return | |
184 | ||
185 | yield self._send_new_transaction( | |
186 | destination, pending_pdus, pending_edus, pending_failures | |
187 | ) | |
195 | # list of (pending_pdu, deferred, order) | |
196 | if destination in self.pending_transactions: | |
197 | # XXX: pending_transactions can get stuck on by a never-ending | |
198 | # request at which point pending_pdus_by_dest just keeps growing. | |
199 | # we need application-layer timeouts of some flavour of these | |
200 | # requests | |
201 | logger.debug( | |
202 | "TX [%s] Transaction already in progress", | |
203 | destination | |
204 | ) | |
205 | return | |
206 | ||
207 | try: | |
208 | self.pending_transactions[destination] = 1 | |
209 | ||
210 | yield run_on_reactor() | |
211 | ||
212 | while True: | |
213 | pending_pdus = self.pending_pdus_by_dest.pop(destination, []) | |
214 | pending_edus = self.pending_edus_by_dest.pop(destination, []) | |
215 | pending_presence = self.pending_presence_by_dest.pop(destination, {}) | |
216 | pending_failures = self.pending_failures_by_dest.pop(destination, []) | |
217 | ||
218 | pending_edus.extend( | |
219 | self.pending_edus_keyed_by_dest.pop(destination, {}).values() | |
220 | ) | |
221 | ||
222 | limiter = yield get_retry_limiter( | |
223 | destination, | |
224 | self.clock, | |
225 | self.store, | |
226 | ) | |
227 | ||
228 | device_message_edus, device_stream_id = ( | |
229 | yield self._get_new_device_messages(destination) | |
230 | ) | |
231 | ||
232 | pending_edus.extend(device_message_edus) | |
233 | if pending_presence: | |
234 | pending_edus.append( | |
235 | Edu( | |
236 | origin=self.server_name, | |
237 | destination=destination, | |
238 | edu_type="m.presence", | |
239 | content={ | |
240 | "push": [ | |
241 | format_user_presence_state( | |
242 | presence, self.clock.time_msec() | |
243 | ) | |
244 | for presence in pending_presence.values() | |
245 | ] | |
246 | }, | |
247 | ) | |
248 | ) | |
249 | ||
250 | if pending_pdus: | |
251 | logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d", | |
252 | destination, len(pending_pdus)) | |
253 | ||
254 | if not pending_pdus and not pending_edus and not pending_failures: | |
255 | logger.debug("TX [%s] Nothing to send", destination) | |
256 | self.last_device_stream_id_by_dest[destination] = ( | |
257 | device_stream_id | |
258 | ) | |
259 | return | |
260 | ||
261 | success = yield self._send_new_transaction( | |
262 | destination, pending_pdus, pending_edus, pending_failures, | |
263 | device_stream_id, | |
264 | should_delete_from_device_stream=bool(device_message_edus), | |
265 | limiter=limiter, | |
266 | ) | |
267 | if not success: | |
268 | break | |
269 | except NotRetryingDestination: | |
270 | logger.info( | |
271 | "TX [%s] not ready for retry yet - " | |
272 | "dropping transaction for now", | |
273 | destination, | |
274 | ) | |
275 | finally: | |
276 | # We want to be *very* sure we delete this after we stop processing | |
277 | self.pending_transactions.pop(destination, None) | |
278 | ||
    @defer.inlineCallbacks
    def _get_new_device_messages(self, destination):
        """Fetch pending direct-to-device messages for *destination*.

        Reads messages newer than the last stream id we recorded for this
        destination, up to the current to-device stream token.

        Returns (via deferred):
            tuple: a list of m.direct_to_device Edus and the stream id up to
                which messages were read.
        """
        last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
        to_device_stream_id = self.store.get_to_device_stream_token()
        contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
            destination, last_device_stream_id, to_device_stream_id
        )
        # One EDU per stored message body.
        edus = [
            Edu(
                origin=self.server_name,
                destination=destination,
                edu_type="m.direct_to_device",
                content=content,
            )
            for content in contents
        ]
        defer.returnValue((edus, stream_id))
188 | 296 | |
189 | 297 | @measure_func("_send_new_transaction") |
190 | 298 | @defer.inlineCallbacks |
191 | 299 | def _send_new_transaction(self, destination, pending_pdus, pending_edus, |
192 | pending_failures): | |
193 | ||
194 | # Sort based on the order field | |
195 | pending_pdus.sort(key=lambda t: t[1]) | |
196 | pdus = [x[0] for x in pending_pdus] | |
197 | edus = pending_edus | |
198 | failures = [x.get_dict() for x in pending_failures] | |
199 | ||
200 | try: | |
201 | self.pending_transactions[destination] = 1 | |
202 | ||
203 | logger.debug("TX [%s] _attempt_new_transaction", destination) | |
204 | ||
205 | txn_id = str(self._next_txn_id) | |
206 | ||
207 | limiter = yield get_retry_limiter( | |
208 | destination, | |
209 | self.clock, | |
210 | self.store, | |
300 | pending_failures, device_stream_id, | |
301 | should_delete_from_device_stream, limiter): | |
302 | ||
303 | # Sort based on the order field | |
304 | pending_pdus.sort(key=lambda t: t[1]) | |
305 | pdus = [x[0] for x in pending_pdus] | |
306 | edus = pending_edus | |
307 | failures = [x.get_dict() for x in pending_failures] | |
308 | ||
309 | success = True | |
310 | ||
311 | try: | |
312 | logger.debug("TX [%s] _attempt_new_transaction", destination) | |
313 | ||
314 | txn_id = str(self._next_txn_id) | |
315 | ||
316 | logger.debug( | |
317 | "TX [%s] {%s} Attempting new transaction" | |
318 | " (pdus: %d, edus: %d, failures: %d)", | |
319 | destination, txn_id, | |
320 | len(pdus), | |
321 | len(edus), | |
322 | len(failures) | |
323 | ) | |
324 | ||
325 | logger.debug("TX [%s] Persisting transaction...", destination) | |
326 | ||
327 | transaction = Transaction.create_new( | |
328 | origin_server_ts=int(self.clock.time_msec()), | |
329 | transaction_id=txn_id, | |
330 | origin=self.server_name, | |
331 | destination=destination, | |
332 | pdus=pdus, | |
333 | edus=edus, | |
334 | pdu_failures=failures, | |
335 | ) | |
336 | ||
337 | self._next_txn_id += 1 | |
338 | ||
339 | yield self.transaction_actions.prepare_to_send(transaction) | |
340 | ||
341 | logger.debug("TX [%s] Persisted transaction", destination) | |
342 | logger.info( | |
343 | "TX [%s] {%s} Sending transaction [%s]," | |
344 | " (PDUs: %d, EDUs: %d, failures: %d)", | |
345 | destination, txn_id, | |
346 | transaction.transaction_id, | |
347 | len(pdus), | |
348 | len(edus), | |
349 | len(failures), | |
350 | ) | |
351 | ||
352 | with limiter: | |
353 | # Actually send the transaction | |
354 | ||
355 | # FIXME (erikj): This is a bit of a hack to make the Pdu age | |
356 | # keys work | |
357 | def json_data_cb(): | |
358 | data = transaction.get_dict() | |
359 | now = int(self.clock.time_msec()) | |
360 | if "pdus" in data: | |
361 | for p in data["pdus"]: | |
362 | if "age_ts" in p: | |
363 | unsigned = p.setdefault("unsigned", {}) | |
364 | unsigned["age"] = now - int(p["age_ts"]) | |
365 | del p["age_ts"] | |
366 | return data | |
367 | ||
368 | try: | |
369 | response = yield self.transport_layer.send_transaction( | |
370 | transaction, json_data_cb | |
371 | ) | |
372 | code = 200 | |
373 | ||
374 | if response: | |
375 | for e_id, r in response.get("pdus", {}).items(): | |
376 | if "error" in r: | |
377 | logger.warn( | |
378 | "Transaction returned error for %s: %s", | |
379 | e_id, r, | |
380 | ) | |
381 | except HttpResponseException as e: | |
382 | code = e.code | |
383 | response = e.response | |
384 | ||
385 | logger.info( | |
386 | "TX [%s] {%s} got %d response", | |
387 | destination, txn_id, code | |
211 | 388 | ) |
212 | 389 | |
213 | logger.debug( | |
214 | "TX [%s] {%s} Attempting new transaction" | |
215 | " (pdus: %d, edus: %d, failures: %d)", | |
216 | destination, txn_id, | |
217 | len(pending_pdus), | |
218 | len(pending_edus), | |
219 | len(pending_failures) | |
220 | ) | |
221 | ||
222 | logger.debug("TX [%s] Persisting transaction...", destination) | |
223 | ||
224 | transaction = Transaction.create_new( | |
225 | origin_server_ts=int(self.clock.time_msec()), | |
226 | transaction_id=txn_id, | |
227 | origin=self.server_name, | |
228 | destination=destination, | |
229 | pdus=pdus, | |
230 | edus=edus, | |
231 | pdu_failures=failures, | |
232 | ) | |
233 | ||
234 | self._next_txn_id += 1 | |
235 | ||
236 | yield self.transaction_actions.prepare_to_send(transaction) | |
237 | ||
238 | logger.debug("TX [%s] Persisted transaction", destination) | |
239 | logger.info( | |
240 | "TX [%s] {%s} Sending transaction [%s]," | |
241 | " (PDUs: %d, EDUs: %d, failures: %d)", | |
242 | destination, txn_id, | |
243 | transaction.transaction_id, | |
244 | len(pending_pdus), | |
245 | len(pending_edus), | |
246 | len(pending_failures), | |
247 | ) | |
248 | ||
249 | with limiter: | |
250 | # Actually send the transaction | |
251 | ||
252 | # FIXME (erikj): This is a bit of a hack to make the Pdu age | |
253 | # keys work | |
254 | def json_data_cb(): | |
255 | data = transaction.get_dict() | |
256 | now = int(self.clock.time_msec()) | |
257 | if "pdus" in data: | |
258 | for p in data["pdus"]: | |
259 | if "age_ts" in p: | |
260 | unsigned = p.setdefault("unsigned", {}) | |
261 | unsigned["age"] = now - int(p["age_ts"]) | |
262 | del p["age_ts"] | |
263 | return data | |
264 | ||
265 | try: | |
266 | response = yield self.transport_layer.send_transaction( | |
267 | transaction, json_data_cb | |
268 | ) | |
269 | code = 200 | |
270 | ||
271 | if response: | |
272 | for e_id, r in response.get("pdus", {}).items(): | |
273 | if "error" in r: | |
274 | logger.warn( | |
275 | "Transaction returned error for %s: %s", | |
276 | e_id, r, | |
277 | ) | |
278 | except HttpResponseException as e: | |
279 | code = e.code | |
280 | response = e.response | |
281 | ||
390 | logger.debug("TX [%s] Sent transaction", destination) | |
391 | logger.debug("TX [%s] Marking as delivered...", destination) | |
392 | ||
393 | yield self.transaction_actions.delivered( | |
394 | transaction, code, response | |
395 | ) | |
396 | ||
397 | logger.debug("TX [%s] Marked as delivered", destination) | |
398 | ||
399 | if code != 200: | |
400 | for p in pdus: | |
282 | 401 | logger.info( |
283 | "TX [%s] {%s} got %d response", | |
284 | destination, txn_id, code | |
285 | ) | |
286 | ||
287 | logger.debug("TX [%s] Sent transaction", destination) | |
288 | logger.debug("TX [%s] Marking as delivered...", destination) | |
289 | ||
290 | yield self.transaction_actions.delivered( | |
291 | transaction, code, response | |
292 | ) | |
293 | ||
294 | logger.debug("TX [%s] Marked as delivered", destination) | |
295 | ||
296 | if code != 200: | |
297 | for p in pdus: | |
298 | logger.info( | |
299 | "Failed to send event %s to %s", p.event_id, destination | |
300 | ) | |
301 | except NotRetryingDestination: | |
302 | logger.info( | |
303 | "TX [%s] not ready for retry yet - " | |
304 | "dropping transaction for now", | |
305 | destination, | |
306 | ) | |
307 | except RuntimeError as e: | |
308 | # We capture this here as there as nothing actually listens | |
309 | # for this finishing functions deferred. | |
310 | logger.warn( | |
311 | "TX [%s] Problem in _attempt_transaction: %s", | |
312 | destination, | |
313 | e, | |
314 | ) | |
315 | ||
316 | for p in pdus: | |
317 | logger.info("Failed to send event %s to %s", p.event_id, destination) | |
318 | except Exception as e: | |
319 | # We capture this here as there as nothing actually listens | |
320 | # for this finishing functions deferred. | |
321 | logger.warn( | |
322 | "TX [%s] Problem in _attempt_transaction: %s", | |
323 | destination, | |
324 | e, | |
325 | ) | |
326 | ||
327 | for p in pdus: | |
328 | logger.info("Failed to send event %s to %s", p.event_id, destination) | |
329 | ||
330 | finally: | |
331 | # We want to be *very* sure we delete this after we stop processing | |
332 | self.pending_transactions.pop(destination, None) | |
402 | "Failed to send event %s to %s", p.event_id, destination | |
403 | ) | |
404 | success = False | |
405 | else: | |
406 | # Remove the acknowledged device messages from the database | |
407 | if should_delete_from_device_stream: | |
408 | yield self.store.delete_device_msgs_for_remote( | |
409 | destination, device_stream_id | |
410 | ) | |
411 | self.last_device_stream_id_by_dest[destination] = device_stream_id | |
412 | except RuntimeError as e: | |
413 | # We capture this here as there as nothing actually listens | |
414 | # for this finishing functions deferred. | |
415 | logger.warn( | |
416 | "TX [%s] Problem in _attempt_transaction: %s", | |
417 | destination, | |
418 | e, | |
419 | ) | |
420 | ||
421 | success = False | |
422 | ||
423 | for p in pdus: | |
424 | logger.info("Failed to send event %s to %s", p.event_id, destination) | |
425 | except Exception as e: | |
426 | # We capture this here as there as nothing actually listens | |
427 | # for this finishing functions deferred. | |
428 | logger.warn( | |
429 | "TX [%s] Problem in _attempt_transaction: %s", | |
430 | destination, | |
431 | e, | |
432 | ) | |
433 | ||
434 | success = False | |
435 | ||
436 | for p in pdus: | |
437 | logger.info("Failed to send event %s to %s", p.event_id, destination) | |
438 | ||
439 | defer.returnValue(success) |
247 | 247 | |
248 | 248 | @defer.inlineCallbacks |
249 | 249 | @log_function |
250 | def get_public_rooms(self, remote_server): | |
250 | def get_public_rooms(self, remote_server, limit, since_token, | |
251 | search_filter=None): | |
251 | 252 | path = PREFIX + "/publicRooms" |
253 | ||
254 | args = {} | |
255 | if limit: | |
256 | args["limit"] = [str(limit)] | |
257 | if since_token: | |
258 | args["since"] = [since_token] | |
259 | ||
260 | # TODO(erikj): Actually send the search_filter across federation. | |
252 | 261 | |
253 | 262 | response = yield self.client.get_json( |
254 | 263 | destination=remote_server, |
255 | 264 | path=path, |
265 | args=args, | |
256 | 266 | ) |
257 | 267 | |
258 | 268 | defer.returnValue(response) |
297 | 307 | |
298 | 308 | @defer.inlineCallbacks |
299 | 309 | @log_function |
300 | def query_client_keys(self, destination, query_content): | |
310 | def query_client_keys(self, destination, query_content, timeout): | |
301 | 311 | """Query the device keys for a list of user ids hosted on a remote |
302 | 312 | server. |
303 | 313 | |
326 | 336 | destination=destination, |
327 | 337 | path=path, |
328 | 338 | data=query_content, |
329 | ) | |
330 | defer.returnValue(content) | |
331 | ||
332 | @defer.inlineCallbacks | |
333 | @log_function | |
334 | def claim_client_keys(self, destination, query_content): | |
339 | timeout=timeout, | |
340 | ) | |
341 | defer.returnValue(content) | |
342 | ||
343 | @defer.inlineCallbacks | |
344 | @log_function | |
345 | def claim_client_keys(self, destination, query_content, timeout): | |
335 | 346 | """Claim one-time keys for a list of devices hosted on a remote server. |
336 | 347 | |
337 | 348 | Request: |
362 | 373 | destination=destination, |
363 | 374 | path=path, |
364 | 375 | data=query_content, |
376 | timeout=timeout, | |
365 | 377 | ) |
366 | 378 | defer.returnValue(content) |
367 | 379 |
17 | 17 | from synapse.api.urls import FEDERATION_PREFIX as PREFIX |
18 | 18 | from synapse.api.errors import Codes, SynapseError |
19 | 19 | from synapse.http.server import JsonResource |
20 | from synapse.http.servlet import parse_json_object_from_request | |
20 | from synapse.http.servlet import ( | |
21 | parse_json_object_from_request, parse_integer_from_args, parse_string_from_args, | |
22 | ) | |
21 | 23 | from synapse.util.ratelimitutils import FederationRateLimiter |
22 | 24 | from synapse.util.versionstring import get_version_string |
23 | 25 | |
553 | 555 | |
554 | 556 | @defer.inlineCallbacks |
555 | 557 | def on_GET(self, origin, content, query): |
556 | data = yield self.room_list_handler.get_local_public_room_list() | |
558 | limit = parse_integer_from_args(query, "limit", 0) | |
559 | since_token = parse_string_from_args(query, "since", None) | |
560 | data = yield self.room_list_handler.get_local_public_room_list( | |
561 | limit, since_token | |
562 | ) | |
557 | 563 | defer.returnValue((200, data)) |
558 | 564 | |
559 | 565 |
175 | 175 | defer.returnValue(ret) |
176 | 176 | |
177 | 177 | @defer.inlineCallbacks |
178 | def get_3pe_protocols(self): | |
178 | def get_3pe_protocols(self, only_protocol=None): | |
179 | 179 | services = yield self.store.get_app_services() |
180 | 180 | protocols = {} |
181 | ||
182 | # Collect up all the individual protocol responses out of the ASes | |
181 | 183 | for s in services: |
182 | 184 | for p in s.protocols: |
183 | protocols[p] = yield self.appservice_api.get_3pe_protocol(s, p) | |
185 | if only_protocol is not None and p != only_protocol: | |
186 | continue | |
187 | ||
188 | if p not in protocols: | |
189 | protocols[p] = [] | |
190 | ||
191 | info = yield self.appservice_api.get_3pe_protocol(s, p) | |
192 | ||
193 | if info is not None: | |
194 | protocols[p].append(info) | |
195 | ||
196 | def _merge_instances(infos): | |
197 | if not infos: | |
198 | return {} | |
199 | ||
200 | # Merge the 'instances' lists of multiple results, but just take | |
201 | # the other fields from the first as they ought to be identical | |
202 | # copy the result so as not to corrupt the cached one | |
203 | combined = dict(infos[0]) | |
204 | combined["instances"] = list(combined["instances"]) | |
205 | ||
206 | for info in infos[1:]: | |
207 | combined["instances"].extend(info["instances"]) | |
208 | ||
209 | return combined | |
210 | ||
211 | for p in protocols.keys(): | |
212 | protocols[p] = _merge_instances(protocols[p]) | |
184 | 213 | |
185 | 214 | defer.returnValue(protocols) |
186 | 215 |
57 | 57 | attempts = 0 |
58 | 58 | while attempts < 5: |
59 | 59 | try: |
60 | device_id = stringutils.random_string_with_symbols(16) | |
60 | device_id = stringutils.random_string(10).upper() | |
61 | 61 | yield self.store.store_device( |
62 | 62 | user_id=user_id, |
63 | 63 | device_id=device_id, |
0 | # -*- coding: utf-8 -*- | |
1 | # Copyright 2016 OpenMarket Ltd | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | import logging | |
16 | ||
17 | from twisted.internet import defer | |
18 | ||
19 | from synapse.types import get_domain_from_id | |
20 | from synapse.util.stringutils import random_string | |
21 | ||
22 | ||
23 | logger = logging.getLogger(__name__) | |
24 | ||
25 | ||
class DeviceMessageHandler(object):
    """Routes direct-to-device ("to device") messages.

    Inbound: handles ``m.direct_to_device`` EDUs from federation and stores
    them in the local device inbox. Outbound: splits a client's message map
    into local deliveries and per-destination federation EDUs.
    """

    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.federation = hs.get_replication_layer()

        self.federation.register_edu_handler(
            "m.direct_to_device", self.on_direct_to_device_edu
        )

    @defer.inlineCallbacks
    def on_direct_to_device_edu(self, origin, content):
        """Handle an incoming ``m.direct_to_device`` EDU from `origin`.

        Stores the contained messages in the local device inbox and wakes up
        any listeners on the to_device stream.
        """
        local_messages = {}
        sender_user_id = content["sender"]
        if origin != get_domain_from_id(sender_user_id):
            logger.warn(
                "Dropping device message from %r with spoofed sender %r",
                origin, sender_user_id
            )
            # BUGFIX: actually drop the message. Previously we logged the
            # warning but fell through and delivered the spoofed message.
            return
        message_type = content["type"]
        message_id = content["message_id"]
        for user_id, by_device in content["messages"].items():
            # Annotate each message with its type and (verified) sender.
            messages_by_device = {
                device_id: {
                    "content": message_content,
                    "type": message_type,
                    "sender": sender_user_id,
                }
                for device_id, message_content in by_device.items()
            }
            if messages_by_device:
                local_messages[user_id] = messages_by_device

        stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
            origin, message_id, local_messages
        )

        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )

    @defer.inlineCallbacks
    def send_device_message(self, sender_user_id, message_type, messages):
        """Send messages from a local user to a set of devices.

        Args:
            sender_user_id (str): user id of the sender.
            message_type (str): event type of the messages.
            messages (dict): map of user_id -> device_id -> message content.
        """
        local_messages = {}
        remote_messages = {}
        for user_id, by_device in messages.items():
            if self.is_mine_id(user_id):
                messages_by_device = {
                    device_id: {
                        "content": message_content,
                        "type": message_type,
                        "sender": sender_user_id,
                    }
                    for device_id, message_content in by_device.items()
                }
                if messages_by_device:
                    local_messages[user_id] = messages_by_device
            else:
                destination = get_domain_from_id(user_id)
                remote_messages.setdefault(destination, {})[user_id] = by_device

        # One message_id covers the whole send, so remote servers can
        # de-duplicate retried EDUs.
        message_id = random_string(16)

        remote_edu_contents = {}
        for destination, dest_messages in remote_messages.items():
            remote_edu_contents[destination] = {
                "messages": dest_messages,
                "sender": sender_user_id,
                "type": message_type,
                "message_id": message_id,
            }

        stream_id = yield self.store.add_messages_to_device_inbox(
            local_messages, remote_edu_contents
        )

        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )

        for destination in remote_messages.keys():
            # Enqueue a new federation transaction to send the new
            # device messages to each remote destination.
            self.federation.send_device_messages(destination)
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
14 | 14 | |
15 | import collections | |
16 | import json | |
15 | import ujson as json | |
17 | 16 | import logging |
18 | 17 | |
18 | from canonicaljson import encode_canonical_json | |
19 | 19 | from twisted.internet import defer |
20 | 20 | |
21 | from synapse.api import errors | |
22 | import synapse.types | |
21 | from synapse.api.errors import SynapseError, CodeMessageException | |
22 | from synapse.types import get_domain_from_id | |
23 | from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred | |
24 | from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination | |
23 | 25 | |
24 | 26 | logger = logging.getLogger(__name__) |
25 | 27 | |
28 | 30 | def __init__(self, hs): |
29 | 31 | self.store = hs.get_datastore() |
30 | 32 | self.federation = hs.get_replication_layer() |
33 | self.device_handler = hs.get_device_handler() | |
31 | 34 | self.is_mine_id = hs.is_mine_id |
32 | self.server_name = hs.hostname | |
35 | self.clock = hs.get_clock() | |
33 | 36 | |
34 | 37 | # doesn't really work as part of the generic query API, because the |
35 | 38 | # query request requires an object POST, but we abuse the |
39 | 42 | ) |
40 | 43 | |
41 | 44 | @defer.inlineCallbacks |
42 | def query_devices(self, query_body): | |
45 | def query_devices(self, query_body, timeout): | |
43 | 46 | """ Handle a device key query from a client |
44 | 47 | |
45 | 48 | { |
62 | 65 | |
63 | 66 | # separate users by domain. |
64 | 67 | # make a map from domain to user_id to device_ids |
65 | queries_by_domain = collections.defaultdict(dict) | |
68 | local_query = {} | |
69 | remote_queries = {} | |
70 | ||
66 | 71 | for user_id, device_ids in device_keys_query.items(): |
67 | user = synapse.types.UserID.from_string(user_id) | |
68 | queries_by_domain[user.domain][user_id] = device_ids | |
72 | if self.is_mine_id(user_id): | |
73 | local_query[user_id] = device_ids | |
74 | else: | |
75 | domain = get_domain_from_id(user_id) | |
76 | remote_queries.setdefault(domain, {})[user_id] = device_ids | |
69 | 77 | |
70 | 78 | # do the queries |
71 | # TODO: do these in parallel | |
79 | failures = {} | |
72 | 80 | results = {} |
73 | for destination, destination_query in queries_by_domain.items(): | |
74 | if destination == self.server_name: | |
75 | res = yield self.query_local_devices(destination_query) | |
76 | else: | |
77 | res = yield self.federation.query_client_keys( | |
78 | destination, {"device_keys": destination_query} | |
81 | if local_query: | |
82 | local_result = yield self.query_local_devices(local_query) | |
83 | for user_id, keys in local_result.items(): | |
84 | if user_id in local_query: | |
85 | results[user_id] = keys | |
86 | ||
87 | @defer.inlineCallbacks | |
88 | def do_remote_query(destination): | |
89 | destination_query = remote_queries[destination] | |
90 | try: | |
91 | limiter = yield get_retry_limiter( | |
92 | destination, self.clock, self.store | |
79 | 93 | ) |
80 | res = res["device_keys"] | |
81 | for user_id, keys in res.items(): | |
82 | if user_id in destination_query: | |
83 | results[user_id] = keys | |
84 | ||
85 | defer.returnValue((200, {"device_keys": results})) | |
94 | with limiter: | |
95 | remote_result = yield self.federation.query_client_keys( | |
96 | destination, | |
97 | {"device_keys": destination_query}, | |
98 | timeout=timeout | |
99 | ) | |
100 | ||
101 | for user_id, keys in remote_result["device_keys"].items(): | |
102 | if user_id in destination_query: | |
103 | results[user_id] = keys | |
104 | ||
105 | except CodeMessageException as e: | |
106 | failures[destination] = { | |
107 | "status": e.code, "message": e.message | |
108 | } | |
109 | except NotRetryingDestination as e: | |
110 | failures[destination] = { | |
111 | "status": 503, "message": "Not ready for retry", | |
112 | } | |
113 | ||
114 | yield preserve_context_over_deferred(defer.gatherResults([ | |
115 | preserve_fn(do_remote_query)(destination) | |
116 | for destination in remote_queries | |
117 | ])) | |
118 | ||
119 | defer.returnValue({ | |
120 | "device_keys": results, "failures": failures, | |
121 | }) | |
86 | 122 | |
87 | 123 | @defer.inlineCallbacks |
88 | 124 | def query_local_devices(self, query): |
103 | 139 | if not self.is_mine_id(user_id): |
104 | 140 | logger.warning("Request for keys for non-local user %s", |
105 | 141 | user_id) |
106 | raise errors.SynapseError(400, "Not a user here") | |
142 | raise SynapseError(400, "Not a user here") | |
107 | 143 | |
108 | 144 | if not device_ids: |
109 | 145 | local_query.append((user_id, None)) |
136 | 172 | device_keys_query = query_body.get("device_keys", {}) |
137 | 173 | res = yield self.query_local_devices(device_keys_query) |
138 | 174 | defer.returnValue({"device_keys": res}) |
175 | ||
    @defer.inlineCallbacks
    def claim_one_time_keys(self, query, timeout):
        """Claim one-time keys for a set of devices, local and remote.

        Splits the query by whether each user is local; local claims go
        straight to the store, remote claims are fanned out per destination
        over federation (in parallel).

        Args:
            query (dict): ``{"one_time_keys": {user_id: {device_id: algorithm}}}``
            timeout (int|None): federation request timeout, passed through to
                the transport layer.

        Returns (via deferred):
            dict with "one_time_keys" (claimed keys keyed by user/device) and
            "failures" (per-destination error info for remotes we couldn't
            reach).
        """
        local_query = []
        remote_queries = {}

        # Partition the query into local store lookups and per-remote-domain
        # sub-queries.
        for user_id, device_keys in query.get("one_time_keys", {}).items():
            if self.is_mine_id(user_id):
                for device_id, algorithm in device_keys.items():
                    local_query.append((user_id, device_id, algorithm))
            else:
                domain = get_domain_from_id(user_id)
                remote_queries.setdefault(domain, {})[user_id] = device_keys

        results = yield self.store.claim_e2e_one_time_keys(local_query)

        json_result = {}
        failures = {}
        # Local results come back as serialised JSON bytes; decode them here.
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        @defer.inlineCallbacks
        def claim_client_keys(destination):
            # Claim from a single remote server; failures are recorded rather
            # than raised so one bad server doesn't fail the whole request.
            device_keys = remote_queries[destination]
            try:
                limiter = yield get_retry_limiter(
                    destination, self.clock, self.store
                )
                with limiter:
                    remote_result = yield self.federation.claim_client_keys(
                        destination,
                        {"one_time_keys": device_keys},
                        timeout=timeout
                    )
                    for user_id, keys in remote_result["one_time_keys"].items():
                        if user_id in device_keys:
                            json_result[user_id] = keys
            except CodeMessageException as e:
                failures[destination] = {
                    "status": e.code, "message": e.message
                }
            except NotRetryingDestination as e:
                # Backing off from this server after earlier failures.
                failures[destination] = {
                    "status": 503, "message": "Not ready for retry",
                }

        # Fire off all the remote claims in parallel, preserving log contexts.
        yield preserve_context_over_deferred(defer.gatherResults([
            preserve_fn(claim_client_keys)(destination)
            for destination in remote_queries
        ]))

        defer.returnValue({
            "one_time_keys": json_result,
            "failures": failures
        })
234 | ||
    @defer.inlineCallbacks
    def upload_keys_for_user(self, user_id, device_id, keys):
        """Store uploaded E2E device keys and/or one-time keys for a device.

        Args:
            user_id (str): local user the keys belong to.
            device_id (str): device the keys belong to.
            keys (dict): body of the upload request; may contain
                "device_keys" and/or "one_time_keys".

        Returns (via deferred):
            dict: ``{"one_time_key_counts": {algorithm: count}}`` — the
            remaining one-time key counts after the upload.
        """
        time_now = self.clock.time_msec()

        # TODO: Validate the JSON to make sure it has the right keys.
        device_keys = keys.get("device_keys", None)
        if device_keys:
            logger.info(
                "Updating device_keys for device %r for user %s at %d",
                device_id, user_id, time_now
            )
            # TODO: Sign the JSON with the server key
            # Stored canonicalised so the bytes are stable for signing/compare.
            yield self.store.set_e2e_device_keys(
                user_id, device_id, time_now,
                encode_canonical_json(device_keys)
            )

        one_time_keys = keys.get("one_time_keys", None)
        if one_time_keys:
            logger.info(
                "Adding %d one_time_keys for device %r for user %r at %d",
                len(one_time_keys), device_id, user_id, time_now
            )
            key_list = []
            for key_id, key_json in one_time_keys.items():
                # Key ids arrive as "<algorithm>:<key_id>"; split them apart.
                algorithm, key_id = key_id.split(":")
                key_list.append((
                    algorithm, key_id, encode_canonical_json(key_json)
                ))

            yield self.store.add_e2e_one_time_keys(
                user_id, device_id, time_now, key_list
            )

        # the device should have been registered already, but it may have been
        # deleted due to a race with a DELETE request. Or we may be using an
        # old access_token without an associated device_id. Either way, we
        # need to double-check the device is registered to avoid ending up with
        # keys without a corresponding device.
        # NOTE(review): this deferred is not yielded, so any failure here is
        # dropped silently — confirm that is intentional.
        self.device_handler.check_device_registered(user_id, device_id)

        result = yield self.store.count_e2e_one_time_keys(user_id, device_id)

        defer.returnValue({"one_time_key_counts": result})
831 | 831 | |
832 | 832 | new_pdu = event |
833 | 833 | |
834 | message_handler = self.hs.get_handlers().message_handler | |
835 | destinations = yield message_handler.get_joined_hosts_for_room_from_state( | |
836 | context | |
837 | ) | |
838 | destinations = set(destinations) | |
834 | users_in_room = yield self.store.get_joined_users_from_context(event, context) | |
835 | ||
836 | destinations = set( | |
837 | get_domain_from_id(user_id) for user_id in users_in_room | |
838 | if not self.hs.is_mine_id(user_id) | |
839 | ) | |
840 | ||
839 | 841 | destinations.discard(origin) |
840 | 842 | |
841 | 843 | logger.debug( |
1054 | 1056 | |
1055 | 1057 | new_pdu = event |
1056 | 1058 | |
1057 | message_handler = self.hs.get_handlers().message_handler | |
1058 | destinations = yield message_handler.get_joined_hosts_for_room_from_state( | |
1059 | context | |
1060 | ) | |
1061 | destinations = set(destinations) | |
1059 | users_in_room = yield self.store.get_joined_users_from_context(event, context) | |
1060 | ||
1061 | destinations = set( | |
1062 | get_domain_from_id(user_id) for user_id in users_in_room | |
1063 | if not self.hs.is_mine_id(user_id) | |
1064 | ) | |
1062 | 1065 | destinations.discard(origin) |
1063 | 1066 | |
1064 | 1067 | logger.debug( |
1581 | 1584 | current_state = set(e.event_id for e in auth_events.values()) |
1582 | 1585 | different_auth = event_auth_events - current_state |
1583 | 1586 | |
1587 | context.current_state_ids = dict(context.current_state_ids) | |
1584 | 1588 | context.current_state_ids.update({ |
1585 | 1589 | k: a.event_id for k, a in auth_events.items() |
1586 | 1590 | if k != event_key |
1587 | 1591 | }) |
1592 | context.prev_state_ids = dict(context.prev_state_ids) | |
1588 | 1593 | context.prev_state_ids.update({ |
1589 | 1594 | k: a.event_id for k, a in auth_events.items() |
1590 | 1595 | }) |
1666 | 1671 | # 4. Look at rejects and their proofs. |
1667 | 1672 | # TODO. |
1668 | 1673 | |
1674 | context.current_state_ids = dict(context.current_state_ids) | |
1669 | 1675 | context.current_state_ids.update({ |
1670 | 1676 | k: a.event_id for k, a in auth_events.items() |
1671 | 1677 | if k != event_key |
1672 | 1678 | }) |
1679 | context.prev_state_ids = dict(context.prev_state_ids) | |
1673 | 1680 | context.prev_state_ids.update({ |
1674 | 1681 | k: a.event_id for k, a in auth_events.items() |
1675 | 1682 | }) |
29 | 29 | from synapse.util.caches.snapshot_cache import SnapshotCache |
30 | 30 | from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred |
31 | 31 | from synapse.util.metrics import measure_func |
32 | from synapse.util.caches.descriptors import cachedInlineCallbacks | |
33 | 32 | from synapse.visibility import filter_events_for_client |
34 | 33 | |
35 | 34 | from ._base import BaseHandler |
944 | 943 | event_stream_id, max_stream_id |
945 | 944 | ) |
946 | 945 | |
947 | destinations = yield self.get_joined_hosts_for_room_from_state(context) | |
946 | users_in_room = yield self.store.get_joined_users_from_context(event, context) | |
947 | ||
948 | destinations = [ | |
949 | get_domain_from_id(user_id) for user_id in users_in_room | |
950 | if not self.hs.is_mine_id(user_id) | |
951 | ] | |
948 | 952 | |
949 | 953 | @defer.inlineCallbacks |
950 | 954 | def _notify(): |
962 | 966 | preserve_fn(federation_handler.handle_new_event)( |
963 | 967 | event, destinations=destinations, |
964 | 968 | ) |
965 | ||
966 | def get_joined_hosts_for_room_from_state(self, context): | |
967 | state_group = context.state_group | |
968 | if not state_group: | |
969 | # If state_group is None it means it has yet to be assigned a | |
970 | # state group, i.e. we need to make sure that calls with a state_group | |
971 | # of None don't hit previous cached calls with a None state_group. | |
972 | # To do this we set the state_group to a new object as object() != object() | |
973 | state_group = object() | |
974 | ||
975 | return self._get_joined_hosts_for_room_from_state( | |
976 | state_group, context.current_state_ids | |
977 | ) | |
978 | ||
    @cachedInlineCallbacks(num_args=1, cache_context=True)
    def _get_joined_hosts_for_room_from_state(self, state_group, current_state_ids,
                                              cache_context):
        """Compute the set of remote server names with a joined member in the
        given room state.

        Cached on `state_group` only (num_args=1); `current_state_ids` is the
        corresponding state map and `cache_context` is supplied by the
        caching decorator.
        """

        # Don't bother getting state for people on the same HS
        current_state = yield self.store.get_events([
            e_id for key, e_id in current_state_ids.items()
            if key[0] == EventTypes.Member and not self.hs.is_mine_id(key[1])
        ])

        destinations = set()
        for e in current_state.itervalues():
            try:
                if e.type == EventTypes.Member:
                    if e.content["membership"] == Membership.JOIN:
                        # state_key of a membership event is the user id;
                        # its domain is the server to send to.
                        destinations.add(get_domain_from_id(e.state_key))
            except SynapseError:
                # Malformed user id / event; skip it rather than failing the
                # whole computation.
                logger.warn(
                    "Failed to get destination from event %s", e.event_id
                )

        defer.returnValue(destinations)
50 | 50 | bump_active_time_counter = metrics.register_counter("bump_active_time") |
51 | 51 | |
52 | 52 | get_updates_counter = metrics.register_counter("get_updates", labels=["type"]) |
53 | ||
54 | notify_reason_counter = metrics.register_counter("notify_reason", labels=["reason"]) | |
55 | state_transition_counter = metrics.register_counter( | |
56 | "state_transition", labels=["from", "to"] | |
57 | ) | |
53 | 58 | |
54 | 59 | |
55 | 60 | # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them |
211 | 216 | is some spurious presence changes that will self-correct. |
212 | 217 | """ |
213 | 218 | logger.info( |
214 | "Performing _on_shutdown. Persiting %d unpersisted changes", | |
219 | "Performing _on_shutdown. Persisting %d unpersisted changes", | |
215 | 220 | len(self.user_to_current_state) |
216 | 221 | ) |
217 | 222 | |
228 | 233 | may stack up and slow down shutdown times. |
229 | 234 | """ |
230 | 235 | logger.info( |
231 | "Performing _persist_unpersisted_changes. Persiting %d unpersisted changes", | |
236 | "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes", | |
232 | 237 | len(self.unpersisted_users_changes) |
233 | 238 | ) |
234 | 239 | |
258 | 263 | |
259 | 264 | to_notify = {} # Changes we want to notify everyone about |
260 | 265 | to_federation_ping = {} # These need sending keep-alives |
266 | ||
267 | # Only bother handling the last presence change for each user | |
268 | new_states_dict = {} | |
269 | for new_state in new_states: | |
270 | new_states_dict[new_state.user_id] = new_state | |
271 | new_state = new_states_dict.values() | |
261 | 272 | |
262 | 273 | for new_state in new_states: |
263 | 274 | user_id = new_state.user_id |
613 | 624 | Args: |
614 | 625 | hosts_to_states (dict): Mapping `server_name` -> `[UserPresenceState]` |
615 | 626 | """ |
616 | now = self.clock.time_msec() | |
617 | 627 | for host, states in hosts_to_states.items(): |
618 | self.federation.send_edu( | |
619 | destination=host, | |
620 | edu_type="m.presence", | |
621 | content={ | |
622 | "push": [ | |
623 | _format_user_presence_state(state, now) | |
624 | for state in states | |
625 | ] | |
626 | } | |
627 | ) | |
628 | self.federation.send_presence(host, states) | |
628 | 629 | |
629 | 630 | @defer.inlineCallbacks |
630 | 631 | def incoming_presence(self, origin, content): |
645 | 646 | ) |
646 | 647 | continue |
647 | 648 | |
649 | if get_domain_from_id(user_id) != origin: | |
650 | logger.info( | |
651 | "Got presence update from %r with bad 'user_id': %r", | |
652 | origin, user_id, | |
653 | ) | |
654 | continue | |
655 | ||
648 | 656 | presence_state = push.get("presence", None) |
649 | 657 | if not presence_state: |
650 | 658 | logger.info( |
704 | 712 | defer.returnValue([ |
705 | 713 | { |
706 | 714 | "type": "m.presence", |
707 | "content": _format_user_presence_state(state, now), | |
715 | "content": format_user_presence_state(state, now), | |
708 | 716 | } |
709 | 717 | for state in updates |
710 | 718 | ]) |
711 | 719 | else: |
712 | 720 | defer.returnValue([ |
713 | _format_user_presence_state(state, now) for state in updates | |
721 | format_user_presence_state(state, now) for state in updates | |
714 | 722 | ]) |
715 | 723 | |
716 | 724 | @defer.inlineCallbacks |
def should_notify(old_state, new_state):
    """Decides if a presence state change should be sent to interested parties.

    Also increments the notify_reason_counter / state_transition_counter
    metrics with the reason a change is notifiable.

    Args:
        old_state: the previously known presence state for the user.
        new_state: the new presence state to compare against.
            (presumably both are UserPresenceState — see the
            `hosts_to_states` docstring elsewhere in this file; confirm.)

    Returns:
        bool: True if interested parties should be notified of the change.
    """
    if old_state == new_state:
        # Identical states never need notifying.
        return False

    if old_state.status_msg != new_state.status_msg:
        notify_reason_counter.inc("status_msg_change")
        return True

    if old_state.state != new_state.state:
        notify_reason_counter.inc("state_change")
        state_transition_counter.inc(old_state.state, new_state.state)
        return True

    if old_state.state == PresenceState.ONLINE:
        if new_state.currently_active != old_state.currently_active:
            notify_reason_counter.inc("current_active_change")
            return True

        if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
            # Only notify about last active bumps if we're not currently active
            if not new_state.currently_active:
                notify_reason_counter.inc("last_active_change_online")
                return True

    elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
        # Always notify for a transition where last active gets bumped.
        notify_reason_counter.inc("last_active_change_not_online")
        return True

    return False
965 | 978 | |
966 | 979 | |
967 | def _format_user_presence_state(state, now): | |
980 | def format_user_presence_state(state, now): | |
968 | 981 | """Convert UserPresenceState to a format that can be sent down to clients |
969 | 982 | and to other servers. |
970 | 983 | """ |
1077 | 1090 | defer.returnValue(([ |
1078 | 1091 | { |
1079 | 1092 | "type": "m.presence", |
1080 | "content": _format_user_presence_state(s, now), | |
1093 | "content": format_user_presence_state(s, now), | |
1081 | 1094 | } |
1082 | 1095 | for s in updates.values() |
1083 | 1096 | if include_offline or s.state != PresenceState.OFFLINE |
155 | 155 | } |
156 | 156 | }, |
157 | 157 | }, |
158 | key=(room_id, receipt_type, user_id), | |
158 | 159 | ) |
159 | 160 | |
160 | 161 | @defer.inlineCallbacks |
19 | 19 | |
20 | 20 | from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken |
21 | 21 | from synapse.api.constants import ( |
22 | EventTypes, JoinRules, RoomCreationPreset, Membership, | |
22 | EventTypes, JoinRules, RoomCreationPreset | |
23 | 23 | ) |
24 | 24 | from synapse.api.errors import AuthError, StoreError, SynapseError |
25 | 25 | from synapse.util import stringutils |
26 | from synapse.util.async import concurrently_execute | |
27 | from synapse.util.caches.response_cache import ResponseCache | |
28 | 26 | from synapse.visibility import filter_events_for_client |
29 | 27 | |
30 | 28 | from collections import OrderedDict |
34 | 32 | import string |
35 | 33 | |
36 | 34 | logger = logging.getLogger(__name__) |
37 | ||
38 | REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000 | |
39 | 35 | |
40 | 36 | id_server_scheme = "https://" |
41 | 37 | |
195 | 191 | }, |
196 | 192 | ratelimit=False) |
197 | 193 | |
194 | content = {} | |
195 | is_direct = config.get("is_direct", None) | |
196 | if is_direct: | |
197 | content["is_direct"] = is_direct | |
198 | ||
198 | 199 | for invitee in invite_list: |
199 | 200 | yield room_member_handler.update_membership( |
200 | 201 | requester, |
202 | 203 | room_id, |
203 | 204 | "invite", |
204 | 205 | ratelimit=False, |
206 | content=content, | |
205 | 207 | ) |
206 | 208 | |
207 | 209 | for invite_3pid in invite_3pid_list: |
341 | 343 | ) |
342 | 344 | |
343 | 345 | |
344 | class RoomListHandler(BaseHandler): | |
345 | def __init__(self, hs): | |
346 | super(RoomListHandler, self).__init__(hs) | |
347 | self.response_cache = ResponseCache(hs) | |
348 | self.remote_list_request_cache = ResponseCache(hs) | |
349 | self.remote_list_cache = {} | |
350 | self.fetch_looping_call = hs.get_clock().looping_call( | |
351 | self.fetch_all_remote_lists, REMOTE_ROOM_LIST_POLL_INTERVAL | |
352 | ) | |
353 | self.fetch_all_remote_lists() | |
354 | ||
355 | def get_local_public_room_list(self): | |
356 | result = self.response_cache.get(()) | |
357 | if not result: | |
358 | result = self.response_cache.set((), self._get_public_room_list()) | |
359 | return result | |
360 | ||
361 | @defer.inlineCallbacks | |
362 | def _get_public_room_list(self): | |
363 | room_ids = yield self.store.get_public_room_ids() | |
364 | ||
365 | results = [] | |
366 | ||
367 | @defer.inlineCallbacks | |
368 | def handle_room(room_id): | |
369 | current_state = yield self.state_handler.get_current_state(room_id) | |
370 | ||
371 | # Double check that this is actually a public room. | |
372 | join_rules_event = current_state.get((EventTypes.JoinRules, "")) | |
373 | if join_rules_event: | |
374 | join_rule = join_rules_event.content.get("join_rule", None) | |
375 | if join_rule and join_rule != JoinRules.PUBLIC: | |
376 | defer.returnValue(None) | |
377 | ||
378 | result = {"room_id": room_id} | |
379 | ||
380 | num_joined_users = len([ | |
381 | 1 for _, event in current_state.items() | |
382 | if event.type == EventTypes.Member | |
383 | and event.membership == Membership.JOIN | |
384 | ]) | |
385 | if num_joined_users == 0: | |
386 | return | |
387 | ||
388 | result["num_joined_members"] = num_joined_users | |
389 | ||
390 | aliases = yield self.store.get_aliases_for_room(room_id) | |
391 | if aliases: | |
392 | result["aliases"] = aliases | |
393 | ||
394 | name_event = yield current_state.get((EventTypes.Name, "")) | |
395 | if name_event: | |
396 | name = name_event.content.get("name", None) | |
397 | if name: | |
398 | result["name"] = name | |
399 | ||
400 | topic_event = current_state.get((EventTypes.Topic, "")) | |
401 | if topic_event: | |
402 | topic = topic_event.content.get("topic", None) | |
403 | if topic: | |
404 | result["topic"] = topic | |
405 | ||
406 | canonical_event = current_state.get((EventTypes.CanonicalAlias, "")) | |
407 | if canonical_event: | |
408 | canonical_alias = canonical_event.content.get("alias", None) | |
409 | if canonical_alias: | |
410 | result["canonical_alias"] = canonical_alias | |
411 | ||
412 | visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, "")) | |
413 | visibility = None | |
414 | if visibility_event: | |
415 | visibility = visibility_event.content.get("history_visibility", None) | |
416 | result["world_readable"] = visibility == "world_readable" | |
417 | ||
418 | guest_event = current_state.get((EventTypes.GuestAccess, "")) | |
419 | guest = None | |
420 | if guest_event: | |
421 | guest = guest_event.content.get("guest_access", None) | |
422 | result["guest_can_join"] = guest == "can_join" | |
423 | ||
424 | avatar_event = current_state.get(("m.room.avatar", "")) | |
425 | if avatar_event: | |
426 | avatar_url = avatar_event.content.get("url", None) | |
427 | if avatar_url: | |
428 | result["avatar_url"] = avatar_url | |
429 | ||
430 | results.append(result) | |
431 | ||
432 | yield concurrently_execute(handle_room, room_ids, 10) | |
433 | ||
434 | # FIXME (erikj): START is no longer a valid value | |
435 | defer.returnValue({"start": "START", "end": "END", "chunk": results}) | |
436 | ||
437 | @defer.inlineCallbacks | |
438 | def fetch_all_remote_lists(self): | |
439 | deferred = self.hs.get_replication_layer().get_public_rooms( | |
440 | self.hs.config.secondary_directory_servers | |
441 | ) | |
442 | self.remote_list_request_cache.set((), deferred) | |
443 | self.remote_list_cache = yield deferred | |
444 | ||
445 | @defer.inlineCallbacks | |
446 | def get_aggregated_public_room_list(self): | |
447 | """ | |
448 | Get the public room list from this server and the servers | |
449 | specified in the secondary_directory_servers config option. | |
450 | XXX: Pagination... | |
451 | """ | |
452 | # We return the results from out cache which is updated by a looping call, | |
453 | # unless we're missing a cache entry, in which case wait for the result | |
454 | # of the fetch if there's one in progress. If not, omit that server. | |
455 | wait = False | |
456 | for s in self.hs.config.secondary_directory_servers: | |
457 | if s not in self.remote_list_cache: | |
458 | logger.warn("No cached room list from %s: waiting for fetch", s) | |
459 | wait = True | |
460 | break | |
461 | ||
462 | if wait and self.remote_list_request_cache.get(()): | |
463 | yield self.remote_list_request_cache.get(()) | |
464 | ||
465 | public_rooms = yield self.get_local_public_room_list() | |
466 | ||
467 | # keep track of which room IDs we've seen so we can de-dup | |
468 | room_ids = set() | |
469 | ||
470 | # tag all the ones in our list with our server name. | |
471 | # Also add the them to the de-deping set | |
472 | for room in public_rooms['chunk']: | |
473 | room["server_name"] = self.hs.hostname | |
474 | room_ids.add(room["room_id"]) | |
475 | ||
476 | # Now add the results from federation | |
477 | for server_name, server_result in self.remote_list_cache.items(): | |
478 | for room in server_result["chunk"]: | |
479 | if room["room_id"] not in room_ids: | |
480 | room["server_name"] = server_name | |
481 | public_rooms["chunk"].append(room) | |
482 | room_ids.add(room["room_id"]) | |
483 | ||
484 | defer.returnValue(public_rooms) | |
485 | ||
486 | ||
487 | 346 | class RoomContextHandler(BaseHandler): |
488 | 347 | @defer.inlineCallbacks |
489 | 348 | def get_event_context(self, user, room_id, event_id, limit, is_guest): |
0 | # -*- coding: utf-8 -*- | |
1 | # Copyright 2014 - 2016 OpenMarket Ltd | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | from twisted.internet import defer | |
16 | ||
17 | from ._base import BaseHandler | |
18 | ||
19 | from synapse.api.constants import ( | |
20 | EventTypes, JoinRules, | |
21 | ) | |
22 | from synapse.util.async import concurrently_execute | |
23 | from synapse.util.caches.response_cache import ResponseCache | |
24 | ||
25 | from collections import namedtuple | |
26 | from unpaddedbase64 import encode_base64, decode_base64 | |
27 | ||
28 | import logging | |
29 | import msgpack | |
30 | ||
31 | logger = logging.getLogger(__name__) | |
32 | ||
33 | REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000 | |
34 | ||
35 | ||
class RoomListHandler(BaseHandler):
    """Serves the public room list, both local and over federation.

    Local (non-search) results are deduplicated/cached via a ResponseCache;
    remote lists are fetched from other servers and cached for a short
    period, since we do not own them.
    """

    def __init__(self, hs):
        super(RoomListHandler, self).__init__(hs)
        self.response_cache = ResponseCache(hs)
        # Remote lists can change under us, so only cache them briefly.
        self.remote_response_cache = ResponseCache(hs, timeout_ms=30 * 1000)

    def get_local_public_room_list(self, limit=None, since_token=None,
                                   search_filter=None):
        """Fetch this server's public room list.

        Args:
            limit (int|None): maximum number of entries to return per page.
            since_token (str|None): opaque pagination token from a previous
                response, or None for the first page.
            search_filter (dict|None): optional filter (e.g. containing a
                "generic_search_term") restricting the results.

        Returns:
            Deferred[dict]: response with a "chunk" list and optional
            "next_batch"/"prev_batch" pagination tokens.
        """
        if search_filter:
            # We explicitly don't bother caching searches.
            return self._get_public_room_list(limit, since_token, search_filter)

        result = self.response_cache.get((limit, since_token))
        if not result:
            result = self.response_cache.set(
                (limit, since_token),
                self._get_public_room_list(limit, since_token)
            )
        return result

    @defer.inlineCallbacks
    def _get_public_room_list(self, limit=None, since_token=None,
                              search_filter=None):
        """Compute one page of the public room list.

        Rooms are ordered by (descending) joined-member count with room_id as
        a tie breaker; pagination tokens are indices into that stable order.
        """
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
            since_token = None

        rooms_to_order_value = {}
        rooms_to_num_joined = {}
        rooms_to_latest_event_ids = {}

        newly_visible = []
        newly_unpublished = []
        if since_token:
            # Resume from the stream positions frozen into the token so the
            # ordering stays stable across pages.
            stream_token = since_token.stream_ordering
            current_public_id = yield self.store.get_current_public_room_stream_id()
            public_room_stream_id = since_token.public_room_stream_id
            newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
                public_room_stream_id, current_public_id
            )
        else:
            stream_token = yield self.store.get_room_max_stream_ordering()
            public_room_stream_id = yield self.store.get_current_public_room_stream_id()

        room_ids = yield self.store.get_public_room_ids_at_stream_id(
            public_room_stream_id
        )

        # We want to return rooms in a particular order: the number of joined
        # users. We then arbitrarily use the room_id as a tie breaker.

        @defer.inlineCallbacks
        def get_order_for_room(room_id):
            latest_event_ids = rooms_to_latest_event_ids.get(room_id, None)
            if not latest_event_ids:
                latest_event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_token
                )
                rooms_to_latest_event_ids[room_id] = latest_event_ids

            if not latest_event_ids:
                return

            joined_users = yield self.state_handler.get_current_user_in_room(
                room_id, latest_event_ids,
            )
            num_joined_users = len(joined_users)
            rooms_to_num_joined[room_id] = num_joined_users

            if num_joined_users == 0:
                return

            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
        sorted_rooms = [room_id for room_id, _ in sorted_entries]

        # `sorted_rooms` should now be a list of all public room ids that is
        # stable across pagination. Therefore, we can use indices into this
        # list as our pagination tokens.

        # Filter out rooms that we don't want to return.
        # NB: key by `r` — every room in sorted_rooms has an entry in
        # rooms_to_num_joined (it was populated before the order value).
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
        ]

        if since_token:
            # Filter out rooms we've already returned previously
            # `since_token.current_limit` is the index of the last room we
            # sent down, so we exclude it and everything before/after it.
            if since_token.direction_is_forward:
                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
            else:
                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                rooms_to_scan.reverse()

        # Actually generate the entries. _generate_room_entry will append to
        # chunk but will stop if len(chunk) > limit
        chunk = []
        if limit and not search_filter:
            step = limit + 1
            for i in xrange(0, len(rooms_to_scan), step):
                # We iterate here because the vast majority of cases we'll stop
                # at first iteration, but occasionally _generate_room_entry
                # won't append to the chunk and so we need to loop again.
                # We don't want to scan over the entire range either as that
                # would potentially waste a lot of work.
                yield concurrently_execute(
                    lambda r: self._generate_room_entry(
                        r, rooms_to_num_joined[r],
                        chunk, limit, search_filter
                    ),
                    rooms_to_scan[i:i + step], 10
                )
                if len(chunk) >= limit + 1:
                    break
        else:
            yield concurrently_execute(
                lambda r: self._generate_room_entry(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter
                ),
                rooms_to_scan, 5
            )

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

        # Work out the new limit of the batch for pagination, or None if we
        # know there are no more results that would be returned.
        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
        # we've returned (or the reverse if we paginated backwards)
        # We tried to pull out limit + 1 rooms above, so if we have <= limit
        # then we know there are no more results to return
        new_limit = None
        if chunk and (not limit or len(chunk) > limit):

            if not since_token or since_token.direction_is_forward:
                if limit:
                    chunk = chunk[:limit]
                last_room_id = chunk[-1]["room_id"]
            else:
                if limit:
                    chunk = chunk[-limit:]
                last_room_id = chunk[0]["room_id"]

            new_limit = sorted_rooms.index(last_room_id)

        results = {
            "chunk": chunk,
        }

        if since_token:
            results["new_rooms"] = bool(newly_visible)

        if not since_token or since_token.direction_is_forward:
            if new_limit is not None:
                results["next_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=True,
                ).to_token()

            if since_token:
                results["prev_batch"] = since_token.copy_and_replace(
                    direction_is_forward=False,
                    current_limit=since_token.current_limit + 1,
                ).to_token()
        else:
            if new_limit is not None:
                results["prev_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=False,
                ).to_token()

            if since_token:
                results["next_batch"] = since_token.copy_and_replace(
                    direction_is_forward=True,
                    current_limit=since_token.current_limit - 1,
                ).to_token()

        defer.returnValue(results)

    @defer.inlineCallbacks
    def _generate_room_entry(self, room_id, num_joined_users, chunk, limit,
                             search_filter):
        """Build the public-list entry for one room and append it to `chunk`
        if it passes `search_filter` (and `chunk` isn't already full).
        """
        if limit and len(chunk) > limit + 1:
            # We've already got enough, so lets just drop it.
            return

        result = {
            "room_id": room_id,
            "num_joined_members": num_joined_users,
        }

        current_state_ids = yield self.state_handler.get_current_state_ids(room_id)

        # Only pull out the state events we actually surface in the entry.
        event_map = yield self.store.get_events([
            event_id for key, event_id in current_state_ids.items()
            if key[0] in (
                EventTypes.JoinRules,
                EventTypes.Name,
                EventTypes.Topic,
                EventTypes.CanonicalAlias,
                EventTypes.RoomHistoryVisibility,
                EventTypes.GuestAccess,
                "m.room.avatar",
            )
        ])

        current_state = {
            (ev.type, ev.state_key): ev
            for ev in event_map.values()
        }

        # Double check that this is actually a public room.
        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            join_rule = join_rules_event.content.get("join_rule", None)
            if join_rule and join_rule != JoinRules.PUBLIC:
                defer.returnValue(None)

        aliases = yield self.store.get_aliases_for_room(room_id)
        if aliases:
            result["aliases"] = aliases

        # `current_state` is a plain dict, so no yield is needed on .get()
        name_event = current_state.get((EventTypes.Name, ""))
        if name_event:
            name = name_event.content.get("name", None)
            if name:
                result["name"] = name

        topic_event = current_state.get((EventTypes.Topic, ""))
        if topic_event:
            topic = topic_event.content.get("topic", None)
            if topic:
                result["topic"] = topic

        canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_event:
            canonical_alias = canonical_event.content.get("alias", None)
            if canonical_alias:
                result["canonical_alias"] = canonical_alias

        visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
        visibility = None
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility", None)
        result["world_readable"] = visibility == "world_readable"

        guest_event = current_state.get((EventTypes.GuestAccess, ""))
        guest = None
        if guest_event:
            guest = guest_event.content.get("guest_access", None)
        result["guest_can_join"] = guest == "can_join"

        avatar_event = current_state.get(("m.room.avatar", ""))
        if avatar_event:
            avatar_url = avatar_event.content.get("url", None)
            if avatar_url:
                result["avatar_url"] = avatar_url

        if _matches_room_entry(result, search_filter):
            chunk.append(result)

    @defer.inlineCallbacks
    def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
                                    search_filter=None):
        """Fetch another server's public room list over federation.

        Searching is applied locally, since federation does not support it.
        """
        if search_filter:
            # We currently don't support searching across federation, so we have
            # to do it manually without pagination
            limit = None
            since_token = None

        res = yield self._get_remote_list_cached(
            server_name, limit=limit, since_token=since_token,
        )

        if search_filter:
            res = {"chunk": [
                entry
                for entry in list(res.get("chunk", []))
                if _matches_room_entry(entry, search_filter)
            ]}

        defer.returnValue(res)

    def _get_remote_list_cached(self, server_name, limit=None, since_token=None,
                                search_filter=None):
        """Fetch a remote room list, caching non-search requests briefly."""
        repl_layer = self.hs.get_replication_layer()
        if search_filter:
            # We can't cache when asking for search
            return repl_layer.get_public_rooms(
                server_name, limit=limit, since_token=since_token,
                search_filter=search_filter,
            )

        result = self.remote_response_cache.get((server_name, limit, since_token))
        if not result:
            result = self.remote_response_cache.set(
                (server_name, limit, since_token),
                repl_layer.get_public_rooms(
                    server_name, limit=limit, since_token=since_token,
                    search_filter=search_filter,
                )
            )
        return result
350 | ||
351 | ||
class RoomListNextBatch(namedtuple("RoomListNextBatch", (
    "stream_ordering",  # stream_ordering of the first public room list
    "public_room_stream_id",  # public room stream id for first public room list
    "current_limit",  # The number of previous rooms returned
    "direction_is_forward",  # Bool if this is a next_batch, false if prev_batch
))):
    """Opaque pagination token for the public room list.

    Round-trips through msgpack + unpadded base64; field names are shortened
    on the wire to keep the token compact.
    """

    # Wire-format keys: long field name -> single-char token key.
    KEY_DICT = {
        "stream_ordering": "s",
        "public_room_stream_id": "p",
        "current_limit": "n",
        "direction_is_forward": "d",
    }

    REVERSE_KEY_DICT = dict(
        (short_key, long_key) for long_key, short_key in KEY_DICT.items()
    )

    @classmethod
    def from_token(cls, token):
        """Decode a client-supplied token string back into a batch object."""
        decoded = msgpack.loads(decode_base64(token))
        kwargs = {}
        for short_key, val in decoded.items():
            kwargs[cls.REVERSE_KEY_DICT[short_key]] = val
        return cls(**kwargs)

    def to_token(self):
        """Serialise this batch into an opaque token string."""
        shortened = dict(
            (self.KEY_DICT[long_key], val)
            for long_key, val in self._asdict().items()
        )
        return encode_base64(msgpack.dumps(shortened))

    def copy_and_replace(self, **kwds):
        """Return a copy of this batch with the given fields replaced."""
        return self._replace(**kwds)
385 | ||
386 | ||
387 | def _matches_room_entry(room_entry, search_filter): | |
388 | if search_filter and search_filter.get("generic_search_term", None): | |
389 | generic_search_term = search_filter["generic_search_term"].upper() | |
390 | if generic_search_term in room_entry.get("name", "").upper(): | |
391 | return True | |
392 | elif generic_search_term in room_entry.get("topic", "").upper(): | |
393 | return True | |
394 | elif generic_search_term in room_entry.get("canonical_alias", "").upper(): | |
395 | return True | |
396 | else: | |
397 | return True | |
398 | ||
399 | return False |
186 | 186 | "user_id": user_id, |
187 | 187 | "typing": typing, |
188 | 188 | }, |
189 | key=(room_id, user_id), | |
189 | 190 | )) |
190 | 191 | |
191 | 192 | yield preserve_context_over_deferred( |
198 | 199 | user_id = content["user_id"] |
199 | 200 | |
200 | 201 | # Check that the string is a valid user id |
201 | UserID.from_string(user_id) | |
202 | user = UserID.from_string(user_id) | |
203 | ||
204 | if user.domain != origin: | |
205 | logger.info( | |
206 | "Got typing update from %r with bad 'user_id': %r", | |
207 | origin, user_id, | |
208 | ) | |
209 | return | |
202 | 210 | |
203 | 211 | users = yield self.state.get_current_user_in_room(room_id) |
204 | 212 | domains = set(get_domain_from_id(u) for u in users) |
245 | 245 | |
246 | 246 | @defer.inlineCallbacks |
247 | 247 | def put_json(self, destination, path, data={}, json_data_callback=None, |
248 | long_retries=False): | |
248 | long_retries=False, timeout=None): | |
249 | 249 | """ Sends the specifed json data using PUT |
250 | 250 | |
251 | 251 | Args: |
258 | 258 | use as the request body. |
259 | 259 | long_retries (bool): A boolean that indicates whether we should |
260 | 260 | retry for a short or long time. |
261 | timeout(int): How long to try (in ms) the destination for before | |
262 | giving up. None indicates no timeout. | |
261 | 263 | |
262 | 264 | Returns: |
263 | 265 | Deferred: Succeeds when we get a 2xx HTTP response. The result |
284 | 286 | body_callback=body_callback, |
285 | 287 | headers_dict={"Content-Type": ["application/json"]}, |
286 | 288 | long_retries=long_retries, |
289 | timeout=timeout, | |
287 | 290 | ) |
288 | 291 | |
289 | 292 | if 200 <= response.code < 300: |
299 | 302 | defer.returnValue(json.loads(body)) |
300 | 303 | |
301 | 304 | @defer.inlineCallbacks |
302 | def post_json(self, destination, path, data={}, long_retries=True): | |
305 | def post_json(self, destination, path, data={}, long_retries=True, | |
306 | timeout=None): | |
303 | 307 | """ Sends the specifed json data using POST |
304 | 308 | |
305 | 309 | Args: |
310 | 314 | the request body. This will be encoded as JSON. |
311 | 315 | long_retries (bool): A boolean that indicates whether we should |
312 | 316 | retry for a short or long time. |
317 | timeout(int): How long to try (in ms) the destination for before | |
318 | giving up. None indicates no timeout. | |
313 | 319 | |
314 | 320 | Returns: |
315 | 321 | Deferred: Succeeds when we get a 2xx HTTP response. The result |
330 | 336 | body_callback=body_callback, |
331 | 337 | headers_dict={"Content-Type": ["application/json"]}, |
332 | 338 | long_retries=True, |
339 | timeout=timeout, | |
333 | 340 | ) |
334 | 341 | |
335 | 342 | if 200 <= response.code < 300: |
40 | 40 | SynapseError: if the parameter is absent and required, or if the |
41 | 41 | parameter is present and not an integer. |
42 | 42 | """ |
43 | if name in request.args: | |
43 | return parse_integer_from_args(request.args, name, default, required) | |
44 | ||
45 | ||
46 | def parse_integer_from_args(args, name, default=None, required=False): | |
47 | if name in args: | |
44 | 48 | try: |
45 | return int(request.args[name][0]) | |
49 | return int(args[name][0]) | |
46 | 50 | except: |
47 | 51 | message = "Query parameter %r must be an integer" % (name,) |
48 | 52 | raise SynapseError(400, message) |
115 | 119 | parameter is present, must be one of a list of allowed values and |
116 | 120 | is not one of those allowed values. |
117 | 121 | """ |
118 | ||
119 | if name in request.args: | |
120 | value = request.args[name][0] | |
122 | return parse_string_from_args( | |
123 | request.args, name, default, required, allowed_values, param_type, | |
124 | ) | |
125 | ||
126 | ||
127 | def parse_string_from_args(args, name, default=None, required=False, | |
128 | allowed_values=None, param_type="string"): | |
129 | if name in args: | |
130 | value = args[name][0] | |
121 | 131 | if allowed_values is not None and value not in allowed_values: |
122 | 132 | message = "Query parameter %r must be one of [%s]" % ( |
123 | 133 | name, ", ".join(repr(v) for v in allowed_values) |
262 | 262 | } |
263 | 263 | ] |
264 | 264 | }, |
265 | # XXX: once m.direct is standardised everywhere, we should use it to detect | |
266 | # a DM from the user's perspective rather than this heuristic. | |
265 | 267 | { |
266 | 268 | 'rule_id': 'global/underride/.m.rule.room_one_to_one', |
267 | 269 | 'conditions': [ |
288 | 290 | } |
289 | 291 | ] |
290 | 292 | }, |
293 | # XXX: this is going to fire for events which aren't m.room.messages | |
294 | # but are encrypted (e.g. m.call.*)... | |
295 | { | |
296 | 'rule_id': 'global/underride/.m.rule.encrypted_room_one_to_one', | |
297 | 'conditions': [ | |
298 | { | |
299 | 'kind': 'room_member_count', | |
300 | 'is': '2', | |
301 | '_id': 'member_count', | |
302 | }, | |
303 | { | |
304 | 'kind': 'event_match', | |
305 | 'key': 'type', | |
306 | 'pattern': 'm.room.encrypted', | |
307 | '_id': '_encrypted', | |
308 | } | |
309 | ], | |
310 | 'actions': [ | |
311 | 'notify', | |
312 | { | |
313 | 'set_tweak': 'sound', | |
314 | 'value': 'default' | |
315 | }, { | |
316 | 'set_tweak': 'highlight', | |
317 | 'value': False | |
318 | } | |
319 | ] | |
320 | }, | |
291 | 321 | { |
292 | 322 | 'rule_id': 'global/underride/.m.rule.message', |
293 | 323 | 'conditions': [ |
296 | 326 | 'key': 'type', |
297 | 327 | 'pattern': 'm.room.message', |
298 | 328 | '_id': '_message', |
329 | } | |
330 | ], | |
331 | 'actions': [ | |
332 | 'notify', { | |
333 | 'set_tweak': 'highlight', | |
334 | 'value': False | |
335 | } | |
336 | ] | |
337 | }, | |
338 | # XXX: this is going to fire for events which aren't m.room.messages | |
339 | # but are encrypted (e.g. m.call.*)... | |
340 | { | |
341 | 'rule_id': 'global/underride/.m.rule.encrypted', | |
342 | 'conditions': [ | |
343 | { | |
344 | 'kind': 'event_match', | |
345 | 'key': 'type', | |
346 | 'pattern': 'm.room.encrypted', | |
347 | '_id': '_encrypted', | |
299 | 348 | } |
300 | 349 | ], |
301 | 350 | 'actions': [ |
26 | 26 | |
27 | 27 | |
28 | 28 | @defer.inlineCallbacks |
29 | def _get_rules(room_id, user_ids, store): | |
30 | rules_by_user = yield store.bulk_get_push_rules(user_ids) | |
31 | ||
32 | rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None} | |
33 | ||
34 | defer.returnValue(rules_by_user) | |
35 | ||
36 | ||
37 | @defer.inlineCallbacks | |
38 | 29 | def evaluator_for_event(event, hs, store, context): |
39 | 30 | rules_by_user = yield store.bulk_get_push_rules_for_room( |
40 | 31 | event, context |
47 | 38 | if invited_user and hs.is_mine_id(invited_user): |
48 | 39 | has_pusher = yield store.user_has_pusher(invited_user) |
49 | 40 | if has_pusher: |
41 | rules_by_user = dict(rules_by_user) | |
50 | 42 | rules_by_user[invited_user] = yield store.get_push_rules_for_user( |
51 | 43 | invited_user |
52 | 44 | ) |
35 | 35 | "blist": ["blist"], |
36 | 36 | "pysaml2>=3.0.0,<4.0.0": ["saml2>=3.0.0,<4.0.0"], |
37 | 37 | "pymacaroons-pynacl": ["pymacaroons"], |
38 | "msgpack-python>=0.3.0": ["msgpack"], | |
38 | 39 | } |
39 | 40 | CONDITIONAL_REQUIREMENTS = { |
40 | 41 | "web_client": { |
41 | 41 | ("pushers",), |
42 | 42 | ("caches",), |
43 | 43 | ("to_device",), |
44 | ("public_rooms",), | |
44 | 45 | ) |
45 | 46 | |
46 | 47 | |
130 | 131 | push_rules_token, room_stream_token = self.store.get_push_rules_stream_token() |
131 | 132 | pushers_token = self.store.get_pushers_stream_token() |
132 | 133 | caches_token = self.store.get_cache_stream_token() |
134 | public_rooms_token = self.store.get_current_public_room_stream_id() | |
133 | 135 | |
134 | 136 | defer.returnValue(_ReplicationToken( |
135 | 137 | room_stream_token, |
143 | 145 | 0, # State stream is no longer a thing |
144 | 146 | caches_token, |
145 | 147 | int(stream_token.to_device_key), |
148 | int(public_rooms_token), | |
146 | 149 | )) |
147 | 150 | |
148 | 151 | @request_handler() |
180 | 183 | def replicate(self, request_streams, limit): |
181 | 184 | writer = _Writer() |
182 | 185 | current_token = yield self.current_replication_token() |
183 | logger.info("Replicating up to %r", current_token) | |
186 | logger.debug("Replicating up to %r", current_token) | |
184 | 187 | |
185 | 188 | yield self.account_data(writer, current_token, limit, request_streams) |
186 | 189 | yield self.events(writer, current_token, limit, request_streams) |
192 | 195 | yield self.pushers(writer, current_token, limit, request_streams) |
193 | 196 | yield self.caches(writer, current_token, limit, request_streams) |
194 | 197 | yield self.to_device(writer, current_token, limit, request_streams) |
198 | yield self.public_rooms(writer, current_token, limit, request_streams) | |
195 | 199 | self.streams(writer, current_token, request_streams) |
196 | 200 | |
197 | logger.info("Replicated %d rows", writer.total) | |
201 | logger.debug("Replicated %d rows", writer.total) | |
198 | 202 | defer.returnValue(writer.finish()) |
199 | 203 | |
200 | 204 | def streams(self, writer, current_token, request_streams): |
273 | 277 | |
274 | 278 | @defer.inlineCallbacks |
275 | 279 | def typing(self, writer, current_token, request_streams): |
276 | current_position = current_token.presence | |
280 | current_position = current_token.typing | |
277 | 281 | |
278 | 282 | request_typing = request_streams.get("typing") |
279 | 283 | |
280 | 284 | if request_typing is not None: |
285 | # If they have a higher token than current max, we can assume that | |
286 | # they had been talking to a previous instance of the master. Since | |
287 | # we reset the token on restart, the best (but hacky) thing we can | |
288 | # do is to simply resend down all the typing notifications. | |
289 | if request_typing > current_position: | |
290 | request_typing = 0 | |
291 | ||
281 | 292 | typing_rows = yield self.typing_handler.get_all_typing_updates( |
282 | 293 | request_typing, current_position |
283 | 294 | ) |
392 | 403 | "position", "user_id", "device_id", "message_json" |
393 | 404 | )) |
394 | 405 | |
406 | @defer.inlineCallbacks | |
407 | def public_rooms(self, writer, current_token, limit, request_streams): | |
408 | current_position = current_token.public_rooms | |
409 | ||
410 | public_rooms = request_streams.get("public_rooms") | |
411 | ||
412 | if public_rooms is not None: | |
413 | public_rooms_rows = yield self.store.get_all_new_public_rooms( | |
414 | public_rooms, current_position, limit | |
415 | ) | |
416 | writer.write_header_and_rows("public_rooms", public_rooms_rows, ( | |
417 | "position", "room_id", "visibility" | |
418 | )) | |
419 | ||
395 | 420 | |
396 | 421 | class _Writer(object): |
397 | 422 | """Writes the streams as a JSON object as the response to the request""" |
420 | 445 | |
421 | 446 | class _ReplicationToken(collections.namedtuple("_ReplicationToken", ( |
422 | 447 | "events", "presence", "typing", "receipts", "account_data", "backfill", |
423 | "push_rules", "pushers", "state", "caches", "to_device", | |
448 | "push_rules", "pushers", "state", "caches", "to_device", "public_rooms", | |
424 | 449 | ))): |
425 | 450 | __slots__ = [] |
426 | 451 |
15 | 15 | from ._base import BaseSlavedStore |
16 | 16 | from ._slaved_id_tracker import SlavedIdTracker |
17 | 17 | from synapse.storage import DataStore |
18 | from synapse.util.caches.stream_change_cache import StreamChangeCache | |
18 | 19 | |
19 | 20 | |
20 | 21 | class SlavedDeviceInboxStore(BaseSlavedStore): |
21 | 22 | def __init__(self, db_conn, hs): |
22 | 23 | super(SlavedDeviceInboxStore, self).__init__(db_conn, hs) |
23 | 24 | self._device_inbox_id_gen = SlavedIdTracker( |
24 | db_conn, "device_inbox", "stream_id", | |
25 | db_conn, "device_max_stream_id", "stream_id", | |
26 | ) | |
27 | self._device_inbox_stream_cache = StreamChangeCache( | |
28 | "DeviceInboxStreamChangeCache", | |
29 | self._device_inbox_id_gen.get_current_token() | |
25 | 30 | ) |
26 | 31 | |
27 | 32 | get_to_device_stream_token = DataStore.get_to_device_stream_token.__func__ |
37 | 42 | stream = result.get("to_device") |
38 | 43 | if stream: |
39 | 44 | self._device_inbox_id_gen.advance(int(stream["position"])) |
45 | for row in stream["rows"]: | |
46 | stream_id = row[0] | |
47 | user_id = row[1] | |
48 | self._device_inbox_stream_cache.entity_has_changed( | |
49 | user_id, stream_id | |
50 | ) | |
40 | 51 | |
41 | 52 | return super(SlavedDeviceInboxStore, self).process_replication(result) |
59 | 59 | self._membership_stream_cache = StreamChangeCache( |
60 | 60 | "MembershipStreamChangeCache", events_max, |
61 | 61 | ) |
62 | ||
63 | self.stream_ordering_month_ago = 0 | |
64 | self._stream_order_on_start = self.get_room_max_stream_ordering() | |
62 | 65 | |
63 | 66 | # Cached functions can't be accessed through a class instance so we need |
64 | 67 | # to reach inside the __dict__ to extract them. |
85 | 88 | _get_state_groups_from_groups = ( |
86 | 89 | StateStore.__dict__["_get_state_groups_from_groups"] |
87 | 90 | ) |
91 | _get_state_groups_from_groups_txn = ( | |
92 | DataStore._get_state_groups_from_groups_txn.__func__ | |
93 | ) | |
88 | 94 | _get_state_group_from_group = ( |
89 | 95 | StateStore.__dict__["_get_state_group_from_group"] |
90 | 96 | ) |
164 | 170 | get_auth_chain_ids = DataStore.get_auth_chain_ids.__func__ |
165 | 171 | _get_auth_chain_ids_txn = DataStore._get_auth_chain_ids_txn.__func__ |
166 | 172 | |
173 | get_room_max_stream_ordering = DataStore.get_room_max_stream_ordering.__func__ | |
174 | ||
175 | get_forward_extremeties_for_room = ( | |
176 | DataStore.get_forward_extremeties_for_room.__func__ | |
177 | ) | |
178 | _get_forward_extremeties_for_room = ( | |
179 | EventFederationStore.__dict__["_get_forward_extremeties_for_room"] | |
180 | ) | |
181 | ||
167 | 182 | def stream_positions(self): |
168 | 183 | result = super(SlavedEventStore, self).stream_positions() |
169 | 184 | result["events"] = self._stream_id_gen.get_current_token() |
14 | 14 | |
15 | 15 | from ._base import BaseSlavedStore |
16 | 16 | from synapse.storage import DataStore |
17 | from ._slaved_id_tracker import SlavedIdTracker | |
17 | 18 | |
18 | 19 | |
19 | 20 | class RoomStore(BaseSlavedStore): |
21 | def __init__(self, db_conn, hs): | |
22 | super(RoomStore, self).__init__(db_conn, hs) | |
23 | self._public_room_id_gen = SlavedIdTracker( | |
24 | db_conn, "public_room_list_stream", "stream_id" | |
25 | ) | |
26 | ||
20 | 27 | get_public_room_ids = DataStore.get_public_room_ids.__func__ |
28 | get_current_public_room_stream_id = ( | |
29 | DataStore.get_current_public_room_stream_id.__func__ | |
30 | ) | |
31 | get_public_room_ids_at_stream_id = ( | |
32 | DataStore.get_public_room_ids_at_stream_id.__func__ | |
33 | ) | |
34 | get_public_room_ids_at_stream_id_txn = ( | |
35 | DataStore.get_public_room_ids_at_stream_id_txn.__func__ | |
36 | ) | |
37 | get_published_at_stream_id_txn = ( | |
38 | DataStore.get_published_at_stream_id_txn.__func__ | |
39 | ) | |
40 | get_public_room_changes = DataStore.get_public_room_changes.__func__ | |
41 | ||
42 | def stream_positions(self): | |
43 | result = super(RoomStore, self).stream_positions() | |
44 | result["public_rooms"] = self._public_room_id_gen.get_current_token() | |
45 | return result | |
46 | ||
47 | def process_replication(self, result): | |
48 | stream = result.get("public_rooms") | |
49 | if stream: | |
50 | self._public_room_id_gen.advance(int(stream["position"])) | |
51 | ||
52 | return super(RoomStore, self).process_replication(result) |
317 | 317 | service_param = urllib.urlencode({ |
318 | 318 | "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param) |
319 | 319 | }) |
320 | request.redirect("%s?%s" % (self.cas_server_url, service_param)) | |
320 | request.redirect("%s/login?%s" % (self.cas_server_url, service_param)) | |
321 | 321 | finish_request(request) |
322 | 322 | |
323 | 323 | |
384 | 384 | |
385 | 385 | def parse_cas_response(self, cas_response_body): |
386 | 386 | user = None |
387 | attributes = None | |
387 | attributes = {} | |
388 | 388 | try: |
389 | 389 | root = ET.fromstring(cas_response_body) |
390 | 390 | if not root.tag.endswith("serviceResponse"): |
394 | 394 | if child.tag.endswith("user"): |
395 | 395 | user = child.text |
396 | 396 | if child.tag.endswith("attributes"): |
397 | attributes = {} | |
398 | 397 | for attribute in child: |
399 | 398 | # ElementTree library expands the namespace in |
400 | 399 | # attribute tags to the full URL of the namespace. |
406 | 405 | attributes[tag] = attribute.text |
407 | 406 | if user is None: |
408 | 407 | raise Exception("CAS response does not contain user") |
409 | if attributes is None: | |
410 | raise Exception("CAS response does not contain attributes") | |
411 | 408 | except Exception: |
412 | 409 | logger.error("Error parsing CAS response", exc_info=1) |
413 | 410 | raise LoginError(401, "Invalid CAS response", |
14 | 14 | |
15 | 15 | from twisted.internet import defer |
16 | 16 | |
17 | from synapse.api.errors import AuthError, Codes | |
17 | from synapse.api.auth import get_access_token_from_request | |
18 | 18 | |
19 | 19 | from .base import ClientV1RestServlet, client_path_patterns |
20 | 20 | |
36 | 36 | |
37 | 37 | @defer.inlineCallbacks |
38 | 38 | def on_POST(self, request): |
39 | try: | |
40 | access_token = request.args["access_token"][0] | |
41 | except KeyError: | |
42 | raise AuthError( | |
43 | self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.", | |
44 | errcode=Codes.MISSING_TOKEN | |
45 | ) | |
39 | access_token = get_access_token_from_request(request) | |
46 | 40 | yield self.store.delete_access_token(access_token) |
47 | 41 | defer.returnValue((200, {})) |
48 | 42 |
17 | 17 | |
18 | 18 | from synapse.api.errors import SynapseError, Codes |
19 | 19 | from synapse.api.constants import LoginType |
20 | from synapse.api.auth import get_access_token_from_request | |
20 | 21 | from .base import ClientV1RestServlet, client_path_patterns |
21 | 22 | import synapse.util.stringutils as stringutils |
22 | 23 | from synapse.http.servlet import parse_json_object_from_request |
295 | 296 | |
296 | 297 | @defer.inlineCallbacks |
297 | 298 | def _do_app_service(self, request, register_json, session): |
298 | if "access_token" not in request.args: | |
299 | raise SynapseError(400, "Expected application service token.") | |
299 | as_token = get_access_token_from_request(request) | |
300 | ||
300 | 301 | if "user" not in register_json: |
301 | 302 | raise SynapseError(400, "Expected 'user' key.") |
302 | 303 | |
303 | as_token = request.args["access_token"][0] | |
304 | 304 | user_localpart = register_json["user"].encode("utf-8") |
305 | 305 | |
306 | 306 | handler = self.handlers.registration_handler |
389 | 389 | def on_POST(self, request): |
390 | 390 | user_json = parse_json_object_from_request(request) |
391 | 391 | |
392 | if "access_token" not in request.args: | |
393 | raise SynapseError(400, "Expected application service token.") | |
394 | ||
392 | access_token = get_access_token_from_request(request) | |
395 | 393 | app_service = yield self.store.get_app_service_by_token( |
396 | request.args["access_token"][0] | |
394 | access_token | |
397 | 395 | ) |
398 | 396 | if not app_service: |
399 | 397 | raise SynapseError(403, "Invalid application service token.") |
21 | 21 | from synapse.api.constants import EventTypes, Membership |
22 | 22 | from synapse.api.filtering import Filter |
23 | 23 | from synapse.types import UserID, RoomID, RoomAlias |
24 | from synapse.events.utils import serialize_event | |
25 | from synapse.http.servlet import parse_json_object_from_request | |
24 | from synapse.events.utils import serialize_event, format_event_for_client_v2 | |
25 | from synapse.http.servlet import ( | |
26 | parse_json_object_from_request, parse_string, parse_integer | |
27 | ) | |
26 | 28 | |
27 | 29 | import logging |
28 | 30 | import urllib |
119 | 121 | @defer.inlineCallbacks |
120 | 122 | def on_GET(self, request, room_id, event_type, state_key): |
121 | 123 | requester = yield self.auth.get_user_by_req(request, allow_guest=True) |
124 | format = parse_string(request, "format", default="content", | |
125 | allowed_values=["content", "event"]) | |
122 | 126 | |
123 | 127 | msg_handler = self.handlers.message_handler |
124 | 128 | data = yield msg_handler.get_room_data( |
133 | 137 | raise SynapseError( |
134 | 138 | 404, "Event not found.", errcode=Codes.NOT_FOUND |
135 | 139 | ) |
136 | defer.returnValue((200, data.get_dict()["content"])) | |
140 | ||
141 | if format == "event": | |
142 | event = format_event_for_client_v2(data.get_dict()) | |
143 | defer.returnValue((200, event)) | |
144 | elif format == "content": | |
145 | defer.returnValue((200, data.get_dict()["content"])) | |
137 | 146 | |
138 | 147 | @defer.inlineCallbacks |
139 | 148 | def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): |
294 | 303 | |
295 | 304 | @defer.inlineCallbacks |
296 | 305 | def on_GET(self, request): |
297 | try: | |
298 | yield self.auth.get_user_by_req(request) | |
299 | except AuthError: | |
300 | # This endpoint isn't authed, but its useful to know who's hitting | |
301 | # it if they *do* supply an access token | |
302 | pass | |
306 | server = parse_string(request, "server", default=None) | |
307 | ||
308 | try: | |
309 | yield self.auth.get_user_by_req(request, allow_guest=True) | |
310 | except AuthError as e: | |
311 | # We allow people to not be authed if they're just looking at our | |
312 | # room list, but require auth when we proxy the request. | |
313 | # In both cases we call the auth function, as that has the side | |
314 | # effect of logging who issued this request if an access token was | |
315 | # provided. | |
316 | if server: | |
317 | raise e | |
318 | else: | |
319 | pass | |
320 | ||
321 | limit = parse_integer(request, "limit", 0) | |
322 | since_token = parse_string(request, "since", None) | |
303 | 323 | |
304 | 324 | handler = self.hs.get_room_list_handler() |
305 | data = yield handler.get_aggregated_public_room_list() | |
325 | if server: | |
326 | data = yield handler.get_remote_public_room_list( | |
327 | server, | |
328 | limit=limit, | |
329 | since_token=since_token, | |
330 | ) | |
331 | else: | |
332 | data = yield handler.get_local_public_room_list( | |
333 | limit=limit, | |
334 | since_token=since_token, | |
335 | ) | |
336 | ||
337 | defer.returnValue((200, data)) | |
338 | ||
339 | @defer.inlineCallbacks | |
340 | def on_POST(self, request): | |
341 | yield self.auth.get_user_by_req(request, allow_guest=True) | |
342 | ||
343 | server = parse_string(request, "server", default=None) | |
344 | content = parse_json_object_from_request(request) | |
345 | ||
346 | limit = int(content.get("limit", 100)) | |
347 | since_token = content.get("since", None) | |
348 | search_filter = content.get("filter", None) | |
349 | ||
350 | handler = self.hs.get_room_list_handler() | |
351 | if server: | |
352 | data = yield handler.get_remote_public_room_list( | |
353 | server, | |
354 | limit=limit, | |
355 | since_token=since_token, | |
356 | search_filter=search_filter, | |
357 | ) | |
358 | else: | |
359 | data = yield handler.get_local_public_room_list( | |
360 | limit=limit, | |
361 | since_token=since_token, | |
362 | search_filter=search_filter, | |
363 | ) | |
306 | 364 | |
307 | 365 | defer.returnValue((200, data)) |
308 | 366 |
15 | 15 | """This module contains logic for storing HTTP PUT transactions. This is used |
16 | 16 | to ensure idempotency when performing PUTs using the REST API.""" |
17 | 17 | import logging |
18 | ||
19 | from synapse.api.auth import get_access_token_from_request | |
18 | 20 | |
19 | 21 | logger = logging.getLogger(__name__) |
20 | 22 | |
89 | 91 | return response |
90 | 92 | |
91 | 93 | def _get_key(self, request): |
92 | token = request.args["access_token"][0] | |
94 | token = get_access_token_from_request(request) | |
93 | 95 | path_without_txn_id = request.path.rsplit("/", 1)[0] |
94 | 96 | return path_without_txn_id + "/" + token |
14 | 14 | |
15 | 15 | import logging |
16 | 16 | |
17 | import simplejson as json | |
18 | from canonicaljson import encode_canonical_json | |
19 | 17 | from twisted.internet import defer |
20 | 18 | |
21 | import synapse.api.errors | |
22 | import synapse.server | |
23 | import synapse.types | |
24 | from synapse.http.servlet import RestServlet, parse_json_object_from_request | |
25 | from synapse.types import UserID | |
19 | from synapse.api.errors import SynapseError | |
20 | from synapse.http.servlet import ( | |
21 | RestServlet, parse_json_object_from_request, parse_integer | |
22 | ) | |
26 | 23 | from ._base import client_v2_patterns |
27 | 24 | |
28 | 25 | logger = logging.getLogger(__name__) |
62 | 59 | hs (synapse.server.HomeServer): server |
63 | 60 | """ |
64 | 61 | super(KeyUploadServlet, self).__init__() |
65 | self.store = hs.get_datastore() | |
66 | self.clock = hs.get_clock() | |
67 | 62 | self.auth = hs.get_auth() |
68 | self.device_handler = hs.get_device_handler() | |
63 | self.e2e_keys_handler = hs.get_e2e_keys_handler() | |
69 | 64 | |
70 | 65 | @defer.inlineCallbacks |
71 | 66 | def on_POST(self, request, device_id): |
72 | 67 | requester = yield self.auth.get_user_by_req(request) |
73 | ||
74 | 68 | user_id = requester.user.to_string() |
75 | ||
76 | 69 | body = parse_json_object_from_request(request) |
77 | 70 | |
78 | 71 | if device_id is not None: |
87 | 80 | device_id = requester.device_id |
88 | 81 | |
89 | 82 | if device_id is None: |
90 | raise synapse.api.errors.SynapseError( | |
83 | raise SynapseError( | |
91 | 84 | 400, |
92 | 85 | "To upload keys, you must pass device_id when authenticating" |
93 | 86 | ) |
94 | 87 | |
95 | time_now = self.clock.time_msec() | |
96 | ||
97 | # TODO: Validate the JSON to make sure it has the right keys. | |
98 | device_keys = body.get("device_keys", None) | |
99 | if device_keys: | |
100 | logger.info( | |
101 | "Updating device_keys for device %r for user %s at %d", | |
102 | device_id, user_id, time_now | |
103 | ) | |
104 | # TODO: Sign the JSON with the server key | |
105 | yield self.store.set_e2e_device_keys( | |
106 | user_id, device_id, time_now, | |
107 | encode_canonical_json(device_keys) | |
108 | ) | |
109 | ||
110 | one_time_keys = body.get("one_time_keys", None) | |
111 | if one_time_keys: | |
112 | logger.info( | |
113 | "Adding %d one_time_keys for device %r for user %r at %d", | |
114 | len(one_time_keys), device_id, user_id, time_now | |
115 | ) | |
116 | key_list = [] | |
117 | for key_id, key_json in one_time_keys.items(): | |
118 | algorithm, key_id = key_id.split(":") | |
119 | key_list.append(( | |
120 | algorithm, key_id, encode_canonical_json(key_json) | |
121 | )) | |
122 | ||
123 | yield self.store.add_e2e_one_time_keys( | |
124 | user_id, device_id, time_now, key_list | |
125 | ) | |
126 | ||
127 | # the device should have been registered already, but it may have been | |
128 | # deleted due to a race with a DELETE request. Or we may be using an | |
129 | # old access_token without an associated device_id. Either way, we | |
130 | # need to double-check the device is registered to avoid ending up with | |
131 | # keys without a corresponding device. | |
132 | self.device_handler.check_device_registered(user_id, device_id) | |
133 | ||
134 | result = yield self.store.count_e2e_one_time_keys(user_id, device_id) | |
135 | defer.returnValue((200, {"one_time_key_counts": result})) | |
88 | result = yield self.e2e_keys_handler.upload_keys_for_user( | |
89 | user_id, device_id, body | |
90 | ) | |
91 | defer.returnValue((200, result)) | |
136 | 92 | |
137 | 93 | |
138 | 94 | class KeyQueryServlet(RestServlet): |
194 | 150 | @defer.inlineCallbacks |
195 | 151 | def on_POST(self, request, user_id, device_id): |
196 | 152 | yield self.auth.get_user_by_req(request) |
153 | timeout = parse_integer(request, "timeout", 10 * 1000) | |
197 | 154 | body = parse_json_object_from_request(request) |
198 | result = yield self.e2e_keys_handler.query_devices(body) | |
199 | defer.returnValue(result) | |
155 | result = yield self.e2e_keys_handler.query_devices(body, timeout) | |
156 | defer.returnValue((200, result)) | |
200 | 157 | |
201 | 158 | @defer.inlineCallbacks |
202 | 159 | def on_GET(self, request, user_id, device_id): |
203 | 160 | requester = yield self.auth.get_user_by_req(request) |
161 | timeout = parse_integer(request, "timeout", 10 * 1000) | |
204 | 162 | auth_user_id = requester.user.to_string() |
205 | 163 | user_id = user_id if user_id else auth_user_id |
206 | 164 | device_ids = [device_id] if device_id else [] |
207 | 165 | result = yield self.e2e_keys_handler.query_devices( |
208 | {"device_keys": {user_id: device_ids}} | |
209 | ) | |
210 | defer.returnValue(result) | |
166 | {"device_keys": {user_id: device_ids}}, | |
167 | timeout, | |
168 | ) | |
169 | defer.returnValue((200, result)) | |
211 | 170 | |
212 | 171 | |
213 | 172 | class OneTimeKeyServlet(RestServlet): |
239 | 198 | |
240 | 199 | def __init__(self, hs): |
241 | 200 | super(OneTimeKeyServlet, self).__init__() |
242 | self.store = hs.get_datastore() | |
243 | 201 | self.auth = hs.get_auth() |
244 | self.clock = hs.get_clock() | |
245 | self.federation = hs.get_replication_layer() | |
246 | self.is_mine = hs.is_mine | |
202 | self.e2e_keys_handler = hs.get_e2e_keys_handler() | |
247 | 203 | |
248 | 204 | @defer.inlineCallbacks |
249 | 205 | def on_GET(self, request, user_id, device_id, algorithm): |
250 | 206 | yield self.auth.get_user_by_req(request) |
251 | result = yield self.handle_request( | |
252 | {"one_time_keys": {user_id: {device_id: algorithm}}} | |
253 | ) | |
254 | defer.returnValue(result) | |
207 | timeout = parse_integer(request, "timeout", 10 * 1000) | |
208 | result = yield self.e2e_keys_handler.claim_one_time_keys( | |
209 | {"one_time_keys": {user_id: {device_id: algorithm}}}, | |
210 | timeout, | |
211 | ) | |
212 | defer.returnValue((200, result)) | |
255 | 213 | |
256 | 214 | @defer.inlineCallbacks |
257 | 215 | def on_POST(self, request, user_id, device_id, algorithm): |
258 | 216 | yield self.auth.get_user_by_req(request) |
217 | timeout = parse_integer(request, "timeout", 10 * 1000) | |
259 | 218 | body = parse_json_object_from_request(request) |
260 | result = yield self.handle_request(body) | |
261 | defer.returnValue(result) | |
262 | ||
263 | @defer.inlineCallbacks | |
264 | def handle_request(self, body): | |
265 | local_query = [] | |
266 | remote_queries = {} | |
267 | for user_id, device_keys in body.get("one_time_keys", {}).items(): | |
268 | user = UserID.from_string(user_id) | |
269 | if self.is_mine(user): | |
270 | for device_id, algorithm in device_keys.items(): | |
271 | local_query.append((user_id, device_id, algorithm)) | |
272 | else: | |
273 | remote_queries.setdefault(user.domain, {})[user_id] = ( | |
274 | device_keys | |
275 | ) | |
276 | results = yield self.store.claim_e2e_one_time_keys(local_query) | |
277 | ||
278 | json_result = {} | |
279 | for user_id, device_keys in results.items(): | |
280 | for device_id, keys in device_keys.items(): | |
281 | for key_id, json_bytes in keys.items(): | |
282 | json_result.setdefault(user_id, {})[device_id] = { | |
283 | key_id: json.loads(json_bytes) | |
284 | } | |
285 | ||
286 | for destination, device_keys in remote_queries.items(): | |
287 | remote_result = yield self.federation.claim_client_keys( | |
288 | destination, {"one_time_keys": device_keys} | |
289 | ) | |
290 | for user_id, keys in remote_result["one_time_keys"].items(): | |
291 | if user_id in device_keys: | |
292 | json_result[user_id] = keys | |
293 | ||
294 | defer.returnValue((200, {"one_time_keys": json_result})) | |
219 | result = yield self.e2e_keys_handler.claim_one_time_keys( | |
220 | body, | |
221 | timeout, | |
222 | ) | |
223 | defer.returnValue((200, result)) | |
295 | 224 | |
296 | 225 | |
297 | 226 | def register_servlets(hs, http_server): |
44 | 44 | |
45 | 45 | from_token = parse_string(request, "from", required=False) |
46 | 46 | limit = parse_integer(request, "limit", default=50) |
47 | only = parse_string(request, "only", required=False) | |
47 | 48 | |
48 | 49 | limit = min(limit, 500) |
49 | 50 | |
50 | 51 | push_actions = yield self.store.get_push_actions_for_user( |
51 | user_id, from_token, limit | |
52 | user_id, from_token, limit, only_highlight=(only == "highlight") | |
52 | 53 | ) |
53 | 54 | |
54 | 55 | receipts_by_room = yield self.store.get_receipts_for_user_with_orderings( |
14 | 14 | |
15 | 15 | from twisted.internet import defer |
16 | 16 | |
17 | from synapse.api.auth import get_access_token_from_request, has_access_token | |
17 | 18 | from synapse.api.constants import LoginType |
18 | 19 | from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError |
19 | 20 | from synapse.http.servlet import RestServlet, parse_json_object_from_request |
130 | 131 | desired_username = body['username'] |
131 | 132 | |
132 | 133 | appservice = None |
133 | if 'access_token' in request.args: | |
134 | if has_access_token(request): | |
134 | 135 | appservice = yield self.auth.get_appservice_by_req(request) |
135 | 136 | |
136 | 137 | # fork off as soon as possible for ASes and shared secret auth which |
142 | 143 | # 'user' key not 'username'). Since this is a new addition, we'll |
143 | 144 | # fallback to 'username' if they gave one. |
144 | 145 | desired_username = body.get("user", desired_username) |
146 | access_token = get_access_token_from_request(request) | |
145 | 147 | |
146 | 148 | if isinstance(desired_username, basestring): |
147 | 149 | result = yield self._do_appservice_registration( |
148 | desired_username, request.args["access_token"][0], body | |
150 | desired_username, access_token, body | |
149 | 151 | ) |
150 | 152 | defer.returnValue((200, result)) # we throw for non 200 responses |
151 | 153 | return |
15 | 15 | import logging |
16 | 16 | |
17 | 17 | from twisted.internet import defer |
18 | from synapse.http.servlet import parse_json_object_from_request | |
19 | 18 | |
20 | 19 | from synapse.http import servlet |
20 | from synapse.http.servlet import parse_json_object_from_request | |
21 | 21 | from synapse.rest.client.v1.transactions import HttpTransactionStore |
22 | ||
22 | 23 | from ._base import client_v2_patterns |
23 | 24 | |
24 | 25 | logger = logging.getLogger(__name__) |
38 | 39 | super(SendToDeviceRestServlet, self).__init__() |
39 | 40 | self.hs = hs |
40 | 41 | self.auth = hs.get_auth() |
41 | self.store = hs.get_datastore() | |
42 | self.notifier = hs.get_notifier() | |
43 | self.is_mine_id = hs.is_mine_id | |
44 | 42 | self.txns = HttpTransactionStore() |
43 | self.device_message_handler = hs.get_device_message_handler() | |
45 | 44 | |
46 | 45 | @defer.inlineCallbacks |
47 | 46 | def on_PUT(self, request, message_type, txn_id): |
56 | 55 | |
57 | 56 | content = parse_json_object_from_request(request) |
58 | 57 | |
59 | # TODO: Prod the notifier to wake up sync streams. | |
60 | # TODO: Implement replication for the messages. | |
61 | # TODO: Send the messages to remote servers if needed. | |
58 | sender_user_id = requester.user.to_string() | |
62 | 59 | |
63 | local_messages = {} | |
64 | for user_id, by_device in content["messages"].items(): | |
65 | if self.is_mine_id(user_id): | |
66 | messages_by_device = { | |
67 | device_id: { | |
68 | "content": message_content, | |
69 | "type": message_type, | |
70 | "sender": requester.user.to_string(), | |
71 | } | |
72 | for device_id, message_content in by_device.items() | |
73 | } | |
74 | if messages_by_device: | |
75 | local_messages[user_id] = messages_by_device | |
76 | ||
77 | stream_id = yield self.store.add_messages_to_device_inbox(local_messages) | |
78 | ||
79 | self.notifier.on_new_event( | |
80 | "to_device_key", stream_id, users=local_messages.keys() | |
60 | yield self.device_message_handler.send_device_message( | |
61 | sender_user_id, message_type, content["messages"] | |
81 | 62 | ) |
82 | 63 | |
83 | 64 | response = (200, {}) |
41 | 41 | defer.returnValue((200, protocols)) |
42 | 42 | |
43 | 43 | |
44 | class ThirdPartyProtocolServlet(RestServlet): | |
45 | PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$", | |
46 | releases=()) | |
47 | ||
48 | def __init__(self, hs): | |
49 | super(ThirdPartyProtocolServlet, self).__init__() | |
50 | ||
51 | self.auth = hs.get_auth() | |
52 | self.appservice_handler = hs.get_application_service_handler() | |
53 | ||
54 | @defer.inlineCallbacks | |
55 | def on_GET(self, request, protocol): | |
56 | yield self.auth.get_user_by_req(request) | |
57 | ||
58 | protocols = yield self.appservice_handler.get_3pe_protocols( | |
59 | only_protocol=protocol, | |
60 | ) | |
61 | if protocol in protocols: | |
62 | defer.returnValue((200, protocols[protocol])) | |
63 | else: | |
64 | defer.returnValue((404, {"error": "Unknown protocol"})) | |
65 | ||
66 | ||
44 | 67 | class ThirdPartyUserServlet(RestServlet): |
45 | 68 | PATTERNS = client_v2_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$", |
46 | 69 | releases=()) |
56 | 79 | yield self.auth.get_user_by_req(request) |
57 | 80 | |
58 | 81 | fields = request.args |
59 | del fields["access_token"] | |
82 | fields.pop("access_token", None) | |
60 | 83 | |
61 | 84 | results = yield self.appservice_handler.query_3pe( |
62 | 85 | ThirdPartyEntityKind.USER, protocol, fields |
80 | 103 | yield self.auth.get_user_by_req(request) |
81 | 104 | |
82 | 105 | fields = request.args |
83 | del fields["access_token"] | |
106 | fields.pop("access_token", None) | |
84 | 107 | |
85 | 108 | results = yield self.appservice_handler.query_3pe( |
86 | 109 | ThirdPartyEntityKind.LOCATION, protocol, fields |
91 | 114 | |
92 | 115 | def register_servlets(hs, http_server): |
93 | 116 | ThirdPartyProtocolsServlet(hs).register(http_server) |
117 | ThirdPartyProtocolServlet(hs).register(http_server) | |
94 | 118 | ThirdPartyUserServlet(hs).register(http_server) |
95 | 119 | ThirdPartyLocationServlet(hs).register(http_server) |
44 | 44 | @request_handler() |
45 | 45 | @defer.inlineCallbacks |
46 | 46 | def _async_render_GET(self, request): |
47 | request.setHeader("Content-Security-Policy", "sandbox") | |
47 | request.setHeader( | |
48 | "Content-Security-Policy", | |
49 | "default-src 'none';" | |
50 | " script-src 'none';" | |
51 | " plugin-types application/pdf;" | |
52 | " style-src 'unsafe-inline';" | |
53 | " object-src 'self';" | |
54 | ) | |
48 | 55 | server_name, media_id, name = parse_media_id(request) |
49 | 56 | if server_name == self.server_name: |
50 | 57 | yield self._respond_local_file(request, media_id, name) |
34 | 34 | from synapse.handlers import Handlers |
35 | 35 | from synapse.handlers.appservice import ApplicationServicesHandler |
36 | 36 | from synapse.handlers.auth import AuthHandler |
37 | from synapse.handlers.devicemessage import DeviceMessageHandler | |
37 | 38 | from synapse.handlers.device import DeviceHandler |
38 | 39 | from synapse.handlers.e2e_keys import E2eKeysHandler |
39 | 40 | from synapse.handlers.presence import PresenceHandler |
40 | from synapse.handlers.room import RoomListHandler | |
41 | from synapse.handlers.room_list import RoomListHandler | |
41 | 42 | from synapse.handlers.sync import SyncHandler |
42 | 43 | from synapse.handlers.typing import TypingHandler |
43 | 44 | from synapse.handlers.events import EventHandler, EventStreamHandler |
99 | 100 | 'application_service_api', |
100 | 101 | 'application_service_scheduler', |
101 | 102 | 'application_service_handler', |
103 | 'device_message_handler', | |
102 | 104 | 'notifier', |
103 | 105 | 'distributor', |
104 | 106 | 'client_resource', |
204 | 206 | def build_device_handler(self): |
205 | 207 | return DeviceHandler(self) |
206 | 208 | |
209 | def build_device_message_handler(self): | |
210 | return DeviceMessageHandler(self) | |
211 | ||
207 | 212 | def build_e2e_keys_handler(self): |
208 | 213 | return E2eKeysHandler(self) |
209 | 214 |
25 | 25 | from synapse.util.async import Linearizer |
26 | 26 | |
27 | 27 | from collections import namedtuple |
28 | from frozendict import frozendict | |
28 | 29 | |
29 | 30 | import logging |
30 | 31 | import hashlib |
54 | 55 | |
55 | 56 | |
56 | 57 | class _StateCacheEntry(object): |
57 | __slots__ = ["state", "state_group", "state_id"] | |
58 | ||
59 | def __init__(self, state, state_group): | |
60 | self.state = state | |
58 | __slots__ = ["state", "state_group", "state_id", "prev_group", "delta_ids"] | |
59 | ||
60 | def __init__(self, state, state_group, prev_group=None, delta_ids=None): | |
61 | self.state = frozendict(state) | |
61 | 62 | self.state_group = state_group |
63 | ||
64 | self.prev_group = prev_group | |
65 | self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None | |
62 | 66 | |
63 | 67 | # The `state_id` is a unique ID we generate that can be used as ID for |
64 | 68 | # this collection of state. Usually this would be the same as the |
152 | 156 | defer.returnValue(state) |
153 | 157 | |
154 | 158 | @defer.inlineCallbacks |
155 | def get_current_user_in_room(self, room_id): | |
156 | latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) | |
159 | def get_current_user_in_room(self, room_id, latest_event_ids=None): | |
160 | if not latest_event_ids: | |
161 | latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id) | |
157 | 162 | entry = yield self.resolve_state_groups(room_id, latest_event_ids) |
158 | 163 | joined_users = yield self.store.get_joined_users_from_state( |
159 | 164 | room_id, entry.state_id, entry.state |
233 | 238 | context.prev_state_ids = curr_state |
234 | 239 | if event.is_state(): |
235 | 240 | context.state_group = self.store.get_next_state_group() |
241 | ||
242 | key = (event.type, event.state_key) | |
243 | if key in context.prev_state_ids: | |
244 | replaces = context.prev_state_ids[key] | |
245 | event.unsigned["replaces_state"] = replaces | |
246 | ||
247 | context.current_state_ids = dict(context.prev_state_ids) | |
248 | context.current_state_ids[key] = event.event_id | |
249 | ||
250 | context.prev_group = entry.prev_group | |
251 | context.delta_ids = entry.delta_ids | |
252 | if context.delta_ids is not None: | |
253 | context.delta_ids = dict(context.delta_ids) | |
254 | context.delta_ids[key] = event.event_id | |
236 | 255 | else: |
237 | 256 | if entry.state_group is None: |
238 | 257 | entry.state_group = self.store.get_next_state_group() |
239 | 258 | entry.state_id = entry.state_group |
259 | ||
240 | 260 | context.state_group = entry.state_group |
241 | ||
242 | if event.is_state(): | |
243 | key = (event.type, event.state_key) | |
244 | if key in context.prev_state_ids: | |
245 | replaces = context.prev_state_ids[key] | |
246 | event.unsigned["replaces_state"] = replaces | |
247 | context.current_state_ids = dict(context.prev_state_ids) | |
248 | context.current_state_ids[key] = event.event_id | |
249 | else: | |
250 | 261 | context.current_state_ids = context.prev_state_ids |
262 | context.prev_group = entry.prev_group | |
263 | context.delta_ids = entry.delta_ids | |
251 | 264 | |
252 | 265 | context.prev_state_events = [] |
253 | 266 | defer.returnValue(context) |
282 | 295 | defer.returnValue(_StateCacheEntry( |
283 | 296 | state=state_list, |
284 | 297 | state_group=name, |
298 | prev_group=name, | |
299 | delta_ids={}, | |
285 | 300 | )) |
286 | 301 | |
287 | 302 | with (yield self.resolve_linearizer.queue(group_names)): |
339 | 354 | if hasattr(self.store, "get_next_state_group"): |
340 | 355 | state_group = self.store.get_next_state_group() |
341 | 356 | |
357 | prev_group = None | |
358 | delta_ids = None | |
359 | for old_group, old_ids in state_groups_ids.items(): | |
360 | if not set(new_state.iterkeys()) - set(old_ids.iterkeys()): | |
361 | n_delta_ids = { | |
362 | k: v | |
363 | for k, v in new_state.items() | |
364 | if old_ids.get(k) != v | |
365 | } | |
366 | if not delta_ids or len(n_delta_ids) < len(delta_ids): | |
367 | prev_group = old_group | |
368 | delta_ids = n_delta_ids | |
369 | ||
342 | 370 | cache = _StateCacheEntry( |
343 | 371 | state=new_state, |
344 | 372 | state_group=state_group, |
373 | prev_group=prev_group, | |
374 | delta_ids=delta_ids, | |
345 | 375 | ) |
346 | 376 | |
347 | 377 | if self._state_cache is not None: |
110 | 110 | db_conn, "presence_stream", "stream_id" |
111 | 111 | ) |
112 | 112 | self._device_inbox_id_gen = StreamIdGenerator( |
113 | db_conn, "device_inbox", "stream_id" | |
113 | db_conn, "device_max_stream_id", "stream_id" | |
114 | ) | |
115 | self._public_room_id_gen = StreamIdGenerator( | |
116 | db_conn, "public_room_list_stream", "stream_id" | |
114 | 117 | ) |
115 | 118 | |
116 | 119 | self._transaction_id_gen = IdGenerator(db_conn, "sent_transactions", "id") |
181 | 184 | prefilled_cache=push_rules_prefill, |
182 | 185 | ) |
183 | 186 | |
187 | max_device_inbox_id = self._device_inbox_id_gen.get_current_token() | |
188 | device_inbox_prefill, min_device_inbox_id = self._get_cache_dict( | |
189 | db_conn, "device_inbox", | |
190 | entity_column="user_id", | |
191 | stream_column="stream_id", | |
192 | max_value=max_device_inbox_id | |
193 | ) | |
194 | self._device_inbox_stream_cache = StreamChangeCache( | |
195 | "DeviceInboxStreamChangeCache", min_device_inbox_id, | |
196 | prefilled_cache=device_inbox_prefill, | |
197 | ) | |
198 | # The federation outbox and the local device inbox uses the same | |
199 | # stream_id generator. | |
200 | device_outbox_prefill, min_device_outbox_id = self._get_cache_dict( | |
201 | db_conn, "device_federation_outbox", | |
202 | entity_column="destination", | |
203 | stream_column="stream_id", | |
204 | max_value=max_device_inbox_id, | |
205 | ) | |
206 | self._device_federation_outbox_stream_cache = StreamChangeCache( | |
207 | "DeviceFederationOutboxStreamChangeCache", min_device_outbox_id, | |
208 | prefilled_cache=device_outbox_prefill, | |
209 | ) | |
210 | ||
184 | 211 | cur = LoggingTransaction( |
185 | 212 | db_conn.cursor(), |
186 | 213 | name="_find_stream_orderings_for_times_txn", |
193 | 220 | self.find_stream_orderings_looping_call = self._clock.looping_call( |
194 | 221 | self._find_stream_orderings_for_times, 60 * 60 * 1000 |
195 | 222 | ) |
223 | ||
224 | self._stream_order_on_start = self.get_room_max_stream_ordering() | |
196 | 225 | |
197 | 226 | super(DataStore, self).__init__(hs) |
198 | 227 |
132 | 132 | updates = yield self._simple_select_list( |
133 | 133 | "background_updates", |
134 | 134 | keyvalues=None, |
135 | retcols=("update_name",), | |
135 | retcols=("update_name", "depends_on"), | |
136 | 136 | ) |
137 | in_flight = set(update["update_name"] for update in updates) | |
137 | 138 | for update in updates: |
138 | self._background_update_queue.append(update['update_name']) | |
139 | if update["depends_on"] not in in_flight: | |
140 | self._background_update_queue.append(update['update_name']) | |
139 | 141 | |
140 | 142 | if not self._background_update_queue: |
141 | 143 | # no work left to do |
216 | 218 | self._background_update_handlers[update_name] = update_handler |
217 | 219 | |
218 | 220 | def register_background_index_update(self, update_name, index_name, |
219 | table, columns): | |
221 | table, columns, where_clause=None): | |
220 | 222 | """Helper for store classes to do a background index addition |
221 | 223 | |
222 | 224 | To use: |
240 | 242 | conc = True |
241 | 243 | else: |
242 | 244 | conc = False |
243 | ||
244 | sql = "CREATE INDEX %(conc)s %(name)s ON %(table)s (%(columns)s)" \ | |
245 | % { | |
246 | "conc": "CONCURRENTLY" if conc else "", | |
247 | "name": index_name, | |
248 | "table": table, | |
249 | "columns": ", ".join(columns), | |
250 | } | |
245 | # We don't use partial indices on SQLite as it wasn't introduced | |
246 | # until 3.8, and wheezy has 3.7 | |
247 | where_clause = None | |
248 | ||
249 | sql = ( | |
250 | "CREATE INDEX %(conc)s %(name)s ON %(table)s (%(columns)s)" | |
251 | " %(where_clause)s" | |
252 | ) % { | |
253 | "conc": "CONCURRENTLY" if conc else "", | |
254 | "name": index_name, | |
255 | "table": table, | |
256 | "columns": ", ".join(columns), | |
257 | "where_clause": "WHERE " + where_clause if where_clause else "" | |
258 | } | |
251 | 259 | |
252 | 260 | def create_index_concurrently(conn): |
253 | 261 | conn.rollback() |
26 | 26 | class DeviceInboxStore(SQLBaseStore): |
27 | 27 | |
28 | 28 | @defer.inlineCallbacks |
29 | def add_messages_to_device_inbox(self, messages_by_user_then_device): | |
30 | """ | |
31 | Args: | |
32 | messages_by_user_and_device(dict): | |
29 | def add_messages_to_device_inbox(self, local_messages_by_user_then_device, | |
30 | remote_messages_by_destination): | |
31 | """Used to send messages from this server. | |
32 | ||
33 | Args: | |
34 | sender_user_id(str): The ID of the user sending these messages. | |
35 | local_messages_by_user_and_device(dict): | |
33 | 36 | Dictionary of user_id to device_id to message. |
37 | remote_messages_by_destination(dict): | |
38 | Dictionary of destination server_name to the EDU JSON to send. | |
34 | 39 | Returns: |
35 | 40 | A deferred stream_id that resolves when the messages have been |
36 | 41 | inserted. |
37 | 42 | """ |
38 | 43 | |
39 | def select_devices_txn(txn, user_id, devices): | |
40 | if not devices: | |
41 | return [] | |
42 | sql = ( | |
43 | "SELECT user_id, device_id FROM devices" | |
44 | " WHERE user_id = ? AND device_id IN (" | |
45 | + ",".join("?" * len(devices)) | |
46 | + ")" | |
47 | ) | |
48 | # TODO: Maybe this needs to be done in batches if there are | |
49 | # too many local devices for a given user. | |
50 | args = [user_id] + devices | |
51 | txn.execute(sql, args) | |
52 | return [tuple(row) for row in txn.fetchall()] | |
53 | ||
54 | def add_messages_to_device_inbox_txn(txn, stream_id): | |
55 | local_users_and_devices = set() | |
56 | for user_id, messages_by_device in messages_by_user_then_device.items(): | |
57 | local_users_and_devices.update( | |
58 | select_devices_txn(txn, user_id, messages_by_device.keys()) | |
59 | ) | |
60 | ||
61 | sql = ( | |
62 | "INSERT INTO device_inbox" | |
63 | " (user_id, device_id, stream_id, message_json)" | |
44 | def add_messages_txn(txn, now_ms, stream_id): | |
45 | # Add the local messages directly to the local inbox. | |
46 | self._add_messages_to_local_device_inbox_txn( | |
47 | txn, stream_id, local_messages_by_user_then_device | |
48 | ) | |
49 | ||
50 | # Add the remote messages to the federation outbox. | |
51 | # We'll send them to a remote server when we next send a | |
52 | # federation transaction to that destination. | |
53 | sql = ( | |
54 | "INSERT INTO device_federation_outbox" | |
55 | " (destination, stream_id, queued_ts, messages_json)" | |
64 | 56 | " VALUES (?,?,?,?)" |
65 | 57 | ) |
66 | 58 | rows = [] |
67 | for user_id, messages_by_device in messages_by_user_then_device.items(): | |
68 | for device_id, message in messages_by_device.items(): | |
69 | message_json = ujson.dumps(message) | |
59 | for destination, edu in remote_messages_by_destination.items(): | |
60 | edu_json = ujson.dumps(edu) | |
61 | rows.append((destination, stream_id, now_ms, edu_json)) | |
62 | txn.executemany(sql, rows) | |
63 | ||
64 | with self._device_inbox_id_gen.get_next() as stream_id: | |
65 | now_ms = self.clock.time_msec() | |
66 | yield self.runInteraction( | |
67 | "add_messages_to_device_inbox", | |
68 | add_messages_txn, | |
69 | now_ms, | |
70 | stream_id, | |
71 | ) | |
72 | for user_id in local_messages_by_user_then_device.keys(): | |
73 | self._device_inbox_stream_cache.entity_has_changed( | |
74 | user_id, stream_id | |
75 | ) | |
76 | for destination in remote_messages_by_destination.keys(): | |
77 | self._device_federation_outbox_stream_cache.entity_has_changed( | |
78 | destination, stream_id | |
79 | ) | |
80 | ||
81 | defer.returnValue(self._device_inbox_id_gen.get_current_token()) | |
82 | ||
83 | @defer.inlineCallbacks | |
84 | def add_messages_from_remote_to_device_inbox( | |
85 | self, origin, message_id, local_messages_by_user_then_device | |
86 | ): | |
87 | def add_messages_txn(txn, now_ms, stream_id): | |
88 | # Check if we've already inserted a matching message_id for that | |
89 | # origin. This can happen if the origin doesn't receive our | |
90 | # acknowledgement from the first time we received the message. | |
91 | already_inserted = self._simple_select_one_txn( | |
92 | txn, table="device_federation_inbox", | |
93 | keyvalues={"origin": origin, "message_id": message_id}, | |
94 | retcols=("message_id",), | |
95 | allow_none=True, | |
96 | ) | |
97 | if already_inserted is not None: | |
98 | return | |
99 | ||
100 | # Add an entry for this message_id so that we know we've processed | |
101 | # it. | |
102 | self._simple_insert_txn( | |
103 | txn, table="device_federation_inbox", | |
104 | values={ | |
105 | "origin": origin, | |
106 | "message_id": message_id, | |
107 | "received_ts": now_ms, | |
108 | }, | |
109 | ) | |
110 | ||
111 | # Add the messages to the approriate local device inboxes so that | |
112 | # they'll be sent to the devices when they next sync. | |
113 | self._add_messages_to_local_device_inbox_txn( | |
114 | txn, stream_id, local_messages_by_user_then_device | |
115 | ) | |
116 | ||
117 | with self._device_inbox_id_gen.get_next() as stream_id: | |
118 | now_ms = self.clock.time_msec() | |
119 | yield self.runInteraction( | |
120 | "add_messages_from_remote_to_device_inbox", | |
121 | add_messages_txn, | |
122 | now_ms, | |
123 | stream_id, | |
124 | ) | |
125 | for user_id in local_messages_by_user_then_device.keys(): | |
126 | self._device_inbox_stream_cache.entity_has_changed( | |
127 | user_id, stream_id | |
128 | ) | |
129 | ||
130 | defer.returnValue(stream_id) | |
131 | ||
132 | def _add_messages_to_local_device_inbox_txn(self, txn, stream_id, | |
133 | messages_by_user_then_device): | |
134 | sql = ( | |
135 | "UPDATE device_max_stream_id" | |
136 | " SET stream_id = ?" | |
137 | " WHERE stream_id < ?" | |
138 | ) | |
139 | txn.execute(sql, (stream_id, stream_id)) | |
140 | ||
141 | local_by_user_then_device = {} | |
142 | for user_id, messages_by_device in messages_by_user_then_device.items(): | |
143 | messages_json_for_user = {} | |
144 | devices = messages_by_device.keys() | |
145 | if len(devices) == 1 and devices[0] == "*": | |
146 | # Handle wildcard device_ids. | |
147 | sql = ( | |
148 | "SELECT device_id FROM devices" | |
149 | " WHERE user_id = ?" | |
150 | ) | |
151 | txn.execute(sql, (user_id,)) | |
152 | message_json = ujson.dumps(messages_by_device["*"]) | |
153 | for row in txn.fetchall(): | |
154 | # Add the message for all devices for this user on this | |
155 | # server. | |
156 | device = row[0] | |
157 | messages_json_for_user[device] = message_json | |
158 | else: | |
159 | if not devices: | |
160 | continue | |
161 | sql = ( | |
162 | "SELECT device_id FROM devices" | |
163 | " WHERE user_id = ? AND device_id IN (" | |
164 | + ",".join("?" * len(devices)) | |
165 | + ")" | |
166 | ) | |
167 | # TODO: Maybe this needs to be done in batches if there are | |
168 | # too many local devices for a given user. | |
169 | txn.execute(sql, [user_id] + devices) | |
170 | for row in txn.fetchall(): | |
70 | 171 | # Only insert into the local inbox if the device exists on |
71 | 172 | # this server |
72 | if (user_id, device_id) in local_users_and_devices: | |
73 | rows.append((user_id, device_id, stream_id, message_json)) | |
74 | ||
75 | txn.executemany(sql, rows) | |
76 | ||
77 | with self._device_inbox_id_gen.get_next() as stream_id: | |
78 | yield self.runInteraction( | |
79 | "add_messages_to_device_inbox", | |
80 | add_messages_to_device_inbox_txn, | |
81 | stream_id | |
82 | ) | |
83 | ||
84 | defer.returnValue(self._device_inbox_id_gen.get_current_token()) | |
173 | device = row[0] | |
174 | message_json = ujson.dumps(messages_by_device[device]) | |
175 | messages_json_for_user[device] = message_json | |
176 | ||
177 | if messages_json_for_user: | |
178 | local_by_user_then_device[user_id] = messages_json_for_user | |
179 | ||
180 | if not local_by_user_then_device: | |
181 | return | |
182 | ||
183 | sql = ( | |
184 | "INSERT INTO device_inbox" | |
185 | " (user_id, device_id, stream_id, message_json)" | |
186 | " VALUES (?,?,?,?)" | |
187 | ) | |
188 | rows = [] | |
189 | for user_id, messages_by_device in local_by_user_then_device.items(): | |
190 | for device_id, message_json in messages_by_device.items(): | |
191 | rows.append((user_id, device_id, stream_id, message_json)) | |
192 | ||
193 | txn.executemany(sql, rows) | |
85 | 194 | |
86 | 195 | def get_new_messages_for_device( |
87 | 196 | self, user_id, device_id, last_stream_id, current_stream_id, limit=100 |
96 | 205 | Deferred ([dict], int): List of messages for the device and where |
97 | 206 | in the stream the messages got to. |
98 | 207 | """ |
208 | has_changed = self._device_inbox_stream_cache.has_entity_changed( | |
209 | user_id, last_stream_id | |
210 | ) | |
211 | if not has_changed: | |
212 | return defer.succeed(([], current_stream_id)) | |
213 | ||
99 | 214 | def get_new_messages_for_device_txn(txn): |
100 | 215 | sql = ( |
101 | 216 | "SELECT stream_id, message_json FROM device_inbox" |
181 | 296 | |
182 | 297 | def get_to_device_stream_token(self): |
183 | 298 | return self._device_inbox_id_gen.get_current_token() |
299 | ||
300 | def get_new_device_msgs_for_remote( | |
301 | self, destination, last_stream_id, current_stream_id, limit=100 | |
302 | ): | |
303 | """ | |
304 | Args: | |
305 | destination(str): The name of the remote server. | |
306 | last_stream_id(int): The last position of the device message stream | |
307 | that the server sent up to. | |
308 | current_stream_id(int): The current position of the device | |
309 | message stream. | |
310 | Returns: | |
311 | Deferred ([dict], int): List of messages for the device and where | |
312 | in the stream the messages got to. | |
313 | """ | |
314 | ||
315 | has_changed = self._device_federation_outbox_stream_cache.has_entity_changed( | |
316 | destination, last_stream_id | |
317 | ) | |
318 | if not has_changed or last_stream_id == current_stream_id: | |
319 | return defer.succeed(([], current_stream_id)) | |
320 | ||
321 | def get_new_messages_for_remote_destination_txn(txn): | |
322 | sql = ( | |
323 | "SELECT stream_id, messages_json FROM device_federation_outbox" | |
324 | " WHERE destination = ?" | |
325 | " AND ? < stream_id AND stream_id <= ?" | |
326 | " ORDER BY stream_id ASC" | |
327 | " LIMIT ?" | |
328 | ) | |
329 | txn.execute(sql, ( | |
330 | destination, last_stream_id, current_stream_id, limit | |
331 | )) | |
332 | messages = [] | |
333 | for row in txn.fetchall(): | |
334 | stream_pos = row[0] | |
335 | messages.append(ujson.loads(row[1])) | |
336 | if len(messages) < limit: | |
337 | stream_pos = current_stream_id | |
338 | return (messages, stream_pos) | |
339 | ||
340 | return self.runInteraction( | |
341 | "get_new_device_msgs_for_remote", | |
342 | get_new_messages_for_remote_destination_txn, | |
343 | ) | |
344 | ||
345 | def delete_device_msgs_for_remote(self, destination, up_to_stream_id): | |
346 | """Used to delete messages when the remote destination acknowledges | |
347 | their receipt. | |
348 | ||
349 | Args: | |
350 | destination(str): The destination server_name | |
351 | up_to_stream_id(int): Where to delete messages up to. | |
352 | Returns: | |
353 | A deferred that resolves when the messages have been deleted. | |
354 | """ | |
355 | def delete_messages_for_remote_destination_txn(txn): | |
356 | sql = ( | |
357 | "DELETE FROM device_federation_outbox" | |
358 | " WHERE destination = ?" | |
359 | " AND stream_id <= ?" | |
360 | ) | |
361 | txn.execute(sql, (destination, up_to_stream_id)) | |
362 | ||
363 | return self.runInteraction( | |
364 | "delete_device_msgs_for_remote", | |
365 | delete_messages_for_remote_destination_txn | |
366 | ) |
53 | 53 | or_ignore=ignore_if_known, |
54 | 54 | ) |
55 | 55 | except Exception as e: |
56 | logger.error("store_device with device_id=%s failed: %s", | |
57 | device_id, e) | |
56 | logger.error("store_device with device_id=%s(%r) user_id=%s(%r)" | |
57 | " display_name=%s(%r) failed: %s", | |
58 | type(device_id).__name__, device_id, | |
59 | type(user_id).__name__, user_id, | |
60 | type(initial_device_display_name).__name__, | |
61 | initial_device_display_name, e) | |
58 | 62 | raise StoreError(500, "Problem storing device.") |
59 | 63 | |
60 | 64 | def get_device(self, user_id, device_id): |
15 | 15 | from twisted.internet import defer |
16 | 16 | |
17 | 17 | from ._base import SQLBaseStore |
18 | from synapse.api.errors import StoreError | |
18 | 19 | from synapse.util.caches.descriptors import cached |
19 | 20 | from unpaddedbase64 import encode_base64 |
20 | 21 | |
34 | 35 | of the event graphs. These are used to generate the parents for new events |
35 | 36 | and backfilling from another server respectively. |
36 | 37 | """ |
38 | ||
39 | def __init__(self, hs): | |
40 | super(EventFederationStore, self).__init__(hs) | |
41 | ||
42 | hs.get_clock().looping_call( | |
43 | self._delete_old_forward_extrem_cache, 60 * 60 * 1000 | |
44 | ) | |
37 | 45 | |
38 | 46 | def get_auth_chain(self, event_ids): |
39 | 47 | return self.get_auth_chain_ids(event_ids).addCallback(self._get_events) |
269 | 277 | ] |
270 | 278 | ) |
271 | 279 | |
280 | # We now insert into stream_ordering_to_exterm a mapping from room_id, | |
281 | # new stream_ordering to new forward extremeties in the room. | |
282 | # This allows us to later efficiently look up the forward extremeties | |
283 | # for a room before a given stream_ordering | |
284 | max_stream_ord = max( | |
285 | ev.internal_metadata.stream_ordering for ev in events | |
286 | ) | |
287 | new_extrem = {} | |
288 | for room_id in events_by_room: | |
289 | event_ids = self._simple_select_onecol_txn( | |
290 | txn, | |
291 | table="event_forward_extremities", | |
292 | keyvalues={"room_id": room_id}, | |
293 | retcol="event_id", | |
294 | ) | |
295 | new_extrem[room_id] = event_ids | |
296 | ||
297 | self._simple_insert_many_txn( | |
298 | txn, | |
299 | table="stream_ordering_to_exterm", | |
300 | values=[ | |
301 | { | |
302 | "room_id": room_id, | |
303 | "event_id": event_id, | |
304 | "stream_ordering": max_stream_ord, | |
305 | } | |
306 | for room_id, extrem_evs in new_extrem.items() | |
307 | for event_id in extrem_evs | |
308 | ] | |
309 | ) | |
310 | ||
272 | 311 | query = ( |
273 | 312 | "INSERT INTO event_backward_extremities (event_id, room_id)" |
274 | 313 | " SELECT ?, ? WHERE NOT EXISTS (" |
304 | 343 | self.get_latest_event_ids_in_room.invalidate, (room_id,) |
305 | 344 | ) |
306 | 345 | |
346 | def get_forward_extremeties_for_room(self, room_id, stream_ordering): | |
347 | # We want to make the cache more effective, so we clamp to the last | |
348 | # change before the given ordering. | |
349 | last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id) | |
350 | ||
351 | # We don't always have a full stream_to_exterm_id table, e.g. after | |
352 | # the upgrade that introduced it, so we make sure we never ask for a | |
353 | # try and pin to a stream_ordering from before a restart | |
354 | last_change = max(self._stream_order_on_start, last_change) | |
355 | ||
356 | if last_change > self.stream_ordering_month_ago: | |
357 | stream_ordering = min(last_change, stream_ordering) | |
358 | ||
359 | return self._get_forward_extremeties_for_room(room_id, stream_ordering) | |
360 | ||
361 | @cached(max_entries=5000, num_args=2) | |
362 | def _get_forward_extremeties_for_room(self, room_id, stream_ordering): | |
363 | """For a given room_id and stream_ordering, return the forward | |
364 | extremeties of the room at that point in "time". | |
365 | ||
366 | Throws a StoreError if we have since purged the index for | |
367 | stream_orderings from that point. | |
368 | """ | |
369 | ||
370 | if stream_ordering <= self.stream_ordering_month_ago: | |
371 | raise StoreError(400, "stream_ordering too old") | |
372 | ||
373 | sql = (""" | |
374 | SELECT event_id FROM stream_ordering_to_exterm | |
375 | INNER JOIN ( | |
376 | SELECT room_id, MAX(stream_ordering) AS stream_ordering | |
377 | FROM stream_ordering_to_exterm | |
378 | WHERE stream_ordering <= ? GROUP BY room_id | |
379 | ) AS rms USING (room_id, stream_ordering) | |
380 | WHERE room_id = ? | |
381 | """) | |
382 | ||
383 | def get_forward_extremeties_for_room_txn(txn): | |
384 | txn.execute(sql, (stream_ordering, room_id)) | |
385 | rows = txn.fetchall() | |
386 | return [event_id for event_id, in rows] | |
387 | ||
388 | return self.runInteraction( | |
389 | "get_forward_extremeties_for_room", | |
390 | get_forward_extremeties_for_room_txn | |
391 | ) | |
392 | ||
393 | def _delete_old_forward_extrem_cache(self): | |
394 | def _delete_old_forward_extrem_cache_txn(txn): | |
395 | # Delete entries older than a month, while making sure we don't delete | |
396 | # the only entries for a room. | |
397 | sql = (""" | |
398 | DELETE FROM stream_ordering_to_exterm | |
399 | WHERE | |
400 | ( | |
401 | SELECT max(stream_ordering) AS stream_ordering | |
402 | FROM stream_ordering_to_exterm | |
403 | WHERE room_id = stream_ordering_to_exterm.room_id | |
404 | ) > ? | |
405 | AND stream_ordering < ? | |
406 | """) | |
407 | txn.execute( | |
408 | sql, | |
409 | (self.stream_ordering_month_ago, self.stream_ordering_month_ago,) | |
410 | ) | |
411 | return self.runInteraction( | |
412 | "_delete_old_forward_extrem_cache", | |
413 | _delete_old_forward_extrem_cache_txn | |
414 | ) | |
415 | ||
307 | 416 | def get_backfill_events(self, room_id, event_list, limit): |
308 | 417 | """Get a list of Events for a given topic that occurred before (and |
309 | 418 | including) the events in event_list. Return a list of max size `limit` |
25 | 25 | |
26 | 26 | |
27 | 27 | class EventPushActionsStore(SQLBaseStore): |
28 | EPA_HIGHLIGHT_INDEX = "epa_highlight_index" | |
29 | ||
28 | 30 | def __init__(self, hs): |
29 | 31 | self.stream_ordering_month_ago = None |
30 | 32 | super(EventPushActionsStore, self).__init__(hs) |
33 | ||
34 | self.register_background_index_update( | |
35 | self.EPA_HIGHLIGHT_INDEX, | |
36 | index_name="event_push_actions_u_highlight", | |
37 | table="event_push_actions", | |
38 | columns=["user_id", "stream_ordering"], | |
39 | ) | |
31 | 40 | |
32 | 41 | def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples): |
33 | 42 | """ |
337 | 346 | defer.returnValue(notifs[:limit]) |
338 | 347 | |
339 | 348 | @defer.inlineCallbacks |
340 | def get_push_actions_for_user(self, user_id, before=None, limit=50): | |
349 | def get_push_actions_for_user(self, user_id, before=None, limit=50, | |
350 | only_highlight=False): | |
341 | 351 | def f(txn): |
342 | 352 | before_clause = "" |
343 | 353 | if before: |
344 | before_clause = "AND stream_ordering < ?" | |
354 | before_clause = "AND epa.stream_ordering < ?" | |
345 | 355 | args = [user_id, before, limit] |
346 | 356 | else: |
347 | 357 | args = [user_id, limit] |
358 | ||
359 | if only_highlight: | |
360 | if len(before_clause) > 0: | |
361 | before_clause += " " | |
362 | before_clause += "AND epa.highlight = 1" | |
363 | ||
364 | # NB. This assumes event_ids are globally unique since | |
365 | # it makes the query easier to index | |
348 | 366 | sql = ( |
349 | 367 | "SELECT epa.event_id, epa.room_id," |
350 | 368 | " epa.stream_ordering, epa.topological_ordering," |
351 | 369 | " epa.actions, epa.profile_tag, e.received_ts" |
352 | 370 | " FROM event_push_actions epa, events e" |
353 | " WHERE epa.room_id = e.room_id AND epa.event_id = e.event_id" | |
371 | " WHERE epa.event_id = e.event_id" | |
354 | 372 | " AND epa.user_id = ? %s" |
355 | 373 | " ORDER BY epa.stream_ordering DESC" |
356 | 374 | " LIMIT ?" |
186 | 186 | self.register_background_update_handler( |
187 | 187 | self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, |
188 | 188 | self._background_reindex_fields_sender, |
189 | ) | |
190 | ||
191 | self.register_background_index_update( | |
192 | "event_contains_url_index", | |
193 | index_name="event_contains_url_index", | |
194 | table="events", | |
195 | columns=["room_id", "topological_ordering", "stream_ordering"], | |
196 | where_clause="contains_url = true AND outlier = false", | |
189 | 197 | ) |
190 | 198 | |
191 | 199 | self._event_persist_queue = _EventPeristenceQueue() |
496 | 504 | |
497 | 505 | # insert into the state_group, state_groups_state and |
498 | 506 | # event_to_state_groups tables. |
499 | self._store_mult_state_groups_txn(txn, ((event, context),)) | |
507 | try: | |
508 | self._store_mult_state_groups_txn(txn, ((event, context),)) | |
509 | except Exception: | |
510 | logger.exception("") | |
511 | raise | |
500 | 512 | |
501 | 513 | metadata_json = encode_json( |
502 | 514 | event.internal_metadata.get_dict() |
1542 | 1554 | ) |
1543 | 1555 | event_rows = txn.fetchall() |
1544 | 1556 | |
1557 | for event_id, state_key in event_rows: | |
1558 | txn.call_after(self._get_state_group_for_event.invalidate, (event_id,)) | |
1559 | ||
1545 | 1560 | # We calculate the new entries for the backward extremities by finding
1546 | 1561 | # all events that point to events that are to be purged |
1547 | 1562 | txn.execute( |
1581 | 1596 | " GROUP BY state_group HAVING MAX(topological_ordering) < ?", |
1582 | 1597 | (room_id, topological_ordering, topological_ordering) |
1583 | 1598 | ) |
1599 | ||
1584 | 1600 | state_rows = txn.fetchall() |
1601 | state_groups_to_delete = [sg for sg, in state_rows] | |
1602 | ||
1603 | # Now we get all the state groups that rely on these state groups | |
1604 | new_state_edges = [] | |
1605 | chunks = [ | |
1606 | state_groups_to_delete[i:i + 100] | |
1607 | for i in xrange(0, len(state_groups_to_delete), 100) | |
1608 | ] | |
1609 | for chunk in chunks: | |
1610 | rows = self._simple_select_many_txn( | |
1611 | txn, | |
1612 | table="state_group_edges", | |
1613 | column="prev_state_group", | |
1614 | iterable=chunk, | |
1615 | retcols=["state_group"], | |
1616 | keyvalues={}, | |
1617 | ) | |
1618 | new_state_edges.extend(row["state_group"] for row in rows) | |
1619 | ||
1620 | # Now we turn the state groups that reference to-be-deleted state groups | |
1621 | # to non delta versions. | |
1622 | for new_state_edge in new_state_edges: | |
1623 | curr_state = self._get_state_groups_from_groups_txn( | |
1624 | txn, [new_state_edge], types=None | |
1625 | ) | |
1626 | curr_state = curr_state[new_state_edge] | |
1627 | ||
1628 | self._simple_delete_txn( | |
1629 | txn, | |
1630 | table="state_groups_state", | |
1631 | keyvalues={ | |
1632 | "state_group": new_state_edge, | |
1633 | } | |
1634 | ) | |
1635 | ||
1636 | self._simple_delete_txn( | |
1637 | txn, | |
1638 | table="state_group_edges", | |
1639 | keyvalues={ | |
1640 | "state_group": new_state_edge, | |
1641 | } | |
1642 | ) | |
1643 | ||
1644 | self._simple_insert_many_txn( | |
1645 | txn, | |
1646 | table="state_groups_state", | |
1647 | values=[ | |
1648 | { | |
1649 | "state_group": new_state_edge, | |
1650 | "room_id": room_id, | |
1651 | "type": key[0], | |
1652 | "state_key": key[1], | |
1653 | "event_id": state_id, | |
1654 | } | |
1655 | for key, state_id in curr_state.items() | |
1656 | ], | |
1657 | ) | |
1658 | ||
1585 | 1659 | txn.executemany( |
1586 | 1660 | "DELETE FROM state_groups_state WHERE state_group = ?", |
1587 | 1661 | state_rows |
24 | 24 | |
25 | 25 | # Remember to update this number every time a change is made to database |
26 | 26 | # schema files, so the users will be informed on server restarts. |
27 | SCHEMA_VERSION = 34 | |
27 | SCHEMA_VERSION = 35 | |
28 | 28 | |
29 | 29 | dir_path = os.path.abspath(os.path.dirname(__file__)) |
30 | 30 | |
241 | 241 | module = imp.load_source( |
242 | 242 | module_name, absolute_path, python_file |
243 | 243 | ) |
244 | logger.debug("Running script %s", relative_path) | |
244 | logger.info("Running script %s", relative_path) | |
245 | 245 | module.run_create(cur, database_engine) |
246 | 246 | if not is_empty: |
247 | 247 | module.run_upgrade(cur, database_engine, config=config) |
252 | 252 | pass |
253 | 253 | elif ext == ".sql": |
254 | 254 | # A plain old .sql file, just read and execute it |
255 | logger.debug("Applying schema %s", relative_path) | |
255 | logger.info("Applying schema %s", relative_path) | |
256 | 256 | executescript(cur, absolute_path) |
257 | 257 | else: |
258 | 258 | # Not a valid delta file. |
47 | 47 | StoreError if the room could not be stored. |
48 | 48 | """ |
49 | 49 | try: |
50 | yield self._simple_insert( | |
51 | "rooms", | |
52 | { | |
53 | "room_id": room_id, | |
54 | "creator": room_creator_user_id, | |
55 | "is_public": is_public, | |
56 | }, | |
57 | desc="store_room", | |
58 | ) | |
50 | def store_room_txn(txn, next_id): | |
51 | self._simple_insert_txn( | |
52 | txn, | |
53 | "rooms", | |
54 | { | |
55 | "room_id": room_id, | |
56 | "creator": room_creator_user_id, | |
57 | "is_public": is_public, | |
58 | }, | |
59 | ) | |
60 | if is_public: | |
61 | self._simple_insert_txn( | |
62 | txn, | |
63 | table="public_room_list_stream", | |
64 | values={ | |
65 | "stream_id": next_id, | |
66 | "room_id": room_id, | |
67 | "visibility": is_public, | |
68 | } | |
69 | ) | |
70 | with self._public_room_id_gen.get_next() as next_id: | |
71 | yield self.runInteraction( | |
72 | "store_room_txn", | |
73 | store_room_txn, next_id, | |
74 | ) | |
59 | 75 | except Exception as e: |
60 | 76 | logger.error("store_room with room_id=%s failed: %s", room_id, e) |
61 | 77 | raise StoreError(500, "Problem creating room.") |
76 | 92 | allow_none=True, |
77 | 93 | ) |
78 | 94 | |
95 | @defer.inlineCallbacks | |
79 | 96 | def set_room_is_public(self, room_id, is_public): |
80 | return self._simple_update_one( | |
81 | table="rooms", | |
82 | keyvalues={"room_id": room_id}, | |
83 | updatevalues={"is_public": is_public}, | |
84 | desc="set_room_is_public", | |
85 | ) | |
97 | def set_room_is_public_txn(txn, next_id): | |
98 | self._simple_update_one_txn( | |
99 | txn, | |
100 | table="rooms", | |
101 | keyvalues={"room_id": room_id}, | |
102 | updatevalues={"is_public": is_public}, | |
103 | ) | |
104 | ||
105 | entries = self._simple_select_list_txn( | |
106 | txn, | |
107 | table="public_room_list_stream", | |
108 | keyvalues={"room_id": room_id}, | |
109 | retcols=("stream_id", "visibility"), | |
110 | ) | |
111 | ||
112 | entries.sort(key=lambda r: r["stream_id"]) | |
113 | ||
114 | add_to_stream = True | |
115 | if entries: | |
116 | add_to_stream = bool(entries[-1]["visibility"]) != is_public | |
117 | ||
118 | if add_to_stream: | |
119 | self._simple_insert_txn( | |
120 | txn, | |
121 | table="public_room_list_stream", | |
122 | values={ | |
123 | "stream_id": next_id, | |
124 | "room_id": room_id, | |
125 | "visibility": is_public, | |
126 | } | |
127 | ) | |
128 | ||
129 | with self._public_room_id_gen.get_next() as next_id: | |
130 | yield self.runInteraction( | |
131 | "set_room_is_public", | |
132 | set_room_is_public_txn, next_id, | |
133 | ) | |
86 | 134 | |
87 | 135 | def get_public_room_ids(self): |
88 | 136 | return self._simple_select_onecol( |
206 | 254 | }, |
207 | 255 | desc="add_event_report" |
208 | 256 | ) |
257 | ||
258 | def get_current_public_room_stream_id(self): | |
259 | return self._public_room_id_gen.get_current_token() | |
260 | ||
261 | def get_public_room_ids_at_stream_id(self, stream_id): | |
262 | return self.runInteraction( | |
263 | "get_public_room_ids_at_stream_id", | |
264 | self.get_public_room_ids_at_stream_id_txn, stream_id | |
265 | ) | |
266 | ||
267 | def get_public_room_ids_at_stream_id_txn(self, txn, stream_id): | |
268 | return { | |
269 | rm | |
270 | for rm, vis in self.get_published_at_stream_id_txn(txn, stream_id).items() | |
271 | if vis | |
272 | } | |
273 | ||
274 | def get_published_at_stream_id_txn(self, txn, stream_id): | |
275 | sql = (""" | |
276 | SELECT room_id, visibility FROM public_room_list_stream | |
277 | INNER JOIN ( | |
278 | SELECT room_id, max(stream_id) AS stream_id | |
279 | FROM public_room_list_stream | |
280 | WHERE stream_id <= ? | |
281 | GROUP BY room_id | |
282 | ) grouped USING (room_id, stream_id) | |
283 | """) | |
284 | ||
285 | txn.execute(sql, (stream_id,)) | |
286 | return dict(txn.fetchall()) | |
287 | ||
288 | def get_public_room_changes(self, prev_stream_id, new_stream_id): | |
289 | def get_public_room_changes_txn(txn): | |
290 | then_rooms = self.get_public_room_ids_at_stream_id_txn(txn, prev_stream_id) | |
291 | ||
292 | now_rooms_dict = self.get_published_at_stream_id_txn(txn, new_stream_id) | |
293 | ||
294 | now_rooms_visible = set( | |
295 | rm for rm, vis in now_rooms_dict.items() if vis | |
296 | ) | |
297 | now_rooms_not_visible = set( | |
298 | rm for rm, vis in now_rooms_dict.items() if not vis | |
299 | ) | |
300 | ||
301 | newly_visible = now_rooms_visible - then_rooms | |
302 | newly_unpublished = now_rooms_not_visible & then_rooms | |
303 | ||
304 | return newly_visible, newly_unpublished | |
305 | ||
306 | return self.runInteraction( | |
307 | "get_public_room_changes", get_public_room_changes_txn | |
308 | ) | |
309 | ||
310 | def get_all_new_public_rooms(self, prev_id, current_id, limit): | |
311 | def get_all_new_public_rooms(txn): | |
312 | sql = (""" | |
313 | SELECT stream_id, room_id, visibility FROM public_room_list_stream | |
314 | WHERE stream_id > ? AND stream_id <= ? | |
315 | ORDER BY stream_id ASC | |
316 | LIMIT ? | |
317 | """) | |
318 | ||
319 | txn.execute(sql, (prev_id, current_id, limit,)) | |
320 | return txn.fetchall() | |
321 | ||
322 | return self.runInteraction( | |
323 | "get_all_new_public_rooms", get_all_new_public_rooms | |
324 | ) |
12 | 12 | * limitations under the License. |
13 | 13 | */ |
14 | 14 | |
15 | /** Using CREATE INDEX directly is deprecated in favour of using background | |
16 | * update see synapse/storage/schema/delta/33/access_tokens_device_index.sql | |
17 | * and synapse/storage/registration.py for an example using | |
18 | * "access_tokens_device_index" **/ | |
15 | 19 | CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( |
16 | 20 | room_id, stream_id |
17 | 21 | ); |
12 | 12 | * limitations under the License. |
13 | 13 | */ |
14 | 14 | |
15 | /** Using CREATE INDEX directly is deprecated in favour of using background | |
16 | * update see synapse/storage/schema/delta/33/access_tokens_device_index.sql | |
17 | * and synapse/storage/registration.py for an example using | |
18 | * "access_tokens_device_index" **/ | |
15 | 19 | CREATE INDEX events_room_stream on events(room_id, stream_ordering); |
12 | 12 | * limitations under the License. |
13 | 13 | */ |
14 | 14 | |
15 | /** Using CREATE INDEX directly is deprecated in favour of using background | |
16 | * update see synapse/storage/schema/delta/33/access_tokens_device_index.sql | |
17 | * and synapse/storage/registration.py for an example using | |
18 | * "access_tokens_device_index" **/ | |
15 | 19 | CREATE INDEX public_room_index on rooms(is_public); |
12 | 12 | * limitations under the License. |
13 | 13 | */ |
14 | 14 | |
15 | /** Using CREATE INDEX directly is deprecated in favour of using background | |
16 | * update see synapse/storage/schema/delta/33/access_tokens_device_index.sql | |
17 | * and synapse/storage/registration.py for an example using | |
18 | * "access_tokens_device_index" **/ | |
15 | 19 | CREATE INDEX receipts_linearized_user ON receipts_linearized( |
16 | 20 | user_id |
17 | 21 | ); |
25 | 25 | |
26 | 26 | UPDATE event_push_actions SET notif = 1, highlight = 0; |
27 | 27 | |
28 | /** Using CREATE INDEX directly is deprecated in favour of using background | |
29 | * update see synapse/storage/schema/delta/33/access_tokens_device_index.sql | |
30 | * and synapse/storage/registration.py for an example using | |
31 | * "access_tokens_device_index" **/ | |
28 | 32 | CREATE INDEX event_push_actions_rm_tokens on event_push_actions( |
29 | 33 | user_id, room_id, topological_ordering, stream_ordering |
30 | 34 | ); |
12 | 12 | * limitations under the License. |
13 | 13 | */ |
14 | 14 | |
15 | /** Using CREATE INDEX directly is deprecated in favour of using background | |
16 | * update see synapse/storage/schema/delta/33/access_tokens_device_index.sql | |
17 | * and synapse/storage/registration.py for an example using | |
18 | * "access_tokens_device_index" **/ | |
15 | 19 | CREATE INDEX event_push_actions_stream_ordering on event_push_actions( |
16 | 20 | stream_ordering, user_id |
17 | 21 | ); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | ||
16 | ALTER TABLE background_updates ADD COLUMN depends_on TEXT; | |
17 | ||
18 | INSERT into background_updates (update_name, progress_json, depends_on) | |
19 | VALUES ('state_group_state_type_index', '{}', 'state_group_state_deduplication'); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | INSERT into background_updates (update_name, progress_json) | |
16 | VALUES ('event_contains_url_index', '{}'); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | DROP TABLE IF EXISTS device_federation_outbox; | |
16 | CREATE TABLE device_federation_outbox ( | |
17 | destination TEXT NOT NULL, | |
18 | stream_id BIGINT NOT NULL, | |
19 | queued_ts BIGINT NOT NULL, | |
20 | messages_json TEXT NOT NULL | |
21 | ); | |
22 | ||
23 | ||
24 | DROP INDEX IF EXISTS device_federation_outbox_destination_id; | |
25 | CREATE INDEX device_federation_outbox_destination_id | |
26 | ON device_federation_outbox(destination, stream_id); | |
27 | ||
28 | ||
29 | DROP TABLE IF EXISTS device_federation_inbox; | |
30 | CREATE TABLE device_federation_inbox ( | |
31 | origin TEXT NOT NULL, | |
32 | message_id TEXT NOT NULL, | |
33 | received_ts BIGINT NOT NULL | |
34 | ); | |
35 | ||
36 | DROP INDEX IF EXISTS device_federation_inbox_sender_id; | |
37 | CREATE INDEX device_federation_inbox_sender_id | |
38 | ON device_federation_inbox(origin, message_id); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | CREATE TABLE device_max_stream_id ( | |
16 | stream_id BIGINT NOT NULL | |
17 | ); | |
18 | ||
19 | INSERT INTO device_max_stream_id (stream_id) | |
20 | SELECT COALESCE(MAX(stream_id), 0) FROM device_inbox; |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | INSERT into background_updates (update_name, progress_json) | |
16 | VALUES ('epa_highlight_index', '{}'); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | ||
16 | CREATE TABLE public_room_list_stream ( | |
17 | stream_id BIGINT NOT NULL, | |
18 | room_id TEXT NOT NULL, | |
19 | visibility BOOLEAN NOT NULL | |
20 | ); | |
21 | ||
22 | INSERT INTO public_room_list_stream (stream_id, room_id, visibility) | |
23 | SELECT 1, room_id, is_public FROM rooms | |
24 | WHERE is_public = CAST(1 AS BOOLEAN); | |
25 | ||
26 | CREATE INDEX public_room_list_stream_idx on public_room_list_stream( | |
27 | stream_id | |
28 | ); | |
29 | ||
30 | CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( | |
31 | room_id, stream_id | |
32 | ); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | CREATE TABLE state_group_edges( | |
16 | state_group BIGINT NOT NULL, | |
17 | prev_state_group BIGINT NOT NULL | |
18 | ); | |
19 | ||
20 | CREATE INDEX state_group_edges_idx ON state_group_edges(state_group); | |
21 | CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | INSERT into background_updates (update_name, progress_json) | |
16 | VALUES ('state_group_state_deduplication', '{}'); |
0 | /* Copyright 2016 OpenMarket Ltd | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | ||
16 | CREATE TABLE stream_ordering_to_exterm ( | |
17 | stream_ordering BIGINT NOT NULL, | |
18 | room_id TEXT NOT NULL, | |
19 | event_id TEXT NOT NULL | |
20 | ); | |
21 | ||
22 | INSERT INTO stream_ordering_to_exterm (stream_ordering, room_id, event_id) | |
23 | SELECT stream_ordering, room_id, event_id FROM event_forward_extremities | |
24 | INNER JOIN ( | |
25 | SELECT room_id, max(stream_ordering) as stream_ordering FROM events | |
26 | INNER JOIN event_forward_extremities USING (room_id, event_id) | |
27 | GROUP BY room_id | |
28 | ) AS rms USING (room_id); | |
29 | ||
30 | CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( | |
31 | stream_ordering | |
32 | ); | |
33 | ||
34 | CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( | |
35 | room_id, stream_ordering | |
36 | ); |
15 | 15 | from ._base import SQLBaseStore |
16 | 16 | from synapse.util.caches.descriptors import cached, cachedList |
17 | 17 | from synapse.util.caches import intern_string |
18 | from synapse.storage.engines import PostgresEngine | |
18 | 19 | |
19 | 20 | from twisted.internet import defer |
20 | 21 | |
21 | 22 | import logging |
22 | 23 | |
23 | 24 | logger = logging.getLogger(__name__) |
25 | ||
26 | ||
27 | MAX_STATE_DELTA_HOPS = 100 | |
24 | 28 | |
25 | 29 | |
26 | 30 | class StateStore(SQLBaseStore): |
42 | 46 | * `state_groups_state`: Maps state group to state events. |
43 | 47 | """ |
44 | 48 | |
49 | STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication" | |
50 | STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" | |
51 | ||
52 | def __init__(self, hs): | |
53 | super(StateStore, self).__init__(hs) | |
54 | self.register_background_update_handler( | |
55 | self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, | |
56 | self._background_deduplicate_state, | |
57 | ) | |
58 | self.register_background_update_handler( | |
59 | self.STATE_GROUP_INDEX_UPDATE_NAME, | |
60 | self._background_index_state, | |
61 | ) | |
62 | ||
45 | 63 | @defer.inlineCallbacks |
46 | 64 | def get_state_groups_ids(self, room_id, event_ids): |
47 | 65 | if not event_ids: |
102 | 120 | state_groups[event.event_id] = context.state_group |
103 | 121 | |
104 | 122 | if self._have_persisted_state_group_txn(txn, context.state_group): |
105 | logger.info("Already persisted state_group: %r", context.state_group) | |
106 | 123 | continue |
107 | ||
108 | state_event_ids = dict(context.current_state_ids) | |
109 | 124 | |
110 | 125 | self._simple_insert_txn( |
111 | 126 | txn, |
117 | 132 | }, |
118 | 133 | ) |
119 | 134 | |
120 | self._simple_insert_many_txn( | |
121 | txn, | |
122 | table="state_groups_state", | |
123 | values=[ | |
124 | { | |
135 | # We persist as a delta if we can, while also ensuring the chain | |
136 | # of deltas isn't too long, as otherwise read performance degrades. | 
137 | if context.prev_group: | |
138 | potential_hops = self._count_state_group_hops_txn( | |
139 | txn, context.prev_group | |
140 | ) | |
141 | if context.prev_group and potential_hops < MAX_STATE_DELTA_HOPS: | |
142 | self._simple_insert_txn( | |
143 | txn, | |
144 | table="state_group_edges", | |
145 | values={ | |
125 | 146 | "state_group": context.state_group, |
126 | "room_id": event.room_id, | |
127 | "type": key[0], | |
128 | "state_key": key[1], | |
129 | "event_id": state_id, | |
130 | } | |
131 | for key, state_id in state_event_ids.items() | |
132 | ], | |
133 | ) | |
147 | "prev_state_group": context.prev_group, | |
148 | }, | |
149 | ) | |
150 | ||
151 | self._simple_insert_many_txn( | |
152 | txn, | |
153 | table="state_groups_state", | |
154 | values=[ | |
155 | { | |
156 | "state_group": context.state_group, | |
157 | "room_id": event.room_id, | |
158 | "type": key[0], | |
159 | "state_key": key[1], | |
160 | "event_id": state_id, | |
161 | } | |
162 | for key, state_id in context.delta_ids.items() | |
163 | ], | |
164 | ) | |
165 | else: | |
166 | self._simple_insert_many_txn( | |
167 | txn, | |
168 | table="state_groups_state", | |
169 | values=[ | |
170 | { | |
171 | "state_group": context.state_group, | |
172 | "room_id": event.room_id, | |
173 | "type": key[0], | |
174 | "state_key": key[1], | |
175 | "event_id": state_id, | |
176 | } | |
177 | for key, state_id in context.current_state_ids.items() | |
178 | ], | |
179 | ) | |
134 | 180 | |
135 | 181 | self._simple_insert_many_txn( |
136 | 182 | txn, |
144 | 190 | ], |
145 | 191 | ) |
146 | 192 | |
193 | def _count_state_group_hops_txn(self, txn, state_group): | |
194 | """Given a state group, count how many hops there are in the tree. | |
195 | ||
196 | This is used to ensure the delta chains don't get too long. | |
197 | """ | |
198 | if isinstance(self.database_engine, PostgresEngine): | |
199 | sql = (""" | |
200 | WITH RECURSIVE state(state_group) AS ( | |
201 | VALUES(?::bigint) | |
202 | UNION ALL | |
203 | SELECT prev_state_group FROM state_group_edges e, state s | |
204 | WHERE s.state_group = e.state_group | |
205 | ) | |
206 | SELECT count(*) FROM state; | |
207 | """) | |
208 | ||
209 | txn.execute(sql, (state_group,)) | |
210 | row = txn.fetchone() | |
211 | if row and row[0]: | |
212 | return row[0] | |
213 | else: | |
214 | return 0 | |
215 | else: | |
216 | # We don't use WITH RECURSIVE on sqlite3 as there are distributions | |
217 | # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) | |
218 | next_group = state_group | |
219 | count = 0 | |
220 | ||
221 | while next_group: | |
222 | next_group = self._simple_select_one_onecol_txn( | |
223 | txn, | |
224 | table="state_group_edges", | |
225 | keyvalues={"state_group": next_group}, | |
226 | retcol="prev_state_group", | |
227 | allow_none=True, | |
228 | ) | |
229 | if next_group: | |
230 | count += 1 | |
231 | ||
232 | return count | |
233 | ||
147 | 234 | @defer.inlineCallbacks |
148 | 235 | def get_current_state(self, room_id, event_type=None, state_key=""): |
149 | 236 | if event_type and state_key is not None: |
205 | 292 | def _get_state_groups_from_groups(self, groups, types): |
206 | 293 | """Returns dictionary state_group -> (dict of (type, state_key) -> event id) |
207 | 294 | """ |
208 | def f(txn, groups): | |
209 | if types is not None: | |
210 | where_clause = "AND (%s)" % ( | |
211 | " OR ".join(["(type = ? AND state_key = ?)"] * len(types)), | |
212 | ) | |
213 | else: | |
214 | where_clause = "" | |
215 | ||
216 | sql = ( | |
217 | "SELECT state_group, event_id, type, state_key" | |
218 | " FROM state_groups_state WHERE" | |
219 | " state_group IN (%s) %s" % ( | |
220 | ",".join("?" for _ in groups), | |
221 | where_clause, | |
222 | ) | |
223 | ) | |
224 | ||
225 | args = list(groups) | |
226 | if types is not None: | |
227 | args.extend([i for typ in types for i in typ]) | |
228 | ||
229 | txn.execute(sql, args) | |
230 | rows = self.cursor_to_dict(txn) | |
231 | ||
232 | results = {group: {} for group in groups} | |
233 | for row in rows: | |
234 | key = (row["type"], row["state_key"]) | |
235 | results[row["state_group"]][key] = row["event_id"] | |
236 | return results | |
237 | ||
238 | 295 | results = {} |
239 | 296 | |
240 | 297 | chunks = [groups[i:i + 100] for i in xrange(0, len(groups), 100)] |
241 | 298 | for chunk in chunks: |
242 | 299 | res = yield self.runInteraction( |
243 | 300 | "_get_state_groups_from_groups", |
244 | f, chunk | |
301 | self._get_state_groups_from_groups_txn, chunk, types, | |
245 | 302 | ) |
246 | 303 | results.update(res) |
247 | 304 | |
248 | 305 | defer.returnValue(results) |
306 | ||
307 | def _get_state_groups_from_groups_txn(self, txn, groups, types=None): | |
308 | results = {group: {} for group in groups} | |
309 | if isinstance(self.database_engine, PostgresEngine): | |
310 | # Temporarily disable sequential scans in this transaction. This is | |
311 | # a temporary hack until we can add the right indices in | |
312 | txn.execute("SET LOCAL enable_seqscan=off") | |
313 | ||
314 | # The below query walks the state_group tree so that the "state" | |
315 | # table includes all state_groups in the tree. It then joins | |
316 | # against `state_groups_state` to fetch the latest state. | |
317 | # It assumes that previous state groups are always numerically | |
318 | # lesser. | |
319 | # The PARTITION is used to get the event_id in the greatest state | |
320 | # group for the given type, state_key. | |
321 | # This may return multiple rows per (type, state_key), but last_value | |
322 | # should be the same. | |
323 | sql = (""" | |
324 | WITH RECURSIVE state(state_group) AS ( | |
325 | VALUES(?::bigint) | |
326 | UNION ALL | |
327 | SELECT prev_state_group FROM state_group_edges e, state s | |
328 | WHERE s.state_group = e.state_group | |
329 | ) | |
330 | SELECT type, state_key, last_value(event_id) OVER ( | |
331 | PARTITION BY type, state_key ORDER BY state_group ASC | |
332 | ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING | |
333 | ) AS event_id FROM state_groups_state | |
334 | WHERE state_group IN ( | |
335 | SELECT state_group FROM state | |
336 | ) | |
337 | %s | |
338 | """) | |
339 | ||
340 | # Turns out that postgres doesn't like doing a list of OR's and | |
341 | # is about 1000x slower, so we just issue a query for each specific | |
342 | # type separately. | 
343 | if types: | |
344 | clause_to_args = [ | |
345 | ( | |
346 | "AND type = ? AND state_key = ?", | |
347 | (etype, state_key) | |
348 | ) | |
349 | for etype, state_key in types | |
350 | ] | |
351 | else: | |
352 | # If types is None we fetch all the state, and so just use an | |
353 | # empty where clause with no extra args. | |
354 | clause_to_args = [("", [])] | |
355 | ||
356 | for where_clause, where_args in clause_to_args: | |
357 | for group in groups: | |
358 | args = [group] | |
359 | args.extend(where_args) | |
360 | ||
361 | txn.execute(sql % (where_clause,), args) | |
362 | rows = self.cursor_to_dict(txn) | |
363 | for row in rows: | |
364 | key = (row["type"], row["state_key"]) | |
365 | results[group][key] = row["event_id"] | |
366 | else: | |
367 | if types is not None: | |
368 | where_clause = "AND (%s)" % ( | |
369 | " OR ".join(["(type = ? AND state_key = ?)"] * len(types)), | |
370 | ) | |
371 | else: | |
372 | where_clause = "" | |
373 | ||
374 | # We don't use WITH RECURSIVE on sqlite3 as there are distributions | |
375 | # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) | |
376 | for group in groups: | |
377 | group_tree = [group] | |
378 | next_group = group | |
379 | ||
380 | while next_group: | |
381 | next_group = self._simple_select_one_onecol_txn( | |
382 | txn, | |
383 | table="state_group_edges", | |
384 | keyvalues={"state_group": next_group}, | |
385 | retcol="prev_state_group", | |
386 | allow_none=True, | |
387 | ) | |
388 | if next_group: | |
389 | group_tree.append(next_group) | |
390 | ||
391 | sql = (""" | |
392 | SELECT type, state_key, event_id FROM state_groups_state | |
393 | INNER JOIN ( | |
394 | SELECT type, state_key, max(state_group) as state_group | |
395 | FROM state_groups_state | |
396 | WHERE state_group IN (%s) %s | |
397 | GROUP BY type, state_key | |
398 | ) USING (type, state_key, state_group); | |
399 | """) % (",".join("?" for _ in group_tree), where_clause,) | |
400 | ||
401 | args = list(group_tree) | |
402 | if types is not None: | |
403 | args.extend([i for typ in types for i in typ]) | |
404 | ||
405 | txn.execute(sql, args) | |
406 | rows = self.cursor_to_dict(txn) | |
407 | for row in rows: | |
408 | key = (row["type"], row["state_key"]) | |
409 | results[group][key] = row["event_id"] | |
410 | ||
411 | return results | |
249 | 412 | |
250 | 413 | @defer.inlineCallbacks |
251 | 414 | def get_state_for_events(self, event_ids, types): |
503 | 666 | |
504 | 667 | defer.returnValue(results) |
505 | 668 | |
def get_all_new_state_groups(self, last_id, current_id, limit):
    """Fetch state groups created in the id range ``(last_id, current_id]``.

    Used by replication to stream new state groups. Returns a deferred
    pair ``(groups, state_group_state)`` where ``groups`` is at most
    ``limit`` rows of ``(id, room_id, event_id)`` from ``state_groups``,
    and ``state_group_state`` is every ``state_groups_state`` row whose
    state_group falls inside the id span actually returned.
    """
    def _get_all_new_state_groups_txn(txn):
        # Page through new state groups in ascending id order.
        txn.execute(
            "SELECT id, room_id, event_id FROM state_groups"
            " WHERE ? < id AND id <= ? ORDER BY id LIMIT ?",
            (last_id, current_id, limit),
        )
        groups = txn.fetchall()

        if not groups:
            return ([], [])

        # Only fetch state rows for the span of ids we actually returned,
        # since `limit` may have truncated the page.
        first_group = groups[0][0]
        final_group = groups[-1][0]
        txn.execute(
            "SELECT state_group, type, state_key, event_id"
            " FROM state_groups_state"
            " WHERE ? <= state_group AND state_group <= ?",
            (first_group, final_group),
        )
        return (groups, txn.fetchall())

    return self.runInteraction(
        "get_all_new_state_groups", _get_all_new_state_groups_txn
    )
532 | ||
def get_next_state_group(self):
    """Allocate and return a fresh state group id from the id generator."""
    id_gen = self._state_groups_id_gen
    return id_gen.get_next()
671 | ||
@defer.inlineCallbacks
def _background_deduplicate_state(self, progress, batch_size):
    """This background update will slowly deduplicate state by reencoding
    them as deltas.

    Walks state_groups in ascending id order. For each group it tries to
    find the newest older group in the same room; when the current group's
    keys are a superset of that previous group's, it replaces the group's
    full rows in ``state_groups_state`` with an edge in
    ``state_group_edges`` plus only the rows that differ (a delta).

    Args:
        progress (dict): persisted progress containing ``last_state_group``,
            ``rows_inserted`` and ``max_group``.
        batch_size (int): requested number of items; scaled down by
            BATCH_SIZE_SCALE_FACTOR since each item here is a whole group.

    Returns:
        Deferred[int]: the number of items processed, rescaled back up for
        the background-update machinery.
    """
    last_state_group = progress.get("last_state_group", 0)
    rows_inserted = progress.get("rows_inserted", 0)
    max_group = progress.get("max_group", None)

    # Deduplicating one state group is far more work than a typical
    # background-update item, so shrink the batch accordingly.
    BATCH_SIZE_SCALE_FACTOR = 100

    batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))

    if max_group is None:
        # First run: pin the end point to the current max id so the update
        # terminates even while new groups keep being created.
        rows = yield self._execute(
            "_background_deduplicate_state", None,
            "SELECT coalesce(max(id), 0) FROM state_groups",
        )
        max_group = rows[0][0]

    def reindex_txn(txn):
        # Returns (finished, items_processed).
        new_last_state_group = last_state_group
        for count in xrange(batch_size):
            # Fetch the next state group after the one we last processed.
            txn.execute(
                "SELECT id, room_id FROM state_groups"
                " WHERE ? < id AND id <= ?"
                " ORDER BY id ASC"
                " LIMIT 1",
                (new_last_state_group, max_group,)
            )
            row = txn.fetchone()
            if row:
                state_group, room_id = row

            if not row or not state_group:
                # No more groups in range: the whole update is done.
                return True, count

            txn.execute(
                "SELECT state_group FROM state_group_edges"
                " WHERE state_group = ?",
                (state_group,)
            )

            # If we reach a point where we've already started inserting
            # edges we should stop.
            if txn.fetchall():
                return True, count

            # Candidate previous group: the newest older group in the
            # same room (0 if there is none).
            txn.execute(
                "SELECT coalesce(max(id), 0) FROM state_groups"
                " WHERE id < ? AND room_id = ?",
                (state_group, room_id,)
            )
            prev_group, = txn.fetchone()
            new_last_state_group = state_group

            if prev_group:
                potential_hops = self._count_state_group_hops_txn(
                    txn, prev_group
                )
                if potential_hops >= MAX_STATE_DELTA_HOPS:
                    # We want to ensure chains are at most this long,
                    # otherwise read performance degrades.
                    continue

                prev_state = self._get_state_groups_from_groups_txn(
                    txn, [prev_group], types=None
                )
                prev_state = prev_state[prev_group]

                curr_state = self._get_state_groups_from_groups_txn(
                    txn, [state_group], types=None
                )
                curr_state = curr_state[state_group]

                if not set(prev_state.keys()) - set(curr_state.keys()):
                    # We can only do a delta if the current has a strict super set
                    # of keys

                    # Keep only the (type, state_key) -> event_id entries
                    # that differ from the previous group.
                    delta_state = {
                        key: value for key, value in curr_state.items()
                        if prev_state.get(key, None) != value
                    }

                    self._simple_delete_txn(
                        txn,
                        table="state_group_edges",
                        keyvalues={
                            "state_group": state_group,
                        }
                    )

                    self._simple_insert_txn(
                        txn,
                        table="state_group_edges",
                        values={
                            "state_group": state_group,
                            "prev_state_group": prev_group,
                        }
                    )

                    # Replace the group's full state rows with the delta.
                    self._simple_delete_txn(
                        txn,
                        table="state_groups_state",
                        keyvalues={
                            "state_group": state_group,
                        }
                    )

                    self._simple_insert_many_txn(
                        txn,
                        table="state_groups_state",
                        values=[
                            {
                                "state_group": state_group,
                                "room_id": room_id,
                                "type": key[0],
                                "state_key": key[1],
                                "event_id": state_id,
                            }
                            for key, state_id in delta_state.items()
                        ],
                    )

        # Full batch processed without running out of groups: persist
        # progress and report not-finished.
        progress = {
            "last_state_group": state_group,
            "rows_inserted": rows_inserted + batch_size,
            "max_group": max_group,
        }

        self._background_update_progress_txn(
            txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
        )

        return False, batch_size

    finished, result = yield self.runInteraction(
        self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
    )

    if finished:
        yield self._end_background_update(self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME)

    defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR)
816 | ||
@defer.inlineCallbacks
def _background_index_state(self, progress, batch_size):
    """Background update: build the (state_group, type, state_key) index
    on ``state_groups_state`` and drop the old ``state_groups_state_id``
    index. Always completes in a single run.
    """
    def reindex_txn(conn):
        # Discard any transaction the connection pool may have opened.
        conn.rollback()
        is_postgres = isinstance(self.database_engine, PostgresEngine)
        if is_postgres:
            # postgres insists on autocommit for the index
            conn.set_session(autocommit=True)
        try:
            cursor = conn.cursor()
            if is_postgres:
                # CONCURRENTLY avoids locking the table while indexing.
                cursor.execute(
                    "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
                    " ON state_groups_state(state_group, type, state_key)"
                )
            else:
                cursor.execute(
                    "CREATE INDEX state_groups_state_type_idx"
                    " ON state_groups_state(state_group, type, state_key)"
                )
            cursor.execute(
                "DROP INDEX IF EXISTS state_groups_state_id"
            )
        finally:
            if is_postgres:
                conn.set_session(autocommit=False)

    yield self.runWithConnection(reindex_txn)

    yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)

    defer.returnValue(1)
530 | 530 | ) |
531 | 531 | defer.returnValue("t%d-%d" % (topo, token)) |
532 | 532 | |
def get_room_max_stream_ordering(self):
    """Return the current maximum room stream ordering token."""
    id_gen = self._stream_id_gen
    return id_gen.get_current_token()
535 | ||
533 | 536 | def get_stream_token_for_event(self, event_id): |
534 | 537 | """The stream token for an event |
535 | 538 | Args: |
120 | 120 | k, r = self._cache.popitem() |
121 | 121 | self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos) |
122 | 122 | self._entity_to_key.pop(r, None) |
123 | ||
def get_max_pos_of_last_change(self, entity):
    """Returns an upper bound of the stream id of the last change to an
    entity.

    Entities that have been evicted from the cache fall back to the
    earliest stream position the cache still knows about, which is a
    safe upper bound for them.
    """
    try:
        return self._entity_to_key[entity]
    except KeyError:
        return self._earliest_known_stream_pos
120 | 120 | |
121 | 121 | self.auth.check_joined_room = check_joined_room |
122 | 122 | |
123 | self.datastore.get_to_device_stream_token = lambda: 0 | |
124 | self.datastore.get_new_device_msgs_for_remote = ( | |
125 | lambda *args, **kargs: ([], 0) | |
126 | ) | |
127 | self.datastore.delete_device_msgs_for_remote = ( | |
128 | lambda *args, **kargs: None | |
129 | ) | |
130 | ||
123 | 131 | # Some local users to test with |
124 | 132 | self.u_apple = UserID.from_string("@apple:test") |
125 | 133 | self.u_banana = UserID.from_string("@banana:test") |