matrix-synapse / 176246f
Merge tag 'upstream/0.18.0' into debian Upstream version 0.18.0 Erik Johnston 7 years ago
80 changed files with 3146 additions and 834 deletions.
0 Changes in synapse v0.18.0 (2016-09-19)
1 =======================================
2
3 The release includes major changes to the state storage database schemas, which
4 significantly reduce database size. Synapse will attempt to upgrade the current
5 data in the background. Servers with large SQLite databases may experience
6 degraded performance while this upgrade is in progress, so you may want to
7 consider migrating to Postgres before upgrading very large SQLite
8 databases.
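
Much of the size reduction comes from storing each state group as a delta
against a previous group instead of as a full snapshot (see PR #1065 below).
A minimal conceptual sketch of that idea; the class and field names mirror the
new EventContext fields (prev_group, delta_ids) introduced in this release,
but the code is illustrative, not Synapse's actual storage layer:

# Conceptual sketch: resolve a state group by applying deltas on top of an
# ancestor, rather than storing the full (event_type, state_key) -> event_id
# map for every group.
class StateGroup(object):
    def __init__(self, group_id, prev_group=None, delta_ids=None, full_state=None):
        self.group_id = group_id
        self.prev_group = prev_group      # parent StateGroup, or None for a root
        self.delta_ids = delta_ids or {}  # {(event_type, state_key): event_id}
        self.full_state = full_state      # only stored for root groups


def resolve_state(group):
    """Walk the prev_group chain and apply the deltas to get the full state."""
    if group.prev_group is None:
        return dict(group.full_state)
    state = resolve_state(group.prev_group)
    state.update(group.delta_ids)
    return state


# A root group stores the full state once; later groups store only what changed.
root = StateGroup(1, full_state={("m.room.create", ""): "$create"})
child = StateGroup(2, prev_group=root,
                   delta_ids={("m.room.member", "@alice:example.com"): "$join"})
assert resolve_state(child) == {
    ("m.room.create", ""): "$create",
    ("m.room.member", "@alice:example.com"): "$join",
}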
9
10
11 Changes:
12
13 * Make public room search case insensitive (PR #1127)
14
15
16 Bug fixes:
17
18 * Fix and clean up publicRooms pagination (PR #1129)
19
20
21 Changes in synapse v0.18.0-rc1 (2016-09-16)
22 ===========================================
23
24 Features:
25
26 * Add ``only=highlight`` on ``/notifications`` (PR #1081)
27 * Add server param to /publicRooms (PR #1082)
28 * Allow clients to ask for the whole of a single state event (PR #1094)
29 * Add is_direct param to /createRoom (PR #1108)
30 * Add pagination support to publicRooms (PR #1121)
31 * Add very basic filter API to /publicRooms (PR #1126)
32 * Add basic direct to device messaging support for E2E (PR #1074, #1084, #1104,
33 #1111)
34
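As a quick illustration of the new room directory parameters above (server
selection from PR #1082, pagination from PR #1121), a hedged client-side
sketch; the homeserver URL, access token and pagination token are
placeholders, and the parameter names are taken from the changelog entries
rather than from a full API reference:

# Illustrative only: page through another server's public room list via the
# local homeserver. Values in angle brackets are placeholders.
import requests

HOMESERVER = "https://localhost:8448"
params = {
    "limit": 20,              # page size (PR #1121)
    "server": "matrix.org",   # fetch another server's list (PR #1082)
    # "since": "<token from a previous response>",   # next page (PR #1121)
}
resp = requests.get(
    HOMESERVER + "/_matrix/client/r0/publicRooms",
    params=params,
    headers={"Authorization": "Bearer <access_token>"},
)
print(resp.json())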
35
36 Changes:
37
38 * Move to storing state_groups_state as deltas, greatly reducing DB size (PR
39 #1065)
40 * Reduce amount of state pulled out of the DB during common requests (PR #1069)
41 * Allow PDF to be rendered from media repo (PR #1071)
42 * Reindex state_groups_state after pruning (PR #1085)
43 * Clobber EDUs in send queue (PR #1095)
44 * Conform better to the CAS protocol specification (PR #1100)
45 * Limit how often we ask for keys from dead servers (PR #1114)
46
47
48 Bug fixes:
49
50 * Fix /notifications API when used with ``from`` param (PR #1080)
51 * Fix backfill when we cannot find an event. (PR #1107)
52
53
054 Changes in synapse v0.17.3 (2016-09-09)
155 =======================================
256
4141 * synapse.app.appservice - handles output traffic to Application Services
4242 * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
4343 * synapse.app.media_repository - handles the media repository.
44 * synapse.app.client_reader - handles client API endpoints like /publicRooms
4445
4546 Each worker configuration file inherits the configuration of the main homeserver
4647 configuration file. You can then override configuration specific to that worker,
1919 --pusher \
2020 --synchrotron \
2121 --federation-reader \
22 --client-reader \
23 --appservice \
1515 """ This is a reference implementation of a Matrix home server.
1616 """
1717
18 __version__ = "0.17.3"
18 __version__ = "0.18.0"
582582 """
583583 # Can optionally look elsewhere in the request (e.g. headers)
584584 try:
585 user_id = yield self._get_appservice_user_id(request.args)
585 user_id = yield self._get_appservice_user_id(request)
586586 if user_id:
587587 request.authenticated_entity = user_id
588588 defer.returnValue(synapse.types.create_requester(user_id))
589589
590 access_token = request.args["access_token"][0]
590 access_token = get_access_token_from_request(
591 request, self.TOKEN_NOT_FOUND_HTTP_STATUS
592 )
593
591594 user_info = yield self.get_user_by_access_token(access_token, rights)
592595 user = user_info["user"]
593596 token_id = user_info["token_id"]
628631 )
629632
630633 @defer.inlineCallbacks
631 def _get_appservice_user_id(self, request_args):
634 def _get_appservice_user_id(self, request):
632635 app_service = yield self.store.get_app_service_by_token(
633 request_args["access_token"][0]
636 get_access_token_from_request(
637 request, self.TOKEN_NOT_FOUND_HTTP_STATUS
638 )
634639 )
635640 if app_service is None:
636641 defer.returnValue(None)
637642
638 if "user_id" not in request_args:
643 if "user_id" not in request.args:
639644 defer.returnValue(app_service.sender)
640645
641 user_id = request_args["user_id"][0]
646 user_id = request.args["user_id"][0]
642647 if app_service.sender == user_id:
643648 defer.returnValue(app_service.sender)
644649
832837 @defer.inlineCallbacks
833838 def get_appservice_by_req(self, request):
834839 try:
835 token = request.args["access_token"][0]
840 token = get_access_token_from_request(
841 request, self.TOKEN_NOT_FOUND_HTTP_STATUS
842 )
836843 service = yield self.store.get_app_service_by_token(token)
837844 if not service:
838845 logger.warn("Unrecognised appservice access token: %s" % (token,))
11411148 "This server requires you to be a moderator in the room to"
11421149 " edit its room list entry"
11431150 )
1151
1152
1153 def has_access_token(request):
1154 """Checks if the request has an access_token.
1155
1156 Returns:
1157 bool: False if no access_token was given, True otherwise.
1158 """
1159 query_params = request.args.get("access_token")
1160 return bool(query_params)
1161
1162
1163 def get_access_token_from_request(request, token_not_found_http_status=401):
1164 """Extracts the access_token from the request.
1165
1166 Args:
1167 request: The http request.
1168 token_not_found_http_status(int): The HTTP status code to set in the
1169 AuthError if the token isn't found. This is used in some of the
1170 legacy APIs to change the status code to 403 from the default of
1171 401 since some of the old clients depended on auth errors returning
1172 403.
1173 Returns:
1174 str: The access_token
1175 Raises:
1176 AuthError: If there isn't an access_token in the request.
1177 """
1178 query_params = request.args.get("access_token")
1179 # Try to get the access_token from the query params.
1180 if not query_params:
1181 raise AuthError(
1182 token_not_found_http_status,
1183 "Missing access token.",
1184 errcode=Codes.MISSING_TOKEN
1185 )
1186
1187 return query_params[0]
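
A hypothetical servlet sketch (not code from this release) showing how the
two helpers above are intended to be used together: has_access_token() to
branch on whether a token was supplied at all, and
get_access_token_from_request() with the legacy 403 status for endpoints
whose older clients expect 403 rather than 401:

from twisted.internet import defer

# Assuming the helpers live in synapse.api.auth, as added in this diff.
from synapse.api.auth import get_access_token_from_request, has_access_token


class ExampleRestServlet(object):
    """Illustrative servlet, not part of Synapse."""

    def __init__(self, hs):
        self.auth = hs.get_auth()

    @defer.inlineCallbacks
    def on_POST(self, request):
        if not has_access_token(request):
            # No token at all, e.g. the first stage of a registration flow.
            defer.returnValue((401, {"errcode": "M_MISSING_TOKEN"}))

        # Raises AuthError with a 403 (rather than the default 401) if the
        # token turns out to be missing, matching the legacy behaviour
        # described in the docstring above.
        token = get_access_token_from_request(
            request, token_not_found_http_status=403
        )
        requester = yield self.auth.get_user_by_req(request)
        defer.returnValue((200, {"user_id": requester.user.to_string(),
                                 "token_seen": bool(token)}))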
186186 def start():
187187 ps.replicate()
188188 ps.get_datastore().start_profiling()
189 ps.get_state_handler().start_caching()
189190
190191 reactor.callWhenRunning(start)
191192
0 #!/usr/bin/env python
1 # -*- coding: utf-8 -*-
2 # Copyright 2016 OpenMarket Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import synapse
17
18 from synapse.config._base import ConfigError
19 from synapse.config.homeserver import HomeServerConfig
20 from synapse.config.logger import setup_logging
21 from synapse.http.site import SynapseSite
22 from synapse.http.server import JsonResource
23 from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
24 from synapse.replication.slave.storage._base import BaseSlavedStore
25 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
26 from synapse.replication.slave.storage.events import SlavedEventStore
27 from synapse.replication.slave.storage.keys import SlavedKeyStore
28 from synapse.replication.slave.storage.room import RoomStore
29 from synapse.replication.slave.storage.directory import DirectoryStore
30 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
31 from synapse.rest.client.v1.room import PublicRoomListRestServlet
32 from synapse.server import HomeServer
33 from synapse.storage.client_ips import ClientIpStore
34 from synapse.storage.engines import create_engine
35 from synapse.util.async import sleep
36 from synapse.util.httpresourcetree import create_resource_tree
37 from synapse.util.logcontext import LoggingContext
38 from synapse.util.manhole import manhole
39 from synapse.util.rlimit import change_resource_limit
40 from synapse.util.versionstring import get_version_string
41 from synapse.crypto import context_factory
42
43
44 from twisted.internet import reactor, defer
45 from twisted.web.resource import Resource
46
47 from daemonize import Daemonize
48
49 import sys
50 import logging
51 import gc
52
53 logger = logging.getLogger("synapse.app.client_reader")
54
55
56 class ClientReaderSlavedStore(
57 SlavedEventStore,
58 SlavedKeyStore,
59 RoomStore,
60 DirectoryStore,
61 SlavedApplicationServiceStore,
62 SlavedRegistrationStore,
63 BaseSlavedStore,
64 ClientIpStore, # After BaseSlavedStore because the constructor is different
65 ):
66 pass
67
68
69 class ClientReaderServer(HomeServer):
70 def get_db_conn(self, run_new_connection=True):
71 # Any param beginning with cp_ is a parameter for adbapi, and should
72 # not be passed to the database engine.
73 db_params = {
74 k: v for k, v in self.db_config.get("args", {}).items()
75 if not k.startswith("cp_")
76 }
77 db_conn = self.database_engine.module.connect(**db_params)
78
79 if run_new_connection:
80 self.database_engine.on_new_connection(db_conn)
81 return db_conn
82
83 def setup(self):
84 logger.info("Setting up.")
85 self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
86 logger.info("Finished setting up.")
87
88 def _listen_http(self, listener_config):
89 port = listener_config["port"]
90 bind_address = listener_config.get("bind_address", "")
91 site_tag = listener_config.get("tag", port)
92 resources = {}
93 for res in listener_config["resources"]:
94 for name in res["names"]:
95 if name == "metrics":
96 resources[METRICS_PREFIX] = MetricsResource(self)
97 elif name == "client":
98 resource = JsonResource(self, canonical_json=False)
99 PublicRoomListRestServlet(self).register(resource)
100 resources.update({
101 "/_matrix/client/r0": resource,
102 "/_matrix/client/unstable": resource,
103 "/_matrix/client/v2_alpha": resource,
104 "/_matrix/client/api/v1": resource,
105 })
106
107 root_resource = create_resource_tree(resources, Resource())
108 reactor.listenTCP(
109 port,
110 SynapseSite(
111 "synapse.access.http.%s" % (site_tag,),
112 site_tag,
113 listener_config,
114 root_resource,
115 ),
116 interface=bind_address
117 )
118 logger.info("Synapse client reader now listening on port %d", port)
119
120 def start_listening(self, listeners):
121 for listener in listeners:
122 if listener["type"] == "http":
123 self._listen_http(listener)
124 elif listener["type"] == "manhole":
125 reactor.listenTCP(
126 listener["port"],
127 manhole(
128 username="matrix",
129 password="rabbithole",
130 globals={"hs": self},
131 ),
132 interface=listener.get("bind_address", '127.0.0.1')
133 )
134 else:
135 logger.warn("Unrecognized listener type: %s", listener["type"])
136
137 @defer.inlineCallbacks
138 def replicate(self):
139 http_client = self.get_simple_http_client()
140 store = self.get_datastore()
141 replication_url = self.config.worker_replication_url
142
143 while True:
144 try:
145 args = store.stream_positions()
146 args["timeout"] = 30000
147 result = yield http_client.get_json(replication_url, args=args)
148 yield store.process_replication(result)
149 except:
150 logger.exception("Error replicating from %r", replication_url)
151 yield sleep(5)
152
153
154 def start(config_options):
155 try:
156 config = HomeServerConfig.load_config(
157 "Synapse client reader", config_options
158 )
159 except ConfigError as e:
160 sys.stderr.write("\n" + e.message + "\n")
161 sys.exit(1)
162
163 assert config.worker_app == "synapse.app.client_reader"
164
165 setup_logging(config.worker_log_config, config.worker_log_file)
166
167 database_engine = create_engine(config.database_config)
168
169 tls_server_context_factory = context_factory.ServerContextFactory(config)
170
171 ss = ClientReaderServer(
172 config.server_name,
173 db_config=config.database_config,
174 tls_server_context_factory=tls_server_context_factory,
175 config=config,
176 version_string="Synapse/" + get_version_string(synapse),
177 database_engine=database_engine,
178 )
179
180 ss.setup()
181 ss.get_handlers()
182 ss.start_listening(config.worker_listeners)
183
184 def run():
185 with LoggingContext("run"):
186 logger.info("Running")
187 change_resource_limit(config.soft_file_limit)
188 if config.gc_thresholds:
189 gc.set_threshold(*config.gc_thresholds)
190 reactor.run()
191
192 def start():
193 ss.get_state_handler().start_caching()
194 ss.get_datastore().start_profiling()
195 ss.replicate()
196
197 reactor.callWhenRunning(start)
198
199 if config.worker_daemonize:
200 daemon = Daemonize(
201 app="synapse-client-reader",
202 pid=config.worker_pid_file,
203 action=run,
204 auto_close_fds=False,
205 verbose=True,
206 logger=logger,
207 )
208 daemon.start()
209 else:
210 run()
211
212
213 if __name__ == '__main__':
214 with LoggingContext("main"):
215 start(sys.argv[1:])
181181 reactor.run()
182182
183183 def start():
184 ss.get_state_handler().start_caching()
184185 ss.get_datastore().start_profiling()
185186 ss.replicate()
186187
187187 reactor.run()
188188
189189 def start():
190 ss.get_state_handler().start_caching()
190191 ss.get_datastore().start_profiling()
191192 ss.replicate()
192193
275275 ps.replicate()
276276 ps.get_pusherpool().start()
277277 ps.get_datastore().start_profiling()
278 ps.get_state_handler().start_caching()
278279
279280 reactor.callWhenRunning(start)
280281
241241 self._room_typing = {}
242242
243243 def stream_positions(self):
244 # We must update this typing token from the response of the previous
245 # sync. In particular, the stream id may "reset" back to zero/a low
246 # value which we *must* use for the next replication request.
244247 return {"typing": self._latest_room_serial}
245248
246249 def process_replication(self, result):
461464 def start():
462465 ss.get_datastore().start_profiling()
463466 ss.replicate()
467 ss.get_state_handler().start_caching()
464468
465469 reactor.callWhenRunning(start)
466470
3131 APP_SERVICE_PREFIX = "/_matrix/app/unstable"
3232
3333
34 def _is_valid_3pe_metadata(info):
35 if "instances" not in info:
36 return False
37 if not isinstance(info["instances"], list):
38 return False
39 return True
40
41
3442 def _is_valid_3pe_result(r, field):
3543 if not isinstance(r, dict):
3644 return False
161169 urllib.quote(protocol)
162170 )
163171 try:
164 defer.returnValue((yield self.get_json(uri, {})))
172 info = yield self.get_json(uri, {})
173
174 if not _is_valid_3pe_metadata(info):
175 logger.warning("query_3pe_protocol to %s did not return a"
176 " valid result", uri)
177 defer.returnValue(None)
178
179 defer.returnValue(info)
165180 except Exception as ex:
166181 logger.warning("query_3pe_protocol to %s threw exception %s",
167182 uri, ex)
168 defer.returnValue({})
183 defer.returnValue(None)
169184
170185 key = (service.id, protocol)
171186 return self.protocol_meta_cache.get(key) or (
2828 self.user_agent_suffix = config.get("user_agent_suffix")
2929 self.use_frozen_dicts = config.get("use_frozen_dicts", False)
3030 self.public_baseurl = config.get("public_baseurl")
31 self.secondary_directory_servers = config.get("secondary_directory_servers", [])
3231
3332 if self.public_baseurl is not None:
3433 if self.public_baseurl[-1] != '/':
141140 # The GC threshold parameters to pass to `gc.set_threshold`, if defined
142141 # gc_thresholds: [700, 10, 10]
143142
144 # A list of other Home Servers to fetch the public room directory from
145 # and include in the public room directory of this home server
146 # This is a temporary stopgap solution to populate new server with a
147 # list of rooms until there exists a good solution of a decentralized
148 # room directory.
149 # secondary_directory_servers:
150 # - matrix.org
151
152143 # List of ports that Synapse should listen on, their purpose and their
153144 # configuration.
154145 listeners:
1414
1515
1616 class EventContext(object):
17 __slots__ = [
18 "current_state_ids",
19 "prev_state_ids",
20 "state_group",
21 "rejected",
22 "push_actions",
23 "prev_group",
24 "delta_ids",
25 "prev_state_events",
26 ]
27
1728 def __init__(self):
29 # The current state including the current event
1830 self.current_state_ids = None
31 # The current state excluding the current event
1932 self.prev_state_ids = None
2033 self.state_group = None
34
2135 self.rejected = False
2236 self.push_actions = []
37
38 # A previously persisted state group and a delta between that
39 # and this state.
40 self.prev_group = None
41 self.delta_ids = None
42
43 self.prev_state_events = None
2323 CodeMessageException, HttpResponseException, SynapseError,
2424 )
2525 from synapse.util import unwrapFirstError
26 from synapse.util.async import concurrently_execute
2726 from synapse.util.caches.expiringcache import ExpiringCache
2827 from synapse.util.logutils import log_function
2928 from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
121120 pdu.event_id
122121 )
123122
124 @log_function
125 def send_edu(self, destination, edu_type, content):
123 def send_presence(self, destination, states):
124 if destination != self.server_name:
125 self._transaction_queue.enqueue_presence(destination, states)
126
127 @log_function
128 def send_edu(self, destination, edu_type, content, key=None):
126129 edu = Edu(
127130 origin=self.server_name,
128131 destination=destination,
133136 sent_edus_counter.inc()
134137
135138 # TODO, add errback, etc.
136 self._transaction_queue.enqueue_edu(edu)
139 self._transaction_queue.enqueue_edu(edu, key=key)
137140 return defer.succeed(None)
141
142 @log_function
143 def send_device_messages(self, destination):
144 """Sends the device messages in the local database to the remote
145 destination"""
146 self._transaction_queue.enqueue_device_messages(destination)
138147
139148 @log_function
140149 def send_failure(self, failure, destination):
165174 )
166175
167176 @log_function
168 def query_client_keys(self, destination, content):
177 def query_client_keys(self, destination, content, timeout):
169178 """Query device keys for a device hosted on a remote server.
170179
171180 Args:
177186 response
178187 """
179188 sent_queries_counter.inc("client_device_keys")
180 return self.transport_layer.query_client_keys(destination, content)
181
182 @log_function
183 def claim_client_keys(self, destination, content):
189 return self.transport_layer.query_client_keys(
190 destination, content, timeout
191 )
192
193 @log_function
194 def claim_client_keys(self, destination, content, timeout):
184195 """Claims one-time keys for a device hosted on a remote server.
185196
186197 Args:
192203 response
193204 """
194205 sent_queries_counter.inc("client_one_time_keys")
195 return self.transport_layer.claim_client_keys(destination, content)
206 return self.transport_layer.claim_client_keys(
207 destination, content, timeout
208 )
196209
197210 @defer.inlineCallbacks
198211 @log_function
470483 defer.DeferredList(deferreds, consumeErrors=True)
471484 )
472485 for success, result in res:
473 if success:
486 if success and result:
474487 signed_events.append(result)
475488 batch.discard(result.event_id)
476489
704717
705718 raise RuntimeError("Failed to send to any server.")
706719
707 @defer.inlineCallbacks
708 def get_public_rooms(self, destinations):
709 results_by_server = {}
710
711 @defer.inlineCallbacks
712 def _get_result(s):
713 if s == self.server_name:
714 defer.returnValue()
715
716 try:
717 result = yield self.transport_layer.get_public_rooms(s)
718 results_by_server[s] = result
719 except:
720 logger.exception("Error getting room list from server %r", s)
721
722 yield concurrently_execute(_get_result, destinations, 3)
723
724 defer.returnValue(results_by_server)
720 def get_public_rooms(self, destination, limit=None, since_token=None,
721 search_filter=None):
722 if destination == self.server_name:
723 return
724
725 return self.transport_layer.get_public_rooms(
726 destination, limit, since_token, search_filter
727 )
725728
726729 @defer.inlineCallbacks
727730 def query_auth(self, destination, room_id, event_id, local_auth):
187187 except SynapseError as e:
188188 logger.info("Failed to handle edu %r: %r", edu_type, e)
189189 except Exception as e:
190 logger.exception("Failed to handle edu %r", edu_type, e)
190 logger.exception("Failed to handle edu %r", edu_type)
191191 else:
192192 logger.warn("Received EDU of type %s with no handler", edu_type)
193193
1616 from twisted.internet import defer
1717
1818 from .persistence import TransactionActions
19 from .units import Transaction
19 from .units import Transaction, Edu
2020
2121 from synapse.api.errors import HttpResponseException
2222 from synapse.util.async import run_on_reactor
2525 get_retry_limiter, NotRetryingDestination,
2626 )
2727 from synapse.util.metrics import measure_func
28 from synapse.handlers.presence import format_user_presence_state
2829 import synapse.metrics
2930
3031 import logging
6869 # destination -> list of tuple(edu, deferred)
6970 self.pending_edus_by_dest = edus = {}
7071
72 # Presence needs to be separate as we send single aggregate EDUs
73 self.pending_presence_by_dest = presence = {}
74 self.pending_edus_keyed_by_dest = edus_keyed = {}
75
7176 metrics.register_callback(
7277 "pending_pdus",
7378 lambda: sum(map(len, pdus.values())),
7479 )
7580 metrics.register_callback(
7681 "pending_edus",
77 lambda: sum(map(len, edus.values())),
82 lambda: (
83 sum(map(len, edus.values()))
84 + sum(map(len, presence.values()))
85 + sum(map(len, edus_keyed.values()))
86 ),
7887 )
7988
8089 # destination -> list of tuple(failure, deferred)
8190 self.pending_failures_by_dest = {}
91
92 self.last_device_stream_id_by_dest = {}
8293
8394 # HACK to get unique tx id
8495 self._next_txn_id = int(self.clock.time_msec())
127138 self._attempt_new_transaction, destination
128139 )
129140
130 def enqueue_edu(self, edu):
141 def enqueue_presence(self, destination, states):
142 self.pending_presence_by_dest.setdefault(destination, {}).update({
143 state.user_id: state for state in states
144 })
145
146 preserve_context_over_fn(
147 self._attempt_new_transaction, destination
148 )
149
150 def enqueue_edu(self, edu, key=None):
131151 destination = edu.destination
132152
133153 if not self.can_send_to(destination):
134154 return
135155
136 self.pending_edus_by_dest.setdefault(destination, []).append(edu)
156 if key:
157 self.pending_edus_keyed_by_dest.setdefault(
158 destination, {}
159 )[(edu.edu_type, key)] = edu
160 else:
161 self.pending_edus_by_dest.setdefault(destination, []).append(edu)
137162
138163 preserve_context_over_fn(
139164 self._attempt_new_transaction, destination
154179 self._attempt_new_transaction, destination
155180 )
156181
182 def enqueue_device_messages(self, destination):
183 if destination == self.server_name or destination == "localhost":
184 return
185
186 if not self.can_send_to(destination):
187 return
188
189 preserve_context_over_fn(
190 self._attempt_new_transaction, destination
191 )
192
157193 @defer.inlineCallbacks
158194 def _attempt_new_transaction(self, destination):
159 yield run_on_reactor()
160 while True:
161 # list of (pending_pdu, deferred, order)
162 if destination in self.pending_transactions:
163 # XXX: pending_transactions can get stuck on by a never-ending
164 # request at which point pending_pdus_by_dest just keeps growing.
165 # we need application-layer timeouts of some flavour of these
166 # requests
167 logger.debug(
168 "TX [%s] Transaction already in progress",
169 destination
170 )
171 return
172
173 pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
174 pending_edus = self.pending_edus_by_dest.pop(destination, [])
175 pending_failures = self.pending_failures_by_dest.pop(destination, [])
176
177 if pending_pdus:
178 logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
179 destination, len(pending_pdus))
180
181 if not pending_pdus and not pending_edus and not pending_failures:
182 logger.debug("TX [%s] Nothing to send", destination)
183 return
184
185 yield self._send_new_transaction(
186 destination, pending_pdus, pending_edus, pending_failures
187 )
195 # list of (pending_pdu, deferred, order)
196 if destination in self.pending_transactions:
197 # XXX: pending_transactions can get stuck on by a never-ending
198 # request at which point pending_pdus_by_dest just keeps growing.
199 # we need application-layer timeouts of some flavour of these
200 # requests
201 logger.debug(
202 "TX [%s] Transaction already in progress",
203 destination
204 )
205 return
206
207 try:
208 self.pending_transactions[destination] = 1
209
210 yield run_on_reactor()
211
212 while True:
213 pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
214 pending_edus = self.pending_edus_by_dest.pop(destination, [])
215 pending_presence = self.pending_presence_by_dest.pop(destination, {})
216 pending_failures = self.pending_failures_by_dest.pop(destination, [])
217
218 pending_edus.extend(
219 self.pending_edus_keyed_by_dest.pop(destination, {}).values()
220 )
221
222 limiter = yield get_retry_limiter(
223 destination,
224 self.clock,
225 self.store,
226 )
227
228 device_message_edus, device_stream_id = (
229 yield self._get_new_device_messages(destination)
230 )
231
232 pending_edus.extend(device_message_edus)
233 if pending_presence:
234 pending_edus.append(
235 Edu(
236 origin=self.server_name,
237 destination=destination,
238 edu_type="m.presence",
239 content={
240 "push": [
241 format_user_presence_state(
242 presence, self.clock.time_msec()
243 )
244 for presence in pending_presence.values()
245 ]
246 },
247 )
248 )
249
250 if pending_pdus:
251 logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d",
252 destination, len(pending_pdus))
253
254 if not pending_pdus and not pending_edus and not pending_failures:
255 logger.debug("TX [%s] Nothing to send", destination)
256 self.last_device_stream_id_by_dest[destination] = (
257 device_stream_id
258 )
259 return
260
261 success = yield self._send_new_transaction(
262 destination, pending_pdus, pending_edus, pending_failures,
263 device_stream_id,
264 should_delete_from_device_stream=bool(device_message_edus),
265 limiter=limiter,
266 )
267 if not success:
268 break
269 except NotRetryingDestination:
270 logger.info(
271 "TX [%s] not ready for retry yet - "
272 "dropping transaction for now",
273 destination,
274 )
275 finally:
276 # We want to be *very* sure we delete this after we stop processing
277 self.pending_transactions.pop(destination, None)
278
279 @defer.inlineCallbacks
280 def _get_new_device_messages(self, destination):
281 last_device_stream_id = self.last_device_stream_id_by_dest.get(destination, 0)
282 to_device_stream_id = self.store.get_to_device_stream_token()
283 contents, stream_id = yield self.store.get_new_device_msgs_for_remote(
284 destination, last_device_stream_id, to_device_stream_id
285 )
286 edus = [
287 Edu(
288 origin=self.server_name,
289 destination=destination,
290 edu_type="m.direct_to_device",
291 content=content,
292 )
293 for content in contents
294 ]
295 defer.returnValue((edus, stream_id))
188296
189297 @measure_func("_send_new_transaction")
190298 @defer.inlineCallbacks
191299 def _send_new_transaction(self, destination, pending_pdus, pending_edus,
192 pending_failures):
193
194 # Sort based on the order field
195 pending_pdus.sort(key=lambda t: t[1])
196 pdus = [x[0] for x in pending_pdus]
197 edus = pending_edus
198 failures = [x.get_dict() for x in pending_failures]
199
200 try:
201 self.pending_transactions[destination] = 1
202
203 logger.debug("TX [%s] _attempt_new_transaction", destination)
204
205 txn_id = str(self._next_txn_id)
206
207 limiter = yield get_retry_limiter(
208 destination,
209 self.clock,
210 self.store,
300 pending_failures, device_stream_id,
301 should_delete_from_device_stream, limiter):
302
303 # Sort based on the order field
304 pending_pdus.sort(key=lambda t: t[1])
305 pdus = [x[0] for x in pending_pdus]
306 edus = pending_edus
307 failures = [x.get_dict() for x in pending_failures]
308
309 success = True
310
311 try:
312 logger.debug("TX [%s] _attempt_new_transaction", destination)
313
314 txn_id = str(self._next_txn_id)
315
316 logger.debug(
317 "TX [%s] {%s} Attempting new transaction"
318 " (pdus: %d, edus: %d, failures: %d)",
319 destination, txn_id,
320 len(pdus),
321 len(edus),
322 len(failures)
323 )
324
325 logger.debug("TX [%s] Persisting transaction...", destination)
326
327 transaction = Transaction.create_new(
328 origin_server_ts=int(self.clock.time_msec()),
329 transaction_id=txn_id,
330 origin=self.server_name,
331 destination=destination,
332 pdus=pdus,
333 edus=edus,
334 pdu_failures=failures,
335 )
336
337 self._next_txn_id += 1
338
339 yield self.transaction_actions.prepare_to_send(transaction)
340
341 logger.debug("TX [%s] Persisted transaction", destination)
342 logger.info(
343 "TX [%s] {%s} Sending transaction [%s],"
344 " (PDUs: %d, EDUs: %d, failures: %d)",
345 destination, txn_id,
346 transaction.transaction_id,
347 len(pdus),
348 len(edus),
349 len(failures),
350 )
351
352 with limiter:
353 # Actually send the transaction
354
355 # FIXME (erikj): This is a bit of a hack to make the Pdu age
356 # keys work
357 def json_data_cb():
358 data = transaction.get_dict()
359 now = int(self.clock.time_msec())
360 if "pdus" in data:
361 for p in data["pdus"]:
362 if "age_ts" in p:
363 unsigned = p.setdefault("unsigned", {})
364 unsigned["age"] = now - int(p["age_ts"])
365 del p["age_ts"]
366 return data
367
368 try:
369 response = yield self.transport_layer.send_transaction(
370 transaction, json_data_cb
371 )
372 code = 200
373
374 if response:
375 for e_id, r in response.get("pdus", {}).items():
376 if "error" in r:
377 logger.warn(
378 "Transaction returned error for %s: %s",
379 e_id, r,
380 )
381 except HttpResponseException as e:
382 code = e.code
383 response = e.response
384
385 logger.info(
386 "TX [%s] {%s} got %d response",
387 destination, txn_id, code
211388 )
212389
213 logger.debug(
214 "TX [%s] {%s} Attempting new transaction"
215 " (pdus: %d, edus: %d, failures: %d)",
216 destination, txn_id,
217 len(pending_pdus),
218 len(pending_edus),
219 len(pending_failures)
220 )
221
222 logger.debug("TX [%s] Persisting transaction...", destination)
223
224 transaction = Transaction.create_new(
225 origin_server_ts=int(self.clock.time_msec()),
226 transaction_id=txn_id,
227 origin=self.server_name,
228 destination=destination,
229 pdus=pdus,
230 edus=edus,
231 pdu_failures=failures,
232 )
233
234 self._next_txn_id += 1
235
236 yield self.transaction_actions.prepare_to_send(transaction)
237
238 logger.debug("TX [%s] Persisted transaction", destination)
239 logger.info(
240 "TX [%s] {%s} Sending transaction [%s],"
241 " (PDUs: %d, EDUs: %d, failures: %d)",
242 destination, txn_id,
243 transaction.transaction_id,
244 len(pending_pdus),
245 len(pending_edus),
246 len(pending_failures),
247 )
248
249 with limiter:
250 # Actually send the transaction
251
252 # FIXME (erikj): This is a bit of a hack to make the Pdu age
253 # keys work
254 def json_data_cb():
255 data = transaction.get_dict()
256 now = int(self.clock.time_msec())
257 if "pdus" in data:
258 for p in data["pdus"]:
259 if "age_ts" in p:
260 unsigned = p.setdefault("unsigned", {})
261 unsigned["age"] = now - int(p["age_ts"])
262 del p["age_ts"]
263 return data
264
265 try:
266 response = yield self.transport_layer.send_transaction(
267 transaction, json_data_cb
268 )
269 code = 200
270
271 if response:
272 for e_id, r in response.get("pdus", {}).items():
273 if "error" in r:
274 logger.warn(
275 "Transaction returned error for %s: %s",
276 e_id, r,
277 )
278 except HttpResponseException as e:
279 code = e.code
280 response = e.response
281
390 logger.debug("TX [%s] Sent transaction", destination)
391 logger.debug("TX [%s] Marking as delivered...", destination)
392
393 yield self.transaction_actions.delivered(
394 transaction, code, response
395 )
396
397 logger.debug("TX [%s] Marked as delivered", destination)
398
399 if code != 200:
400 for p in pdus:
282401 logger.info(
283 "TX [%s] {%s} got %d response",
284 destination, txn_id, code
285 )
286
287 logger.debug("TX [%s] Sent transaction", destination)
288 logger.debug("TX [%s] Marking as delivered...", destination)
289
290 yield self.transaction_actions.delivered(
291 transaction, code, response
292 )
293
294 logger.debug("TX [%s] Marked as delivered", destination)
295
296 if code != 200:
297 for p in pdus:
298 logger.info(
299 "Failed to send event %s to %s", p.event_id, destination
300 )
301 except NotRetryingDestination:
302 logger.info(
303 "TX [%s] not ready for retry yet - "
304 "dropping transaction for now",
305 destination,
306 )
307 except RuntimeError as e:
308 # We capture this here as there as nothing actually listens
309 # for this finishing functions deferred.
310 logger.warn(
311 "TX [%s] Problem in _attempt_transaction: %s",
312 destination,
313 e,
314 )
315
316 for p in pdus:
317 logger.info("Failed to send event %s to %s", p.event_id, destination)
318 except Exception as e:
319 # We capture this here as there as nothing actually listens
320 # for this finishing functions deferred.
321 logger.warn(
322 "TX [%s] Problem in _attempt_transaction: %s",
323 destination,
324 e,
325 )
326
327 for p in pdus:
328 logger.info("Failed to send event %s to %s", p.event_id, destination)
329
330 finally:
331 # We want to be *very* sure we delete this after we stop processing
332 self.pending_transactions.pop(destination, None)
402 "Failed to send event %s to %s", p.event_id, destination
403 )
404 success = False
405 else:
406 # Remove the acknowledged device messages from the database
407 if should_delete_from_device_stream:
408 yield self.store.delete_device_msgs_for_remote(
409 destination, device_stream_id
410 )
411 self.last_device_stream_id_by_dest[destination] = device_stream_id
412 except RuntimeError as e:
413 # We capture this here as there is nothing actually listening
414 # for this function's deferred to finish.
415 logger.warn(
416 "TX [%s] Problem in _attempt_transaction: %s",
417 destination,
418 e,
419 )
420
421 success = False
422
423 for p in pdus:
424 logger.info("Failed to send event %s to %s", p.event_id, destination)
425 except Exception as e:
426 # We capture this here as there is nothing actually listening
427 # for this function's deferred to finish.
428 logger.warn(
429 "TX [%s] Problem in _attempt_transaction: %s",
430 destination,
431 e,
432 )
433
434 success = False
435
436 for p in pdus:
437 logger.info("Failed to send event %s to %s", p.event_id, destination)
438
439 defer.returnValue(success)
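
The keyed-EDU path above implements the "Clobber EDUs in send queue" change
(PR #1095): for a given destination, only the most recent EDU with a given
(edu_type, key) is kept until the next transaction is built. A standalone
sketch of that behaviour, using a stand-in Edu type rather than Synapse's
real class:

from collections import namedtuple

# Stand-in for synapse.federation.units.Edu, for illustration only.
Edu = namedtuple("Edu", ["destination", "edu_type", "content"])

pending_edus_by_dest = {}
pending_edus_keyed_by_dest = {}


def enqueue_edu(edu, key=None):
    # Later keyed EDUs replace earlier ones with the same (edu_type, key);
    # unkeyed EDUs simply accumulate in a list.
    if key:
        pending_edus_keyed_by_dest.setdefault(
            edu.destination, {}
        )[(edu.edu_type, key)] = edu
    else:
        pending_edus_by_dest.setdefault(edu.destination, []).append(edu)


# Two typing updates for the same room: only the second survives.
enqueue_edu(Edu("remote.example", "m.typing", {"typing": True}), key="!room:a")
enqueue_edu(Edu("remote.example", "m.typing", {"typing": False}), key="!room:a")
assert len(pending_edus_keyed_by_dest["remote.example"]) == 1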
247247
248248 @defer.inlineCallbacks
249249 @log_function
250 def get_public_rooms(self, remote_server):
250 def get_public_rooms(self, remote_server, limit, since_token,
251 search_filter=None):
251252 path = PREFIX + "/publicRooms"
253
254 args = {}
255 if limit:
256 args["limit"] = [str(limit)]
257 if since_token:
258 args["since"] = [since_token]
259
260 # TODO(erikj): Actually send the search_filter across federation.
252261
253262 response = yield self.client.get_json(
254263 destination=remote_server,
255264 path=path,
265 args=args,
256266 )
257267
258268 defer.returnValue(response)
297307
298308 @defer.inlineCallbacks
299309 @log_function
300 def query_client_keys(self, destination, query_content):
310 def query_client_keys(self, destination, query_content, timeout):
301311 """Query the device keys for a list of user ids hosted on a remote
302312 server.
303313
326336 destination=destination,
327337 path=path,
328338 data=query_content,
329 )
330 defer.returnValue(content)
331
332 @defer.inlineCallbacks
333 @log_function
334 def claim_client_keys(self, destination, query_content):
339 timeout=timeout,
340 )
341 defer.returnValue(content)
342
343 @defer.inlineCallbacks
344 @log_function
345 def claim_client_keys(self, destination, query_content, timeout):
335346 """Claim one-time keys for a list of devices hosted on a remote server.
336347
337348 Request:
362373 destination=destination,
363374 path=path,
364375 data=query_content,
376 timeout=timeout,
365377 )
366378 defer.returnValue(content)
367379
1717 from synapse.api.urls import FEDERATION_PREFIX as PREFIX
1818 from synapse.api.errors import Codes, SynapseError
1919 from synapse.http.server import JsonResource
20 from synapse.http.servlet import parse_json_object_from_request
20 from synapse.http.servlet import (
21 parse_json_object_from_request, parse_integer_from_args, parse_string_from_args,
22 )
2123 from synapse.util.ratelimitutils import FederationRateLimiter
2224 from synapse.util.versionstring import get_version_string
2325
553555
554556 @defer.inlineCallbacks
555557 def on_GET(self, origin, content, query):
556 data = yield self.room_list_handler.get_local_public_room_list()
558 limit = parse_integer_from_args(query, "limit", 0)
559 since_token = parse_string_from_args(query, "since", None)
560 data = yield self.room_list_handler.get_local_public_room_list(
561 limit, since_token
562 )
557563 defer.returnValue((200, data))
558564
559565
175175 defer.returnValue(ret)
176176
177177 @defer.inlineCallbacks
178 def get_3pe_protocols(self):
178 def get_3pe_protocols(self, only_protocol=None):
179179 services = yield self.store.get_app_services()
180180 protocols = {}
181
182 # Collect up all the individual protocol responses out of the ASes
181183 for s in services:
182184 for p in s.protocols:
183 protocols[p] = yield self.appservice_api.get_3pe_protocol(s, p)
185 if only_protocol is not None and p != only_protocol:
186 continue
187
188 if p not in protocols:
189 protocols[p] = []
190
191 info = yield self.appservice_api.get_3pe_protocol(s, p)
192
193 if info is not None:
194 protocols[p].append(info)
195
196 def _merge_instances(infos):
197 if not infos:
198 return {}
199
200 # Merge the 'instances' lists of multiple results, but just take
201 # the other fields from the first as they ought to be identical
202 # copy the result so as not to corrupt the cached one
203 combined = dict(infos[0])
204 combined["instances"] = list(combined["instances"])
205
206 for info in infos[1:]:
207 combined["instances"].extend(info["instances"])
208
209 return combined
210
211 for p in protocols.keys():
212 protocols[p] = _merge_instances(protocols[p])
184213
185214 defer.returnValue(protocols)
186215
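A made-up example of what the _merge_instances step above does when two
application services both advertise the same third party protocol: the
fields of the first response win, and the "instances" lists are
concatenated. The field names below are placeholders.

infos = [
    {"user_fields": ["username"], "instances": [{"desc": "Bridge A"}]},
    {"user_fields": ["username"], "instances": [{"desc": "Bridge B"}]},
]

combined = dict(infos[0])
combined["instances"] = list(combined["instances"])
for info in infos[1:]:
    combined["instances"].extend(info["instances"])

assert combined["instances"] == [{"desc": "Bridge A"}, {"desc": "Bridge B"}]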
5757 attempts = 0
5858 while attempts < 5:
5959 try:
60 device_id = stringutils.random_string_with_symbols(16)
60 device_id = stringutils.random_string(10).upper()
6161 yield self.store.store_device(
6262 user_id=user_id,
6363 device_id=device_id,
0 # -*- coding: utf-8 -*-
1 # Copyright 2016 OpenMarket Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16
17 from twisted.internet import defer
18
19 from synapse.types import get_domain_from_id
20 from synapse.util.stringutils import random_string
21
22
23 logger = logging.getLogger(__name__)
24
25
26 class DeviceMessageHandler(object):
27
28 def __init__(self, hs):
29 """
30 Args:
31 hs (synapse.server.HomeServer): server
32 """
33 self.store = hs.get_datastore()
34 self.notifier = hs.get_notifier()
35 self.is_mine_id = hs.is_mine_id
36 self.federation = hs.get_replication_layer()
37
38 self.federation.register_edu_handler(
39 "m.direct_to_device", self.on_direct_to_device_edu
40 )
41
42 @defer.inlineCallbacks
43 def on_direct_to_device_edu(self, origin, content):
44 local_messages = {}
45 sender_user_id = content["sender"]
46 if origin != get_domain_from_id(sender_user_id):
47 logger.warn(
48 "Dropping device message from %r with spoofed sender %r",
49 origin, sender_user_id
50 )
51 message_type = content["type"]
52 message_id = content["message_id"]
53 for user_id, by_device in content["messages"].items():
54 messages_by_device = {
55 device_id: {
56 "content": message_content,
57 "type": message_type,
58 "sender": sender_user_id,
59 }
60 for device_id, message_content in by_device.items()
61 }
62 if messages_by_device:
63 local_messages[user_id] = messages_by_device
64
65 stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
66 origin, message_id, local_messages
67 )
68
69 self.notifier.on_new_event(
70 "to_device_key", stream_id, users=local_messages.keys()
71 )
72
73 @defer.inlineCallbacks
74 def send_device_message(self, sender_user_id, message_type, messages):
75
76 local_messages = {}
77 remote_messages = {}
78 for user_id, by_device in messages.items():
79 if self.is_mine_id(user_id):
80 messages_by_device = {
81 device_id: {
82 "content": message_content,
83 "type": message_type,
84 "sender": sender_user_id,
85 }
86 for device_id, message_content in by_device.items()
87 }
88 if messages_by_device:
89 local_messages[user_id] = messages_by_device
90 else:
91 destination = get_domain_from_id(user_id)
92 remote_messages.setdefault(destination, {})[user_id] = by_device
93
94 message_id = random_string(16)
95
96 remote_edu_contents = {}
97 for destination, messages in remote_messages.items():
98 remote_edu_contents[destination] = {
99 "messages": messages,
100 "sender": sender_user_id,
101 "type": message_type,
102 "message_id": message_id,
103 }
104
105 stream_id = yield self.store.add_messages_to_device_inbox(
106 local_messages, remote_edu_contents
107 )
108
109 self.notifier.on_new_event(
110 "to_device_key", stream_id, users=local_messages.keys()
111 )
112
113 for destination in remote_messages.keys():
114 # Enqueue a new federation transaction to send the new
115 # device messages to each remote destination.
116 self.federation.send_device_messages(destination)
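
For reference, the m.direct_to_device EDU content that send_device_message
builds for each remote destination has this shape; every identifier below is
made up:

# Illustrative EDU content as assembled by send_device_message above.
edu_content = {
    "sender": "@alice:example.com",
    "type": "m.example.message_type",   # the client-supplied message type
    "message_id": "cwahGeiPhaibooNg",   # random_string(16) in the code above
    "messages": {
        "@bob:remote.example": {
            # device_id -> the opaque per-device message content
            "BOBDEVICEID": {"example_key": "example_value"},
        },
    },
}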
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
1414
15 import collections
16 import json
15 import ujson as json
1716 import logging
1817
18 from canonicaljson import encode_canonical_json
1919 from twisted.internet import defer
2020
21 from synapse.api import errors
22 import synapse.types
21 from synapse.api.errors import SynapseError, CodeMessageException
22 from synapse.types import get_domain_from_id
23 from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
24 from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
2325
2426 logger = logging.getLogger(__name__)
2527
2830 def __init__(self, hs):
2931 self.store = hs.get_datastore()
3032 self.federation = hs.get_replication_layer()
33 self.device_handler = hs.get_device_handler()
3134 self.is_mine_id = hs.is_mine_id
32 self.server_name = hs.hostname
35 self.clock = hs.get_clock()
3336
3437 # doesn't really work as part of the generic query API, because the
3538 # query request requires an object POST, but we abuse the
3942 )
4043
4144 @defer.inlineCallbacks
42 def query_devices(self, query_body):
45 def query_devices(self, query_body, timeout):
4346 """ Handle a device key query from a client
4447
4548 {
6265
6366 # separate users by domain.
6467 # make a map from domain to user_id to device_ids
65 queries_by_domain = collections.defaultdict(dict)
68 local_query = {}
69 remote_queries = {}
70
6671 for user_id, device_ids in device_keys_query.items():
67 user = synapse.types.UserID.from_string(user_id)
68 queries_by_domain[user.domain][user_id] = device_ids
72 if self.is_mine_id(user_id):
73 local_query[user_id] = device_ids
74 else:
75 domain = get_domain_from_id(user_id)
76 remote_queries.setdefault(domain, {})[user_id] = device_ids
6977
7078 # do the queries
71 # TODO: do these in parallel
79 failures = {}
7280 results = {}
73 for destination, destination_query in queries_by_domain.items():
74 if destination == self.server_name:
75 res = yield self.query_local_devices(destination_query)
76 else:
77 res = yield self.federation.query_client_keys(
78 destination, {"device_keys": destination_query}
81 if local_query:
82 local_result = yield self.query_local_devices(local_query)
83 for user_id, keys in local_result.items():
84 if user_id in local_query:
85 results[user_id] = keys
86
87 @defer.inlineCallbacks
88 def do_remote_query(destination):
89 destination_query = remote_queries[destination]
90 try:
91 limiter = yield get_retry_limiter(
92 destination, self.clock, self.store
7993 )
80 res = res["device_keys"]
81 for user_id, keys in res.items():
82 if user_id in destination_query:
83 results[user_id] = keys
84
85 defer.returnValue((200, {"device_keys": results}))
94 with limiter:
95 remote_result = yield self.federation.query_client_keys(
96 destination,
97 {"device_keys": destination_query},
98 timeout=timeout
99 )
100
101 for user_id, keys in remote_result["device_keys"].items():
102 if user_id in destination_query:
103 results[user_id] = keys
104
105 except CodeMessageException as e:
106 failures[destination] = {
107 "status": e.code, "message": e.message
108 }
109 except NotRetryingDestination as e:
110 failures[destination] = {
111 "status": 503, "message": "Not ready for retry",
112 }
113
114 yield preserve_context_over_deferred(defer.gatherResults([
115 preserve_fn(do_remote_query)(destination)
116 for destination in remote_queries
117 ]))
118
119 defer.returnValue({
120 "device_keys": results, "failures": failures,
121 })
86122
87123 @defer.inlineCallbacks
88124 def query_local_devices(self, query):
103139 if not self.is_mine_id(user_id):
104140 logger.warning("Request for keys for non-local user %s",
105141 user_id)
106 raise errors.SynapseError(400, "Not a user here")
142 raise SynapseError(400, "Not a user here")
107143
108144 if not device_ids:
109145 local_query.append((user_id, None))
136172 device_keys_query = query_body.get("device_keys", {})
137173 res = yield self.query_local_devices(device_keys_query)
138174 defer.returnValue({"device_keys": res})
175
176 @defer.inlineCallbacks
177 def claim_one_time_keys(self, query, timeout):
178 local_query = []
179 remote_queries = {}
180
181 for user_id, device_keys in query.get("one_time_keys", {}).items():
182 if self.is_mine_id(user_id):
183 for device_id, algorithm in device_keys.items():
184 local_query.append((user_id, device_id, algorithm))
185 else:
186 domain = get_domain_from_id(user_id)
187 remote_queries.setdefault(domain, {})[user_id] = device_keys
188
189 results = yield self.store.claim_e2e_one_time_keys(local_query)
190
191 json_result = {}
192 failures = {}
193 for user_id, device_keys in results.items():
194 for device_id, keys in device_keys.items():
195 for key_id, json_bytes in keys.items():
196 json_result.setdefault(user_id, {})[device_id] = {
197 key_id: json.loads(json_bytes)
198 }
199
200 @defer.inlineCallbacks
201 def claim_client_keys(destination):
202 device_keys = remote_queries[destination]
203 try:
204 limiter = yield get_retry_limiter(
205 destination, self.clock, self.store
206 )
207 with limiter:
208 remote_result = yield self.federation.claim_client_keys(
209 destination,
210 {"one_time_keys": device_keys},
211 timeout=timeout
212 )
213 for user_id, keys in remote_result["one_time_keys"].items():
214 if user_id in device_keys:
215 json_result[user_id] = keys
216 except CodeMessageException as e:
217 failures[destination] = {
218 "status": e.code, "message": e.message
219 }
220 except NotRetryingDestination as e:
221 failures[destination] = {
222 "status": 503, "message": "Not ready for retry",
223 }
224
225 yield preserve_context_over_deferred(defer.gatherResults([
226 preserve_fn(claim_client_keys)(destination)
227 for destination in remote_queries
228 ]))
229
230 defer.returnValue({
231 "one_time_keys": json_result,
232 "failures": failures
233 })
234
235 @defer.inlineCallbacks
236 def upload_keys_for_user(self, user_id, device_id, keys):
237 time_now = self.clock.time_msec()
238
239 # TODO: Validate the JSON to make sure it has the right keys.
240 device_keys = keys.get("device_keys", None)
241 if device_keys:
242 logger.info(
243 "Updating device_keys for device %r for user %s at %d",
244 device_id, user_id, time_now
245 )
246 # TODO: Sign the JSON with the server key
247 yield self.store.set_e2e_device_keys(
248 user_id, device_id, time_now,
249 encode_canonical_json(device_keys)
250 )
251
252 one_time_keys = keys.get("one_time_keys", None)
253 if one_time_keys:
254 logger.info(
255 "Adding %d one_time_keys for device %r for user %r at %d",
256 len(one_time_keys), device_id, user_id, time_now
257 )
258 key_list = []
259 for key_id, key_json in one_time_keys.items():
260 algorithm, key_id = key_id.split(":")
261 key_list.append((
262 algorithm, key_id, encode_canonical_json(key_json)
263 ))
264
265 yield self.store.add_e2e_one_time_keys(
266 user_id, device_id, time_now, key_list
267 )
268
269 # the device should have been registered already, but it may have been
270 # deleted due to a race with a DELETE request. Or we may be using an
271 # old access_token without an associated device_id. Either way, we
272 # need to double-check the device is registered to avoid ending up with
273 # keys without a corresponding device.
274 self.device_handler.check_device_registered(user_id, device_id)
275
276 result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
277
278 defer.returnValue({"one_time_key_counts": result})
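
The remote query and claim paths above now report per-destination failures
alongside any results. A made-up example of what query_devices returns when
one remote homeserver is not ready for retry; the user IDs, device IDs and
key material are placeholders:

# Illustrative return value of query_devices.
example_result = {
    "device_keys": {
        "@alice:example.com": {
            "ALICEDEVICE": {
                "user_id": "@alice:example.com",
                "device_id": "ALICEDEVICE",
                "algorithms": ["m.olm.v1.curve25519-aes-sha2"],
                "keys": {"curve25519:ALICEDEVICE": "<base64 key>"},
                "signatures": {},
            },
        },
    },
    "failures": {
        "down.example.com": {"status": 503, "message": "Not ready for retry"},
    },
}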
831831
832832 new_pdu = event
833833
834 message_handler = self.hs.get_handlers().message_handler
835 destinations = yield message_handler.get_joined_hosts_for_room_from_state(
836 context
837 )
838 destinations = set(destinations)
834 users_in_room = yield self.store.get_joined_users_from_context(event, context)
835
836 destinations = set(
837 get_domain_from_id(user_id) for user_id in users_in_room
838 if not self.hs.is_mine_id(user_id)
839 )
840
839841 destinations.discard(origin)
840842
841843 logger.debug(
10541056
10551057 new_pdu = event
10561058
1057 message_handler = self.hs.get_handlers().message_handler
1058 destinations = yield message_handler.get_joined_hosts_for_room_from_state(
1059 context
1060 )
1061 destinations = set(destinations)
1059 users_in_room = yield self.store.get_joined_users_from_context(event, context)
1060
1061 destinations = set(
1062 get_domain_from_id(user_id) for user_id in users_in_room
1063 if not self.hs.is_mine_id(user_id)
1064 )
10621065 destinations.discard(origin)
10631066
10641067 logger.debug(
15811584 current_state = set(e.event_id for e in auth_events.values())
15821585 different_auth = event_auth_events - current_state
15831586
1587 context.current_state_ids = dict(context.current_state_ids)
15841588 context.current_state_ids.update({
15851589 k: a.event_id for k, a in auth_events.items()
15861590 if k != event_key
15871591 })
1592 context.prev_state_ids = dict(context.prev_state_ids)
15881593 context.prev_state_ids.update({
15891594 k: a.event_id for k, a in auth_events.items()
15901595 })
16661671 # 4. Look at rejects and their proofs.
16671672 # TODO.
16681673
1674 context.current_state_ids = dict(context.current_state_ids)
16691675 context.current_state_ids.update({
16701676 k: a.event_id for k, a in auth_events.items()
16711677 if k != event_key
16721678 })
1679 context.prev_state_ids = dict(context.prev_state_ids)
16731680 context.prev_state_ids.update({
16741681 k: a.event_id for k, a in auth_events.items()
16751682 })
2929 from synapse.util.caches.snapshot_cache import SnapshotCache
3030 from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred
3131 from synapse.util.metrics import measure_func
32 from synapse.util.caches.descriptors import cachedInlineCallbacks
3332 from synapse.visibility import filter_events_for_client
3433
3534 from ._base import BaseHandler
944943 event_stream_id, max_stream_id
945944 )
946945
947 destinations = yield self.get_joined_hosts_for_room_from_state(context)
946 users_in_room = yield self.store.get_joined_users_from_context(event, context)
947
948 destinations = [
949 get_domain_from_id(user_id) for user_id in users_in_room
950 if not self.hs.is_mine_id(user_id)
951 ]
948952
949953 @defer.inlineCallbacks
950954 def _notify():
962966 preserve_fn(federation_handler.handle_new_event)(
963967 event, destinations=destinations,
964968 )
965
966 def get_joined_hosts_for_room_from_state(self, context):
967 state_group = context.state_group
968 if not state_group:
969 # If state_group is None it means it has yet to be assigned a
970 # state group, i.e. we need to make sure that calls with a state_group
971 # of None don't hit previous cached calls with a None state_group.
972 # To do this we set the state_group to a new object as object() != object()
973 state_group = object()
974
975 return self._get_joined_hosts_for_room_from_state(
976 state_group, context.current_state_ids
977 )
978
979 @cachedInlineCallbacks(num_args=1, cache_context=True)
980 def _get_joined_hosts_for_room_from_state(self, state_group, current_state_ids,
981 cache_context):
982
983 # Don't bother getting state for people on the same HS
984 current_state = yield self.store.get_events([
985 e_id for key, e_id in current_state_ids.items()
986 if key[0] == EventTypes.Member and not self.hs.is_mine_id(key[1])
987 ])
988
989 destinations = set()
990 for e in current_state.itervalues():
991 try:
992 if e.type == EventTypes.Member:
993 if e.content["membership"] == Membership.JOIN:
994 destinations.add(get_domain_from_id(e.state_key))
995 except SynapseError:
996 logger.warn(
997 "Failed to get destination from event %s", e.event_id
998 )
999
1000 defer.returnValue(destinations)
5050 bump_active_time_counter = metrics.register_counter("bump_active_time")
5151
5252 get_updates_counter = metrics.register_counter("get_updates", labels=["type"])
53
54 notify_reason_counter = metrics.register_counter("notify_reason", labels=["reason"])
55 state_transition_counter = metrics.register_counter(
56 "state_transition", labels=["from", "to"]
57 )
5358
5459
5560 # If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
211216 is some spurious presence changes that will self-correct.
212217 """
213218 logger.info(
214 "Performing _on_shutdown. Persiting %d unpersisted changes",
219 "Performing _on_shutdown. Persisting %d unpersisted changes",
215220 len(self.user_to_current_state)
216221 )
217222
228233 may stack up and slow down shutdown times.
229234 """
230235 logger.info(
231 "Performing _persist_unpersisted_changes. Persiting %d unpersisted changes",
236 "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes",
232237 len(self.unpersisted_users_changes)
233238 )
234239
258263
259264 to_notify = {} # Changes we want to notify everyone about
260265 to_federation_ping = {} # These need sending keep-alives
266
267 # Only bother handling the last presence change for each user
268 new_states_dict = {}
269 for new_state in new_states:
270 new_states_dict[new_state.user_id] = new_state
271 new_states = new_states_dict.values()
261272
262273 for new_state in new_states:
263274 user_id = new_state.user_id
613624 Args:
614625 hosts_to_states (dict): Mapping `server_name` -> `[UserPresenceState]`
615626 """
616 now = self.clock.time_msec()
617627 for host, states in hosts_to_states.items():
618 self.federation.send_edu(
619 destination=host,
620 edu_type="m.presence",
621 content={
622 "push": [
623 _format_user_presence_state(state, now)
624 for state in states
625 ]
626 }
627 )
628 self.federation.send_presence(host, states)
628629
629630 @defer.inlineCallbacks
630631 def incoming_presence(self, origin, content):
645646 )
646647 continue
647648
649 if get_domain_from_id(user_id) != origin:
650 logger.info(
651 "Got presence update from %r with bad 'user_id': %r",
652 origin, user_id,
653 )
654 continue
655
648656 presence_state = push.get("presence", None)
649657 if not presence_state:
650658 logger.info(
704712 defer.returnValue([
705713 {
706714 "type": "m.presence",
707 "content": _format_user_presence_state(state, now),
715 "content": format_user_presence_state(state, now),
708716 }
709717 for state in updates
710718 ])
711719 else:
712720 defer.returnValue([
713 _format_user_presence_state(state, now) for state in updates
721 format_user_presence_state(state, now) for state in updates
714722 ])
715723
716724 @defer.inlineCallbacks
938946 def should_notify(old_state, new_state):
939947 """Decides if a presence state change should be sent to interested parties.
940948 """
949 if old_state == new_state:
950 return False
951
941952 if old_state.status_msg != new_state.status_msg:
953 notify_reason_counter.inc("status_msg_change")
942954 return True
943955
956 if old_state.state != new_state.state:
957 notify_reason_counter.inc("state_change")
958 state_transition_counter.inc(old_state.state, new_state.state)
959 return True
960
944961 if old_state.state == PresenceState.ONLINE:
945 if new_state.state != PresenceState.ONLINE:
946 # Always notify for online -> anything
947 return True
948
949962 if new_state.currently_active != old_state.currently_active:
963 notify_reason_counter.inc("current_active_change")
950964 return True
951965
952966 if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
953967 # Only notify about last active bumps if we're not currently active
954 if not (old_state.currently_active and new_state.currently_active):
968 if not new_state.currently_active:
969 notify_reason_counter.inc("last_active_change_online")
955970 return True
956971
957972 elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
958973 # Always notify for a transition where last active gets bumped.
974 notify_reason_counter.inc("last_active_change_not_online")
959975 return True
960976
961 if old_state.state != new_state.state:
962 return True
963
964977 return False
965978
966979
967 def _format_user_presence_state(state, now):
980 def format_user_presence_state(state, now):
968981 """Convert UserPresenceState to a format that can be sent down to clients
969982 and to other servers.
970983 """
10771090 defer.returnValue(([
10781091 {
10791092 "type": "m.presence",
1080 "content": _format_user_presence_state(s, now),
1093 "content": format_user_presence_state(s, now),
10811094 }
10821095 for s in updates.values()
10831096 if include_offline or s.state != PresenceState.OFFLINE
155155 }
156156 },
157157 },
158 key=(room_id, receipt_type, user_id),
158159 )
159160
160161 @defer.inlineCallbacks
1919
2020 from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
2121 from synapse.api.constants import (
22 EventTypes, JoinRules, RoomCreationPreset, Membership,
22 EventTypes, JoinRules, RoomCreationPreset
2323 )
2424 from synapse.api.errors import AuthError, StoreError, SynapseError
2525 from synapse.util import stringutils
26 from synapse.util.async import concurrently_execute
27 from synapse.util.caches.response_cache import ResponseCache
2826 from synapse.visibility import filter_events_for_client
2927
3028 from collections import OrderedDict
3432 import string
3533
3634 logger = logging.getLogger(__name__)
37
38 REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
3935
4036 id_server_scheme = "https://"
4137
195191 },
196192 ratelimit=False)
197193
194 content = {}
195 is_direct = config.get("is_direct", None)
196 if is_direct:
197 content["is_direct"] = is_direct
198
198199 for invitee in invite_list:
199200 yield room_member_handler.update_membership(
200201 requester,
202203 room_id,
203204 "invite",
204205 ratelimit=False,
206 content=content,
205207 )
206208
207209 for invite_3pid in invite_3pid_list:
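
A minimal, hypothetical sketch (not part of the diff) of the /createRoom request this change supports; when the client supplies is_direct, it is copied into the content of each invite membership event sent above:

    # Hypothetical request body for POST /_matrix/client/r0/createRoom
    create_room_body = {
        "preset": "trusted_private_chat",
        "invite": ["@friend:example.org"],   # illustrative user id
        "is_direct": True,                   # forwarded into invite content
    }
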
341343 )
342344
343345
344 class RoomListHandler(BaseHandler):
345 def __init__(self, hs):
346 super(RoomListHandler, self).__init__(hs)
347 self.response_cache = ResponseCache(hs)
348 self.remote_list_request_cache = ResponseCache(hs)
349 self.remote_list_cache = {}
350 self.fetch_looping_call = hs.get_clock().looping_call(
351 self.fetch_all_remote_lists, REMOTE_ROOM_LIST_POLL_INTERVAL
352 )
353 self.fetch_all_remote_lists()
354
355 def get_local_public_room_list(self):
356 result = self.response_cache.get(())
357 if not result:
358 result = self.response_cache.set((), self._get_public_room_list())
359 return result
360
361 @defer.inlineCallbacks
362 def _get_public_room_list(self):
363 room_ids = yield self.store.get_public_room_ids()
364
365 results = []
366
367 @defer.inlineCallbacks
368 def handle_room(room_id):
369 current_state = yield self.state_handler.get_current_state(room_id)
370
371 # Double check that this is actually a public room.
372 join_rules_event = current_state.get((EventTypes.JoinRules, ""))
373 if join_rules_event:
374 join_rule = join_rules_event.content.get("join_rule", None)
375 if join_rule and join_rule != JoinRules.PUBLIC:
376 defer.returnValue(None)
377
378 result = {"room_id": room_id}
379
380 num_joined_users = len([
381 1 for _, event in current_state.items()
382 if event.type == EventTypes.Member
383 and event.membership == Membership.JOIN
384 ])
385 if num_joined_users == 0:
386 return
387
388 result["num_joined_members"] = num_joined_users
389
390 aliases = yield self.store.get_aliases_for_room(room_id)
391 if aliases:
392 result["aliases"] = aliases
393
394 name_event = yield current_state.get((EventTypes.Name, ""))
395 if name_event:
396 name = name_event.content.get("name", None)
397 if name:
398 result["name"] = name
399
400 topic_event = current_state.get((EventTypes.Topic, ""))
401 if topic_event:
402 topic = topic_event.content.get("topic", None)
403 if topic:
404 result["topic"] = topic
405
406 canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
407 if canonical_event:
408 canonical_alias = canonical_event.content.get("alias", None)
409 if canonical_alias:
410 result["canonical_alias"] = canonical_alias
411
412 visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
413 visibility = None
414 if visibility_event:
415 visibility = visibility_event.content.get("history_visibility", None)
416 result["world_readable"] = visibility == "world_readable"
417
418 guest_event = current_state.get((EventTypes.GuestAccess, ""))
419 guest = None
420 if guest_event:
421 guest = guest_event.content.get("guest_access", None)
422 result["guest_can_join"] = guest == "can_join"
423
424 avatar_event = current_state.get(("m.room.avatar", ""))
425 if avatar_event:
426 avatar_url = avatar_event.content.get("url", None)
427 if avatar_url:
428 result["avatar_url"] = avatar_url
429
430 results.append(result)
431
432 yield concurrently_execute(handle_room, room_ids, 10)
433
434 # FIXME (erikj): START is no longer a valid value
435 defer.returnValue({"start": "START", "end": "END", "chunk": results})
436
437 @defer.inlineCallbacks
438 def fetch_all_remote_lists(self):
439 deferred = self.hs.get_replication_layer().get_public_rooms(
440 self.hs.config.secondary_directory_servers
441 )
442 self.remote_list_request_cache.set((), deferred)
443 self.remote_list_cache = yield deferred
444
445 @defer.inlineCallbacks
446 def get_aggregated_public_room_list(self):
447 """
448 Get the public room list from this server and the servers
449 specified in the secondary_directory_servers config option.
450 XXX: Pagination...
451 """
452 # We return the results from our cache, which is updated by a looping call,
453 # unless we're missing a cache entry, in which case wait for the result
454 # of the fetch if there's one in progress. If not, omit that server.
455 wait = False
456 for s in self.hs.config.secondary_directory_servers:
457 if s not in self.remote_list_cache:
458 logger.warn("No cached room list from %s: waiting for fetch", s)
459 wait = True
460 break
461
462 if wait and self.remote_list_request_cache.get(()):
463 yield self.remote_list_request_cache.get(())
464
465 public_rooms = yield self.get_local_public_room_list()
466
467 # keep track of which room IDs we've seen so we can de-dup
468 room_ids = set()
469
470 # tag all the ones in our list with our server name.
471 # Also add them to the de-duping set
472 for room in public_rooms['chunk']:
473 room["server_name"] = self.hs.hostname
474 room_ids.add(room["room_id"])
475
476 # Now add the results from federation
477 for server_name, server_result in self.remote_list_cache.items():
478 for room in server_result["chunk"]:
479 if room["room_id"] not in room_ids:
480 room["server_name"] = server_name
481 public_rooms["chunk"].append(room)
482 room_ids.add(room["room_id"])
483
484 defer.returnValue(public_rooms)
485
486
487346 class RoomContextHandler(BaseHandler):
488347 @defer.inlineCallbacks
489348 def get_event_context(self, user, room_id, event_id, limit, is_guest):
0 # -*- coding: utf-8 -*-
1 # Copyright 2014 - 2016 OpenMarket Ltd
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from twisted.internet import defer
16
17 from ._base import BaseHandler
18
19 from synapse.api.constants import (
20 EventTypes, JoinRules,
21 )
22 from synapse.util.async import concurrently_execute
23 from synapse.util.caches.response_cache import ResponseCache
24
25 from collections import namedtuple
26 from unpaddedbase64 import encode_base64, decode_base64
27
28 import logging
29 import msgpack
30
31 logger = logging.getLogger(__name__)
32
33 REMOTE_ROOM_LIST_POLL_INTERVAL = 60 * 1000
34
35
36 class RoomListHandler(BaseHandler):
37 def __init__(self, hs):
38 super(RoomListHandler, self).__init__(hs)
39 self.response_cache = ResponseCache(hs)
40 self.remote_response_cache = ResponseCache(hs, timeout_ms=30 * 1000)
41
42 def get_local_public_room_list(self, limit=None, since_token=None,
43 search_filter=None):
44 if search_filter:
45 # We explicitly don't bother caching searches.
46 return self._get_public_room_list(limit, since_token, search_filter)
47
48 result = self.response_cache.get((limit, since_token))
49 if not result:
50 result = self.response_cache.set(
51 (limit, since_token),
52 self._get_public_room_list(limit, since_token)
53 )
54 return result
55
56 @defer.inlineCallbacks
57 def _get_public_room_list(self, limit=None, since_token=None,
58 search_filter=None):
59 if since_token and since_token != "END":
60 since_token = RoomListNextBatch.from_token(since_token)
61 else:
62 since_token = None
63
64 rooms_to_order_value = {}
65 rooms_to_num_joined = {}
66 rooms_to_latest_event_ids = {}
67
68 newly_visible = []
69 newly_unpublished = []
70 if since_token:
71 stream_token = since_token.stream_ordering
72 current_public_id = yield self.store.get_current_public_room_stream_id()
73 public_room_stream_id = since_token.public_room_stream_id
74 newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
75 public_room_stream_id, current_public_id
76 )
77 else:
78 stream_token = yield self.store.get_room_max_stream_ordering()
79 public_room_stream_id = yield self.store.get_current_public_room_stream_id()
80
81 room_ids = yield self.store.get_public_room_ids_at_stream_id(
82 public_room_stream_id
83 )
84
85 # We want to return rooms in a particular order: the number of joined
86 # users. We then arbitrarily use the room_id as a tie breaker.
87
88 @defer.inlineCallbacks
89 def get_order_for_room(room_id):
90 latest_event_ids = rooms_to_latest_event_ids.get(room_id, None)
91 if not latest_event_ids:
92 latest_event_ids = yield self.store.get_forward_extremeties_for_room(
93 room_id, stream_token
94 )
95 rooms_to_latest_event_ids[room_id] = latest_event_ids
96
97 if not latest_event_ids:
98 return
99
100 joined_users = yield self.state_handler.get_current_user_in_room(
101 room_id, latest_event_ids,
102 )
103 num_joined_users = len(joined_users)
104 rooms_to_num_joined[room_id] = num_joined_users
105
106 if num_joined_users == 0:
107 return
108
109 # We want larger rooms to be first, hence negating num_joined_users
110 rooms_to_order_value[room_id] = (-num_joined_users, room_id)
111
112 yield concurrently_execute(get_order_for_room, room_ids, 10)
113
114 sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
115 sorted_rooms = [room_id for room_id, _ in sorted_entries]
116
117 # `sorted_rooms` should now be a list of all public room ids that is
118 # stable across pagination. Therefore, we can use indices into this
119 # list as our pagination tokens.
120
121 # Filter out rooms that we don't want to return
122 rooms_to_scan = [
123 r for r in sorted_rooms
124 if r not in newly_unpublished and rooms_to_num_joined[r] > 0
125 ]
126
127 if since_token:
128 # Filter out rooms we've already returned previously
129 # `since_token.current_limit` is the index of the last room we
130 # sent down, so we exclude it and everything before/after it.
131 if since_token.direction_is_forward:
132 rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
133 else:
134 rooms_to_scan = rooms_to_scan[:since_token.current_limit]
135 rooms_to_scan.reverse()
136
137 # Actually generate the entries. _generate_room_entry will append to
138 # chunk but will stop if len(chunk) > limit
139 chunk = []
140 if limit and not search_filter:
141 step = limit + 1
142 for i in xrange(0, len(rooms_to_scan), step):
142 # We iterate here because in the vast majority of cases we'll stop
143 # at first iteration, but occasionally _generate_room_entry
145 # won't append to the chunk and so we need to loop again.
146 # We don't want to scan over the entire range either as that
147 # would potentially waste a lot of work.
148 yield concurrently_execute(
149 lambda r: self._generate_room_entry(
150 r, rooms_to_num_joined[r],
151 chunk, limit, search_filter
152 ),
153 rooms_to_scan[i:i + step], 10
154 )
155 if len(chunk) >= limit + 1:
156 break
157 else:
158 yield concurrently_execute(
159 lambda r: self._generate_room_entry(
160 r, rooms_to_num_joined[r],
161 chunk, limit, search_filter
162 ),
163 rooms_to_scan, 5
164 )
165
166 chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))
167
168 # Work out the new limit of the batch for pagination, or None if we
169 # know there are no more results that would be returned.
170 # i.e., [since_token.current_limit..new_limit] is the batch of rooms
171 # we've returned (or the reverse if we paginated backwards)
172 # We tried to pull out limit + 1 rooms above, so if we have <= limit
173 # then we know there are no more results to return
174 new_limit = None
175 if chunk and (not limit or len(chunk) > limit):
176
177 if not since_token or since_token.direction_is_forward:
178 if limit:
179 chunk = chunk[:limit]
180 last_room_id = chunk[-1]["room_id"]
181 else:
182 if limit:
183 chunk = chunk[-limit:]
184 last_room_id = chunk[0]["room_id"]
185
186 new_limit = sorted_rooms.index(last_room_id)
187
188 results = {
189 "chunk": chunk,
190 }
191
192 if since_token:
193 results["new_rooms"] = bool(newly_visible)
194
195 if not since_token or since_token.direction_is_forward:
196 if new_limit is not None:
197 results["next_batch"] = RoomListNextBatch(
198 stream_ordering=stream_token,
199 public_room_stream_id=public_room_stream_id,
200 current_limit=new_limit,
201 direction_is_forward=True,
202 ).to_token()
203
204 if since_token:
205 results["prev_batch"] = since_token.copy_and_replace(
206 direction_is_forward=False,
207 current_limit=since_token.current_limit + 1,
208 ).to_token()
209 else:
210 if new_limit is not None:
211 results["prev_batch"] = RoomListNextBatch(
212 stream_ordering=stream_token,
213 public_room_stream_id=public_room_stream_id,
214 current_limit=new_limit,
215 direction_is_forward=False,
216 ).to_token()
217
218 if since_token:
219 results["next_batch"] = since_token.copy_and_replace(
220 direction_is_forward=True,
221 current_limit=since_token.current_limit - 1,
222 ).to_token()
223
224 defer.returnValue(results)
225
226 @defer.inlineCallbacks
227 def _generate_room_entry(self, room_id, num_joined_users, chunk, limit,
228 search_filter):
229 if limit and len(chunk) > limit + 1:
230 # We've already got enough, so let's just drop it.
231 return
232
233 result = {
234 "room_id": room_id,
235 "num_joined_members": num_joined_users,
236 }
237
238 current_state_ids = yield self.state_handler.get_current_state_ids(room_id)
239
240 event_map = yield self.store.get_events([
241 event_id for key, event_id in current_state_ids.items()
242 if key[0] in (
243 EventTypes.JoinRules,
244 EventTypes.Name,
245 EventTypes.Topic,
246 EventTypes.CanonicalAlias,
247 EventTypes.RoomHistoryVisibility,
248 EventTypes.GuestAccess,
249 "m.room.avatar",
250 )
251 ])
252
253 current_state = {
254 (ev.type, ev.state_key): ev
255 for ev in event_map.values()
256 }
257
258 # Double check that this is actually a public room.
259 join_rules_event = current_state.get((EventTypes.JoinRules, ""))
260 if join_rules_event:
261 join_rule = join_rules_event.content.get("join_rule", None)
262 if join_rule and join_rule != JoinRules.PUBLIC:
263 defer.returnValue(None)
264
265 aliases = yield self.store.get_aliases_for_room(room_id)
266 if aliases:
267 result["aliases"] = aliases
268
269 name_event = current_state.get((EventTypes.Name, ""))
270 if name_event:
271 name = name_event.content.get("name", None)
272 if name:
273 result["name"] = name
274
275 topic_event = current_state.get((EventTypes.Topic, ""))
276 if topic_event:
277 topic = topic_event.content.get("topic", None)
278 if topic:
279 result["topic"] = topic
280
281 canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
282 if canonical_event:
283 canonical_alias = canonical_event.content.get("alias", None)
284 if canonical_alias:
285 result["canonical_alias"] = canonical_alias
286
287 visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
288 visibility = None
289 if visibility_event:
290 visibility = visibility_event.content.get("history_visibility", None)
291 result["world_readable"] = visibility == "world_readable"
292
293 guest_event = current_state.get((EventTypes.GuestAccess, ""))
294 guest = None
295 if guest_event:
296 guest = guest_event.content.get("guest_access", None)
297 result["guest_can_join"] = guest == "can_join"
298
299 avatar_event = current_state.get(("m.room.avatar", ""))
300 if avatar_event:
301 avatar_url = avatar_event.content.get("url", None)
302 if avatar_url:
303 result["avatar_url"] = avatar_url
304
305 if _matches_room_entry(result, search_filter):
306 chunk.append(result)
307
308 @defer.inlineCallbacks
309 def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
310 search_filter=None):
311 if search_filter:
312 # We currently don't support searching across federation, so we have
313 # to do it manually without pagination
314 limit = None
315 since_token = None
316
317 res = yield self._get_remote_list_cached(
318 server_name, limit=limit, since_token=since_token,
319 )
320
321 if search_filter:
322 res = {"chunk": [
323 entry
324 for entry in list(res.get("chunk", []))
325 if _matches_room_entry(entry, search_filter)
326 ]}
327
328 defer.returnValue(res)
329
330 def _get_remote_list_cached(self, server_name, limit=None, since_token=None,
331 search_filter=None):
332 repl_layer = self.hs.get_replication_layer()
333 if search_filter:
334 # We can't cache when asking for search
335 return repl_layer.get_public_rooms(
336 server_name, limit=limit, since_token=since_token,
337 search_filter=search_filter,
338 )
339
340 result = self.remote_response_cache.get((server_name, limit, since_token))
341 if not result:
342 result = self.remote_response_cache.set(
343 (server_name, limit, since_token),
344 repl_layer.get_public_rooms(
345 server_name, limit=limit, since_token=since_token,
346 search_filter=search_filter,
347 )
348 )
349 return result
350
351
352 class RoomListNextBatch(namedtuple("RoomListNextBatch", (
353 "stream_ordering", # stream_ordering of the first public room list
354 "public_room_stream_id", # public room stream id for first public room list
355 "current_limit", # The number of previous rooms returned
356 "direction_is_forward", # Bool if this is a next_batch, false if prev_batch
357 ))):
358
359 KEY_DICT = {
360 "stream_ordering": "s",
361 "public_room_stream_id": "p",
362 "current_limit": "n",
363 "direction_is_forward": "d",
364 }
365
366 REVERSE_KEY_DICT = {v: k for k, v in KEY_DICT.items()}
367
368 @classmethod
369 def from_token(cls, token):
370 return RoomListNextBatch(**{
371 cls.REVERSE_KEY_DICT[key]: val
372 for key, val in msgpack.loads(decode_base64(token)).items()
373 })
374
375 def to_token(self):
376 return encode_base64(msgpack.dumps({
377 self.KEY_DICT[key]: val
378 for key, val in self._asdict().items()
379 }))
380
381 def copy_and_replace(self, **kwds):
382 return self._replace(
383 **kwds
384 )
385
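
As a rough illustration (assumptions noted in the comments, not part of the diff), a pagination token is just the namedtuple's fields renamed via KEY_DICT, msgpack-encoded and wrapped in unpadded base64, so under Python 2 it round-trips like this:

    # Sketch of the RoomListNextBatch token encoding.
    import msgpack
    from unpaddedbase64 import encode_base64, decode_base64

    key_dict = {"stream_ordering": "s", "public_room_stream_id": "p",
                "current_limit": "n", "direction_is_forward": "d"}
    reverse_key_dict = {v: k for k, v in key_dict.items()}

    batch = {"stream_ordering": 1000, "public_room_stream_id": 5,
             "current_limit": 20, "direction_is_forward": True}
    token = encode_base64(msgpack.dumps({key_dict[k]: v for k, v in batch.items()}))
    decoded = {reverse_key_dict[k]: v
               for k, v in msgpack.loads(decode_base64(token)).items()}
    assert decoded == batch
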
386
387 def _matches_room_entry(room_entry, search_filter):
388 if search_filter and search_filter.get("generic_search_term", None):
389 generic_search_term = search_filter["generic_search_term"].upper()
390 if generic_search_term in room_entry.get("name", "").upper():
391 return True
392 elif generic_search_term in room_entry.get("topic", "").upper():
393 return True
394 elif generic_search_term in room_entry.get("canonical_alias", "").upper():
395 return True
396 else:
397 return True
398
399 return False
186186 "user_id": user_id,
187187 "typing": typing,
188188 },
189 key=(room_id, user_id),
189190 ))
190191
191192 yield preserve_context_over_deferred(
198199 user_id = content["user_id"]
199200
200201 # Check that the string is a valid user id
201 UserID.from_string(user_id)
202 user = UserID.from_string(user_id)
203
204 if user.domain != origin:
205 logger.info(
206 "Got typing update from %r with bad 'user_id': %r",
207 origin, user_id,
208 )
209 return
202210
203211 users = yield self.state.get_current_user_in_room(room_id)
204212 domains = set(get_domain_from_id(u) for u in users)
245245
246246 @defer.inlineCallbacks
247247 def put_json(self, destination, path, data={}, json_data_callback=None,
248 long_retries=False):
248 long_retries=False, timeout=None):
249249 """ Sends the specifed json data using PUT
250250
251251 Args:
258258 use as the request body.
259259 long_retries (bool): A boolean that indicates whether we should
260260 retry for a short or long time.
261 timeout (int): How long (in ms) to try the destination for before
262 giving up. None indicates no timeout.
261263
262264 Returns:
263265 Deferred: Succeeds when we get a 2xx HTTP response. The result
284286 body_callback=body_callback,
285287 headers_dict={"Content-Type": ["application/json"]},
286288 long_retries=long_retries,
289 timeout=timeout,
287290 )
288291
289292 if 200 <= response.code < 300:
299302 defer.returnValue(json.loads(body))
300303
301304 @defer.inlineCallbacks
302 def post_json(self, destination, path, data={}, long_retries=True):
305 def post_json(self, destination, path, data={}, long_retries=True,
306 timeout=None):
303307 """ Sends the specifed json data using POST
304308
305309 Args:
310314 the request body. This will be encoded as JSON.
311315 long_retries (bool): A boolean that indicates whether we should
312316 retry for a short or long time.
317 timeout (int): How long (in ms) to try the destination for before
318 giving up. None indicates no timeout.
313319
314320 Returns:
315321 Deferred: Succeeds when we get a 2xx HTTP response. The result
330336 body_callback=body_callback,
331337 headers_dict={"Content-Type": ["application/json"]},
332338 long_retries=True,
339 timeout=timeout,
333340 )
334341
335342 if 200 <= response.code < 300:
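
A hedged usage sketch (hypothetical call site, not from the diff): with the new parameter, a caller inside an @defer.inlineCallbacks method can bound how long a destination is tried:

    # Give up on the remote after 10 seconds rather than retrying indefinitely.
    result = yield self.client.post_json(
        destination="remote.example.org",               # illustrative destination
        path="/_matrix/federation/v1/user/keys/claim",  # illustrative path
        data={"one_time_keys": query},
        timeout=10 * 1000,                              # milliseconds
    )
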
4040 SynapseError: if the parameter is absent and required, or if the
4141 parameter is present and not an integer.
4242 """
43 if name in request.args:
43 return parse_integer_from_args(request.args, name, default, required)
44
45
46 def parse_integer_from_args(args, name, default=None, required=False):
47 if name in args:
4448 try:
45 return int(request.args[name][0])
49 return int(args[name][0])
4650 except:
4751 message = "Query parameter %r must be an integer" % (name,)
4852 raise SynapseError(400, message)
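
Illustrative only: the new *_from_args variants take a plain args dict (useful where only the parsed query string is to hand, e.g. on workers), while the original helpers keep taking the twisted Request:

    from synapse.http.servlet import parse_integer_from_args

    args = {"limit": ["20"]}
    limit = parse_integer_from_args(args, "limit", default=10)    # -> 20
    offset = parse_integer_from_args(args, "offset", default=0)   # -> 0 (absent)
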
115119 parameter is present, must be one of a list of allowed values and
116120 is not one of those allowed values.
117121 """
118
119 if name in request.args:
120 value = request.args[name][0]
122 return parse_string_from_args(
123 request.args, name, default, required, allowed_values, param_type,
124 )
125
126
127 def parse_string_from_args(args, name, default=None, required=False,
128 allowed_values=None, param_type="string"):
129 if name in args:
130 value = args[name][0]
121131 if allowed_values is not None and value not in allowed_values:
122132 message = "Query parameter %r must be one of [%s]" % (
123133 name, ", ".join(repr(v) for v in allowed_values)
262262 }
263263 ]
264264 },
265 # XXX: once m.direct is standardised everywhere, we should use it to detect
266 # a DM from the user's perspective rather than this heuristic.
265267 {
266268 'rule_id': 'global/underride/.m.rule.room_one_to_one',
267269 'conditions': [
288290 }
289291 ]
290292 },
293 # XXX: this is going to fire for events which aren't m.room.messages
294 # but are encrypted (e.g. m.call.*)...
295 {
296 'rule_id': 'global/underride/.m.rule.encrypted_room_one_to_one',
297 'conditions': [
298 {
299 'kind': 'room_member_count',
300 'is': '2',
301 '_id': 'member_count',
302 },
303 {
304 'kind': 'event_match',
305 'key': 'type',
306 'pattern': 'm.room.encrypted',
307 '_id': '_encrypted',
308 }
309 ],
310 'actions': [
311 'notify',
312 {
313 'set_tweak': 'sound',
314 'value': 'default'
315 }, {
316 'set_tweak': 'highlight',
317 'value': False
318 }
319 ]
320 },
291321 {
292322 'rule_id': 'global/underride/.m.rule.message',
293323 'conditions': [
296326 'key': 'type',
297327 'pattern': 'm.room.message',
298328 '_id': '_message',
329 }
330 ],
331 'actions': [
332 'notify', {
333 'set_tweak': 'highlight',
334 'value': False
335 }
336 ]
337 },
338 # XXX: this is going to fire for events which aren't m.room.messages
339 # but are encrypted (e.g. m.call.*)...
340 {
341 'rule_id': 'global/underride/.m.rule.encrypted',
342 'conditions': [
343 {
344 'kind': 'event_match',
345 'key': 'type',
346 'pattern': 'm.room.encrypted',
347 '_id': '_encrypted',
299348 }
300349 ],
301350 'actions': [
2626
2727
2828 @defer.inlineCallbacks
29 def _get_rules(room_id, user_ids, store):
30 rules_by_user = yield store.bulk_get_push_rules(user_ids)
31
32 rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None}
33
34 defer.returnValue(rules_by_user)
35
36
37 @defer.inlineCallbacks
3829 def evaluator_for_event(event, hs, store, context):
3930 rules_by_user = yield store.bulk_get_push_rules_for_room(
4031 event, context
4738 if invited_user and hs.is_mine_id(invited_user):
4839 has_pusher = yield store.user_has_pusher(invited_user)
4940 if has_pusher:
41 rules_by_user = dict(rules_by_user)
5042 rules_by_user[invited_user] = yield store.get_push_rules_for_user(
5143 invited_user
5244 )
3535 "blist": ["blist"],
3636 "pysaml2>=3.0.0,<4.0.0": ["saml2>=3.0.0,<4.0.0"],
3737 "pymacaroons-pynacl": ["pymacaroons"],
38 "msgpack-python>=0.3.0": ["msgpack"],
3839 }
3940 CONDITIONAL_REQUIREMENTS = {
4041 "web_client": {
4141 ("pushers",),
4242 ("caches",),
4343 ("to_device",),
44 ("public_rooms",),
4445 )
4546
4647
130131 push_rules_token, room_stream_token = self.store.get_push_rules_stream_token()
131132 pushers_token = self.store.get_pushers_stream_token()
132133 caches_token = self.store.get_cache_stream_token()
134 public_rooms_token = self.store.get_current_public_room_stream_id()
133135
134136 defer.returnValue(_ReplicationToken(
135137 room_stream_token,
143145 0, # State stream is no longer a thing
144146 caches_token,
145147 int(stream_token.to_device_key),
148 int(public_rooms_token),
146149 ))
147150
148151 @request_handler()
180183 def replicate(self, request_streams, limit):
181184 writer = _Writer()
182185 current_token = yield self.current_replication_token()
183 logger.info("Replicating up to %r", current_token)
186 logger.debug("Replicating up to %r", current_token)
184187
185188 yield self.account_data(writer, current_token, limit, request_streams)
186189 yield self.events(writer, current_token, limit, request_streams)
192195 yield self.pushers(writer, current_token, limit, request_streams)
193196 yield self.caches(writer, current_token, limit, request_streams)
194197 yield self.to_device(writer, current_token, limit, request_streams)
198 yield self.public_rooms(writer, current_token, limit, request_streams)
195199 self.streams(writer, current_token, request_streams)
196200
197 logger.info("Replicated %d rows", writer.total)
201 logger.debug("Replicated %d rows", writer.total)
198202 defer.returnValue(writer.finish())
199203
200204 def streams(self, writer, current_token, request_streams):
273277
274278 @defer.inlineCallbacks
275279 def typing(self, writer, current_token, request_streams):
276 current_position = current_token.presence
280 current_position = current_token.typing
277281
278282 request_typing = request_streams.get("typing")
279283
280284 if request_typing is not None:
285 # If they have a higher token than current max, we can assume that
286 # they had been talking to a previous instance of the master. Since
287 # we reset the token on restart, the best (but hacky) thing we can
288 # do is to simply resend down all the typing notifications.
289 if request_typing > current_position:
290 request_typing = 0
291
281292 typing_rows = yield self.typing_handler.get_all_typing_updates(
282293 request_typing, current_position
283294 )
392403 "position", "user_id", "device_id", "message_json"
393404 ))
394405
406 @defer.inlineCallbacks
407 def public_rooms(self, writer, current_token, limit, request_streams):
408 current_position = current_token.public_rooms
409
410 public_rooms = request_streams.get("public_rooms")
411
412 if public_rooms is not None:
413 public_rooms_rows = yield self.store.get_all_new_public_rooms(
414 public_rooms, current_position, limit
415 )
416 writer.write_header_and_rows("public_rooms", public_rooms_rows, (
417 "position", "room_id", "visibility"
418 ))
419
395420
396421 class _Writer(object):
397422 """Writes the streams as a JSON object as the response to the request"""
420445
421446 class _ReplicationToken(collections.namedtuple("_ReplicationToken", (
422447 "events", "presence", "typing", "receipts", "account_data", "backfill",
423 "push_rules", "pushers", "state", "caches", "to_device",
448 "push_rules", "pushers", "state", "caches", "to_device", "public_rooms",
424449 ))):
425450 __slots__ = []
426451
1515 from ._base import BaseSlavedStore
1616 from ._slaved_id_tracker import SlavedIdTracker
1717 from synapse.storage import DataStore
18 from synapse.util.caches.stream_change_cache import StreamChangeCache
1819
1920
2021 class SlavedDeviceInboxStore(BaseSlavedStore):
2122 def __init__(self, db_conn, hs):
2223 super(SlavedDeviceInboxStore, self).__init__(db_conn, hs)
2324 self._device_inbox_id_gen = SlavedIdTracker(
24 db_conn, "device_inbox", "stream_id",
25 db_conn, "device_max_stream_id", "stream_id",
26 )
27 self._device_inbox_stream_cache = StreamChangeCache(
28 "DeviceInboxStreamChangeCache",
29 self._device_inbox_id_gen.get_current_token()
2530 )
2631
2732 get_to_device_stream_token = DataStore.get_to_device_stream_token.__func__
3742 stream = result.get("to_device")
3843 if stream:
3944 self._device_inbox_id_gen.advance(int(stream["position"]))
45 for row in stream["rows"]:
46 stream_id = row[0]
47 user_id = row[1]
48 self._device_inbox_stream_cache.entity_has_changed(
49 user_id, stream_id
50 )
4051
4152 return super(SlavedDeviceInboxStore, self).process_replication(result)
5959 self._membership_stream_cache = StreamChangeCache(
6060 "MembershipStreamChangeCache", events_max,
6161 )
62
63 self.stream_ordering_month_ago = 0
64 self._stream_order_on_start = self.get_room_max_stream_ordering()
6265
6366 # Cached functions can't be accessed through a class instance so we need
6467 # to reach inside the __dict__ to extract them.
8588 _get_state_groups_from_groups = (
8689 StateStore.__dict__["_get_state_groups_from_groups"]
8790 )
91 _get_state_groups_from_groups_txn = (
92 DataStore._get_state_groups_from_groups_txn.__func__
93 )
8894 _get_state_group_from_group = (
8995 StateStore.__dict__["_get_state_group_from_group"]
9096 )
164170 get_auth_chain_ids = DataStore.get_auth_chain_ids.__func__
165171 _get_auth_chain_ids_txn = DataStore._get_auth_chain_ids_txn.__func__
166172
173 get_room_max_stream_ordering = DataStore.get_room_max_stream_ordering.__func__
174
175 get_forward_extremeties_for_room = (
176 DataStore.get_forward_extremeties_for_room.__func__
177 )
178 _get_forward_extremeties_for_room = (
179 EventFederationStore.__dict__["_get_forward_extremeties_for_room"]
180 )
181
167182 def stream_positions(self):
168183 result = super(SlavedEventStore, self).stream_positions()
169184 result["events"] = self._stream_id_gen.get_current_token()
1414
1515 from ._base import BaseSlavedStore
1616 from synapse.storage import DataStore
17 from ._slaved_id_tracker import SlavedIdTracker
1718
1819
1920 class RoomStore(BaseSlavedStore):
21 def __init__(self, db_conn, hs):
22 super(RoomStore, self).__init__(db_conn, hs)
23 self._public_room_id_gen = SlavedIdTracker(
24 db_conn, "public_room_list_stream", "stream_id"
25 )
26
2027 get_public_room_ids = DataStore.get_public_room_ids.__func__
28 get_current_public_room_stream_id = (
29 DataStore.get_current_public_room_stream_id.__func__
30 )
31 get_public_room_ids_at_stream_id = (
32 DataStore.get_public_room_ids_at_stream_id.__func__
33 )
34 get_public_room_ids_at_stream_id_txn = (
35 DataStore.get_public_room_ids_at_stream_id_txn.__func__
36 )
37 get_published_at_stream_id_txn = (
38 DataStore.get_published_at_stream_id_txn.__func__
39 )
40 get_public_room_changes = DataStore.get_public_room_changes.__func__
41
42 def stream_positions(self):
43 result = super(RoomStore, self).stream_positions()
44 result["public_rooms"] = self._public_room_id_gen.get_current_token()
45 return result
46
47 def process_replication(self, result):
48 stream = result.get("public_rooms")
49 if stream:
50 self._public_room_id_gen.advance(int(stream["position"]))
51
52 return super(RoomStore, self).process_replication(result)
317317 service_param = urllib.urlencode({
318318 "service": "%s?%s" % (hs_redirect_url, client_redirect_url_param)
319319 })
320 request.redirect("%s?%s" % (self.cas_server_url, service_param))
320 request.redirect("%s/login?%s" % (self.cas_server_url, service_param))
321321 finish_request(request)
322322
323323
384384
385385 def parse_cas_response(self, cas_response_body):
386386 user = None
387 attributes = None
387 attributes = {}
388388 try:
389389 root = ET.fromstring(cas_response_body)
390390 if not root.tag.endswith("serviceResponse"):
394394 if child.tag.endswith("user"):
395395 user = child.text
396396 if child.tag.endswith("attributes"):
397 attributes = {}
398397 for attribute in child:
399398 # ElementTree library expands the namespace in
400399 # attribute tags to the full URL of the namespace.
406405 attributes[tag] = attribute.text
407406 if user is None:
408407 raise Exception("CAS response does not contain user")
409 if attributes is None:
410 raise Exception("CAS response does not contain attributes")
411408 except Exception:
412409 logger.error("Error parsing CAS response", exc_info=1)
413410 raise LoginError(401, "Invalid CAS response",
1414
1515 from twisted.internet import defer
1616
17 from synapse.api.errors import AuthError, Codes
17 from synapse.api.auth import get_access_token_from_request
1818
1919 from .base import ClientV1RestServlet, client_path_patterns
2020
3636
3737 @defer.inlineCallbacks
3838 def on_POST(self, request):
39 try:
40 access_token = request.args["access_token"][0]
41 except KeyError:
42 raise AuthError(
43 self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.",
44 errcode=Codes.MISSING_TOKEN
45 )
39 access_token = get_access_token_from_request(request)
4640 yield self.store.delete_access_token(access_token)
4741 defer.returnValue((200, {}))
4842
1717
1818 from synapse.api.errors import SynapseError, Codes
1919 from synapse.api.constants import LoginType
20 from synapse.api.auth import get_access_token_from_request
2021 from .base import ClientV1RestServlet, client_path_patterns
2122 import synapse.util.stringutils as stringutils
2223 from synapse.http.servlet import parse_json_object_from_request
295296
296297 @defer.inlineCallbacks
297298 def _do_app_service(self, request, register_json, session):
298 if "access_token" not in request.args:
299 raise SynapseError(400, "Expected application service token.")
299 as_token = get_access_token_from_request(request)
300
300301 if "user" not in register_json:
301302 raise SynapseError(400, "Expected 'user' key.")
302303
303 as_token = request.args["access_token"][0]
304304 user_localpart = register_json["user"].encode("utf-8")
305305
306306 handler = self.handlers.registration_handler
389389 def on_POST(self, request):
390390 user_json = parse_json_object_from_request(request)
391391
392 if "access_token" not in request.args:
393 raise SynapseError(400, "Expected application service token.")
394
392 access_token = get_access_token_from_request(request)
395393 app_service = yield self.store.get_app_service_by_token(
396 request.args["access_token"][0]
394 access_token
397395 )
398396 if not app_service:
399397 raise SynapseError(403, "Invalid application service token.")
2121 from synapse.api.constants import EventTypes, Membership
2222 from synapse.api.filtering import Filter
2323 from synapse.types import UserID, RoomID, RoomAlias
24 from synapse.events.utils import serialize_event
25 from synapse.http.servlet import parse_json_object_from_request
24 from synapse.events.utils import serialize_event, format_event_for_client_v2
25 from synapse.http.servlet import (
26 parse_json_object_from_request, parse_string, parse_integer
27 )
2628
2729 import logging
2830 import urllib
119121 @defer.inlineCallbacks
120122 def on_GET(self, request, room_id, event_type, state_key):
121123 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
124 format = parse_string(request, "format", default="content",
125 allowed_values=["content", "event"])
122126
123127 msg_handler = self.handlers.message_handler
124128 data = yield msg_handler.get_room_data(
133137 raise SynapseError(
134138 404, "Event not found.", errcode=Codes.NOT_FOUND
135139 )
136 defer.returnValue((200, data.get_dict()["content"]))
140
141 if format == "event":
142 event = format_event_for_client_v2(data.get_dict())
143 defer.returnValue((200, event))
144 elif format == "content":
145 defer.returnValue((200, data.get_dict()["content"]))
137146
138147 @defer.inlineCallbacks
139148 def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
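
A hedged example (illustrative paths and responses, not from the diff) of the new format parameter when fetching a single state event:

    # GET /_matrix/client/r0/rooms/{roomId}/state/m.room.name/?format=content
    #   -> {"name": "My Room"}                     (default, unchanged behaviour)
    # GET /_matrix/client/r0/rooms/{roomId}/state/m.room.name/?format=event
    #   -> the whole event formatted for client v2, e.g.
    #      {"type": "m.room.name", "state_key": "", "content": {"name": "My Room"}, ...}
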
294303
295304 @defer.inlineCallbacks
296305 def on_GET(self, request):
297 try:
298 yield self.auth.get_user_by_req(request)
299 except AuthError:
300 # This endpoint isn't authed, but it's useful to know who's hitting
301 # it if they *do* supply an access token
302 pass
306 server = parse_string(request, "server", default=None)
307
308 try:
309 yield self.auth.get_user_by_req(request, allow_guest=True)
310 except AuthError as e:
311 # We allow people to not be authed if they're just looking at our
312 # room list, but require auth when we proxy the request.
313 # In both cases we call the auth function, as that has the side
314 # effect of logging who issued this request if an access token was
315 # provided.
316 if server:
317 raise e
318 else:
319 pass
320
321 limit = parse_integer(request, "limit", 0)
322 since_token = parse_string(request, "since", None)
303323
304324 handler = self.hs.get_room_list_handler()
305 data = yield handler.get_aggregated_public_room_list()
325 if server:
326 data = yield handler.get_remote_public_room_list(
327 server,
328 limit=limit,
329 since_token=since_token,
330 )
331 else:
332 data = yield handler.get_local_public_room_list(
333 limit=limit,
334 since_token=since_token,
335 )
336
337 defer.returnValue((200, data))
338
339 @defer.inlineCallbacks
340 def on_POST(self, request):
341 yield self.auth.get_user_by_req(request, allow_guest=True)
342
343 server = parse_string(request, "server", default=None)
344 content = parse_json_object_from_request(request)
345
346 limit = int(content.get("limit", 100))
347 since_token = content.get("since", None)
348 search_filter = content.get("filter", None)
349
350 handler = self.hs.get_room_list_handler()
351 if server:
352 data = yield handler.get_remote_public_room_list(
353 server,
354 limit=limit,
355 since_token=since_token,
356 search_filter=search_filter,
357 )
358 else:
359 data = yield handler.get_local_public_room_list(
360 limit=limit,
361 since_token=since_token,
362 search_filter=search_filter,
363 )
306364
307365 defer.returnValue((200, data))
308366
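
A hypothetical request body (not from the diff) for the new POST /publicRooms endpoint: limit and since drive pagination, while filter.generic_search_term is matched case-insensitively against a room's name, topic and canonical alias. Passing ?server=other.example.org instead proxies the request to that server's directory:

    public_rooms_request = {
        "limit": 10,
        "since": "token_from_a_previous_next_batch",   # illustrative token
        "filter": {"generic_search_term": "synapse"},
    }
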
1515 """This module contains logic for storing HTTP PUT transactions. This is used
1616 to ensure idempotency when performing PUTs using the REST API."""
1717 import logging
18
19 from synapse.api.auth import get_access_token_from_request
1820
1921 logger = logging.getLogger(__name__)
2022
8991 return response
9092
9193 def _get_key(self, request):
92 token = request.args["access_token"][0]
94 token = get_access_token_from_request(request)
9395 path_without_txn_id = request.path.rsplit("/", 1)[0]
9496 return path_without_txn_id + "/" + token
1414
1515 import logging
1616
17 import simplejson as json
18 from canonicaljson import encode_canonical_json
1917 from twisted.internet import defer
2018
21 import synapse.api.errors
22 import synapse.server
23 import synapse.types
24 from synapse.http.servlet import RestServlet, parse_json_object_from_request
25 from synapse.types import UserID
19 from synapse.api.errors import SynapseError
20 from synapse.http.servlet import (
21 RestServlet, parse_json_object_from_request, parse_integer
22 )
2623 from ._base import client_v2_patterns
2724
2825 logger = logging.getLogger(__name__)
6259 hs (synapse.server.HomeServer): server
6360 """
6461 super(KeyUploadServlet, self).__init__()
65 self.store = hs.get_datastore()
66 self.clock = hs.get_clock()
6762 self.auth = hs.get_auth()
68 self.device_handler = hs.get_device_handler()
63 self.e2e_keys_handler = hs.get_e2e_keys_handler()
6964
7065 @defer.inlineCallbacks
7166 def on_POST(self, request, device_id):
7267 requester = yield self.auth.get_user_by_req(request)
73
7468 user_id = requester.user.to_string()
75
7669 body = parse_json_object_from_request(request)
7770
7871 if device_id is not None:
8780 device_id = requester.device_id
8881
8982 if device_id is None:
90 raise synapse.api.errors.SynapseError(
83 raise SynapseError(
9184 400,
9285 "To upload keys, you must pass device_id when authenticating"
9386 )
9487
95 time_now = self.clock.time_msec()
96
97 # TODO: Validate the JSON to make sure it has the right keys.
98 device_keys = body.get("device_keys", None)
99 if device_keys:
100 logger.info(
101 "Updating device_keys for device %r for user %s at %d",
102 device_id, user_id, time_now
103 )
104 # TODO: Sign the JSON with the server key
105 yield self.store.set_e2e_device_keys(
106 user_id, device_id, time_now,
107 encode_canonical_json(device_keys)
108 )
109
110 one_time_keys = body.get("one_time_keys", None)
111 if one_time_keys:
112 logger.info(
113 "Adding %d one_time_keys for device %r for user %r at %d",
114 len(one_time_keys), device_id, user_id, time_now
115 )
116 key_list = []
117 for key_id, key_json in one_time_keys.items():
118 algorithm, key_id = key_id.split(":")
119 key_list.append((
120 algorithm, key_id, encode_canonical_json(key_json)
121 ))
122
123 yield self.store.add_e2e_one_time_keys(
124 user_id, device_id, time_now, key_list
125 )
126
127 # the device should have been registered already, but it may have been
128 # deleted due to a race with a DELETE request. Or we may be using an
129 # old access_token without an associated device_id. Either way, we
130 # need to double-check the device is registered to avoid ending up with
131 # keys without a corresponding device.
132 self.device_handler.check_device_registered(user_id, device_id)
133
134 result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
135 defer.returnValue((200, {"one_time_key_counts": result}))
88 result = yield self.e2e_keys_handler.upload_keys_for_user(
89 user_id, device_id, body
90 )
91 defer.returnValue((200, result))
13692
13793
13894 class KeyQueryServlet(RestServlet):
194150 @defer.inlineCallbacks
195151 def on_POST(self, request, user_id, device_id):
196152 yield self.auth.get_user_by_req(request)
153 timeout = parse_integer(request, "timeout", 10 * 1000)
197154 body = parse_json_object_from_request(request)
198 result = yield self.e2e_keys_handler.query_devices(body)
199 defer.returnValue(result)
155 result = yield self.e2e_keys_handler.query_devices(body, timeout)
156 defer.returnValue((200, result))
200157
201158 @defer.inlineCallbacks
202159 def on_GET(self, request, user_id, device_id):
203160 requester = yield self.auth.get_user_by_req(request)
161 timeout = parse_integer(request, "timeout", 10 * 1000)
204162 auth_user_id = requester.user.to_string()
205163 user_id = user_id if user_id else auth_user_id
206164 device_ids = [device_id] if device_id else []
207165 result = yield self.e2e_keys_handler.query_devices(
208 {"device_keys": {user_id: device_ids}}
209 )
210 defer.returnValue(result)
166 {"device_keys": {user_id: device_ids}},
167 timeout,
168 )
169 defer.returnValue((200, result))
211170
212171
213172 class OneTimeKeyServlet(RestServlet):
239198
240199 def __init__(self, hs):
241200 super(OneTimeKeyServlet, self).__init__()
242 self.store = hs.get_datastore()
243201 self.auth = hs.get_auth()
244 self.clock = hs.get_clock()
245 self.federation = hs.get_replication_layer()
246 self.is_mine = hs.is_mine
202 self.e2e_keys_handler = hs.get_e2e_keys_handler()
247203
248204 @defer.inlineCallbacks
249205 def on_GET(self, request, user_id, device_id, algorithm):
250206 yield self.auth.get_user_by_req(request)
251 result = yield self.handle_request(
252 {"one_time_keys": {user_id: {device_id: algorithm}}}
253 )
254 defer.returnValue(result)
207 timeout = parse_integer(request, "timeout", 10 * 1000)
208 result = yield self.e2e_keys_handler.claim_one_time_keys(
209 {"one_time_keys": {user_id: {device_id: algorithm}}},
210 timeout,
211 )
212 defer.returnValue((200, result))
255213
256214 @defer.inlineCallbacks
257215 def on_POST(self, request, user_id, device_id, algorithm):
258216 yield self.auth.get_user_by_req(request)
217 timeout = parse_integer(request, "timeout", 10 * 1000)
259218 body = parse_json_object_from_request(request)
260 result = yield self.handle_request(body)
261 defer.returnValue(result)
262
263 @defer.inlineCallbacks
264 def handle_request(self, body):
265 local_query = []
266 remote_queries = {}
267 for user_id, device_keys in body.get("one_time_keys", {}).items():
268 user = UserID.from_string(user_id)
269 if self.is_mine(user):
270 for device_id, algorithm in device_keys.items():
271 local_query.append((user_id, device_id, algorithm))
272 else:
273 remote_queries.setdefault(user.domain, {})[user_id] = (
274 device_keys
275 )
276 results = yield self.store.claim_e2e_one_time_keys(local_query)
277
278 json_result = {}
279 for user_id, device_keys in results.items():
280 for device_id, keys in device_keys.items():
281 for key_id, json_bytes in keys.items():
282 json_result.setdefault(user_id, {})[device_id] = {
283 key_id: json.loads(json_bytes)
284 }
285
286 for destination, device_keys in remote_queries.items():
287 remote_result = yield self.federation.claim_client_keys(
288 destination, {"one_time_keys": device_keys}
289 )
290 for user_id, keys in remote_result["one_time_keys"].items():
291 if user_id in device_keys:
292 json_result[user_id] = keys
293
294 defer.returnValue((200, {"one_time_keys": json_result}))
219 result = yield self.e2e_keys_handler.claim_one_time_keys(
220 body,
221 timeout,
222 )
223 defer.returnValue((200, result))
295224
296225
297226 def register_servlets(hs, http_server):
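
Illustrative only: the key query/claim servlets now read an optional ?timeout= query parameter (milliseconds, defaulting to 10000) and pass it to the e2e keys handler; a claim request might look like:

    # POST .../keys/claim?timeout=5000   (path and body are illustrative)
    claim_body = {
        "one_time_keys": {
            "@alice:remote.example.org": {"ALICEDEVICE": "signed_curve25519"},
        },
    }
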
4444
4545 from_token = parse_string(request, "from", required=False)
4646 limit = parse_integer(request, "limit", default=50)
47 only = parse_string(request, "only", required=False)
4748
4849 limit = min(limit, 500)
4950
5051 push_actions = yield self.store.get_push_actions_for_user(
51 user_id, from_token, limit
52 user_id, from_token, limit, only_highlight=(only == "highlight")
5253 )
5354
5455 receipts_by_room = yield self.store.get_receipts_for_user_with_orderings(
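
A hedged example (not from the diff) of the new parameter: only the literal value "highlight" is special-cased by the handler, so

    # GET .../notifications?only=highlight&limit=20  -> highlight notifications only
    # GET .../notifications?limit=20                 -> all notifications (unchanged)
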
1414
1515 from twisted.internet import defer
1616
17 from synapse.api.auth import get_access_token_from_request, has_access_token
1718 from synapse.api.constants import LoginType
1819 from synapse.api.errors import SynapseError, Codes, UnrecognizedRequestError
1920 from synapse.http.servlet import RestServlet, parse_json_object_from_request
130131 desired_username = body['username']
131132
132133 appservice = None
133 if 'access_token' in request.args:
134 if has_access_token(request):
134135 appservice = yield self.auth.get_appservice_by_req(request)
135136
136137 # fork off as soon as possible for ASes and shared secret auth which
142143 # 'user' key not 'username'). Since this is a new addition, we'll
143144 # fallback to 'username' if they gave one.
144145 desired_username = body.get("user", desired_username)
146 access_token = get_access_token_from_request(request)
145147
146148 if isinstance(desired_username, basestring):
147149 result = yield self._do_appservice_registration(
148 desired_username, request.args["access_token"][0], body
150 desired_username, access_token, body
149151 )
150152 defer.returnValue((200, result)) # we throw for non 200 responses
151153 return
1515 import logging
1616
1717 from twisted.internet import defer
18 from synapse.http.servlet import parse_json_object_from_request
1918
2019 from synapse.http import servlet
20 from synapse.http.servlet import parse_json_object_from_request
2121 from synapse.rest.client.v1.transactions import HttpTransactionStore
22
2223 from ._base import client_v2_patterns
2324
2425 logger = logging.getLogger(__name__)
3839 super(SendToDeviceRestServlet, self).__init__()
3940 self.hs = hs
4041 self.auth = hs.get_auth()
41 self.store = hs.get_datastore()
42 self.notifier = hs.get_notifier()
43 self.is_mine_id = hs.is_mine_id
4442 self.txns = HttpTransactionStore()
43 self.device_message_handler = hs.get_device_message_handler()
4544
4645 @defer.inlineCallbacks
4746 def on_PUT(self, request, message_type, txn_id):
5655
5756 content = parse_json_object_from_request(request)
5857
59 # TODO: Prod the notifier to wake up sync streams.
60 # TODO: Implement replication for the messages.
61 # TODO: Send the messages to remote servers if needed.
58 sender_user_id = requester.user.to_string()
6259
63 local_messages = {}
64 for user_id, by_device in content["messages"].items():
65 if self.is_mine_id(user_id):
66 messages_by_device = {
67 device_id: {
68 "content": message_content,
69 "type": message_type,
70 "sender": requester.user.to_string(),
71 }
72 for device_id, message_content in by_device.items()
73 }
74 if messages_by_device:
75 local_messages[user_id] = messages_by_device
76
77 stream_id = yield self.store.add_messages_to_device_inbox(local_messages)
78
79 self.notifier.on_new_event(
80 "to_device_key", stream_id, users=local_messages.keys()
60 yield self.device_message_handler.send_device_message(
61 sender_user_id, message_type, content["messages"]
8162 )
8263
8364 response = (200, {})
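
Illustrative only: the body accepted by the send-to-device PUT, now handed to the new DeviceMessageHandler, which writes local device inboxes and forwards messages addressed to remote users over federation:

    # PUT .../sendToDevice/{messageType}/{txnId}   (path is illustrative)
    send_to_device_body = {
        "messages": {
            "@alice:example.org": {                               # target user
                "ALICEDEVICE": {"example_key": "example_value"},  # per-device content
            },
        },
    }
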
4141 defer.returnValue((200, protocols))
4242
4343
44 class ThirdPartyProtocolServlet(RestServlet):
45 PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$",
46 releases=())
47
48 def __init__(self, hs):
49 super(ThirdPartyProtocolServlet, self).__init__()
50
51 self.auth = hs.get_auth()
52 self.appservice_handler = hs.get_application_service_handler()
53
54 @defer.inlineCallbacks
55 def on_GET(self, request, protocol):
56 yield self.auth.get_user_by_req(request)
57
58 protocols = yield self.appservice_handler.get_3pe_protocols(
59 only_protocol=protocol,
60 )
61 if protocol in protocols:
62 defer.returnValue((200, protocols[protocol]))
63 else:
64 defer.returnValue((404, {"error": "Unknown protocol"}))
65
66
4467 class ThirdPartyUserServlet(RestServlet):
4568 PATTERNS = client_v2_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$",
4669 releases=())
5679 yield self.auth.get_user_by_req(request)
5780
5881 fields = request.args
59 del fields["access_token"]
82 fields.pop("access_token", None)
6083
6184 results = yield self.appservice_handler.query_3pe(
6285 ThirdPartyEntityKind.USER, protocol, fields
80103 yield self.auth.get_user_by_req(request)
81104
82105 fields = request.args
83 del fields["access_token"]
106 fields.pop("access_token", None)
84107
85108 results = yield self.appservice_handler.query_3pe(
86109 ThirdPartyEntityKind.LOCATION, protocol, fields
91114
92115 def register_servlets(hs, http_server):
93116 ThirdPartyProtocolsServlet(hs).register(http_server)
117 ThirdPartyProtocolServlet(hs).register(http_server)
94118 ThirdPartyUserServlet(hs).register(http_server)
95119 ThirdPartyLocationServlet(hs).register(http_server)
4444 @request_handler()
4545 @defer.inlineCallbacks
4646 def _async_render_GET(self, request):
47 request.setHeader("Content-Security-Policy", "sandbox")
47 request.setHeader(
48 "Content-Security-Policy",
49 "default-src 'none';"
50 " script-src 'none';"
51 " plugin-types application/pdf;"
52 " style-src 'unsafe-inline';"
53 " object-src 'self';"
54 )
4855 server_name, media_id, name = parse_media_id(request)
4956 if server_name == self.server_name:
5057 yield self._respond_local_file(request, media_id, name)
3434 from synapse.handlers import Handlers
3535 from synapse.handlers.appservice import ApplicationServicesHandler
3636 from synapse.handlers.auth import AuthHandler
37 from synapse.handlers.devicemessage import DeviceMessageHandler
3738 from synapse.handlers.device import DeviceHandler
3839 from synapse.handlers.e2e_keys import E2eKeysHandler
3940 from synapse.handlers.presence import PresenceHandler
40 from synapse.handlers.room import RoomListHandler
41 from synapse.handlers.room_list import RoomListHandler
4142 from synapse.handlers.sync import SyncHandler
4243 from synapse.handlers.typing import TypingHandler
4344 from synapse.handlers.events import EventHandler, EventStreamHandler
99100 'application_service_api',
100101 'application_service_scheduler',
101102 'application_service_handler',
103 'device_message_handler',
102104 'notifier',
103105 'distributor',
104106 'client_resource',
204206 def build_device_handler(self):
205207 return DeviceHandler(self)
206208
209 def build_device_message_handler(self):
210 return DeviceMessageHandler(self)
211
207212 def build_e2e_keys_handler(self):
208213 return E2eKeysHandler(self)
209214
2525 from synapse.util.async import Linearizer
2626
2727 from collections import namedtuple
28 from frozendict import frozendict
2829
2930 import logging
3031 import hashlib
5455
5556
5657 class _StateCacheEntry(object):
57 __slots__ = ["state", "state_group", "state_id"]
58
59 def __init__(self, state, state_group):
60 self.state = state
58 __slots__ = ["state", "state_group", "state_id", "prev_group", "delta_ids"]
59
60 def __init__(self, state, state_group, prev_group=None, delta_ids=None):
61 self.state = frozendict(state)
6162 self.state_group = state_group
63
64 self.prev_group = prev_group
65 self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None
6266
6367 # The `state_id` is a unique ID we generate that can be used as ID for
6468 # this collection of state. Usually this would be the same as the
152156 defer.returnValue(state)
153157
154158 @defer.inlineCallbacks
155 def get_current_user_in_room(self, room_id):
156 latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
159 def get_current_user_in_room(self, room_id, latest_event_ids=None):
160 if not latest_event_ids:
161 latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
157162 entry = yield self.resolve_state_groups(room_id, latest_event_ids)
158163 joined_users = yield self.store.get_joined_users_from_state(
159164 room_id, entry.state_id, entry.state
233238 context.prev_state_ids = curr_state
234239 if event.is_state():
235240 context.state_group = self.store.get_next_state_group()
241
242 key = (event.type, event.state_key)
243 if key in context.prev_state_ids:
244 replaces = context.prev_state_ids[key]
245 event.unsigned["replaces_state"] = replaces
246
247 context.current_state_ids = dict(context.prev_state_ids)
248 context.current_state_ids[key] = event.event_id
249
250 context.prev_group = entry.prev_group
251 context.delta_ids = entry.delta_ids
252 if context.delta_ids is not None:
253 context.delta_ids = dict(context.delta_ids)
254 context.delta_ids[key] = event.event_id
236255 else:
237256 if entry.state_group is None:
238257 entry.state_group = self.store.get_next_state_group()
239258 entry.state_id = entry.state_group
259
240260 context.state_group = entry.state_group
241
242 if event.is_state():
243 key = (event.type, event.state_key)
244 if key in context.prev_state_ids:
245 replaces = context.prev_state_ids[key]
246 event.unsigned["replaces_state"] = replaces
247 context.current_state_ids = dict(context.prev_state_ids)
248 context.current_state_ids[key] = event.event_id
249 else:
250261 context.current_state_ids = context.prev_state_ids
262 context.prev_group = entry.prev_group
263 context.delta_ids = entry.delta_ids
251264
252265 context.prev_state_events = []
253266 defer.returnValue(context)
282295 defer.returnValue(_StateCacheEntry(
283296 state=state_list,
284297 state_group=name,
298 prev_group=name,
299 delta_ids={},
285300 ))
286301
287302 with (yield self.resolve_linearizer.queue(group_names)):
339354 if hasattr(self.store, "get_next_state_group"):
340355 state_group = self.store.get_next_state_group()
341356
357 prev_group = None
358 delta_ids = None
359 for old_group, old_ids in state_groups_ids.items():
360 if not set(new_state.iterkeys()) - set(old_ids.iterkeys()):
361 n_delta_ids = {
362 k: v
363 for k, v in new_state.items()
364 if old_ids.get(k) != v
365 }
366 if not delta_ids or len(n_delta_ids) < len(delta_ids):
367 prev_group = old_group
368 delta_ids = n_delta_ids
369
342370 cache = _StateCacheEntry(
343371 state=new_state,
344372 state_group=state_group,
373 prev_group=prev_group,
374 delta_ids=delta_ids,
345375 )
346376
347377 if self._state_cache is not None:
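The prev_group / delta_ids selection above picks, among the groups being resolved, the one whose state can be described with the fewest changed entries. A standalone sketch of that selection (the pick_prev_group helper and the sample data are illustrative only):

def pick_prev_group(new_state, state_groups_ids):
    """new_state maps (type, state_key) -> event_id; state_groups_ids maps
    a state group id to such a dict. Returns the chosen group and delta."""
    prev_group = None
    delta_ids = None
    for old_group, old_ids in state_groups_ids.items():
        # Mirror the check above: only groups whose keys cover new_state's
        # keys are candidates.
        if not set(new_state.keys()) - set(old_ids.keys()):
            n_delta_ids = {
                k: v for k, v in new_state.items() if old_ids.get(k) != v
            }
            if not delta_ids or len(n_delta_ids) < len(delta_ids):
                prev_group = old_group
                delta_ids = n_delta_ids
    return prev_group, delta_ids

# Example: group 1 already holds the room name, so only the changed
# membership entry ends up in the delta.
new = {("m.room.member", "@a:hs"): "$e2", ("m.room.name", ""): "$e1"}
old = {1: {("m.room.member", "@a:hs"): "$e0", ("m.room.name", ""): "$e1"}}
print(pick_prev_group(new, old))  # (1, {("m.room.member", "@a:hs"): "$e2"})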
110110 db_conn, "presence_stream", "stream_id"
111111 )
112112 self._device_inbox_id_gen = StreamIdGenerator(
113 db_conn, "device_inbox", "stream_id"
113 db_conn, "device_max_stream_id", "stream_id"
114 )
115 self._public_room_id_gen = StreamIdGenerator(
116 db_conn, "public_room_list_stream", "stream_id"
114117 )
115118
116119 self._transaction_id_gen = IdGenerator(db_conn, "sent_transactions", "id")
181184 prefilled_cache=push_rules_prefill,
182185 )
183186
187 max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
188 device_inbox_prefill, min_device_inbox_id = self._get_cache_dict(
189 db_conn, "device_inbox",
190 entity_column="user_id",
191 stream_column="stream_id",
192 max_value=max_device_inbox_id
193 )
194 self._device_inbox_stream_cache = StreamChangeCache(
195 "DeviceInboxStreamChangeCache", min_device_inbox_id,
196 prefilled_cache=device_inbox_prefill,
197 )
198 # The federation outbox and the local device inbox use the same
199 # stream_id generator.
200 device_outbox_prefill, min_device_outbox_id = self._get_cache_dict(
201 db_conn, "device_federation_outbox",
202 entity_column="destination",
203 stream_column="stream_id",
204 max_value=max_device_inbox_id,
205 )
206 self._device_federation_outbox_stream_cache = StreamChangeCache(
207 "DeviceFederationOutboxStreamChangeCache", min_device_outbox_id,
208 prefilled_cache=device_outbox_prefill,
209 )
210
184211 cur = LoggingTransaction(
185212 db_conn.cursor(),
186213 name="_find_stream_orderings_for_times_txn",
193220 self.find_stream_orderings_looping_call = self._clock.looping_call(
194221 self._find_stream_orderings_for_times, 60 * 60 * 1000
195222 )
223
224 self._stream_order_on_start = self.get_room_max_stream_ordering()
196225
197226 super(DataStore, self).__init__(hs)
198227
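The prefilled caches above are instances of the stream-change-cache idea: remember the newest stream_id at which each entity changed, so readers can skip a database query when nothing has changed since their token. A stand-in sketch of that idea (MiniStreamChangeCache is illustrative, not the real synapse.util.caches implementation):

class MiniStreamChangeCache(object):
    """Stand-in for the idea, not the real StreamChangeCache API."""

    def __init__(self, earliest_known_pos, prefilled=None):
        self._earliest = earliest_known_pos
        self._changes = dict(prefilled or {})  # entity -> last stream_id it changed at

    def entity_has_changed(self, entity, stream_pos):
        current = self._changes.get(entity, self._earliest)
        self._changes[entity] = max(stream_pos, current)

    def has_entity_changed(self, entity, stream_pos):
        if stream_pos < self._earliest:
            return True  # too old to know, so assume changed
        return self._changes.get(entity, self._earliest) > stream_pos

cache = MiniStreamChangeCache(earliest_known_pos=10)
cache.entity_has_changed("@alice:example.org", 12)
print(cache.has_entity_changed("@alice:example.org", 11))  # True: changed at 12
print(cache.has_entity_changed("@bob:example.org", 11))    # False: no newer change known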
132132 updates = yield self._simple_select_list(
133133 "background_updates",
134134 keyvalues=None,
135 retcols=("update_name",),
135 retcols=("update_name", "depends_on"),
136136 )
137 in_flight = set(update["update_name"] for update in updates)
137138 for update in updates:
138 self._background_update_queue.append(update['update_name'])
139 if update["depends_on"] not in in_flight:
140 self._background_update_queue.append(update['update_name'])
139141
140142 if not self._background_update_queue:
141143 # no work left to do
216218 self._background_update_handlers[update_name] = update_handler
217219
218220 def register_background_index_update(self, update_name, index_name,
219 table, columns):
221 table, columns, where_clause=None):
220222 """Helper for store classes to do a background index addition
221223
222224 To use:
240242 conc = True
241243 else:
242244 conc = False
243
244 sql = "CREATE INDEX %(conc)s %(name)s ON %(table)s (%(columns)s)" \
245 % {
246 "conc": "CONCURRENTLY" if conc else "",
247 "name": index_name,
248 "table": table,
249 "columns": ", ".join(columns),
250 }
245 # We don't use partial indices on SQLite as it wasn't introduced
246 # until 3.8, and wheezy has 3.7
247 where_clause = None
248
249 sql = (
250 "CREATE INDEX %(conc)s %(name)s ON %(table)s (%(columns)s)"
251 " %(where_clause)s"
252 ) % {
253 "conc": "CONCURRENTLY" if conc else "",
254 "name": index_name,
255 "table": table,
256 "columns": ", ".join(columns),
257 "where_clause": "WHERE " + where_clause if where_clause else ""
258 }
251259
252260 def create_index_concurrently(conn):
253261 conn.rollback()
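As a hedged illustration, the where_clause support above would produce roughly the following statements for the event_contains_url_index registration that appears later in this diff:

# Roughly the statement register_background_index_update() builds on Postgres.
postgres_sql = (
    "CREATE INDEX CONCURRENTLY event_contains_url_index"
    " ON events (room_id, topological_ordering, stream_ordering)"
    " WHERE contains_url = true AND outlier = false"
)
# On SQLite, CONCURRENTLY is unavailable and the WHERE clause is dropped
# (partial indexes need SQLite >= 3.8, and wheezy ships 3.7):
sqlite_sql = (
    "CREATE INDEX event_contains_url_index"
    " ON events (room_id, topological_ordering, stream_ordering)"
)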
2626 class DeviceInboxStore(SQLBaseStore):
2727
2828 @defer.inlineCallbacks
29 def add_messages_to_device_inbox(self, messages_by_user_then_device):
30 """
31 Args:
32 messages_by_user_and_device(dict):
29 def add_messages_to_device_inbox(self, local_messages_by_user_then_device,
30 remote_messages_by_destination):
31 """Used to send messages from this server.
32
33 Args:
34 local_messages_by_user_then_device(dict):
3336 Dictionary of user_id to device_id to message.
37 remote_messages_by_destination(dict):
38 Dictionary of destination server_name to the EDU JSON to send.
3439 Returns:
3540 A deferred stream_id that resolves when the messages have been
3641 inserted.
3742 """
3843
39 def select_devices_txn(txn, user_id, devices):
40 if not devices:
41 return []
42 sql = (
43 "SELECT user_id, device_id FROM devices"
44 " WHERE user_id = ? AND device_id IN ("
45 + ",".join("?" * len(devices))
46 + ")"
47 )
48 # TODO: Maybe this needs to be done in batches if there are
49 # too many local devices for a given user.
50 args = [user_id] + devices
51 txn.execute(sql, args)
52 return [tuple(row) for row in txn.fetchall()]
53
54 def add_messages_to_device_inbox_txn(txn, stream_id):
55 local_users_and_devices = set()
56 for user_id, messages_by_device in messages_by_user_then_device.items():
57 local_users_and_devices.update(
58 select_devices_txn(txn, user_id, messages_by_device.keys())
59 )
60
61 sql = (
62 "INSERT INTO device_inbox"
63 " (user_id, device_id, stream_id, message_json)"
44 def add_messages_txn(txn, now_ms, stream_id):
45 # Add the local messages directly to the local inbox.
46 self._add_messages_to_local_device_inbox_txn(
47 txn, stream_id, local_messages_by_user_then_device
48 )
49
50 # Add the remote messages to the federation outbox.
51 # We'll send them to a remote server when we next send a
52 # federation transaction to that destination.
53 sql = (
54 "INSERT INTO device_federation_outbox"
55 " (destination, stream_id, queued_ts, messages_json)"
6456 " VALUES (?,?,?,?)"
6557 )
6658 rows = []
67 for user_id, messages_by_device in messages_by_user_then_device.items():
68 for device_id, message in messages_by_device.items():
69 message_json = ujson.dumps(message)
59 for destination, edu in remote_messages_by_destination.items():
60 edu_json = ujson.dumps(edu)
61 rows.append((destination, stream_id, now_ms, edu_json))
62 txn.executemany(sql, rows)
63
64 with self._device_inbox_id_gen.get_next() as stream_id:
65 now_ms = self.clock.time_msec()
66 yield self.runInteraction(
67 "add_messages_to_device_inbox",
68 add_messages_txn,
69 now_ms,
70 stream_id,
71 )
72 for user_id in local_messages_by_user_then_device.keys():
73 self._device_inbox_stream_cache.entity_has_changed(
74 user_id, stream_id
75 )
76 for destination in remote_messages_by_destination.keys():
77 self._device_federation_outbox_stream_cache.entity_has_changed(
78 destination, stream_id
79 )
80
81 defer.returnValue(self._device_inbox_id_gen.get_current_token())
82
83 @defer.inlineCallbacks
84 def add_messages_from_remote_to_device_inbox(
85 self, origin, message_id, local_messages_by_user_then_device
86 ):
87 def add_messages_txn(txn, now_ms, stream_id):
88 # Check if we've already inserted a matching message_id for that
89 # origin. This can happen if the origin doesn't receive our
90 # acknowledgement from the first time we received the message.
91 already_inserted = self._simple_select_one_txn(
92 txn, table="device_federation_inbox",
93 keyvalues={"origin": origin, "message_id": message_id},
94 retcols=("message_id",),
95 allow_none=True,
96 )
97 if already_inserted is not None:
98 return
99
100 # Add an entry for this message_id so that we know we've processed
101 # it.
102 self._simple_insert_txn(
103 txn, table="device_federation_inbox",
104 values={
105 "origin": origin,
106 "message_id": message_id,
107 "received_ts": now_ms,
108 },
109 )
110
111 # Add the messages to the appropriate local device inboxes so that
112 # they'll be sent to the devices when they next sync.
113 self._add_messages_to_local_device_inbox_txn(
114 txn, stream_id, local_messages_by_user_then_device
115 )
116
117 with self._device_inbox_id_gen.get_next() as stream_id:
118 now_ms = self.clock.time_msec()
119 yield self.runInteraction(
120 "add_messages_from_remote_to_device_inbox",
121 add_messages_txn,
122 now_ms,
123 stream_id,
124 )
125 for user_id in local_messages_by_user_then_device.keys():
126 self._device_inbox_stream_cache.entity_has_changed(
127 user_id, stream_id
128 )
129
130 defer.returnValue(stream_id)
131
132 def _add_messages_to_local_device_inbox_txn(self, txn, stream_id,
133 messages_by_user_then_device):
134 sql = (
135 "UPDATE device_max_stream_id"
136 " SET stream_id = ?"
137 " WHERE stream_id < ?"
138 )
139 txn.execute(sql, (stream_id, stream_id))
140
141 local_by_user_then_device = {}
142 for user_id, messages_by_device in messages_by_user_then_device.items():
143 messages_json_for_user = {}
144 devices = messages_by_device.keys()
145 if len(devices) == 1 and devices[0] == "*":
146 # Handle wildcard device_ids.
147 sql = (
148 "SELECT device_id FROM devices"
149 " WHERE user_id = ?"
150 )
151 txn.execute(sql, (user_id,))
152 message_json = ujson.dumps(messages_by_device["*"])
153 for row in txn.fetchall():
154 # Add the message for all devices for this user on this
155 # server.
156 device = row[0]
157 messages_json_for_user[device] = message_json
158 else:
159 if not devices:
160 continue
161 sql = (
162 "SELECT device_id FROM devices"
163 " WHERE user_id = ? AND device_id IN ("
164 + ",".join("?" * len(devices))
165 + ")"
166 )
167 # TODO: Maybe this needs to be done in batches if there are
168 # too many local devices for a given user.
169 txn.execute(sql, [user_id] + devices)
170 for row in txn.fetchall():
70171 # Only insert into the local inbox if the device exists on
71172 # this server
72 if (user_id, device_id) in local_users_and_devices:
73 rows.append((user_id, device_id, stream_id, message_json))
74
75 txn.executemany(sql, rows)
76
77 with self._device_inbox_id_gen.get_next() as stream_id:
78 yield self.runInteraction(
79 "add_messages_to_device_inbox",
80 add_messages_to_device_inbox_txn,
81 stream_id
82 )
83
84 defer.returnValue(self._device_inbox_id_gen.get_current_token())
173 device = row[0]
174 message_json = ujson.dumps(messages_by_device[device])
175 messages_json_for_user[device] = message_json
176
177 if messages_json_for_user:
178 local_by_user_then_device[user_id] = messages_json_for_user
179
180 if not local_by_user_then_device:
181 return
182
183 sql = (
184 "INSERT INTO device_inbox"
185 " (user_id, device_id, stream_id, message_json)"
186 " VALUES (?,?,?,?)"
187 )
188 rows = []
189 for user_id, messages_by_device in local_by_user_then_device.items():
190 for device_id, message_json in messages_by_device.items():
191 rows.append((user_id, device_id, stream_id, message_json))
192
193 txn.executemany(sql, rows)
85194
86195 def get_new_messages_for_device(
87196 self, user_id, device_id, last_stream_id, current_stream_id, limit=100
96205 Deferred ([dict], int): List of messages for the device and where
97206 in the stream the messages got to.
98207 """
208 has_changed = self._device_inbox_stream_cache.has_entity_changed(
209 user_id, last_stream_id
210 )
211 if not has_changed:
212 return defer.succeed(([], current_stream_id))
213
99214 def get_new_messages_for_device_txn(txn):
100215 sql = (
101216 "SELECT stream_id, message_json FROM device_inbox"
181296
182297 def get_to_device_stream_token(self):
183298 return self._device_inbox_id_gen.get_current_token()
299
300 def get_new_device_msgs_for_remote(
301 self, destination, last_stream_id, current_stream_id, limit=100
302 ):
303 """
304 Args:
305 destination(str): The name of the remote server.
306 last_stream_id(int): The last position of the device message stream
307 that the server sent up to.
308 current_stream_id(int): The current position of the device
309 message stream.
310 Returns:
311 Deferred ([dict], int): List of messages for the destination and where
312 in the stream the messages got to.
313 """
314
315 has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
316 destination, last_stream_id
317 )
318 if not has_changed or last_stream_id == current_stream_id:
319 return defer.succeed(([], current_stream_id))
320
321 def get_new_messages_for_remote_destination_txn(txn):
322 sql = (
323 "SELECT stream_id, messages_json FROM device_federation_outbox"
324 " WHERE destination = ?"
325 " AND ? < stream_id AND stream_id <= ?"
326 " ORDER BY stream_id ASC"
327 " LIMIT ?"
328 )
329 txn.execute(sql, (
330 destination, last_stream_id, current_stream_id, limit
331 ))
332 messages = []
333 for row in txn.fetchall():
334 stream_pos = row[0]
335 messages.append(ujson.loads(row[1]))
336 if len(messages) < limit:
337 stream_pos = current_stream_id
338 return (messages, stream_pos)
339
340 return self.runInteraction(
341 "get_new_device_msgs_for_remote",
342 get_new_messages_for_remote_destination_txn,
343 )
344
345 def delete_device_msgs_for_remote(self, destination, up_to_stream_id):
346 """Used to delete messages when the remote destination acknowledges
347 their receipt.
348
349 Args:
350 destination(str): The destination server_name
351 up_to_stream_id(int): Where to delete messages up to.
352 Returns:
353 A deferred that resolves when the messages have been deleted.
354 """
355 def delete_messages_for_remote_destination_txn(txn):
356 sql = (
357 "DELETE FROM device_federation_outbox"
358 " WHERE destination = ?"
359 " AND stream_id <= ?"
360 )
361 txn.execute(sql, (destination, up_to_stream_id))
362
363 return self.runInteraction(
364 "delete_device_msgs_for_remote",
365 delete_messages_for_remote_destination_txn
366 )
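A rough usage sketch of the two entry points above; the message bodies and EDU payload shape are illustrative, only the method names and argument order come from the code:

# Messages for users on this homeserver, keyed user_id -> device_id -> body;
# "*" targets every registered device of that user.
local_messages = {
    "@alice:example.org": {"*": {"example": "content"}},
}
# Messages for users elsewhere, grouped per destination; each value is the
# JSON-serialisable EDU body queued in the federation outbox until the next
# transaction to that server (its exact shape is illustrative).
remote_messages = {
    "remote.example.net": {"example": "edu body"},
}
# stream_id = yield store.add_messages_to_device_inbox(
#     local_messages, remote_messages,
# )
#
# On the receiving homeserver, duplicates are ignored via (origin, message_id):
# stream_id = yield store.add_messages_from_remote_to_device_inbox(
#     "remote.example.net", "some-message-id", local_messages,
# )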
5353 or_ignore=ignore_if_known,
5454 )
5555 except Exception as e:
56 logger.error("store_device with device_id=%s failed: %s",
57 device_id, e)
56 logger.error("store_device with device_id=%s(%r) user_id=%s(%r)"
57 " display_name=%s(%r) failed: %s",
58 type(device_id).__name__, device_id,
59 type(user_id).__name__, user_id,
60 type(initial_device_display_name).__name__,
61 initial_device_display_name, e)
5862 raise StoreError(500, "Problem storing device.")
5963
6064 def get_device(self, user_id, device_id):
1515 from twisted.internet import defer
1616
1717 from ._base import SQLBaseStore
18 from synapse.api.errors import StoreError
1819 from synapse.util.caches.descriptors import cached
1920 from unpaddedbase64 import encode_base64
2021
3435 of the event graphs. These are used to generate the parents for new events
3536 and backfilling from another server respectively.
3637 """
38
39 def __init__(self, hs):
40 super(EventFederationStore, self).__init__(hs)
41
42 hs.get_clock().looping_call(
43 self._delete_old_forward_extrem_cache, 60 * 60 * 1000
44 )
3745
3846 def get_auth_chain(self, event_ids):
3947 return self.get_auth_chain_ids(event_ids).addCallback(self._get_events)
269277 ]
270278 )
271279
280 # We now insert into stream_ordering_to_exterm a mapping from room_id,
281 # new stream_ordering to new forward extremities in the room.
282 # This allows us to later efficiently look up the forward extremities
283 # for a room before a given stream_ordering.
284 max_stream_ord = max(
285 ev.internal_metadata.stream_ordering for ev in events
286 )
287 new_extrem = {}
288 for room_id in events_by_room:
289 event_ids = self._simple_select_onecol_txn(
290 txn,
291 table="event_forward_extremities",
292 keyvalues={"room_id": room_id},
293 retcol="event_id",
294 )
295 new_extrem[room_id] = event_ids
296
297 self._simple_insert_many_txn(
298 txn,
299 table="stream_ordering_to_exterm",
300 values=[
301 {
302 "room_id": room_id,
303 "event_id": event_id,
304 "stream_ordering": max_stream_ord,
305 }
306 for room_id, extrem_evs in new_extrem.items()
307 for event_id in extrem_evs
308 ]
309 )
310
272311 query = (
273312 "INSERT INTO event_backward_extremities (event_id, room_id)"
274313 " SELECT ?, ? WHERE NOT EXISTS ("
304343 self.get_latest_event_ids_in_room.invalidate, (room_id,)
305344 )
306345
346 def get_forward_extremeties_for_room(self, room_id, stream_ordering):
347 # We want to make the cache more effective, so we clamp to the last
348 # change before the given ordering.
349 last_change = self._events_stream_cache.get_max_pos_of_last_change(room_id)
350
351 # We don't always have a full stream_ordering_to_exterm table, e.g. after
352 # the upgrade that introduced it, so we make sure we never try to
353 # pin to a stream_ordering from before a restart.
354 last_change = max(self._stream_order_on_start, last_change)
355
356 if last_change > self.stream_ordering_month_ago:
357 stream_ordering = min(last_change, stream_ordering)
358
359 return self._get_forward_extremeties_for_room(room_id, stream_ordering)
360
361 @cached(max_entries=5000, num_args=2)
362 def _get_forward_extremeties_for_room(self, room_id, stream_ordering):
363 """For a given room_id and stream_ordering, return the forward
364 extremeties of the room at that point in "time".
365
366 Throws a StoreError if we have since purged the index for
367 stream_orderings from that point.
368 """
369
370 if stream_ordering <= self.stream_ordering_month_ago:
371 raise StoreError(400, "stream_ordering too old")
372
373 sql = ("""
374 SELECT event_id FROM stream_ordering_to_exterm
375 INNER JOIN (
376 SELECT room_id, MAX(stream_ordering) AS stream_ordering
377 FROM stream_ordering_to_exterm
378 WHERE stream_ordering <= ? GROUP BY room_id
379 ) AS rms USING (room_id, stream_ordering)
380 WHERE room_id = ?
381 """)
382
383 def get_forward_extremeties_for_room_txn(txn):
384 txn.execute(sql, (stream_ordering, room_id))
385 rows = txn.fetchall()
386 return [event_id for event_id, in rows]
387
388 return self.runInteraction(
389 "get_forward_extremeties_for_room",
390 get_forward_extremeties_for_room_txn
391 )
392
393 def _delete_old_forward_extrem_cache(self):
394 def _delete_old_forward_extrem_cache_txn(txn):
395 # Delete entries older than a month, while making sure we don't delete
396 # the only entries for a room.
397 sql = ("""
398 DELETE FROM stream_ordering_to_exterm
399 WHERE
400 (
401 SELECT max(stream_ordering) AS stream_ordering
402 FROM stream_ordering_to_exterm
403 WHERE room_id = stream_ordering_to_exterm.room_id
404 ) > ?
405 AND stream_ordering < ?
406 """)
407 txn.execute(
408 sql,
409 (self.stream_ordering_month_ago, self.stream_ordering_month_ago,)
410 )
411 return self.runInteraction(
412 "_delete_old_forward_extrem_cache",
413 _delete_old_forward_extrem_cache_txn
414 )
415
307416 def get_backfill_events(self, room_id, event_list, limit):
308417 """Get a list of Events for a given topic that occurred before (and
309418 including) the events in event_list. Return a list of max size `limit`
2525
2626
2727 class EventPushActionsStore(SQLBaseStore):
28 EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
29
2830 def __init__(self, hs):
2931 self.stream_ordering_month_ago = None
3032 super(EventPushActionsStore, self).__init__(hs)
33
34 self.register_background_index_update(
35 self.EPA_HIGHLIGHT_INDEX,
36 index_name="event_push_actions_u_highlight",
37 table="event_push_actions",
38 columns=["user_id", "stream_ordering"],
39 )
3140
3241 def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples):
3342 """
337346 defer.returnValue(notifs[:limit])
338347
339348 @defer.inlineCallbacks
340 def get_push_actions_for_user(self, user_id, before=None, limit=50):
349 def get_push_actions_for_user(self, user_id, before=None, limit=50,
350 only_highlight=False):
341351 def f(txn):
342352 before_clause = ""
343353 if before:
344 before_clause = "AND stream_ordering < ?"
354 before_clause = "AND epa.stream_ordering < ?"
345355 args = [user_id, before, limit]
346356 else:
347357 args = [user_id, limit]
358
359 if only_highlight:
360 if len(before_clause) > 0:
361 before_clause += " "
362 before_clause += "AND epa.highlight = 1"
363
364 # NB. This assumes event_ids are globally unique since
365 # it makes the query easier to index
348366 sql = (
349367 "SELECT epa.event_id, epa.room_id,"
350368 " epa.stream_ordering, epa.topological_ordering,"
351369 " epa.actions, epa.profile_tag, e.received_ts"
352370 " FROM event_push_actions epa, events e"
353 " WHERE epa.room_id = e.room_id AND epa.event_id = e.event_id"
371 " WHERE epa.event_id = e.event_id"
354372 " AND epa.user_id = ? %s"
355373 " ORDER BY epa.stream_ordering DESC"
356374 " LIMIT ?"
186186 self.register_background_update_handler(
187187 self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
188188 self._background_reindex_fields_sender,
189 )
190
191 self.register_background_index_update(
192 "event_contains_url_index",
193 index_name="event_contains_url_index",
194 table="events",
195 columns=["room_id", "topological_ordering", "stream_ordering"],
196 where_clause="contains_url = true AND outlier = false",
189197 )
190198
191199 self._event_persist_queue = _EventPeristenceQueue()
496504
497505 # insert into the state_group, state_groups_state and
498506 # event_to_state_groups tables.
499 self._store_mult_state_groups_txn(txn, ((event, context),))
507 try:
508 self._store_mult_state_groups_txn(txn, ((event, context),))
509 except Exception:
510 logger.exception("")
511 raise
500512
501513 metadata_json = encode_json(
502514 event.internal_metadata.get_dict()
15421554 )
15431555 event_rows = txn.fetchall()
15441556
1557 for event_id, state_key in event_rows:
1558 txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
1559
15451560 # We calculate the new entries for the backward extremities by finding
15461561 # all events that point to events that are to be purged
15471562 txn.execute(
15811596 " GROUP BY state_group HAVING MAX(topological_ordering) < ?",
15821597 (room_id, topological_ordering, topological_ordering)
15831598 )
1599
15841600 state_rows = txn.fetchall()
1601 state_groups_to_delete = [sg for sg, in state_rows]
1602
1603 # Now we get all the state groups that rely on these state groups
1604 new_state_edges = []
1605 chunks = [
1606 state_groups_to_delete[i:i + 100]
1607 for i in xrange(0, len(state_groups_to_delete), 100)
1608 ]
1609 for chunk in chunks:
1610 rows = self._simple_select_many_txn(
1611 txn,
1612 table="state_group_edges",
1613 column="prev_state_group",
1614 iterable=chunk,
1615 retcols=["state_group"],
1616 keyvalues={},
1617 )
1618 new_state_edges.extend(row["state_group"] for row in rows)
1619
1620 # Now we turn the state groups that reference to-be-deleted state groups
1621 # to non delta versions.
1622 for new_state_edge in new_state_edges:
1623 curr_state = self._get_state_groups_from_groups_txn(
1624 txn, [new_state_edge], types=None
1625 )
1626 curr_state = curr_state[new_state_edge]
1627
1628 self._simple_delete_txn(
1629 txn,
1630 table="state_groups_state",
1631 keyvalues={
1632 "state_group": new_state_edge,
1633 }
1634 )
1635
1636 self._simple_delete_txn(
1637 txn,
1638 table="state_group_edges",
1639 keyvalues={
1640 "state_group": new_state_edge,
1641 }
1642 )
1643
1644 self._simple_insert_many_txn(
1645 txn,
1646 table="state_groups_state",
1647 values=[
1648 {
1649 "state_group": new_state_edge,
1650 "room_id": room_id,
1651 "type": key[0],
1652 "state_key": key[1],
1653 "event_id": state_id,
1654 }
1655 for key, state_id in curr_state.items()
1656 ],
1657 )
1658
15851659 txn.executemany(
15861660 "DELETE FROM state_groups_state WHERE state_group = ?",
15871661 state_rows
2424
2525 # Remember to update this number every time a change is made to database
2626 # schema files, so the users will be informed on server restarts.
27 SCHEMA_VERSION = 34
27 SCHEMA_VERSION = 35
2828
2929 dir_path = os.path.abspath(os.path.dirname(__file__))
3030
241241 module = imp.load_source(
242242 module_name, absolute_path, python_file
243243 )
244 logger.debug("Running script %s", relative_path)
244 logger.info("Running script %s", relative_path)
245245 module.run_create(cur, database_engine)
246246 if not is_empty:
247247 module.run_upgrade(cur, database_engine, config=config)
252252 pass
253253 elif ext == ".sql":
254254 # A plain old .sql file, just read and execute it
255 logger.debug("Applying schema %s", relative_path)
255 logger.info("Applying schema %s", relative_path)
256256 executescript(cur, absolute_path)
257257 else:
258258 # Not a valid delta file.
4747 StoreError if the room could not be stored.
4848 """
4949 try:
50 yield self._simple_insert(
51 "rooms",
52 {
53 "room_id": room_id,
54 "creator": room_creator_user_id,
55 "is_public": is_public,
56 },
57 desc="store_room",
58 )
50 def store_room_txn(txn, next_id):
51 self._simple_insert_txn(
52 txn,
53 "rooms",
54 {
55 "room_id": room_id,
56 "creator": room_creator_user_id,
57 "is_public": is_public,
58 },
59 )
60 if is_public:
61 self._simple_insert_txn(
62 txn,
63 table="public_room_list_stream",
64 values={
65 "stream_id": next_id,
66 "room_id": room_id,
67 "visibility": is_public,
68 }
69 )
70 with self._public_room_id_gen.get_next() as next_id:
71 yield self.runInteraction(
72 "store_room_txn",
73 store_room_txn, next_id,
74 )
5975 except Exception as e:
6076 logger.error("store_room with room_id=%s failed: %s", room_id, e)
6177 raise StoreError(500, "Problem creating room.")
7692 allow_none=True,
7793 )
7894
95 @defer.inlineCallbacks
7996 def set_room_is_public(self, room_id, is_public):
80 return self._simple_update_one(
81 table="rooms",
82 keyvalues={"room_id": room_id},
83 updatevalues={"is_public": is_public},
84 desc="set_room_is_public",
85 )
97 def set_room_is_public_txn(txn, next_id):
98 self._simple_update_one_txn(
99 txn,
100 table="rooms",
101 keyvalues={"room_id": room_id},
102 updatevalues={"is_public": is_public},
103 )
104
105 entries = self._simple_select_list_txn(
106 txn,
107 table="public_room_list_stream",
108 keyvalues={"room_id": room_id},
109 retcols=("stream_id", "visibility"),
110 )
111
112 entries.sort(key=lambda r: r["stream_id"])
113
114 add_to_stream = True
115 if entries:
116 add_to_stream = bool(entries[-1]["visibility"]) != is_public
117
118 if add_to_stream:
119 self._simple_insert_txn(
120 txn,
121 table="public_room_list_stream",
122 values={
123 "stream_id": next_id,
124 "room_id": room_id,
125 "visibility": is_public,
126 }
127 )
128
129 with self._public_room_id_gen.get_next() as next_id:
130 yield self.runInteraction(
131 "set_room_is_public",
132 set_room_is_public_txn, next_id,
133 )
86134
87135 def get_public_room_ids(self):
88136 return self._simple_select_onecol(
206254 },
207255 desc="add_event_report"
208256 )
257
258 def get_current_public_room_stream_id(self):
259 return self._public_room_id_gen.get_current_token()
260
261 def get_public_room_ids_at_stream_id(self, stream_id):
262 return self.runInteraction(
263 "get_public_room_ids_at_stream_id",
264 self.get_public_room_ids_at_stream_id_txn, stream_id
265 )
266
267 def get_public_room_ids_at_stream_id_txn(self, txn, stream_id):
268 return {
269 rm
270 for rm, vis in self.get_published_at_stream_id_txn(txn, stream_id).items()
271 if vis
272 }
273
274 def get_published_at_stream_id_txn(self, txn, stream_id):
275 sql = ("""
276 SELECT room_id, visibility FROM public_room_list_stream
277 INNER JOIN (
278 SELECT room_id, max(stream_id) AS stream_id
279 FROM public_room_list_stream
280 WHERE stream_id <= ?
281 GROUP BY room_id
282 ) grouped USING (room_id, stream_id)
283 """)
284
285 txn.execute(sql, (stream_id,))
286 return dict(txn.fetchall())
287
288 def get_public_room_changes(self, prev_stream_id, new_stream_id):
289 def get_public_room_changes_txn(txn):
290 then_rooms = self.get_public_room_ids_at_stream_id_txn(txn, prev_stream_id)
291
292 now_rooms_dict = self.get_published_at_stream_id_txn(txn, new_stream_id)
293
294 now_rooms_visible = set(
295 rm for rm, vis in now_rooms_dict.items() if vis
296 )
297 now_rooms_not_visible = set(
298 rm for rm, vis in now_rooms_dict.items() if not vis
299 )
300
301 newly_visible = now_rooms_visible - then_rooms
302 newly_unpublished = now_rooms_not_visible & then_rooms
303
304 return newly_visible, newly_unpublished
305
306 return self.runInteraction(
307 "get_public_room_changes", get_public_room_changes_txn
308 )
309
310 def get_all_new_public_rooms(self, prev_id, current_id, limit):
311 def get_all_new_public_rooms(txn):
312 sql = ("""
313 SELECT stream_id, room_id, visibility FROM public_room_list_stream
314 WHERE stream_id > ? AND stream_id <= ?
315 ORDER BY stream_id ASC
316 LIMIT ?
317 """)
318
319 txn.execute(sql, (prev_id, current_id, limit,))
320 return txn.fetchall()
321
322 return self.runInteraction(
323 "get_all_new_public_rooms", get_all_new_public_rooms
324 )
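The newly_visible / newly_unpublished computation above amounts to comparing two snapshots of the published list; a tiny standalone illustration with made-up room IDs:

# Rooms visible at the old stream id, and the latest room_id -> visibility
# entries at the new stream id.
then_rooms = {"!a:hs", "!b:hs"}
now_rooms_dict = {"!a:hs": True, "!b:hs": False, "!c:hs": True}

now_visible = set(rm for rm, vis in now_rooms_dict.items() if vis)
now_hidden = set(rm for rm, vis in now_rooms_dict.items() if not vis)

newly_visible = now_visible - then_rooms      # {"!c:hs"} - newly published
newly_unpublished = now_hidden & then_rooms   # {"!b:hs"} - taken off the list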
1212 * limitations under the License.
1313 */
1414
15 /** Using CREATE INDEX directly is deprecated in favour of using a background
16 * update; see synapse/storage/schema/delta/33/access_tokens_device_index.sql
17 * and synapse/storage/registration.py for an example using
18 * "access_tokens_device_index" **/
1519 CREATE INDEX receipts_linearized_room_stream ON receipts_linearized(
1620 room_id, stream_id
1721 );
1212 * limitations under the License.
1313 */
1414
15 /** Using CREATE INDEX directly is deprecated in favour of using a background
16 * update; see synapse/storage/schema/delta/33/access_tokens_device_index.sql
17 * and synapse/storage/registration.py for an example using
18 * "access_tokens_device_index" **/
1519 CREATE INDEX events_room_stream on events(room_id, stream_ordering);
1212 * limitations under the License.
1313 */
1414
15 /** Using CREATE INDEX directly is deprecated in favour of using a background
16 * update; see synapse/storage/schema/delta/33/access_tokens_device_index.sql
17 * and synapse/storage/registration.py for an example using
18 * "access_tokens_device_index" **/
1519 CREATE INDEX public_room_index on rooms(is_public);
1212 * limitations under the License.
1313 */
1414
15 /** Using CREATE INDEX directly is deprecated in favour of using a background
16 * update; see synapse/storage/schema/delta/33/access_tokens_device_index.sql
17 * and synapse/storage/registration.py for an example using
18 * "access_tokens_device_index" **/
1519 CREATE INDEX receipts_linearized_user ON receipts_linearized(
1620 user_id
1721 );
2525
2626 UPDATE event_push_actions SET notif = 1, highlight = 0;
2727
28 /** Using CREATE INDEX directly is deprecated in favour of using a background
29 * update; see synapse/storage/schema/delta/33/access_tokens_device_index.sql
30 * and synapse/storage/registration.py for an example using
31 * "access_tokens_device_index" **/
2832 CREATE INDEX event_push_actions_rm_tokens on event_push_actions(
2933 user_id, room_id, topological_ordering, stream_ordering
3034 );
1212 * limitations under the License.
1313 */
1414
15 /** Using CREATE INDEX directly is deprecated in favour of using a background
16 * update; see synapse/storage/schema/delta/33/access_tokens_device_index.sql
17 * and synapse/storage/registration.py for an example using
18 * "access_tokens_device_index" **/
1519 CREATE INDEX event_push_actions_stream_ordering on event_push_actions(
1620 stream_ordering, user_id
1721 );
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15
16 ALTER TABLE background_updates ADD COLUMN depends_on TEXT;
17
18 INSERT into background_updates (update_name, progress_json, depends_on)
19 VALUES ('state_group_state_type_index', '{}', 'state_group_state_deduplication');
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 INSERT into background_updates (update_name, progress_json)
16 VALUES ('event_contains_url_index', '{}');
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 DROP TABLE IF EXISTS device_federation_outbox;
16 CREATE TABLE device_federation_outbox (
17 destination TEXT NOT NULL,
18 stream_id BIGINT NOT NULL,
19 queued_ts BIGINT NOT NULL,
20 messages_json TEXT NOT NULL
21 );
22
23
24 DROP INDEX IF EXISTS device_federation_outbox_destination_id;
25 CREATE INDEX device_federation_outbox_destination_id
26 ON device_federation_outbox(destination, stream_id);
27
28
29 DROP TABLE IF EXISTS device_federation_inbox;
30 CREATE TABLE device_federation_inbox (
31 origin TEXT NOT NULL,
32 message_id TEXT NOT NULL,
33 received_ts BIGINT NOT NULL
34 );
35
36 DROP INDEX IF EXISTS device_federation_inbox_sender_id;
37 CREATE INDEX device_federation_inbox_sender_id
38 ON device_federation_inbox(origin, message_id);
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 CREATE TABLE device_max_stream_id (
16 stream_id BIGINT NOT NULL
17 );
18
19 INSERT INTO device_max_stream_id (stream_id)
20 SELECT COALESCE(MAX(stream_id), 0) FROM device_inbox;
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 INSERT into background_updates (update_name, progress_json)
16 VALUES ('epa_highlight_index', '{}');
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15
16 CREATE TABLE public_room_list_stream (
17 stream_id BIGINT NOT NULL,
18 room_id TEXT NOT NULL,
19 visibility BOOLEAN NOT NULL
20 );
21
22 INSERT INTO public_room_list_stream (stream_id, room_id, visibility)
23 SELECT 1, room_id, is_public FROM rooms
24 WHERE is_public = CAST(1 AS BOOLEAN);
25
26 CREATE INDEX public_room_list_stream_idx on public_room_list_stream(
27 stream_id
28 );
29
30 CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream(
31 room_id, stream_id
32 );
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 CREATE TABLE state_group_edges(
16 state_group BIGINT NOT NULL,
17 prev_state_group BIGINT NOT NULL
18 );
19
20 CREATE INDEX state_group_edges_idx ON state_group_edges(state_group);
21 CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group);
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 INSERT into background_updates (update_name, progress_json)
16 VALUES ('state_group_state_deduplication', '{}');
0 /* Copyright 2016 OpenMarket Ltd
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15
16 CREATE TABLE stream_ordering_to_exterm (
17 stream_ordering BIGINT NOT NULL,
18 room_id TEXT NOT NULL,
19 event_id TEXT NOT NULL
20 );
21
22 INSERT INTO stream_ordering_to_exterm (stream_ordering, room_id, event_id)
23 SELECT stream_ordering, room_id, event_id FROM event_forward_extremities
24 INNER JOIN (
25 SELECT room_id, max(stream_ordering) as stream_ordering FROM events
26 INNER JOIN event_forward_extremities USING (room_id, event_id)
27 GROUP BY room_id
28 ) AS rms USING (room_id);
29
30 CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm(
31 stream_ordering
32 );
33
34 CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm(
35 room_id, stream_ordering
36 );
1515 from ._base import SQLBaseStore
1616 from synapse.util.caches.descriptors import cached, cachedList
1717 from synapse.util.caches import intern_string
18 from synapse.storage.engines import PostgresEngine
1819
1920 from twisted.internet import defer
2021
2122 import logging
2223
2324 logger = logging.getLogger(__name__)
25
26
27 MAX_STATE_DELTA_HOPS = 100
2428
2529
2630 class StateStore(SQLBaseStore):
4246 * `state_groups_state`: Maps state group to state events.
4347 """
4448
49 STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
50 STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
51
52 def __init__(self, hs):
53 super(StateStore, self).__init__(hs)
54 self.register_background_update_handler(
55 self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
56 self._background_deduplicate_state,
57 )
58 self.register_background_update_handler(
59 self.STATE_GROUP_INDEX_UPDATE_NAME,
60 self._background_index_state,
61 )
62
4563 @defer.inlineCallbacks
4664 def get_state_groups_ids(self, room_id, event_ids):
4765 if not event_ids:
102120 state_groups[event.event_id] = context.state_group
103121
104122 if self._have_persisted_state_group_txn(txn, context.state_group):
105 logger.info("Already persisted state_group: %r", context.state_group)
106123 continue
107
108 state_event_ids = dict(context.current_state_ids)
109124
110125 self._simple_insert_txn(
111126 txn,
117132 },
118133 )
119134
120 self._simple_insert_many_txn(
121 txn,
122 table="state_groups_state",
123 values=[
124 {
135 # We persist as a delta if we can, while also ensuring the chain
136 # of deltas isn't too long, as otherwise read performance degrades.
137 if context.prev_group:
138 potential_hops = self._count_state_group_hops_txn(
139 txn, context.prev_group
140 )
141 if context.prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
142 self._simple_insert_txn(
143 txn,
144 table="state_group_edges",
145 values={
125146 "state_group": context.state_group,
126 "room_id": event.room_id,
127 "type": key[0],
128 "state_key": key[1],
129 "event_id": state_id,
130 }
131 for key, state_id in state_event_ids.items()
132 ],
133 )
147 "prev_state_group": context.prev_group,
148 },
149 )
150
151 self._simple_insert_many_txn(
152 txn,
153 table="state_groups_state",
154 values=[
155 {
156 "state_group": context.state_group,
157 "room_id": event.room_id,
158 "type": key[0],
159 "state_key": key[1],
160 "event_id": state_id,
161 }
162 for key, state_id in context.delta_ids.items()
163 ],
164 )
165 else:
166 self._simple_insert_many_txn(
167 txn,
168 table="state_groups_state",
169 values=[
170 {
171 "state_group": context.state_group,
172 "room_id": event.room_id,
173 "type": key[0],
174 "state_key": key[1],
175 "event_id": state_id,
176 }
177 for key, state_id in context.current_state_ids.items()
178 ],
179 )
134180
135181 self._simple_insert_many_txn(
136182 txn,
144190 ],
145191 )
146192
193 def _count_state_group_hops_txn(self, txn, state_group):
194 """Given a state group, count how many hops there are in the tree.
195
196 This is used to ensure the delta chains don't get too long.
197 """
198 if isinstance(self.database_engine, PostgresEngine):
199 sql = ("""
200 WITH RECURSIVE state(state_group) AS (
201 VALUES(?::bigint)
202 UNION ALL
203 SELECT prev_state_group FROM state_group_edges e, state s
204 WHERE s.state_group = e.state_group
205 )
206 SELECT count(*) FROM state;
207 """)
208
209 txn.execute(sql, (state_group,))
210 row = txn.fetchone()
211 if row and row[0]:
212 return row[0]
213 else:
214 return 0
215 else:
216 # We don't use WITH RECURSIVE on sqlite3 as there are distributions
217 # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
218 next_group = state_group
219 count = 0
220
221 while next_group:
222 next_group = self._simple_select_one_onecol_txn(
223 txn,
224 table="state_group_edges",
225 keyvalues={"state_group": next_group},
226 retcol="prev_state_group",
227 allow_none=True,
228 )
229 if next_group:
230 count += 1
231
232 return count
233
147234 @defer.inlineCallbacks
148235 def get_current_state(self, room_id, event_type=None, state_key=""):
149236 if event_type and state_key is not None:
205292 def _get_state_groups_from_groups(self, groups, types):
206293 """Returns dictionary state_group -> (dict of (type, state_key) -> event id)
207294 """
208 def f(txn, groups):
209 if types is not None:
210 where_clause = "AND (%s)" % (
211 " OR ".join(["(type = ? AND state_key = ?)"] * len(types)),
212 )
213 else:
214 where_clause = ""
215
216 sql = (
217 "SELECT state_group, event_id, type, state_key"
218 " FROM state_groups_state WHERE"
219 " state_group IN (%s) %s" % (
220 ",".join("?" for _ in groups),
221 where_clause,
222 )
223 )
224
225 args = list(groups)
226 if types is not None:
227 args.extend([i for typ in types for i in typ])
228
229 txn.execute(sql, args)
230 rows = self.cursor_to_dict(txn)
231
232 results = {group: {} for group in groups}
233 for row in rows:
234 key = (row["type"], row["state_key"])
235 results[row["state_group"]][key] = row["event_id"]
236 return results
237
238295 results = {}
239296
240297 chunks = [groups[i:i + 100] for i in xrange(0, len(groups), 100)]
241298 for chunk in chunks:
242299 res = yield self.runInteraction(
243300 "_get_state_groups_from_groups",
244 f, chunk
301 self._get_state_groups_from_groups_txn, chunk, types,
245302 )
246303 results.update(res)
247304
248305 defer.returnValue(results)
306
307 def _get_state_groups_from_groups_txn(self, txn, groups, types=None):
308 results = {group: {} for group in groups}
309 if isinstance(self.database_engine, PostgresEngine):
310 # Temporarily disable sequential scans in this transaction. This is
311 # a temporary hack until we can add the right indices in
312 txn.execute("SET LOCAL enable_seqscan=off")
313
314 # The below query walks the state_group tree so that the "state"
315 # table includes all state_groups in the tree. It then joins
316 # against `state_groups_state` to fetch the latest state.
317 # It assumes that previous state groups are always numerically
318 # lesser.
319 # The PARTITION is used to get the event_id in the greatest state
320 # group for the given type, state_key.
321 # This may return multiple rows per (type, state_key), but last_value
322 # should be the same.
323 sql = ("""
324 WITH RECURSIVE state(state_group) AS (
325 VALUES(?::bigint)
326 UNION ALL
327 SELECT prev_state_group FROM state_group_edges e, state s
328 WHERE s.state_group = e.state_group
329 )
330 SELECT type, state_key, last_value(event_id) OVER (
331 PARTITION BY type, state_key ORDER BY state_group ASC
332 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
333 ) AS event_id FROM state_groups_state
334 WHERE state_group IN (
335 SELECT state_group FROM state
336 )
337 %s
338 """)
339
340 # Turns out that postgres doesn't like doing a list of OR's and
341 # is about 1000x slower, so we just issue a query for each specific
342 # type separately.
343 if types:
344 clause_to_args = [
345 (
346 "AND type = ? AND state_key = ?",
347 (etype, state_key)
348 )
349 for etype, state_key in types
350 ]
351 else:
352 # If types is None we fetch all the state, and so just use an
353 # empty where clause with no extra args.
354 clause_to_args = [("", [])]
355
356 for where_clause, where_args in clause_to_args:
357 for group in groups:
358 args = [group]
359 args.extend(where_args)
360
361 txn.execute(sql % (where_clause,), args)
362 rows = self.cursor_to_dict(txn)
363 for row in rows:
364 key = (row["type"], row["state_key"])
365 results[group][key] = row["event_id"]
366 else:
367 if types is not None:
368 where_clause = "AND (%s)" % (
369 " OR ".join(["(type = ? AND state_key = ?)"] * len(types)),
370 )
371 else:
372 where_clause = ""
373
374 # We don't use WITH RECURSIVE on sqlite3 as there are distributions
375 # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
376 for group in groups:
377 group_tree = [group]
378 next_group = group
379
380 while next_group:
381 next_group = self._simple_select_one_onecol_txn(
382 txn,
383 table="state_group_edges",
384 keyvalues={"state_group": next_group},
385 retcol="prev_state_group",
386 allow_none=True,
387 )
388 if next_group:
389 group_tree.append(next_group)
390
391 sql = ("""
392 SELECT type, state_key, event_id FROM state_groups_state
393 INNER JOIN (
394 SELECT type, state_key, max(state_group) as state_group
395 FROM state_groups_state
396 WHERE state_group IN (%s) %s
397 GROUP BY type, state_key
398 ) USING (type, state_key, state_group);
399 """) % (",".join("?" for _ in group_tree), where_clause,)
400
401 args = list(group_tree)
402 if types is not None:
403 args.extend([i for typ in types for i in typ])
404
405 txn.execute(sql, args)
406 rows = self.cursor_to_dict(txn)
407 for row in rows:
408 key = (row["type"], row["state_key"])
409 results[group][key] = row["event_id"]
410
411 return results
249412
250413 @defer.inlineCallbacks
251414 def get_state_for_events(self, event_ids, types):
503666
504667 defer.returnValue(results)
505668
506 def get_all_new_state_groups(self, last_id, current_id, limit):
507 def get_all_new_state_groups_txn(txn):
508 sql = (
509 "SELECT id, room_id, event_id FROM state_groups"
510 " WHERE ? < id AND id <= ? ORDER BY id LIMIT ?"
511 )
512 txn.execute(sql, (last_id, current_id, limit))
513 groups = txn.fetchall()
514
515 if not groups:
516 return ([], [])
517
518 lower_bound = groups[0][0]
519 upper_bound = groups[-1][0]
520 sql = (
521 "SELECT state_group, type, state_key, event_id"
522 " FROM state_groups_state"
523 " WHERE ? <= state_group AND state_group <= ?"
524 )
525
526 txn.execute(sql, (lower_bound, upper_bound))
527 state_group_state = txn.fetchall()
528 return (groups, state_group_state)
529 return self.runInteraction(
530 "get_all_new_state_groups", get_all_new_state_groups_txn
531 )
532
533669 def get_next_state_group(self):
534670 return self._state_groups_id_gen.get_next()
671
672 @defer.inlineCallbacks
673 def _background_deduplicate_state(self, progress, batch_size):
674 """This background update will slowly deduplicate state by reencoding
675 them as deltas.
676 """
677 last_state_group = progress.get("last_state_group", 0)
678 rows_inserted = progress.get("rows_inserted", 0)
679 max_group = progress.get("max_group", None)
680
681 BATCH_SIZE_SCALE_FACTOR = 100
682
683 batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
684
685 if max_group is None:
686 rows = yield self._execute(
687 "_background_deduplicate_state", None,
688 "SELECT coalesce(max(id), 0) FROM state_groups",
689 )
690 max_group = rows[0][0]
691
692 def reindex_txn(txn):
693 new_last_state_group = last_state_group
694 for count in xrange(batch_size):
695 txn.execute(
696 "SELECT id, room_id FROM state_groups"
697 " WHERE ? < id AND id <= ?"
698 " ORDER BY id ASC"
699 " LIMIT 1",
700 (new_last_state_group, max_group,)
701 )
702 row = txn.fetchone()
703 if row:
704 state_group, room_id = row
705
706 if not row or not state_group:
707 return True, count
708
709 txn.execute(
710 "SELECT state_group FROM state_group_edges"
711 " WHERE state_group = ?",
712 (state_group,)
713 )
714
715 # If we reach a point where we've already started inserting
716 # edges we should stop.
717 if txn.fetchall():
718 return True, count
719
720 txn.execute(
721 "SELECT coalesce(max(id), 0) FROM state_groups"
722 " WHERE id < ? AND room_id = ?",
723 (state_group, room_id,)
724 )
725 prev_group, = txn.fetchone()
726 new_last_state_group = state_group
727
728 if prev_group:
729 potential_hops = self._count_state_group_hops_txn(
730 txn, prev_group
731 )
732 if potential_hops >= MAX_STATE_DELTA_HOPS:
733 # We want to ensure chains are at most this long, as
734 # otherwise read performance degrades.
735 continue
736
737 prev_state = self._get_state_groups_from_groups_txn(
738 txn, [prev_group], types=None
739 )
740 prev_state = prev_state[prev_group]
741
742 curr_state = self._get_state_groups_from_groups_txn(
743 txn, [state_group], types=None
744 )
745 curr_state = curr_state[state_group]
746
747 if not set(prev_state.keys()) - set(curr_state.keys()):
748 # We can only do a delta if the current state's keys are a
749 # superset of the previous state's keys.
750
751 delta_state = {
752 key: value for key, value in curr_state.items()
753 if prev_state.get(key, None) != value
754 }
755
756 self._simple_delete_txn(
757 txn,
758 table="state_group_edges",
759 keyvalues={
760 "state_group": state_group,
761 }
762 )
763
764 self._simple_insert_txn(
765 txn,
766 table="state_group_edges",
767 values={
768 "state_group": state_group,
769 "prev_state_group": prev_group,
770 }
771 )
772
773 self._simple_delete_txn(
774 txn,
775 table="state_groups_state",
776 keyvalues={
777 "state_group": state_group,
778 }
779 )
780
781 self._simple_insert_many_txn(
782 txn,
783 table="state_groups_state",
784 values=[
785 {
786 "state_group": state_group,
787 "room_id": room_id,
788 "type": key[0],
789 "state_key": key[1],
790 "event_id": state_id,
791 }
792 for key, state_id in delta_state.items()
793 ],
794 )
795
796 progress = {
797 "last_state_group": state_group,
798 "rows_inserted": rows_inserted + batch_size,
799 "max_group": max_group,
800 }
801
802 self._background_update_progress_txn(
803 txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
804 )
805
806 return False, batch_size
807
808 finished, result = yield self.runInteraction(
809 self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
810 )
811
812 if finished:
813 yield self._end_background_update(self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME)
814
815 defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR)
816
817 @defer.inlineCallbacks
818 def _background_index_state(self, progress, batch_size):
819 def reindex_txn(conn):
820 conn.rollback()
821 if isinstance(self.database_engine, PostgresEngine):
822 # postgres insists on autocommit for the index
823 conn.set_session(autocommit=True)
824 try:
825 txn = conn.cursor()
826 txn.execute(
827 "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
828 " ON state_groups_state(state_group, type, state_key)"
829 )
830 txn.execute(
831 "DROP INDEX IF EXISTS state_groups_state_id"
832 )
833 finally:
834 conn.set_session(autocommit=False)
835 else:
836 txn = conn.cursor()
837 txn.execute(
838 "CREATE INDEX state_groups_state_type_idx"
839 " ON state_groups_state(state_group, type, state_key)"
840 )
841 txn.execute(
842 "DROP INDEX IF EXISTS state_groups_state_id"
843 )
844
845 yield self.runWithConnection(reindex_txn)
846
847 yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
848
849 defer.returnValue(1)
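To make the delta encoding above concrete: resolving a delta-encoded group means walking prev_state_group edges back to a root and letting newer groups override older ones, which is what both the recursive CTE and the SQLite fallback compute. A small standalone sketch (the sample data and resolve helper are illustrative):

# Illustrative data: state_group 3 is a delta on 2, which is a delta on 1.
edges = {3: 2, 2: 1}                # state_group -> prev_state_group
rows = {                            # state_group -> {(type, state_key): event_id}
    1: {("m.room.create", ""): "$c", ("m.room.name", ""): "$n1"},
    2: {("m.room.name", ""): "$n2"},
    3: {("m.room.topic", ""): "$t"},
}

def resolve(state_group):
    # Walk the prev_state_group chain back to the root ...
    chain = [state_group]
    while chain[-1] in edges:
        chain.append(edges[chain[-1]])
    # ... then apply rows oldest-first so newer groups win, matching the
    # max(state_group) per (type, state_key) semantics of the queries above.
    state = {}
    for group in reversed(chain):
        state.update(rows.get(group, {}))
    return state

print(resolve(3))
# {('m.room.create', ''): '$c', ('m.room.name', ''): '$n2', ('m.room.topic', ''): '$t'}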
530530 )
531531 defer.returnValue("t%d-%d" % (topo, token))
532532
533 def get_room_max_stream_ordering(self):
534 return self._stream_id_gen.get_current_token()
535
533536 def get_stream_token_for_event(self, event_id):
534537 """The stream token for an event
535538 Args:
120120 k, r = self._cache.popitem()
121121 self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos)
122122 self._entity_to_key.pop(r, None)
123
124 def get_max_pos_of_last_change(self, entity):
125 """Returns an upper bound of the stream id of the last change to an
126 entity.
127 """
128 return self._entity_to_key.get(entity, self._earliest_known_stream_pos)
120120
121121 self.auth.check_joined_room = check_joined_room
122122
123 self.datastore.get_to_device_stream_token = lambda: 0
124 self.datastore.get_new_device_msgs_for_remote = (
125 lambda *args, **kargs: ([], 0)
126 )
127 self.datastore.delete_device_msgs_for_remote = (
128 lambda *args, **kargs: None
129 )
130
123131 # Some local users to test with
124132 self.u_apple = UserID.from_string("@apple:test")
125133 self.u_banana = UserID.from_string("@banana:test")