Imported Upstream version 0.11.0-r2
Erik Johnston
8 years ago
0 | Changes in synapse v0.11.0-r2 (2015-11-19) | |
1 | ========================================== | |
2 | ||
3 | * Fix bug in database port script (PR #387) | |
4 | ||
5 | Changes in synapse v0.11.0-r1 (2015-11-18) | |
6 | ========================================== | |
7 | ||
8 | * Retry and fail federation requests more aggressively for requests that block | |
9 | client-side requests (PR #384) | |
10 | ||
0 | 11 | Changes in synapse v0.11.0 (2015-11-17) |
1 | 12 | ======================================= |
2 | 13 |
132 | 132 | Alternatively, Silvio Fricke has contributed a Dockerfile to automate the |
133 | 133 | above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/. |
134 | 134 | |
135 | Another alternative is to install via apt from http://matrix.org/packages/debian/. | |
136 | Note that these packages do not include a client; choose one from | |
137 | https://matrix.org/blog/try-matrix-now/ (or build your own with | |
138 | https://github.com/matrix-org/matrix-js-sdk/). | |
139 | ||
135 | 140 | To set up your homeserver, run (in your virtualenv, as before):: |
136 | 141 | |
137 | 142 | cd ~/.synapse |
67 | 67 | "state_groups_state", |
68 | 68 | "event_to_state_groups", |
69 | 69 | "rejections", |
70 | "event_search", | |
70 | 71 | ] |
71 | 72 | |
72 | 73 | |
228 | 229 | if rows: |
229 | 230 | next_chunk = rows[-1][0] + 1 |
230 | 231 | |
231 | self._convert_rows(table, headers, rows) | |
232 | ||
233 | def insert(txn): | |
234 | self.postgres_store.insert_many_txn( | |
235 | txn, table, headers[1:], rows | |
236 | ) | |
237 | ||
238 | self.postgres_store._simple_update_one_txn( | |
239 | txn, | |
240 | table="port_from_sqlite3", | |
241 | keyvalues={"table_name": table}, | |
242 | updatevalues={"rowid": next_chunk}, | |
243 | ) | |
232 | if table == "event_search": | |
233 | # event_search needs special handling because its schema differs | |
234 | # between the SQLite and PostgreSQL databases. | |
235 | def insert(txn): | |
236 | sql = ( | |
237 | "INSERT INTO event_search (event_id, room_id, key, sender, vector)" | |
238 | " VALUES (?,?,?,?,to_tsvector('english', ?))" | |
239 | ) | |
240 | ||
241 | rows_dict = [ | |
242 | dict(zip(headers, row)) | |
243 | for row in rows | |
244 | ] | |
245 | ||
246 | txn.executemany(sql, [ | |
247 | ( | |
248 | row["event_id"], | |
249 | row["room_id"], | |
250 | row["key"], | |
251 | row["sender"], | |
252 | row["value"], | |
253 | ) | |
254 | for row in rows_dict | |
255 | ]) | |
256 | ||
257 | self.postgres_store._simple_update_one_txn( | |
258 | txn, | |
259 | table="port_from_sqlite3", | |
260 | keyvalues={"table_name": table}, | |
261 | updatevalues={"rowid": next_chunk}, | |
262 | ) | |
263 | else: | |
264 | self._convert_rows(table, headers, rows) | |
265 | ||
266 | def insert(txn): | |
267 | self.postgres_store.insert_many_txn( | |
268 | txn, table, headers[1:], rows | |
269 | ) | |
270 | ||
271 | self.postgres_store._simple_update_one_txn( | |
272 | txn, | |
273 | table="port_from_sqlite3", | |
274 | keyvalues={"table_name": table}, | |
275 | updatevalues={"rowid": next_chunk}, | |
276 | ) | |
244 | 277 | |
245 | 278 | yield self.postgres_store.execute(insert) |
246 | 279 |
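
The special case above exists because event_search is laid out differently in the two databases: the SQLite side exposes the indexed text as a plain ``value`` column, while the Postgres table stores a ``vector`` column built with ``to_tsvector('english', ...)``, so its rows cannot be copied column-for-column the way the generic branch does. A minimal sketch of the per-row mapping the new ``insert`` callback performs; the header list and sample row are invented, and the first header is assumed to be the chunking rowid (cf. ``rows[-1][0] + 1`` and ``headers[1:]`` above)::

    # Sketch of the event_search row mapping; data below is invented.
    headers = ["rowid", "event_id", "room_id", "key", "sender", "value"]
    rows = [
        (1, "$evt1:example.org", "!room:example.org",
         "content.body", "@alice:example.org", "hello world"),
    ]

    sql = (
        "INSERT INTO event_search (event_id, room_id, key, sender, vector)"
        " VALUES (?,?,?,?,to_tsvector('english', ?))"
    )

    # Each SQLite row becomes a dict keyed by column name, then is reordered
    # into the argument tuple the Postgres INSERT expects; the plain text in
    # "value" is what to_tsvector('english', ?) turns into the tsvector column.
    rows_dict = [dict(zip(headers, row)) for row in rows]
    args = [
        (r["event_id"], r["room_id"], r["key"], r["sender"], r["value"])
        for r in rows_dict
    ]
    print(sql)
    print(args)
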
15 | 15 | """ This is a reference implementation of a Matrix home server. |
16 | 16 | """ |
17 | 17 | |
18 | __version__ = "0.11.0" | |
18 | __version__ = "0.11.0-r2" |
24 | 24 | pass |
25 | 25 | |
26 | 26 | |
27 | # We split these messages out to allow packages to override them with | |
28 | # package-specific instructions. | |
29 | MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\ | |
30 | Please opt in or out of reporting anonymized homeserver usage statistics by | |
31 | setting the `report_stats` key in your config file to either True or False. | |
32 | """ | |
33 | ||
34 | MISSING_REPORT_STATS_SPIEL = """\ | |
35 | We would really appreciate it if you could help our project out by reporting | |
36 | anonymized usage statistics from your homeserver. Only very basic aggregate | |
37 | data (e.g. number of users) will be reported, but it helps us to track the | |
38 | growth of the Matrix community, and helps us to make Matrix a success, as well | |
39 | as to convince other networks that they should peer with us. | |
40 | ||
41 | Thank you. | |
42 | """ | |
43 | ||
44 | MISSING_SERVER_NAME = """\ | |
45 | Missing mandatory `server_name` config option. | |
46 | """ | |
47 | ||
48 | ||
27 | 49 | class Config(object): |
28 | ||
29 | stats_reporting_begging_spiel = ( | |
30 | "We would really appreciate it if you could help our project out by" | |
31 | " reporting anonymized usage statistics from your homeserver. Only very" | |
32 | " basic aggregate data (e.g. number of users) will be reported, but it" | |
33 | " helps us to track the growth of the Matrix community, and helps us to" | |
34 | " make Matrix a success, as well as to convince other networks that they" | |
35 | " should peer with us." | |
36 | "\nThank you." | |
37 | ) | |
38 | ||
39 | 50 | @staticmethod |
40 | 51 | def parse_size(value): |
41 | 52 | if isinstance(value, int) or isinstance(value, long): |
214 | 225 | if config_args.report_stats is None: |
215 | 226 | config_parser.error( |
216 | 227 | "Please specify either --report-stats=yes or --report-stats=no\n\n" + |
217 | cls.stats_reporting_begging_spiel | |
228 | MISSING_REPORT_STATS_SPIEL | |
218 | 229 | ) |
219 | 230 | if not config_files: |
220 | 231 | config_parser.error( |
289 | 300 | yaml_config = cls.read_config_file(config_file) |
290 | 301 | specified_config.update(yaml_config) |
291 | 302 | |
303 | if "server_name" not in specified_config: | |
304 | sys.stderr.write("\n" + MISSING_SERVER_NAME + "\n") | |
305 | sys.exit(1) | |
306 | ||
292 | 307 | server_name = specified_config["server_name"] |
293 | 308 | _, config = obj.generate_config( |
294 | 309 | config_dir_path=config_dir_path, |
298 | 313 | config.update(specified_config) |
299 | 314 | if "report_stats" not in config: |
300 | 315 | sys.stderr.write( |
301 | "Please opt in or out of reporting anonymized homeserver usage " | |
302 | "statistics, by setting the report_stats key in your config file " | |
303 | " ( " + config_path + " ) " + | |
304 | "to either True or False.\n\n" + | |
305 | Config.stats_reporting_begging_spiel + "\n") | |
316 | "\n" + MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + | |
317 | MISSING_REPORT_STATS_SPIEL + "\n") | |
306 | 318 | sys.exit(1) |
307 | 319 | |
308 | 320 | if generate_keys: |
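
The ``MISSING_*`` strings above are module-level constants rather than inline literals precisely so that downstream packages can replace them with instructions that match their own packaging, as the comment at the top of that hunk notes. A hypothetical sketch of one way a packaging shim could do that at import time; the module path ``synapse.config._base`` is assumed from the surrounding code, and the replacement wording is invented::

    # Hypothetical packaging shim: swap the generic report_stats guidance for
    # distribution-specific wording before synapse reads its config.  The
    # module path is assumed; the replacement text is invented.
    import synapse.config._base as config_base

    config_base.MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\
    Please set `report_stats` to true or false in your packaged homeserver.yaml
    (for example /etc/matrix-synapse/homeserver.yaml).
    """

A source patch applied at package build time would achieve the same effect without touching the module at runtime.
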
135 | 135 | path=PREFIX + "/send/%s/" % transaction.transaction_id, |
136 | 136 | data=json_data, |
137 | 137 | json_data_callback=json_data_callback, |
138 | long_retries=True, | |
138 | 139 | ) |
139 | 140 | |
140 | 141 | logger.debug( |
55 | 55 | ) |
56 | 56 | |
57 | 57 | |
58 | MAX_RETRIES = 10 | |
58 | MAX_LONG_RETRIES = 10 | |
59 | MAX_SHORT_RETRIES = 3 | |
59 | 60 | |
60 | 61 | |
61 | 62 | class MatrixFederationEndpointFactory(object): |
102 | 103 | def _create_request(self, destination, method, path_bytes, |
103 | 104 | body_callback, headers_dict={}, param_bytes=b"", |
104 | 105 | query_bytes=b"", retry_on_dns_fail=True, |
105 | timeout=None): | |
106 | timeout=None, long_retries=False): | |
106 | 107 | """ Creates and sends a request to the given url |
107 | 108 | """ |
108 | 109 | headers_dict[b"User-Agent"] = [self.version_string] |
122 | 123 | |
123 | 124 | # XXX: Would be much nicer to retry only at the transaction-layer |
124 | 125 | # (once we have reliable transactions in place) |
125 | retries_left = MAX_RETRIES | |
126 | if long_retries: | |
127 | retries_left = MAX_LONG_RETRIES | |
128 | else: | |
129 | retries_left = MAX_SHORT_RETRIES | |
126 | 130 | |
127 | 131 | http_url_bytes = urlparse.urlunparse( |
128 | 132 | ("", "", path_bytes, param_bytes, query_bytes, "") |
183 | 187 | ) |
184 | 188 | |
185 | 189 | if retries_left and not timeout: |
186 | delay = 4 ** (MAX_RETRIES + 1 - retries_left) | |
187 | delay = max(delay, 60) | |
188 | delay *= random.uniform(0.8, 1.4) | |
190 | if long_retries: | |
191 | delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left) | |
192 | delay = max(delay, 60) | |
193 | delay *= random.uniform(0.8, 1.4) | |
194 | else: | |
195 | delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left) | |
196 | delay = max(delay, 2) | |
197 | delay *= random.uniform(0.8, 1.4) | |
198 | ||
189 | 199 | yield sleep(delay) |
190 | 200 | retries_left -= 1 |
191 | 201 | else: |
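
The hunk above replaces the single ``MAX_RETRIES`` schedule with two: a long schedule (up to 10 attempts, delays of ``4 ** n`` seconds with ``max(delay, 60)`` applied, so as written 60 seconds is a floor on each wait) and a short schedule (up to 3 attempts with a 2 second floor), the latter being the default so that requests which block client-side requests fail quickly, per PR #384. A small standalone sketch that mirrors just the delay arithmetic from the hunk, so the resulting wait times are easy to inspect::

    import random

    MAX_LONG_RETRIES = 10
    MAX_SHORT_RETRIES = 3

    def delays(long_retries, seed=0):
        """Reproduce the retry delays computed in the hunk above (with jitter)."""
        rnd = random.Random(seed)
        retries_left = MAX_LONG_RETRIES if long_retries else MAX_SHORT_RETRIES
        waits = []
        while retries_left:
            if long_retries:
                delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                delay = max(delay, 60)   # as written, 60s is a floor on each long wait
            else:
                delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                delay = max(delay, 2)    # as written, 2s is a floor on each short wait
            waits.append(delay * rnd.uniform(0.8, 1.4))
            retries_left -= 1
        return waits

    print([round(w, 1) for w in delays(long_retries=False)])
    # three short waits of roughly 1.6-2.8s each
    print([round(w) for w in delays(long_retries=True)][:4])
    # long waits start at ~48-84s and grow as 4**n once past the floor
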
236 | 246 | headers_dict[b"Authorization"] = auth_headers |
237 | 247 | |
238 | 248 | @defer.inlineCallbacks |
239 | def put_json(self, destination, path, data={}, json_data_callback=None): | |
249 | def put_json(self, destination, path, data={}, json_data_callback=None, | |
250 | long_retries=False): | |
240 | 251 | """ Sends the specified json data using PUT
241 | 252 | |
242 | 253 | Args: |
247 | 258 | the request body. This will be encoded as JSON. |
248 | 259 | json_data_callback (callable): A callable returning the dict to |
249 | 260 | use as the request body. |
261 | long_retries (bool): Whether to retry on the long schedule (more | |
262 | attempts, longer delays) rather than the short one. | |
250 | 263 | |
251 | 264 | Returns: |
252 | 265 | Deferred: Succeeds when we get a 2xx HTTP response. The result |
272 | 285 | path.encode("ascii"), |
273 | 286 | body_callback=body_callback, |
274 | 287 | headers_dict={"Content-Type": ["application/json"]}, |
288 | long_retries=long_retries, | |
275 | 289 | ) |
276 | 290 | |
277 | 291 | if 200 <= response.code < 300: |
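
``put_json`` now threads ``long_retries`` through to ``_create_request``, and the transport-layer hunk earlier (``path=PREFIX + "/send/%s/" ...``) opts transaction sends into the long schedule while every other caller keeps the short default. A hedged sketch of such a call site; the client argument, path prefix and payload shape are assumptions for illustration, not taken from this diff::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def send_transaction(client, txn_id, json_data):
        # `client` is assumed to be the federation HTTP client defined in the
        # hunks above; the path prefix and payload are illustrative only.
        response = yield client.put_json(
            destination="remote.example.org",
            path="/_matrix/federation/v1/send/%s/" % (txn_id,),
            data=json_data,
            long_retries=True,  # new in this release; other call sites keep
                                # the default short schedule and fail fast
        )
        defer.returnValue(response)
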
490 | 504 | def stopProducing(self): |
491 | 505 | pass |
492 | 506 | |
507 | def resumeProducing(self): | |
508 | pass | |
509 | ||
493 | 510 | |
494 | 511 | def _flatten_response_never_received(e): |
495 | 512 | if hasattr(e, "reasons"): |
196 | 196 | 'pdu_failures': [], |
197 | 197 | }, |
198 | 198 | json_data_callback=ANY, |
199 | long_retries=True, | |
199 | 200 | ) |
200 | 201 | |
201 | 202 | @defer.inlineCallbacks |
227 | 228 | 'pdu_failures': [], |
228 | 229 | }, |
229 | 230 | json_data_callback=ANY, |
231 | long_retries=True, | |
230 | 232 | ) |
231 | 233 | |
232 | 234 | @defer.inlineCallbacks |
408 | 408 | } |
409 | 409 | ), |
410 | 410 | json_data_callback=ANY, |
411 | long_retries=True, | |
411 | 412 | ), |
412 | 413 | defer.succeed((200, "OK")) |
413 | 414 | ) |
442 | 443 | } |
443 | 444 | ), |
444 | 445 | json_data_callback=ANY, |
446 | long_retries=True, | |
445 | 447 | ), |
446 | 448 | defer.succeed((200, "OK")) |
447 | 449 | ) |
482 | 484 | } |
483 | 485 | ), |
484 | 486 | json_data_callback=ANY, |
487 | long_retries=True, | |
485 | 488 | ), |
486 | 489 | defer.succeed((200, "OK")) |
487 | 490 | ) |
826 | 829 | } |
827 | 830 | ), |
828 | 831 | json_data_callback=ANY, |
832 | long_retries=True, | |
829 | 833 | ), |
830 | 834 | defer.succeed((200, "OK")) |
831 | 835 | ) |
842 | 846 | } |
843 | 847 | ), |
844 | 848 | json_data_callback=ANY, |
849 | long_retries=True, | |
845 | 850 | ), |
846 | 851 | defer.succeed((200, "OK")) |
847 | 852 | ) |
1032 | 1037 | } |
1033 | 1038 | ), |
1034 | 1039 | json_data_callback=ANY, |
1040 | long_retries=True, | |
1035 | 1041 | ), |
1036 | 1042 | defer.succeed((200, "OK")) |
1037 | 1043 | ) |
1047 | 1053 | } |
1048 | 1054 | ), |
1049 | 1055 | json_data_callback=ANY, |
1056 | long_retries=True, | |
1050 | 1057 | ), |
1051 | 1058 | defer.succeed((200, "OK")) |
1052 | 1059 | ) |
1077 | 1084 | } |
1078 | 1085 | ), |
1079 | 1086 | json_data_callback=ANY, |
1087 | long_retries=True, | |
1080 | 1088 | ), |
1081 | 1089 | defer.succeed((200, "OK")) |
1082 | 1090 | ) |
1183 | 1191 | }, |
1184 | 1192 | ), |
1185 | 1193 | json_data_callback=ANY, |
1194 | long_retries=True, | |
1186 | 1195 | ), |
1187 | 1196 | defer.succeed((200, "OK")) |
1188 | 1197 | ) |
1199 | 1208 | }, |
1200 | 1209 | ), |
1201 | 1210 | json_data_callback=ANY, |
1211 | long_retries=True, | |
1202 | 1212 | ), |
1203 | 1213 | defer.succeed((200, "OK")) |
1204 | 1214 | ) |
1231 | 1241 | }, |
1232 | 1242 | ), |
1233 | 1243 | json_data_callback=ANY, |
1244 | long_retries=True, | |
1234 | 1245 | ), |
1235 | 1246 | defer.succeed((200, "OK")) |
1236 | 1247 | ) |
1264 | 1275 | }, |
1265 | 1276 | ), |
1266 | 1277 | json_data_callback=ANY, |
1278 | long_retries=True, | |
1267 | 1279 | ), |
1268 | 1280 | defer.succeed((200, "OK")) |
1269 | 1281 | ) |
1296 | 1308 | }, |
1297 | 1309 | ), |
1298 | 1310 | json_data_callback=ANY, |
1311 | long_retries=True, | |
1299 | 1312 | ), |
1300 | 1313 | defer.succeed((200, "OK")) |
1301 | 1314 | ) |
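
Each expectation in these tests spells out the complete keyword set of the expected ``put_json`` call and pairs it with a canned ``(200, "OK")`` result, so once the production code always passes ``long_retries=True`` every expectation has to list it too or the recorded call no longer matches. A generic illustration of that exact-match rule using the standard library's ``unittest.mock`` (not the synapse test helper itself)::

    from unittest.mock import ANY, Mock, call

    http_client = Mock()

    # Production code now always passes long_retries=True for transaction PUTs.
    http_client.put_json("remote", path="/send/1000000/", data={},
                         json_data_callback=None, long_retries=True)

    # The expectation matches only because it, too, names long_retries=True;
    # one written for the old call signature would fail here.
    assert http_client.put_json.call_args == call(
        "remote", path="/send/1000000/", data={},
        json_data_callback=ANY, long_retries=True,
    )
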