Update upstream source from tag 'upstream/1.3.1'
Update to upstream version '1.3.1'
with Debian dir 27afbabe1a4623f247f1200c34af49c52653326d
Piotr Ożarowski
4 years ago
0 | 0 | Changes |
1 | 1 | ------- |
2 | ||
3 | .. towncrier release notes start | |
4 | ||
5 | 1.3.1 (2019-12-02) | |
6 | ^^^^^^^^^^^^^^^^^^ | |
7 | Bugfixes | |
8 | ~~~~~~~~ | |
9 | ||
10 | - Fix transaction data decoding | |
11 | (see `#657 <https://github.com/aio-libs/aioredis/issues/657>`_); | |
12 | - Fix duplicate calls to ``pool.wait_closed()`` upon ``create_pool()`` exception. | |
13 | (see `#671 <https://github.com/aio-libs/aioredis/issues/671>`_); | |
14 | ||
15 | Deprecations and Removals | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~ | |
17 | ||
18 | - Drop explicit loop requirement in API. | |
19 | Deprecate ``loop`` argument. | |
20 | Throw warning in Python 3.8+ if explicit ``loop`` is passed to methods. | |
21 | (see `#666 <https://github.com/aio-libs/aioredis/issues/666>`_); | |
22 | ||
23 | Misc | |
24 | ~~~~ | |
25 | ||
26 | - `#643 <https://github.com/aio-libs/aioredis/issues/643>`_, | |
27 | `#646 <https://github.com/aio-libs/aioredis/issues/646>`_, | |
28 | `#648 <https://github.com/aio-libs/aioredis/issues/648>`_; | |
29 | ||
30 | ||
31 | 1.3.0 (2019-09-24) | |
32 | ^^^^^^^^^^^^^^^^^^ | |
33 | Features | |
34 | ~~~~~~~~ | |
35 | ||
36 | - Added ``xdel`` and ``xtrim`` method which missed in ``commands/streams.py`` & also added unit test code for them | |
37 | (see `#438 <https://github.com/aio-libs/aioredis/issues/438>`_); | |
38 | - Add ``count`` argument to ``spop`` command | |
39 | (see `#485 <https://github.com/aio-libs/aioredis/issues/485>`_); | |
40 | - Add support for ``zpopmax`` and ``zpopmin`` redis commands | |
41 | (see `#550 <https://github.com/aio-libs/aioredis/issues/550>`_); | |
42 | - Add ``towncrier``: change notes are now stored in ``CHANGES.txt`` | |
43 | (see `#576 <https://github.com/aio-libs/aioredis/issues/576>`_); | |
44 | - Type hints for the library | |
45 | (see `#584 <https://github.com/aio-libs/aioredis/issues/584>`_); | |
46 | - A few additions to the sorted set commands: | |
47 | ||
48 | - the blocking pop commands: ``BZPOPMAX`` and ``BZPOPMIN`` | |
49 | ||
50 | - the ``CH`` and ``INCR`` options of the ``ZADD`` command | |
51 | ||
52 | (see `#618 <https://github.com/aio-libs/aioredis/issues/618>`_); | |
53 | - Added ``no_ack`` parameter to ``xread_group`` streams method in ``commands/streams.py`` | |
54 | (see `#625 <https://github.com/aio-libs/aioredis/issues/625>`_); | |
55 | ||
56 | Bugfixes | |
57 | ~~~~~~~~ | |
58 | ||
59 | - Fix for sensitive logging | |
60 | (see `#459 <https://github.com/aio-libs/aioredis/issues/459>`_); | |
61 | - Fix slow memory leak in ``wait_closed`` implementation | |
62 | (see `#498 <https://github.com/aio-libs/aioredis/issues/498>`_); | |
63 | - Fix handling of instances where Redis returns null fields for a stream message | |
64 | (see `#605 <https://github.com/aio-libs/aioredis/issues/605>`_); | |
65 | ||
66 | Improved Documentation | |
67 | ~~~~~~~~~~~~~~~~~~~~~~ | |
68 | ||
69 | - Rewrite "Getting started" documentation. | |
70 | (see `#641 <https://github.com/aio-libs/aioredis/issues/641>`_); | |
71 | ||
72 | Misc | |
73 | ~~~~ | |
74 | ||
75 | - `#585 <https://github.com/aio-libs/aioredis/issues/585>`_, | |
76 | `#611 <https://github.com/aio-libs/aioredis/issues/611>`_, | |
77 | `#612 <https://github.com/aio-libs/aioredis/issues/612>`_, | |
78 | `#619 <https://github.com/aio-libs/aioredis/issues/619>`_, | |
79 | `#620 <https://github.com/aio-libs/aioredis/issues/620>`_, | |
80 | `#642 <https://github.com/aio-libs/aioredis/issues/642>`_; | |
81 | ||
2 | 82 | |
3 | 83 | 1.2.0 (2018-10-24) |
4 | 84 | ^^^^^^^^^^^^^^^^^^ |
348 | 428 | * Fixed cancellation of wait_closed |
349 | 429 | (see `#118 <https://github.com/aio-libs/aioredis/issues/118>`_); |
350 | 430 | |
351 | * Fixed ``time()`` convertion to float | |
431 | * Fixed ``time()`` conversion to float | |
352 | 432 | (see `#126 <https://github.com/aio-libs/aioredis/issues/126>`_); |
353 | 433 | |
354 | 434 | * Fixed ``hmset()`` method to return bool instead of ``b'OK'`` |
7 | 7 | Alexander Shorin |
8 | 8 | Aliaksei Urbanski |
9 | 9 | Andrew Svetlov |
10 | Anton Salii | |
10 | 11 | Anton Verinov |
12 | Artem Mazur | |
11 | 13 | <cynecx> |
12 | 14 | David Francos |
13 | 15 | Dima Kruk |
15 | 17 | Hugo <hugovk> |
16 | 18 | Ihor Gorobets |
17 | 19 | Ihor Liubymov |
20 | Ilya Samartsev | |
18 | 21 | James Hilliard |
19 | 22 | Jan Špaček |
20 | 23 | Jeff Moser |
24 | 27 | Marek Szapiel |
25 | 28 | Marijn Giesen |
26 | 29 | Martin <the-panda> |
30 | Maxim Dodonchuk | |
27 | 31 | Michael Käufl |
28 | 32 | Nickolai Novik |
33 | Oleg Butuzov | |
34 | Oleksandr Tykhonruk | |
29 | 35 | Pau Freixes |
30 | 36 | Paul Colomiets |
31 | 37 | Samuel Colvin |
32 | 38 | Samuel Dion-Girardeau |
39 | Sergey Miletskiy | |
33 | 40 | SeungHyun Hwang |
34 | 41 | Taku Fukada |
35 | 42 | Taras Voinarovskyi |
36 | 43 | Thanos Lefteris |
37 | 44 | Thomas Steinacher |
38 | 45 | Volodymyr Hotsyk |
46 | Youngmin Koo <youngminz> | |
47 | Dima Kit | |
48 | <curiouscod3> | |
49 | Dmitry Vasilishin <dmvass> |
0 | 0 | Metadata-Version: 1.1 |
1 | 1 | Name: aioredis |
2 | Version: 1.2.0 | |
2 | Version: 1.3.1 | |
3 | 3 | Summary: asyncio (PEP 3156) Redis support |
4 | 4 | Home-page: https://github.com/aio-libs/aioredis |
5 | 5 | Author: Alexey Popravka |
34 | 34 | Sentinel support Yes |
35 | 35 | Redis Cluster support WIP |
36 | 36 | Trollius (python 2.7) No |
37 | Tested CPython versions `3.5, 3.6 3.7 <travis_>`_ [2]_ | |
38 | Tested PyPy3 versions `5.9.0 <travis_>`_ | |
39 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 <travis_>`_ | |
37 | Tested CPython versions `3.5.3, 3.6, 3.7 <travis_>`_ [1]_ | |
38 | Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 <travis_>`_ | |
39 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 <travis_>`_ | |
40 | 40 | Support for dev Redis server through low-level API |
41 | 41 | ================================ ============================== |
42 | 42 | |
43 | ||
44 | .. [2] For Python 3.3, 3.4 support use aioredis v0.3. | |
43 | .. [1] For Python 3.3, 3.4 support use aioredis v0.3. | |
45 | 44 | |
46 | 45 | Documentation |
47 | 46 | ------------- |
48 | 47 | |
49 | 48 | http://aioredis.readthedocs.io/ |
50 | 49 | |
51 | Usage examples | |
52 | -------------- | |
53 | ||
54 | Simple low-level interface: | |
50 | Usage example | |
51 | ------------- | |
52 | ||
53 | Simple high-level interface with connections pool: | |
55 | 54 | |
56 | 55 | .. code:: python |
57 | 56 | |
58 | 57 | import asyncio |
59 | 58 | import aioredis |
60 | 59 | |
61 | loop = asyncio.get_event_loop() | |
62 | ||
63 | 60 | async def go(): |
64 | conn = await aioredis.create_connection( | |
65 | 'redis://localhost', loop=loop) | |
66 | await conn.execute('set', 'my-key', 'value') | |
67 | val = await conn.execute('get', 'my-key') | |
68 | print(val) | |
69 | conn.close() | |
70 | await conn.wait_closed() | |
71 | loop.run_until_complete(go()) | |
72 | # will print 'value' | |
73 | ||
74 | Simple high-level interface: | |
75 | ||
76 | .. code:: python | |
77 | ||
78 | import asyncio | |
79 | import aioredis | |
80 | ||
81 | loop = asyncio.get_event_loop() | |
82 | ||
83 | async def go(): | |
84 | redis = await aioredis.create_redis( | |
85 | 'redis://localhost', loop=loop) | |
61 | redis = await aioredis.create_redis_pool( | |
62 | 'redis://localhost') | |
86 | 63 | await redis.set('my-key', 'value') |
87 | val = await redis.get('my-key') | |
64 | val = await redis.get('my-key', encoding='utf-8') | |
88 | 65 | print(val) |
89 | 66 | redis.close() |
90 | 67 | await redis.wait_closed() |
91 | loop.run_until_complete(go()) | |
92 | # will print 'value' | |
93 | ||
94 | Connections pool: | |
95 | ||
96 | .. code:: python | |
97 | ||
98 | import asyncio | |
99 | import aioredis | |
100 | ||
101 | loop = asyncio.get_event_loop() | |
102 | ||
103 | async def go(): | |
104 | pool = await aioredis.create_pool( | |
105 | 'redis://localhost', | |
106 | minsize=5, maxsize=10, | |
107 | loop=loop) | |
108 | await pool.execute('set', 'my-key', 'value') | |
109 | print(await pool.execute('get', 'my-key')) | |
110 | # graceful shutdown | |
111 | pool.close() | |
112 | await pool.wait_closed() | |
113 | ||
114 | loop.run_until_complete(go()) | |
115 | ||
116 | Simple high-level interface with connections pool: | |
117 | ||
118 | .. code:: python | |
119 | ||
120 | import asyncio | |
121 | import aioredis | |
122 | ||
123 | loop = asyncio.get_event_loop() | |
124 | ||
125 | async def go(): | |
126 | redis = await aioredis.create_redis_pool( | |
127 | 'redis://localhost', | |
128 | minsize=5, maxsize=10, | |
129 | loop=loop) | |
130 | await redis.set('my-key', 'value') | |
131 | val = await redis.get('my-key') | |
132 | print(val) | |
133 | redis.close() | |
134 | await redis.wait_closed() | |
135 | loop.run_until_complete(go()) | |
68 | ||
69 | asyncio.run(go()) | |
136 | 70 | # will print 'value' |
137 | 71 | |
138 | 72 | Requirements |
170 | 104 | |
171 | 105 | Changes |
172 | 106 | ------- |
107 | ||
108 | .. towncrier release notes start | |
109 | ||
110 | 1.3.1 (2019-12-02) | |
111 | ^^^^^^^^^^^^^^^^^^ | |
112 | Bugfixes | |
113 | ~~~~~~~~ | |
114 | ||
115 | - Fix transaction data decoding | |
116 | (see `#657 <https://github.com/aio-libs/aioredis/issues/657>`_); | |
117 | - Fix duplicate calls to ``pool.wait_closed()`` upon ``create_pool()`` exception. | |
118 | (see `#671 <https://github.com/aio-libs/aioredis/issues/671>`_); | |
119 | ||
120 | Deprecations and Removals | |
121 | ~~~~~~~~~~~~~~~~~~~~~~~~~ | |
122 | ||
123 | - Drop explicit loop requirement in API. | |
124 | Deprecate ``loop`` argument. | |
125 | Throw warning in Python 3.8+ if explicit ``loop`` is passed to methods. | |
126 | (see `#666 <https://github.com/aio-libs/aioredis/issues/666>`_); | |
127 | ||
128 | Misc | |
129 | ~~~~ | |
130 | ||
131 | - `#643 <https://github.com/aio-libs/aioredis/issues/643>`_, | |
132 | `#646 <https://github.com/aio-libs/aioredis/issues/646>`_, | |
133 | `#648 <https://github.com/aio-libs/aioredis/issues/648>`_; | |
134 | ||
135 | ||
136 | 1.3.0 (2019-09-24) | |
137 | ^^^^^^^^^^^^^^^^^^ | |
138 | Features | |
139 | ~~~~~~~~ | |
140 | ||
141 | - Added ``xdel`` and ``xtrim`` method which missed in ``commands/streams.py`` & also added unit test code for them | |
142 | (see `#438 <https://github.com/aio-libs/aioredis/issues/438>`_); | |
143 | - Add ``count`` argument to ``spop`` command | |
144 | (see `#485 <https://github.com/aio-libs/aioredis/issues/485>`_); | |
145 | - Add support for ``zpopmax`` and ``zpopmin`` redis commands | |
146 | (see `#550 <https://github.com/aio-libs/aioredis/issues/550>`_); | |
147 | - Add ``towncrier``: change notes are now stored in ``CHANGES.txt`` | |
148 | (see `#576 <https://github.com/aio-libs/aioredis/issues/576>`_); | |
149 | - Type hints for the library | |
150 | (see `#584 <https://github.com/aio-libs/aioredis/issues/584>`_); | |
151 | - A few additions to the sorted set commands: | |
152 | ||
153 | - the blocking pop commands: ``BZPOPMAX`` and ``BZPOPMIN`` | |
154 | ||
155 | - the ``CH`` and ``INCR`` options of the ``ZADD`` command | |
156 | ||
157 | (see `#618 <https://github.com/aio-libs/aioredis/issues/618>`_); | |
158 | - Added ``no_ack`` parameter to ``xread_group`` streams method in ``commands/streams.py`` | |
159 | (see `#625 <https://github.com/aio-libs/aioredis/issues/625>`_); | |
160 | ||
161 | Bugfixes | |
162 | ~~~~~~~~ | |
163 | ||
164 | - Fix for sensitive logging | |
165 | (see `#459 <https://github.com/aio-libs/aioredis/issues/459>`_); | |
166 | - Fix slow memory leak in ``wait_closed`` implementation | |
167 | (see `#498 <https://github.com/aio-libs/aioredis/issues/498>`_); | |
168 | - Fix handling of instances where Redis returns null fields for a stream message | |
169 | (see `#605 <https://github.com/aio-libs/aioredis/issues/605>`_); | |
170 | ||
171 | Improved Documentation | |
172 | ~~~~~~~~~~~~~~~~~~~~~~ | |
173 | ||
174 | - Rewrite "Getting started" documentation. | |
175 | (see `#641 <https://github.com/aio-libs/aioredis/issues/641>`_); | |
176 | ||
177 | Misc | |
178 | ~~~~ | |
179 | ||
180 | - `#585 <https://github.com/aio-libs/aioredis/issues/585>`_, | |
181 | `#611 <https://github.com/aio-libs/aioredis/issues/611>`_, | |
182 | `#612 <https://github.com/aio-libs/aioredis/issues/612>`_, | |
183 | `#619 <https://github.com/aio-libs/aioredis/issues/619>`_, | |
184 | `#620 <https://github.com/aio-libs/aioredis/issues/620>`_, | |
185 | `#642 <https://github.com/aio-libs/aioredis/issues/642>`_; | |
186 | ||
173 | 187 | |
174 | 188 | 1.2.0 (2018-10-24) |
175 | 189 | ^^^^^^^^^^^^^^^^^^ |
519 | 533 | * Fixed cancellation of wait_closed |
520 | 534 | (see `#118 <https://github.com/aio-libs/aioredis/issues/118>`_); |
521 | 535 | |
522 | * Fixed ``time()`` convertion to float | |
536 | * Fixed ``time()`` conversion to float | |
523 | 537 | (see `#126 <https://github.com/aio-libs/aioredis/issues/126>`_); |
524 | 538 | |
525 | 539 | * Fixed ``hmset()`` method to return bool instead of ``b'OK'`` |
26 | 26 | Sentinel support Yes |
27 | 27 | Redis Cluster support WIP |
28 | 28 | Trollius (python 2.7) No |
29 | Tested CPython versions `3.5, 3.6 3.7 <travis_>`_ [2]_ | |
30 | Tested PyPy3 versions `5.9.0 <travis_>`_ | |
31 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 <travis_>`_ | |
29 | Tested CPython versions `3.5.3, 3.6, 3.7 <travis_>`_ [1]_ | |
30 | Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 <travis_>`_ | |
31 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 <travis_>`_ | |
32 | 32 | Support for dev Redis server through low-level API |
33 | 33 | ================================ ============================== |
34 | 34 | |
35 | ||
36 | .. [2] For Python 3.3, 3.4 support use aioredis v0.3. | |
35 | .. [1] For Python 3.3, 3.4 support use aioredis v0.3. | |
37 | 36 | |
38 | 37 | Documentation |
39 | 38 | ------------- |
40 | 39 | |
41 | 40 | http://aioredis.readthedocs.io/ |
42 | 41 | |
43 | Usage examples | |
44 | -------------- | |
45 | ||
46 | Simple low-level interface: | |
47 | ||
48 | .. code:: python | |
49 | ||
50 | import asyncio | |
51 | import aioredis | |
52 | ||
53 | loop = asyncio.get_event_loop() | |
54 | ||
55 | async def go(): | |
56 | conn = await aioredis.create_connection( | |
57 | 'redis://localhost', loop=loop) | |
58 | await conn.execute('set', 'my-key', 'value') | |
59 | val = await conn.execute('get', 'my-key') | |
60 | print(val) | |
61 | conn.close() | |
62 | await conn.wait_closed() | |
63 | loop.run_until_complete(go()) | |
64 | # will print 'value' | |
65 | ||
66 | Simple high-level interface: | |
67 | ||
68 | .. code:: python | |
69 | ||
70 | import asyncio | |
71 | import aioredis | |
72 | ||
73 | loop = asyncio.get_event_loop() | |
74 | ||
75 | async def go(): | |
76 | redis = await aioredis.create_redis( | |
77 | 'redis://localhost', loop=loop) | |
78 | await redis.set('my-key', 'value') | |
79 | val = await redis.get('my-key') | |
80 | print(val) | |
81 | redis.close() | |
82 | await redis.wait_closed() | |
83 | loop.run_until_complete(go()) | |
84 | # will print 'value' | |
85 | ||
86 | Connections pool: | |
87 | ||
88 | .. code:: python | |
89 | ||
90 | import asyncio | |
91 | import aioredis | |
92 | ||
93 | loop = asyncio.get_event_loop() | |
94 | ||
95 | async def go(): | |
96 | pool = await aioredis.create_pool( | |
97 | 'redis://localhost', | |
98 | minsize=5, maxsize=10, | |
99 | loop=loop) | |
100 | await pool.execute('set', 'my-key', 'value') | |
101 | print(await pool.execute('get', 'my-key')) | |
102 | # graceful shutdown | |
103 | pool.close() | |
104 | await pool.wait_closed() | |
105 | ||
106 | loop.run_until_complete(go()) | |
42 | Usage example | |
43 | ------------- | |
107 | 44 | |
108 | 45 | Simple high-level interface with connections pool: |
109 | 46 | |
112 | 49 | import asyncio |
113 | 50 | import aioredis |
114 | 51 | |
115 | loop = asyncio.get_event_loop() | |
116 | ||
117 | 52 | async def go(): |
118 | 53 | redis = await aioredis.create_redis_pool( |
119 | 'redis://localhost', | |
120 | minsize=5, maxsize=10, | |
121 | loop=loop) | |
54 | 'redis://localhost') | |
122 | 55 | await redis.set('my-key', 'value') |
123 | val = await redis.get('my-key') | |
56 | val = await redis.get('my-key', encoding='utf-8') | |
124 | 57 | print(val) |
125 | 58 | redis.close() |
126 | 59 | await redis.wait_closed() |
127 | loop.run_until_complete(go()) | |
60 | ||
61 | asyncio.run(go()) | |
128 | 62 | # will print 'value' |
129 | 63 | |
130 | 64 | Requirements |
27 | 27 | ) |
28 | 28 | |
29 | 29 | |
30 | __version__ = '1.2.0' | |
30 | __version__ = '1.3.1' | |
31 | 31 | |
32 | 32 | __all__ = [ |
33 | 33 | # Factories |
2 | 2 | These are intended to be used for implementing custom connection managers. |
3 | 3 | """ |
4 | 4 | import abc |
5 | import asyncio | |
6 | ||
7 | from abc import ABC | |
8 | 5 | |
9 | 6 | |
10 | 7 | __all__ = [ |
14 | 11 | ] |
15 | 12 | |
16 | 13 | |
17 | class AbcConnection(ABC): | |
14 | class AbcConnection(abc.ABC): | |
18 | 15 | """Abstract connection interface.""" |
19 | 16 | |
20 | 17 | @abc.abstractmethod |
29 | 26 | def close(self): |
30 | 27 | """Perform connection(s) close and resources cleanup.""" |
31 | 28 | |
32 | @asyncio.coroutine | |
33 | 29 | @abc.abstractmethod |
34 | def wait_closed(self): | |
30 | async def wait_closed(self): | |
35 | 31 | """ |
36 | 32 | Coroutine waiting until all resources are closed/released/cleaned up. |
37 | 33 | """ |
83 | 79 | """ |
84 | 80 | |
85 | 81 | @abc.abstractmethod |
86 | def get_connection(self): # TODO: arguments | |
82 | def get_connection(self, command, args=()): | |
87 | 83 | """ |
88 | 84 | Gets free connection from pool in a sync way. |
89 | 85 | |
90 | 86 | If no connection available — returns None. |
91 | 87 | """ |
92 | 88 | |
93 | @asyncio.coroutine | |
94 | 89 | @abc.abstractmethod |
95 | def acquire(self): # TODO: arguments | |
90 | async def acquire(self, command=None, args=()): | |
96 | 91 | """Acquires connection from pool.""" |
97 | 92 | |
98 | 93 | @abc.abstractmethod |
99 | def release(self, conn): # TODO: arguments | |
94 | def release(self, conn): | |
100 | 95 | """Releases connection to pool. |
101 | 96 | |
102 | 97 | :param AbcConnection conn: Owned connection to be released. |
108 | 103 | """Connection address or None.""" |
109 | 104 | |
110 | 105 | |
111 | class AbcChannel(ABC): | |
106 | class AbcChannel(abc.ABC): | |
112 | 107 | """Abstract Pub/Sub Channel interface.""" |
113 | 108 | |
114 | 109 | @property |
127 | 122 | """Flag indicating that channel has unreceived messages |
128 | 123 | and not marked as closed.""" |
129 | 124 | |
130 | @asyncio.coroutine | |
131 | 125 | @abc.abstractmethod |
132 | def get(self): | |
126 | async def get(self): | |
133 | 127 | """Wait and return new message. |
134 | 128 | |
135 | 129 | Will raise ``ChannelClosedError`` if channel is not active. |
118 | 118 | return self.execute('QUIT') |
119 | 119 | |
120 | 120 | def select(self, db): |
121 | """Change the selected database for the current connection. | |
122 | ||
123 | This method wraps call to :meth:`aioredis.RedisConnection.select()` | |
124 | """ | |
121 | """Change the selected database.""" | |
125 | 122 | return self._pool_or_conn.select(db) |
126 | 123 | |
127 | 124 | def swapdb(self, from_index, to_index): |
142 | 142 | """Returns the kind of internal representation used in order |
143 | 143 | to store the value associated with a key (OBJECT ENCODING). |
144 | 144 | """ |
145 | # TODO: set default encoding to 'utf-8' | |
146 | return self.execute(b'OBJECT', b'ENCODING', key) | |
145 | return self.execute(b'OBJECT', b'ENCODING', key, encoding='utf-8') | |
147 | 146 | |
148 | 147 | def object_idletime(self, key): |
149 | 148 | """Returns the number of seconds since the object is not requested |
0 | 0 | from collections import namedtuple |
1 | 1 | |
2 | 2 | from aioredis.util import wait_ok, wait_convert, wait_make_dict, _NOTSET |
3 | from aioredis.log import logger | |
4 | 3 | |
5 | 4 | |
6 | 5 | class ServerCommandsMixin: |
205 | 204 | else: |
206 | 205 | return self.execute(b'SHUTDOWN') |
207 | 206 | |
208 | def slaveof(self, host=_NOTSET, port=None): | |
207 | def slaveof(self, host, port=None): | |
209 | 208 | """Make the server a slave of another instance, |
210 | 209 | or promote it as master. |
211 | 210 | |
215 | 214 | ``slaveof()`` form deprecated |
216 | 215 | in favour of explicit ``slaveof(None)``. |
217 | 216 | """ |
218 | if host is _NOTSET: | |
219 | logger.warning("slaveof() form is deprecated!" | |
220 | " Use slaveof(None) to turn redis into a MASTER.") | |
221 | host = None | |
222 | # TODO: drop in 0.3.0 | |
223 | 217 | if host is None and port is None: |
224 | 218 | return self.execute(b'SLAVEOF', b'NO', b'ONE') |
225 | 219 | return self.execute(b'SLAVEOF', host, port) |
42 | 42 | """Move a member from one set to another.""" |
43 | 43 | return self.execute(b'SMOVE', sourcekey, destkey, member) |
44 | 44 | |
45 | def spop(self, key, *, encoding=_NOTSET): | |
46 | """Remove and return a random member from a set.""" | |
47 | return self.execute(b'SPOP', key, encoding=encoding) | |
45 | def spop(self, key, count=None, *, encoding=_NOTSET): | |
46 | """Remove and return one or multiple random members from a set.""" | |
47 | args = [key] | |
48 | if count is not None: | |
49 | args.append(count) | |
50 | return self.execute(b'SPOP', *args, encoding=encoding) | |
48 | 51 | |
49 | 52 | def srandmember(self, key, count=None, *, encoding=_NOTSET): |
50 | 53 | """Get one or multiple random members from a set.""" |
17 | 17 | ZSET_IF_NOT_EXIST = 'ZSET_IF_NOT_EXIST' # NX |
18 | 18 | ZSET_IF_EXIST = 'ZSET_IF_EXIST' # XX |
19 | 19 | |
20 | def zadd(self, key, score, member, *pairs, exist=None): | |
20 | def bzpopmax(self, key, *keys, timeout=0, encoding=_NOTSET): | |
21 | """Remove and get an element with the highest score in the sorted set, | |
22 | or block until one is available. | |
23 | ||
24 | :raises TypeError: if timeout is not int | |
25 | :raises ValueError: if timeout is less than 0 | |
26 | """ | |
27 | if not isinstance(timeout, int): | |
28 | raise TypeError("timeout argument must be int") | |
29 | if timeout < 0: | |
30 | raise ValueError("timeout must be greater equal 0") | |
31 | args = keys + (timeout,) | |
32 | return self.execute(b'BZPOPMAX', key, *args, encoding=encoding) | |
33 | ||
34 | def bzpopmin(self, key, *keys, timeout=0, encoding=_NOTSET): | |
35 | """Remove and get an element with the lowest score in the sorted set, | |
36 | or block until one is available. | |
37 | ||
38 | :raises TypeError: if timeout is not int | |
39 | :raises ValueError: if timeout is less than 0 | |
40 | """ | |
41 | if not isinstance(timeout, int): | |
42 | raise TypeError("timeout argument must be int") | |
43 | if timeout < 0: | |
44 | raise ValueError("timeout must be greater equal 0") | |
45 | args = keys + (timeout,) | |
46 | return self.execute(b'BZPOPMIN', key, *args, encoding=encoding) | |
47 | ||
48 | def zadd(self, key, score, member, *pairs, exist=None, changed=False, | |
49 | incr=False): | |
21 | 50 | """Add one or more members to a sorted set or update its score. |
22 | 51 | |
23 | 52 | :raises TypeError: score not int or float |
37 | 66 | args.append(b'XX') |
38 | 67 | elif exist is self.ZSET_IF_NOT_EXIST: |
39 | 68 | args.append(b'NX') |
69 | ||
70 | if changed: | |
71 | args.append(b'CH') | |
72 | ||
73 | if incr: | |
74 | if pairs: | |
75 | raise ValueError('only one score-element pair ' | |
76 | 'can be specified in this mode') | |
77 | args.append(b'INCR') | |
40 | 78 | |
41 | 79 | args.extend([score, member]) |
42 | 80 | if pairs: |
423 | 461 | match=match, |
424 | 462 | count=count)) |
425 | 463 | |
464 | def zpopmin(self, key, count=None, *, encoding=_NOTSET): | |
465 | """Removes and returns up to count members with the lowest scores | |
466 | in the sorted set stored at key. | |
467 | ||
468 | :raises TypeError: if count is not int | |
469 | """ | |
470 | if count is not None and not isinstance(count, int): | |
471 | raise TypeError("count argument must be int") | |
472 | ||
473 | args = [] | |
474 | if count is not None: | |
475 | args.extend([count]) | |
476 | ||
477 | fut = self.execute(b'ZPOPMIN', key, *args, encoding=encoding) | |
478 | return fut | |
479 | ||
480 | def zpopmax(self, key, count=None, *, encoding=_NOTSET): | |
481 | """Removes and returns up to count members with the highest scores | |
482 | in the sorted set stored at key. | |
483 | ||
484 | :raises TypeError: if count is not int | |
485 | """ | |
486 | if count is not None and not isinstance(count, int): | |
487 | raise TypeError("count argument must be int") | |
488 | ||
489 | args = [] | |
490 | if count is not None: | |
491 | args.extend([count]) | |
492 | ||
493 | fut = self.execute(b'ZPOPMAX', key, *args, encoding=encoding) | |
494 | return fut | |
495 | ||
426 | 496 | |
427 | 497 | def _encode_min_max(flag, min, max): |
428 | 498 | if flag is SortedSetCommandsMixin.ZSET_EXCLUDE_MIN: |
32 | 32 | """ |
33 | 33 | if messages is None: |
34 | 34 | return [] |
35 | return [(mid, fields_to_dict(values)) for mid, values in messages] | |
35 | ||
36 | messages = (message for message in messages if message is not None) | |
37 | return [ | |
38 | (mid, fields_to_dict(values)) | |
39 | for mid, values | |
40 | in messages if values is not None | |
41 | ] | |
36 | 42 | |
37 | 43 | |
38 | 44 | def parse_messages_by_stream(messages_by_stream): |
78 | 84 | class StreamCommandsMixin: |
79 | 85 | """Stream commands mixin |
80 | 86 | |
81 | Streams are under development in Redis and | |
82 | not currently released. | |
87 | Streams are available in Redis since v5.0 | |
83 | 88 | """ |
84 | 89 | |
85 | 90 | def xadd(self, stream, fields, message_id=b'*', max_len=None, |
127 | 132 | return wait_convert(fut, parse_messages_by_stream) |
128 | 133 | |
129 | 134 | def xread_group(self, group_name, consumer_name, streams, timeout=0, |
130 | count=None, latest_ids=None): | |
135 | count=None, latest_ids=None, no_ack=False): | |
131 | 136 | """Perform a blocking read on the given stream as part of a consumer group |
132 | 137 | |
133 | 138 | :raises ValueError: if the length of streams and latest_ids do |
134 | 139 | not match |
135 | 140 | """ |
136 | args = self._xread(streams, timeout, count, latest_ids) | |
141 | args = self._xread( | |
142 | streams, timeout, count, latest_ids, no_ack | |
143 | ) | |
137 | 144 | fut = self.execute( |
138 | 145 | b'XREADGROUP', b'GROUP', group_name, consumer_name, *args |
139 | 146 | ) |
140 | 147 | return wait_convert(fut, parse_messages_by_stream) |
141 | 148 | |
142 | def xgroup_create(self, stream, group_name, latest_id='$'): | |
149 | def xgroup_create(self, stream, group_name, latest_id='$', mkstream=False): | |
143 | 150 | """Create a consumer group""" |
144 | fut = self.execute(b'XGROUP', b'CREATE', stream, group_name, latest_id) | |
151 | args = [b'CREATE', stream, group_name, latest_id] | |
152 | if mkstream: | |
153 | args.append(b'MKSTREAM') | |
154 | fut = self.execute(b'XGROUP', *args) | |
145 | 155 | return wait_ok(fut) |
146 | 156 | |
147 | 157 | def xgroup_setid(self, stream, group_name, latest_id='$'): |
200 | 210 | """Acknowledge a message for a given consumer group""" |
201 | 211 | return self.execute(b'XACK', stream, group_name, id, *ids) |
202 | 212 | |
213 | def xdel(self, stream, id): | |
214 | """Removes the specified entries(IDs) from a stream""" | |
215 | return self.execute(b'XDEL', stream, id) | |
216 | ||
217 | def xtrim(self, stream, max_len, exact_len=False): | |
218 | """trims the stream to a given number of items, evicting older items""" | |
219 | args = [] | |
220 | if exact_len: | |
221 | args.extend((b'MAXLEN', max_len)) | |
222 | else: | |
223 | args.extend((b'MAXLEN', b'~', max_len)) | |
224 | return self.execute(b'XTRIM', stream, *args) | |
225 | ||
226 | def xlen(self, stream): | |
227 | """Returns the number of entries inside a stream""" | |
228 | return self.execute(b'XLEN', stream) | |
229 | ||
203 | 230 | def xinfo(self, stream): |
204 | 231 | """Retrieve information about the given stream. |
205 | 232 | |
228 | 255 | fut = self.execute(b'XINFO', b'HELP') |
229 | 256 | return wait_convert(fut, lambda l: b'\n'.join(l)) |
230 | 257 | |
231 | def _xread(self, streams, timeout=0, count=None, latest_ids=None): | |
258 | def _xread(self, streams, timeout=0, count=None, latest_ids=None, | |
259 | no_ack=False): | |
232 | 260 | """Wraps up common functionality between ``xread()`` |
233 | 261 | and ``xread_group()`` |
234 | 262 | |
245 | 273 | count_args = [b'COUNT', count] if count else [] |
246 | 274 | if timeout is None: |
247 | 275 | block_args = [] |
276 | elif not isinstance(timeout, int): | |
277 | raise TypeError( | |
278 | "timeout argument must be int, not {!r}".format(timeout)) | |
248 | 279 | else: |
249 | 280 | block_args = [b'BLOCK', timeout] |
250 | return block_args + count_args + [b'STREAMS'] + streams + latest_ids | |
281 | ||
282 | noack_args = [b'NOACK'] if no_ack else [] | |
283 | ||
284 | return count_args + block_args + noack_args + [b'STREAMS'] + streams \ | |
285 | + latest_ids |
0 | from itertools import chain | |
1 | ||
0 | 2 | from aioredis.util import wait_convert, wait_ok, _NOTSET |
1 | 3 | |
2 | 4 | |
135 | 137 | """Get the values of all the given keys.""" |
136 | 138 | return self.execute(b'MGET', key, *keys, encoding=encoding) |
137 | 139 | |
138 | def mset(self, key, value, *pairs): | |
139 | """Set multiple keys to multiple values. | |
140 | ||
141 | :raises TypeError: if len of pairs is not event number | |
142 | """ | |
143 | if len(pairs) % 2 != 0: | |
140 | def mset(self, *args): | |
141 | """Set multiple keys to multiple values or unpack dict to keys & values. | |
142 | ||
143 | :raises TypeError: if len of args is not event number | |
144 | :raises TypeError: if len of args equals 1 and it is not a dict | |
145 | """ | |
146 | data = args | |
147 | if len(args) == 1: | |
148 | if not isinstance(args[0], dict): | |
149 | raise TypeError("if one arg it should be a dict") | |
150 | data = chain.from_iterable(args[0].items()) | |
151 | elif len(args) % 2 != 0: | |
144 | 152 | raise TypeError("length of pairs must be even number") |
145 | fut = self.execute(b'MSET', key, value, *pairs) | |
153 | fut = self.execute(b'MSET', *data) | |
146 | 154 | return wait_ok(fut) |
147 | 155 | |
148 | 156 | def msetnx(self, key, value, *pairs): |
10 | 10 | from ..util import ( |
11 | 11 | wait_ok, |
12 | 12 | _set_exception, |
13 | get_event_loop, | |
13 | 14 | ) |
14 | 15 | |
15 | 16 | |
62 | 63 | >>> await asyncio.gather(fut1, fut2) |
63 | 64 | [1, 1] |
64 | 65 | """ |
65 | return MultiExec(self._pool_or_conn, self.__class__, | |
66 | loop=self._pool_or_conn._loop) | |
66 | return MultiExec(self._pool_or_conn, self.__class__) | |
67 | 67 | |
68 | 68 | def pipeline(self): |
69 | 69 | """Returns :class:`Pipeline` object to execute bulk of commands. |
89 | 89 | >>> await asyncio.gather(fut1, fut2) |
90 | 90 | [2, 2] |
91 | 91 | """ |
92 | return Pipeline(self._pool_or_conn, self.__class__, | |
93 | loop=self._pool_or_conn._loop) | |
92 | return Pipeline(self._pool_or_conn, self.__class__) | |
94 | 93 | |
95 | 94 | |
96 | 95 | class _RedisBuffer: |
97 | 96 | |
98 | 97 | def __init__(self, pipeline, *, loop=None): |
99 | if loop is None: | |
100 | loop = asyncio.get_event_loop() | |
98 | # TODO: deprecation note | |
99 | # if loop is None: | |
100 | # loop = asyncio.get_event_loop() | |
101 | 101 | self._pipeline = pipeline |
102 | self._loop = loop | |
103 | 102 | |
104 | 103 | def execute(self, cmd, *args, **kw): |
105 | fut = self._loop.create_future() | |
104 | fut = get_event_loop().create_future() | |
106 | 105 | self._pipeline.append((fut, cmd, args, kw)) |
107 | 106 | return fut |
108 | 107 | |
128 | 127 | |
129 | 128 | def __init__(self, pool_or_connection, commands_factory=lambda conn: conn, |
130 | 129 | *, loop=None): |
131 | if loop is None: | |
132 | loop = asyncio.get_event_loop() | |
130 | # TODO: deprecation note | |
131 | # if loop is None: | |
132 | # loop = asyncio.get_event_loop() | |
133 | 133 | self._pool_or_conn = pool_or_connection |
134 | self._loop = loop | |
135 | 134 | self._pipeline = [] |
136 | 135 | self._results = [] |
137 | self._buffer = _RedisBuffer(self._pipeline, loop=loop) | |
136 | self._buffer = _RedisBuffer(self._pipeline) | |
138 | 137 | self._redis = commands_factory(self._buffer) |
139 | 138 | self._done = False |
140 | 139 | |
146 | 145 | @functools.wraps(attr) |
147 | 146 | def wrapper(*args, **kw): |
148 | 147 | try: |
149 | task = asyncio.ensure_future(attr(*args, **kw), | |
150 | loop=self._loop) | |
148 | task = asyncio.ensure_future(attr(*args, **kw)) | |
151 | 149 | except Exception as exc: |
152 | task = self._loop.create_future() | |
150 | task = get_event_loop().create_future() | |
153 | 151 | task.set_exception(exc) |
154 | 152 | self._results.append(task) |
155 | 153 | return task |
182 | 180 | |
183 | 181 | async def _do_execute(self, conn, *, return_exceptions=False): |
184 | 182 | await asyncio.gather(*self._send_pipeline(conn), |
185 | loop=self._loop, | |
186 | 183 | return_exceptions=True) |
187 | 184 | return await self._gather_result(return_exceptions) |
188 | 185 | |
264 | 261 | multi = conn.execute('MULTI') |
265 | 262 | coros = list(self._send_pipeline(conn)) |
266 | 263 | exec_ = conn.execute('EXEC') |
267 | gather = asyncio.gather(multi, *coros, loop=self._loop, | |
264 | gather = asyncio.gather(multi, *coros, | |
268 | 265 | return_exceptions=True) |
269 | 266 | last_error = None |
270 | 267 | try: |
271 | await asyncio.shield(gather, loop=self._loop) | |
268 | await asyncio.shield(gather) | |
272 | 269 | except asyncio.CancelledError: |
273 | 270 | await gather |
274 | 271 | except Exception as err: |
0 | 0 | import types |
1 | 1 | import asyncio |
2 | 2 | import socket |
3 | import warnings | |
4 | import sys | |
5 | ||
3 | 6 | from functools import partial |
4 | 7 | from collections import deque |
5 | 8 | from contextlib import contextmanager |
13 | 16 | coerced_keys_dict, |
14 | 17 | decode, |
15 | 18 | parse_url, |
19 | get_event_loop, | |
16 | 20 | ) |
17 | 21 | from .parser import Reader |
18 | 22 | from .stream import open_connection, open_unix_connection |
75 | 79 | """ |
76 | 80 | assert isinstance(address, (tuple, list, str)), "tuple or str expected" |
77 | 81 | if isinstance(address, str): |
78 | logger.debug("Parsing Redis URI %r", address) | |
79 | 82 | address, options = parse_url(address) |
83 | logger.debug("Parsed Redis URI %r", address) | |
80 | 84 | db = options.setdefault('db', db) |
81 | 85 | password = options.setdefault('password', password) |
82 | 86 | encoding = options.setdefault('encoding', encoding) |
96 | 100 | else: |
97 | 101 | cls = RedisConnection |
98 | 102 | |
99 | if loop is None: | |
100 | loop = asyncio.get_event_loop() | |
103 | if loop is not None and sys.version_info >= (3, 8, 0): | |
104 | warnings.warn("The loop argument is deprecated", | |
105 | DeprecationWarning) | |
101 | 106 | |
102 | 107 | if isinstance(address, (list, tuple)): |
103 | 108 | host, port = address |
104 | 109 | logger.debug("Creating tcp connection to %r", address) |
105 | 110 | reader, writer = await asyncio.wait_for(open_connection( |
106 | host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop), | |
107 | timeout, loop=loop) | |
111 | host, port, limit=MAX_CHUNK_SIZE, ssl=ssl), | |
112 | timeout) | |
108 | 113 | sock = writer.transport.get_extra_info('socket') |
109 | 114 | if sock is not None: |
110 | 115 | sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) |
113 | 118 | else: |
114 | 119 | logger.debug("Creating unix connection to %r", address) |
115 | 120 | reader, writer = await asyncio.wait_for(open_unix_connection( |
116 | address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop), | |
117 | timeout, loop=loop) | |
121 | address, ssl=ssl, limit=MAX_CHUNK_SIZE), | |
122 | timeout) | |
118 | 123 | sock = writer.transport.get_extra_info('socket') |
119 | 124 | if sock is not None: |
120 | 125 | address = sock.getpeername() |
121 | 126 | |
122 | 127 | conn = cls(reader, writer, encoding=encoding, |
123 | address=address, parser=parser, | |
124 | loop=loop) | |
128 | address=address, parser=parser) | |
125 | 129 | |
126 | 130 | try: |
127 | 131 | if password is not None: |
140 | 144 | |
141 | 145 | def __init__(self, reader, writer, *, address, encoding=None, |
142 | 146 | parser=None, loop=None): |
143 | if loop is None: | |
144 | loop = asyncio.get_event_loop() | |
147 | if loop is not None and sys.version_info >= (3, 8): | |
148 | warnings.warn("The loop argument is deprecated", | |
149 | DeprecationWarning) | |
145 | 150 | if parser is None: |
146 | 151 | parser = Reader |
147 | 152 | assert callable(parser), ( |
149 | 154 | self._reader = reader |
150 | 155 | self._writer = writer |
151 | 156 | self._address = address |
152 | self._loop = loop | |
153 | 157 | self._waiters = deque() |
154 | 158 | self._reader.set_parser( |
155 | 159 | parser(protocolError=ProtocolError, replyError=ReplyError) |
156 | 160 | ) |
157 | self._reader_task = asyncio.ensure_future(self._read_data(), | |
158 | loop=self._loop) | |
161 | self._reader_task = asyncio.ensure_future(self._read_data()) | |
159 | 162 | self._close_msg = None |
160 | 163 | self._db = 0 |
161 | 164 | self._closing = False |
162 | 165 | self._closed = False |
163 | self._close_waiter = loop.create_future() | |
164 | self._reader_task.add_done_callback(self._close_waiter.set_result) | |
166 | self._close_state = asyncio.Event() | |
167 | self._reader_task.add_done_callback(lambda x: self._close_state.set()) | |
165 | 168 | self._in_transaction = None |
166 | 169 | self._transaction_error = None # XXX: never used? |
167 | 170 | self._in_pubsub = 0 |
211 | 214 | else: |
212 | 215 | self._process_data(obj) |
213 | 216 | self._closing = True |
214 | self._loop.call_soon(self._do_close, last_error) | |
217 | get_event_loop().call_soon(self._do_close, last_error) | |
215 | 218 | |
216 | 219 | def _process_data(self, obj): |
217 | 220 | """Processes command results.""" |
335 | 338 | cb = self._start_transaction |
336 | 339 | elif command in ('EXEC', b'EXEC'): |
337 | 340 | cb = partial(self._end_transaction, discard=False) |
341 | encoding = None | |
338 | 342 | elif command in ('DISCARD', b'DISCARD'): |
339 | 343 | cb = partial(self._end_transaction, discard=True) |
340 | 344 | else: |
341 | 345 | cb = None |
342 | 346 | if encoding is _NOTSET: |
343 | 347 | encoding = self._encoding |
344 | fut = self._loop.create_future() | |
348 | fut = get_event_loop().create_future() | |
345 | 349 | if self._pipeline_buffer is None: |
346 | 350 | self._writer.write(encode_command(command, *args)) |
347 | 351 | else: |
365 | 369 | if not len(channels): |
366 | 370 | raise TypeError("No channels/patterns supplied") |
367 | 371 | is_pattern = len(command) in (10, 12) |
368 | mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop) | |
372 | mkchannel = partial(Channel, is_pattern=is_pattern) | |
369 | 373 | channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch) |
370 | 374 | for ch in channels] |
371 | 375 | if not all(ch.is_pattern == is_pattern for ch in channels): |
374 | 378 | cmd = encode_command(command, *(ch.name for ch in channels)) |
375 | 379 | res = [] |
376 | 380 | for ch in channels: |
377 | fut = self._loop.create_future() | |
381 | fut = get_event_loop().create_future() | |
378 | 382 | res.append(fut) |
379 | 383 | cb = partial(self._update_pubsub, ch=ch) |
380 | 384 | self._waiters.append((fut, None, cb)) |
382 | 386 | self._writer.write(cmd) |
383 | 387 | else: |
384 | 388 | self._pipeline_buffer.extend(cmd) |
385 | return asyncio.gather(*res, loop=self._loop) | |
389 | return asyncio.gather(*res) | |
386 | 390 | |
387 | 391 | def close(self): |
388 | 392 | """Close connection.""" |
425 | 429 | closed = self._closing or self._closed |
426 | 430 | if not closed and self._reader and self._reader.at_eof(): |
427 | 431 | self._closing = closed = True |
428 | self._loop.call_soon(self._do_close, None) | |
432 | get_event_loop().call_soon(self._do_close, None) | |
429 | 433 | return closed |
430 | 434 | |
431 | 435 | async def wait_closed(self): |
432 | 436 | """Coroutine waiting until connection is closed.""" |
433 | await asyncio.shield(self._close_waiter, loop=self._loop) | |
437 | await self._close_state.wait() | |
434 | 438 | |
435 | 439 | @property |
436 | 440 | def db(self): |
0 | from typing import Optional, Sequence # noqa | |
1 | ||
0 | 2 | __all__ = [ |
1 | 3 | 'RedisError', |
2 | 4 | 'ProtocolError', |
27 | 29 | class ReplyError(RedisError): |
28 | 30 | """Raised for redis error replies (-ERR).""" |
29 | 31 | |
30 | MATCH_REPLY = None | |
32 | MATCH_REPLY = None # type: Optional[Sequence[str]] | |
31 | 33 | |
32 | 34 | def __new__(cls, msg, *args): |
33 | 35 | for klass in cls.__subclasses__(): |
46 | 48 | class AuthError(ReplyError): |
47 | 49 | """Raised when authentication errors occurs.""" |
48 | 50 | |
49 | MATCH_REPLY = ("NOAUTH ", "ERR invalid password") | |
51 | MATCH_REPLY = ( | |
52 | "NOAUTH ", | |
53 | "ERR invalid password", | |
54 | "ERR Client sent AUTH, but no password is set", | |
55 | ) | |
56 | ||
57 | ||
58 | class BusyGroupError(ReplyError): | |
59 | """Raised if Consumer Group name already exists.""" | |
60 | ||
61 | MATCH_REPLY = "BUSYGROUP Consumer Group name already exists" | |
50 | 62 | |
51 | 63 | |
52 | 64 | class PipelineError(RedisError): |
0 | import asyncio | |
1 | import sys | |
2 | ||
0 | 3 | from asyncio.locks import Lock as _Lock |
1 | from asyncio import coroutine | |
2 | from asyncio import futures | |
3 | 4 | |
4 | 5 | # Fixes an issue with all Python versions that leaves pending waiters |
5 | 6 | # without being awakened when the first waiter is canceled. |
10 | 11 | |
11 | 12 | class Lock(_Lock): |
12 | 13 | |
13 | @coroutine | |
14 | def acquire(self): | |
15 | """Acquire a lock. | |
16 | This method blocks until the lock is unlocked, then sets it to | |
17 | locked and returns True. | |
18 | """ | |
19 | if not self._locked and all(w.cancelled() for w in self._waiters): | |
20 | self._locked = True | |
21 | return True | |
14 | if sys.version_info < (3, 7, 0): | |
15 | async def acquire(self): | |
16 | """Acquire a lock. | |
17 | This method blocks until the lock is unlocked, then sets it to | |
18 | locked and returns True. | |
19 | """ | |
20 | if not self._locked and all(w.cancelled() for w in self._waiters): | |
21 | self._locked = True | |
22 | return True | |
22 | 23 | |
23 | fut = self._loop.create_future() | |
24 | fut = self._loop.create_future() | |
24 | 25 | |
25 | self._waiters.append(fut) | |
26 | try: | |
27 | yield from fut | |
28 | self._locked = True | |
29 | return True | |
30 | except futures.CancelledError: | |
31 | if not self._locked: # pragma: no cover | |
32 | self._wake_up_first() | |
33 | raise | |
34 | finally: | |
35 | self._waiters.remove(fut) | |
26 | self._waiters.append(fut) | |
27 | try: | |
28 | await fut | |
29 | self._locked = True | |
30 | return True | |
31 | except asyncio.CancelledError: | |
32 | if not self._locked: # pragma: no cover | |
33 | self._wake_up_first() | |
34 | raise | |
35 | finally: | |
36 | self._waiters.remove(fut) | |
36 | 37 | |
37 | def _wake_up_first(self): | |
38 | """Wake up the first waiter who isn't cancelled.""" | |
39 | for fut in self._waiters: | |
40 | if not fut.done(): | |
41 | fut.set_result(True) | |
42 | break | |
38 | def _wake_up_first(self): | |
39 | """Wake up the first waiter who isn't cancelled.""" | |
40 | for fut in self._waiters: | |
41 | if not fut.done(): | |
42 | fut.set_result(True) | |
43 | break |
0 | 0 | from .errors import ProtocolError, ReplyError |
1 | from typing import Optional, Generator, Callable, Iterator # noqa | |
1 | 2 | |
2 | 3 | __all__ = [ |
3 | 4 | 'Reader', 'PyReader', |
8 | 9 | """Pure-Python Redis protocol parser that follows hiredis.Reader |
9 | 10 | interface (except setmaxbuf/getmaxbuf). |
10 | 11 | """ |
11 | def __init__(self, protocolError=ProtocolError, replyError=ReplyError, | |
12 | encoding=None): | |
12 | def __init__(self, protocolError: Callable = ProtocolError, | |
13 | replyError: Callable = ReplyError, | |
14 | encoding: Optional[str] = None): | |
13 | 15 | if not callable(protocolError): |
14 | 16 | raise TypeError("Expected a callable") |
15 | 17 | if not callable(replyError): |
16 | 18 | raise TypeError("Expected a callable") |
17 | 19 | self._parser = Parser(protocolError, replyError, encoding) |
18 | 20 | |
19 | def feed(self, data, o=0, l=-1): | |
21 | def feed(self, data, o: int = 0, l: int = -1): | |
20 | 22 | """Feed data to parser.""" |
21 | 23 | if l == -1: |
22 | 24 | l = len(data) - o |
34 | 36 | """ |
35 | 37 | return self._parser.parse_one() |
36 | 38 | |
37 | def setmaxbuf(self, size): | |
39 | def setmaxbuf(self, size: Optional[int]) -> None: | |
38 | 40 | """No-op.""" |
39 | 41 | pass |
40 | 42 | |
41 | def getmaxbuf(self): | |
43 | def getmaxbuf(self) -> int: | |
42 | 44 | """No-op.""" |
43 | 45 | return 0 |
44 | 46 | |
45 | 47 | |
46 | 48 | class Parser: |
47 | def __init__(self, protocolError, replyError, encoding): | |
48 | self.buf = bytearray() | |
49 | self.pos = 0 | |
50 | self.protocolError = protocolError | |
51 | self.replyError = replyError | |
52 | self.encoding = encoding | |
49 | def __init__(self, protocolError: Callable, | |
50 | replyError: Callable, encoding: Optional[str]): | |
51 | ||
52 | self.buf = bytearray() # type: bytearray | |
53 | self.pos = 0 # type: int | |
54 | self.protocolError = protocolError # type: Callable | |
55 | self.replyError = replyError # type: Callable | |
56 | self.encoding = encoding # type: Optional[str] | |
53 | 57 | self._err = None |
54 | self._gen = None | |
58 | self._gen = None # type: Optional[Generator] | |
55 | 59 | |
56 | def waitsome(self, size): | |
60 | def waitsome(self, size: int) -> Iterator[bool]: | |
57 | 61 | # keep yielding false until at least `size` bytes added to buf. |
58 | 62 | while len(self.buf) < self.pos+size: |
59 | 63 | yield False |
60 | 64 | |
61 | def waitany(self): | |
65 | def waitany(self) -> Iterator[bool]: | |
62 | 66 | yield from self.waitsome(len(self.buf) + 1) |
63 | 67 | |
64 | 68 | def readone(self): |
65 | if not self.buf[self.pos:1]: | |
69 | if not self.buf[self.pos:self.pos + 1]: | |
66 | 70 | yield from self.waitany() |
67 | val = self.buf[self.pos:1] | |
71 | val = self.buf[self.pos:self.pos + 1] | |
68 | 72 | self.pos += 1 |
69 | 73 | return val |
70 | 74 | |
71 | def readline(self, size=None): | |
75 | def readline(self, size: Optional[int] = None): | |
72 | 76 | if size is not None: |
73 | 77 | if len(self.buf) < size + 2 + self.pos: |
74 | 78 | yield from self.waitsome(size + 2) |
95 | 99 | self._err = self.protocolError(msg) |
96 | 100 | return self._err |
97 | 101 | |
98 | def parse(self, is_bulk=False): | |
102 | def parse(self, is_bulk: bool = False): | |
99 | 103 | if self._err is not None: |
100 | 104 | raise self._err |
101 | 105 | ctl = yield from self.readone() |
0 | 0 | import asyncio |
1 | 1 | import collections |
2 | 2 | import types |
3 | import warnings | |
4 | import sys | |
3 | 5 | |
4 | 6 | from .connection import create_connection, _PUBSUB_COMMANDS |
5 | 7 | from .log import logger |
6 | from .util import parse_url | |
8 | from .util import parse_url, CloseEvent | |
7 | 9 | from .errors import PoolClosedError |
8 | 10 | from .abc import AbcPool |
9 | 11 | from .locks import Lock |
53 | 55 | loop=loop) |
54 | 56 | try: |
55 | 57 | await pool._fill_free(override_min=False) |
56 | except Exception as ex: | |
58 | except Exception: | |
57 | 59 | pool.close() |
58 | 60 | await pool.wait_closed() |
59 | 61 | raise |
75 | 77 | "maxsize must be int > 0", maxsize, type(maxsize)) |
76 | 78 | assert minsize <= maxsize, ( |
77 | 79 | "Invalid pool min/max sizes", minsize, maxsize) |
78 | if loop is None: | |
79 | loop = asyncio.get_event_loop() | |
80 | if loop is not None and sys.version_info >= (3, 8): | |
81 | warnings.warn("The loop argument is deprecated", | |
82 | DeprecationWarning) | |
80 | 83 | self._address = address |
81 | 84 | self._db = db |
82 | 85 | self._password = password |
85 | 88 | self._parser_class = parser |
86 | 89 | self._minsize = minsize |
87 | 90 | self._create_connection_timeout = create_connection_timeout |
88 | self._loop = loop | |
89 | 91 | self._pool = collections.deque(maxlen=maxsize) |
90 | 92 | self._used = set() |
91 | 93 | self._acquiring = 0 |
92 | self._cond = asyncio.Condition(lock=Lock(loop=loop), loop=loop) | |
93 | self._close_state = asyncio.Event(loop=loop) | |
94 | self._close_waiter = None | |
94 | self._cond = asyncio.Condition(lock=Lock()) | |
95 | self._close_state = CloseEvent(self._do_close) | |
95 | 96 | self._pubsub_conn = None |
96 | 97 | self._connection_cls = connection_cls |
97 | 98 | |
138 | 139 | conn = self._pool.popleft() |
139 | 140 | conn.close() |
140 | 141 | waiters.append(conn.wait_closed()) |
141 | await asyncio.gather(*waiters, loop=self._loop) | |
142 | await asyncio.gather(*waiters) | |
142 | 143 | |
143 | 144 | async def _do_close(self): |
144 | await self._close_state.wait() | |
145 | 145 | async with self._cond: |
146 | 146 | assert not self._acquiring, self._acquiring |
147 | 147 | waiters = [] |
152 | 152 | for conn in self._used: |
153 | 153 | conn.close() |
154 | 154 | waiters.append(conn.wait_closed()) |
155 | await asyncio.gather(*waiters, loop=self._loop) | |
155 | await asyncio.gather(*waiters) | |
156 | 156 | # TODO: close _pubsub_conn connection |
157 | 157 | logger.debug("Closed %d connection(s)", len(waiters)) |
158 | 158 | |
160 | 160 | """Close all free and in-progress connections and mark pool as closed. |
161 | 161 | """ |
162 | 162 | if not self._close_state.is_set(): |
163 | self._close_waiter = asyncio.ensure_future(self._do_close(), | |
164 | loop=self._loop) | |
165 | 163 | self._close_state.set() |
166 | 164 | |
167 | 165 | @property |
172 | 170 | async def wait_closed(self): |
173 | 171 | """Wait until pool gets closed.""" |
174 | 172 | await self._close_state.wait() |
175 | assert self._close_waiter is not None | |
176 | await asyncio.shield(self._close_waiter, loop=self._loop) | |
177 | 173 | |
178 | 174 | @property |
179 | 175 | def db(self): |
286 | 282 | async with self._cond: |
287 | 283 | for i in range(self.freesize): |
288 | 284 | res = res and (await self._pool[i].select(db)) |
289 | else: | |
290 | self._db = db | |
285 | self._db = db | |
291 | 286 | return res |
292 | 287 | |
293 | 288 | async def auth(self, password): |
367 | 362 | else: |
368 | 363 | conn.close() |
369 | 364 | # FIXME: check event loop is not closed |
370 | asyncio.ensure_future(self._wakeup(), loop=self._loop) | |
365 | asyncio.ensure_future(self._wakeup()) | |
371 | 366 | |
372 | 367 | def _drop_closed(self): |
373 | 368 | for i in range(self.freesize): |
415 | 410 | parser=self._parser_class, |
416 | 411 | timeout=self._create_connection_timeout, |
417 | 412 | connection_cls=self._connection_cls, |
418 | loop=self._loop) | |
413 | ) | |
419 | 414 | |
420 | 415 | async def _wakeup(self, closing_conn=None): |
421 | 416 | async with self._cond: |
1 | 1 | import json |
2 | 2 | import types |
3 | 3 | import collections |
4 | import warnings | |
5 | import sys | |
4 | 6 | |
5 | 7 | from .abc import AbcChannel |
6 | 8 | from .util import _converters # , _set_result |
22 | 24 | """Wrapper around asyncio.Queue.""" |
23 | 25 | |
24 | 26 | def __init__(self, name, is_pattern, loop=None): |
25 | self._queue = ClosableQueue(loop=loop) | |
27 | if loop is not None and sys.version_info >= (3, 8): | |
28 | warnings.warn("The loop argument is deprecated", | |
29 | DeprecationWarning) | |
30 | self._queue = ClosableQueue() | |
26 | 31 | self._name = _converters[type(name)](name) |
27 | 32 | self._is_pattern = is_pattern |
28 | 33 | |
164 | 169 | |
165 | 170 | >>> from aioredis.pubsub import Receiver |
166 | 171 | >>> from aioredis.abc import AbcChannel |
167 | >>> mpsc = Receiver(loop=loop) | |
172 | >>> mpsc = Receiver() | |
168 | 173 | >>> async def reader(mpsc): |
169 | 174 | ... async for channel, msg in mpsc.iter(): |
170 | 175 | ... assert isinstance(channel, AbcChannel) |
187 | 192 | def __init__(self, loop=None, on_close=None): |
188 | 193 | assert on_close is None or callable(on_close), ( |
189 | 194 | "on_close must be None or callable", on_close) |
190 | if loop is None: | |
191 | loop = asyncio.get_event_loop() | |
195 | if loop is not None: | |
196 | warnings.warn("The loop argument is deprecated", | |
197 | DeprecationWarning) | |
192 | 198 | if on_close is None: |
193 | 199 | on_close = self.check_stop |
194 | self._queue = ClosableQueue(loop=loop) | |
200 | self._queue = ClosableQueue() | |
195 | 201 | self._refs = {} |
196 | 202 | self._on_close = on_close |
197 | 203 | |
395 | 401 | |
396 | 402 | class ClosableQueue: |
397 | 403 | |
398 | def __init__(self, *, loop=None): | |
404 | def __init__(self): | |
399 | 405 | self._queue = collections.deque() |
400 | self._event = asyncio.Event(loop=loop) | |
406 | self._event = asyncio.Event() | |
401 | 407 | self._closed = False |
402 | 408 | |
403 | 409 | async def wait(self): |
14 | 14 | MasterReplyError, |
15 | 15 | SlaveReplyError, |
16 | 16 | ) |
17 | from ..util import CloseEvent | |
17 | 18 | |
18 | 19 | |
19 | 20 | # Address marker for discovery |
28 | 29 | """Create SentinelPool.""" |
29 | 30 | # FIXME: revise default timeout value |
30 | 31 | assert isinstance(sentinels, (list, tuple)), sentinels |
31 | if loop is None: | |
32 | loop = asyncio.get_event_loop() | |
32 | # TODO: deprecation note | |
33 | # if loop is None: | |
34 | # loop = asyncio.get_event_loop() | |
33 | 35 | |
34 | 36 | pool = SentinelPool(sentinels, db=db, |
35 | 37 | password=password, |
54 | 56 | def __init__(self, sentinels, *, db=None, password=None, ssl=None, |
55 | 57 | encoding=None, parser=None, minsize, maxsize, timeout, |
56 | 58 | loop=None): |
57 | if loop is None: | |
58 | loop = asyncio.get_event_loop() | |
59 | # TODO: deprecation note | |
60 | # if loop is None: | |
61 | # loop = asyncio.get_event_loop() | |
59 | 62 | # TODO: add connection/discover timeouts; |
60 | 63 | # and what to do if no master is found: |
61 | 64 | # (raise error or try forever or try until timeout) |
62 | 65 | |
63 | 66 | # XXX: _sentinels is unordered |
64 | 67 | self._sentinels = set(sentinels) |
65 | self._loop = loop | |
66 | 68 | self._timeout = timeout |
67 | 69 | self._pools = [] # list of sentinel pools |
68 | 70 | self._masters = {} |
74 | 76 | self._redis_encoding = encoding |
75 | 77 | self._redis_minsize = minsize |
76 | 78 | self._redis_maxsize = maxsize |
77 | self._close_state = asyncio.Event(loop=loop) | |
79 | self._close_state = CloseEvent(self._do_close) | |
78 | 80 | self._close_waiter = None |
79 | self._monitor = monitor = Receiver(loop=loop) | |
81 | self._monitor = monitor = Receiver() | |
80 | 82 | |
81 | 83 | async def echo_events(): |
82 | 84 | try: |
83 | 85 | while await monitor.wait_message(): |
84 | ch, (ev, data) = await monitor.get(encoding='utf-8') | |
86 | _, (ev, data) = await monitor.get(encoding='utf-8') | |
85 | 87 | ev = ev.decode('utf-8') |
86 | 88 | _logger.debug("%s: %s", ev, data) |
87 | 89 | if ev in ('+odown',): |
101 | 103 | # etc... |
102 | 104 | except asyncio.CancelledError: |
103 | 105 | pass |
104 | self._monitor_task = asyncio.ensure_future(echo_events(), loop=loop) | |
106 | self._monitor_task = asyncio.ensure_future(echo_events()) | |
105 | 107 | |
106 | 108 | @property |
107 | 109 | def discover_timeout(self): |
123 | 125 | maxsize=self._redis_maxsize, |
124 | 126 | ssl=self._redis_ssl, |
125 | 127 | parser=self._parser_class, |
126 | loop=self._loop) | |
128 | ) | |
127 | 129 | return self._masters[service] |
128 | 130 | |
129 | 131 | def slave_for(self, service): |
139 | 141 | maxsize=self._redis_maxsize, |
140 | 142 | ssl=self._redis_ssl, |
141 | 143 | parser=self._parser_class, |
142 | loop=self._loop) | |
144 | ) | |
143 | 145 | return self._slaves[service] |
144 | 146 | |
145 | 147 | def execute(self, command, *args, **kwargs): |
161 | 163 | def close(self): |
162 | 164 | """Close all controlled connections (both sentinel and redis).""" |
163 | 165 | if not self._close_state.is_set(): |
164 | self._close_waiter = asyncio.ensure_future(self._do_close(), | |
165 | loop=self._loop) | |
166 | 166 | self._close_state.set() |
167 | 167 | |
168 | 168 | async def _do_close(self): |
169 | await self._close_state.wait() | |
170 | 169 | # TODO: lock |
171 | 170 | tasks = [] |
172 | 171 | task, self._monitor_task = self._monitor_task, None |
184 | 183 | _, pool = self._slaves.popitem() |
185 | 184 | pool.close() |
186 | 185 | tasks.append(pool.wait_closed()) |
187 | await asyncio.gather(*tasks, loop=self._loop) | |
186 | await asyncio.gather(*tasks) | |
188 | 187 | |
189 | 188 | async def wait_closed(self): |
190 | 189 | """Wait until pool gets closed.""" |
191 | 190 | await self._close_state.wait() |
192 | assert self._close_waiter is not None | |
193 | await asyncio.shield(self._close_waiter, loop=self._loop) | |
194 | 191 | |
195 | 192 | async def discover(self, timeout=None): # TODO: better name? |
196 | 193 | """Discover sentinels and all monitored services within given timeout. |
209 | 206 | pools = [] |
210 | 207 | for addr in self._sentinels: # iterate over unordered set |
211 | 208 | tasks.append(self._connect_sentinel(addr, timeout, pools)) |
212 | done, pending = await asyncio.wait(tasks, loop=self._loop, | |
209 | done, pending = await asyncio.wait(tasks, | |
213 | 210 | return_when=ALL_COMPLETED) |
214 | 211 | assert not pending, ("Expected all tasks to complete", done, pending) |
215 | 212 | |
235 | 232 | connections pool or exception. |
236 | 233 | """ |
237 | 234 | try: |
238 | with async_timeout(timeout, loop=self._loop): | |
235 | with async_timeout(timeout): | |
239 | 236 | pool = await create_pool( |
240 | 237 | address, minsize=1, maxsize=2, |
241 | 238 | parser=self._parser_class, |
242 | loop=self._loop) | |
239 | ) | |
243 | 240 | pools.append(pool) |
244 | 241 | return pool |
245 | 242 | except asyncio.TimeoutError as err: |
267 | 264 | pools = self._pools[:] |
268 | 265 | for sentinel in pools: |
269 | 266 | try: |
270 | with async_timeout(timeout, loop=self._loop): | |
267 | with async_timeout(timeout): | |
271 | 268 | address = await self._get_masters_address( |
272 | 269 | sentinel, service) |
273 | 270 | |
274 | 271 | pool = self._masters[service] |
275 | with async_timeout(timeout, loop=self._loop), \ | |
272 | with async_timeout(timeout), \ | |
276 | 273 | contextlib.ExitStack() as stack: |
277 | 274 | conn = await pool._create_new_connection(address) |
278 | 275 | stack.callback(conn.close) |
290 | 287 | except DiscoverError as err: |
291 | 288 | sentinel_logger.debug("DiscoverError(%r, %s): %r", |
292 | 289 | sentinel, service, err) |
293 | await asyncio.sleep(idle_timeout, loop=self._loop) | |
290 | await asyncio.sleep(idle_timeout) | |
294 | 291 | continue |
295 | 292 | except RedisError as err: |
296 | 293 | raise MasterReplyError("Service {} error".format(service), err) |
297 | 294 | except Exception: |
298 | 295 | # TODO: clear (drop) connections to schedule reconnect |
299 | await asyncio.sleep(idle_timeout, loop=self._loop) | |
300 | continue | |
301 | else: | |
302 | raise MasterNotFoundError("No master found for {}".format(service)) | |
296 | await asyncio.sleep(idle_timeout) | |
297 | continue | |
298 | # Otherwise | |
299 | raise MasterNotFoundError("No master found for {}".format(service)) | |
303 | 300 | |
304 | 301 | async def discover_slave(self, service, timeout, **kwargs): |
305 | 302 | """Perform Slave discovery for specified service.""" |
309 | 306 | pools = self._pools[:] |
310 | 307 | for sentinel in pools: |
311 | 308 | try: |
312 | with async_timeout(timeout, loop=self._loop): | |
309 | with async_timeout(timeout): | |
313 | 310 | address = await self._get_slave_address( |
314 | 311 | sentinel, service) # add **kwargs |
315 | 312 | pool = self._slaves[service] |
316 | with async_timeout(timeout, loop=self._loop), \ | |
313 | with async_timeout(timeout), \ | |
317 | 314 | contextlib.ExitStack() as stack: |
318 | 315 | conn = await pool._create_new_connection(address) |
319 | 316 | stack.callback(conn.close) |
325 | 322 | except asyncio.TimeoutError: |
326 | 323 | continue |
327 | 324 | except DiscoverError: |
328 | await asyncio.sleep(idle_timeout, loop=self._loop) | |
325 | await asyncio.sleep(idle_timeout) | |
329 | 326 | continue |
330 | 327 | except RedisError as err: |
331 | 328 | raise SlaveReplyError("Service {} error".format(service), err) |
332 | 329 | except Exception: |
333 | await asyncio.sleep(idle_timeout, loop=self._loop) | |
330 | await asyncio.sleep(idle_timeout) | |
334 | 331 | continue |
335 | 332 | raise SlaveNotFoundError("No slave found for {}".format(service)) |
336 | 333 | |
361 | 358 | if {'s_down', 'o_down', 'disconnected'} & flags: |
362 | 359 | continue |
363 | 360 | return address |
364 | else: | |
365 | raise BadState(state) # XXX: only last state | |
361 | raise BadState() # XXX: only last state | |
366 | 362 | |
367 | 363 | async def _verify_service_role(self, conn, role): |
368 | 364 | res = await conn.execute(b'role', encoding='utf-8') |
0 | 0 | import asyncio |
1 | import warnings | |
2 | import sys | |
3 | ||
4 | from .util import get_event_loop | |
1 | 5 | |
2 | 6 | __all__ = [ |
3 | 7 | 'open_connection', |
10 | 14 | limit, loop=None, |
11 | 15 | parser=None, **kwds): |
12 | 16 | # XXX: parser is not used (yet) |
13 | if loop is None: | |
14 | loop = asyncio.get_event_loop() | |
15 | reader = StreamReader(limit=limit, loop=loop) | |
16 | protocol = asyncio.StreamReaderProtocol(reader, loop=loop) | |
17 | transport, _ = await loop.create_connection( | |
17 | if loop is not None and sys.version_info >= (3, 8): | |
18 | warnings.warn("The loop argument is deprecated", | |
19 | DeprecationWarning) | |
20 | reader = StreamReader(limit=limit) | |
21 | protocol = asyncio.StreamReaderProtocol(reader) | |
22 | transport, _ = await get_event_loop().create_connection( | |
18 | 23 | lambda: protocol, host, port, **kwds) |
19 | writer = asyncio.StreamWriter(transport, protocol, reader, loop) | |
24 | writer = asyncio.StreamWriter(transport, protocol, reader, | |
25 | loop=get_event_loop()) | |
20 | 26 | return reader, writer |
21 | 27 | |
22 | 28 | |
24 | 30 | limit, loop=None, |
25 | 31 | parser=None, **kwds): |
26 | 32 | # XXX: parser is not used (yet) |
27 | if loop is None: | |
28 | loop = asyncio.get_event_loop() | |
29 | reader = StreamReader(limit=limit, loop=loop) | |
30 | protocol = asyncio.StreamReaderProtocol(reader, loop=loop) | |
31 | transport, _ = await loop.create_unix_connection( | |
33 | if loop is not None and sys.version_info >= (3, 8): | |
34 | warnings.warn("The loop argument is deprecated", | |
35 | DeprecationWarning) | |
36 | reader = StreamReader(limit=limit) | |
37 | protocol = asyncio.StreamReaderProtocol(reader) | |
38 | transport, _ = await get_event_loop().create_unix_connection( | |
32 | 39 | lambda: protocol, address, **kwds) |
33 | writer = asyncio.StreamWriter(transport, protocol, reader, loop) | |
40 | writer = asyncio.StreamWriter(transport, protocol, reader, | |
41 | loop=get_event_loop()) | |
34 | 42 | return reader, writer |
35 | 43 | |
36 | 44 |
0 | import asyncio | |
1 | import sys | |
2 | ||
0 | 3 | from urllib.parse import urlparse, parse_qsl |
1 | 4 | |
2 | 5 | from .log import logger |
3 | 6 | |
4 | 7 | _NOTSET = object() |
5 | 8 | |
9 | IS_PY38 = sys.version_info >= (3, 8) | |
6 | 10 | |
7 | 11 | # NOTE: never put here anything else; |
8 | 12 | # just this basic types |
206 | 210 | if 'timeout' in params: |
207 | 211 | options['timeout'] = float(params['timeout']) |
208 | 212 | return options |
213 | ||
214 | ||
215 | class CloseEvent: | |
216 | def __init__(self, on_close): | |
217 | self._close_init = asyncio.Event() | |
218 | self._close_done = asyncio.Event() | |
219 | self._on_close = on_close | |
220 | ||
221 | async def wait(self): | |
222 | await self._close_init.wait() | |
223 | await self._close_done.wait() | |
224 | ||
225 | def is_set(self): | |
226 | return self._close_done.is_set() or self._close_init.is_set() | |
227 | ||
228 | def set(self): | |
229 | if self._close_init.is_set(): | |
230 | return | |
231 | ||
232 | task = asyncio.ensure_future(self._on_close()) | |
233 | task.add_done_callback(self._cleanup) | |
234 | self._close_init.set() | |
235 | ||
236 | def _cleanup(self, task): | |
237 | self._on_close = None | |
238 | self._close_done.set() | |
239 | ||
240 | ||
241 | get_event_loop = getattr(asyncio, 'get_running_loop', asyncio.get_event_loop) |
0 | 0 | Metadata-Version: 1.1 |
1 | 1 | Name: aioredis |
2 | Version: 1.2.0 | |
2 | Version: 1.3.1 | |
3 | 3 | Summary: asyncio (PEP 3156) Redis support |
4 | 4 | Home-page: https://github.com/aio-libs/aioredis |
5 | 5 | Author: Alexey Popravka |
34 | 34 | Sentinel support Yes |
35 | 35 | Redis Cluster support WIP |
36 | 36 | Trollius (python 2.7) No |
37 | Tested CPython versions `3.5, 3.6 3.7 <travis_>`_ [2]_ | |
38 | Tested PyPy3 versions `5.9.0 <travis_>`_ | |
39 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 <travis_>`_ | |
37 | Tested CPython versions `3.5.3, 3.6, 3.7 <travis_>`_ [1]_ | |
38 | Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 <travis_>`_ | |
39 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 <travis_>`_ | |
40 | 40 | Support for dev Redis server through low-level API |
41 | 41 | ================================ ============================== |
42 | 42 | |
43 | ||
44 | .. [2] For Python 3.3, 3.4 support use aioredis v0.3. | |
43 | .. [1] For Python 3.3, 3.4 support use aioredis v0.3. | |
45 | 44 | |
46 | 45 | Documentation |
47 | 46 | ------------- |
48 | 47 | |
49 | 48 | http://aioredis.readthedocs.io/ |
50 | 49 | |
51 | Usage examples | |
52 | -------------- | |
53 | ||
54 | Simple low-level interface: | |
50 | Usage example | |
51 | ------------- | |
52 | ||
53 | Simple high-level interface with connections pool: | |
55 | 54 | |
56 | 55 | .. code:: python |
57 | 56 | |
58 | 57 | import asyncio |
59 | 58 | import aioredis |
60 | 59 | |
61 | loop = asyncio.get_event_loop() | |
62 | ||
63 | 60 | async def go(): |
64 | conn = await aioredis.create_connection( | |
65 | 'redis://localhost', loop=loop) | |
66 | await conn.execute('set', 'my-key', 'value') | |
67 | val = await conn.execute('get', 'my-key') | |
68 | print(val) | |
69 | conn.close() | |
70 | await conn.wait_closed() | |
71 | loop.run_until_complete(go()) | |
72 | # will print 'value' | |
73 | ||
74 | Simple high-level interface: | |
75 | ||
76 | .. code:: python | |
77 | ||
78 | import asyncio | |
79 | import aioredis | |
80 | ||
81 | loop = asyncio.get_event_loop() | |
82 | ||
83 | async def go(): | |
84 | redis = await aioredis.create_redis( | |
85 | 'redis://localhost', loop=loop) | |
61 | redis = await aioredis.create_redis_pool( | |
62 | 'redis://localhost') | |
86 | 63 | await redis.set('my-key', 'value') |
87 | val = await redis.get('my-key') | |
64 | val = await redis.get('my-key', encoding='utf-8') | |
88 | 65 | print(val) |
89 | 66 | redis.close() |
90 | 67 | await redis.wait_closed() |
91 | loop.run_until_complete(go()) | |
92 | # will print 'value' | |
93 | ||
94 | Connections pool: | |
95 | ||
96 | .. code:: python | |
97 | ||
98 | import asyncio | |
99 | import aioredis | |
100 | ||
101 | loop = asyncio.get_event_loop() | |
102 | ||
103 | async def go(): | |
104 | pool = await aioredis.create_pool( | |
105 | 'redis://localhost', | |
106 | minsize=5, maxsize=10, | |
107 | loop=loop) | |
108 | await pool.execute('set', 'my-key', 'value') | |
109 | print(await pool.execute('get', 'my-key')) | |
110 | # graceful shutdown | |
111 | pool.close() | |
112 | await pool.wait_closed() | |
113 | ||
114 | loop.run_until_complete(go()) | |
115 | ||
116 | Simple high-level interface with connections pool: | |
117 | ||
118 | .. code:: python | |
119 | ||
120 | import asyncio | |
121 | import aioredis | |
122 | ||
123 | loop = asyncio.get_event_loop() | |
124 | ||
125 | async def go(): | |
126 | redis = await aioredis.create_redis_pool( | |
127 | 'redis://localhost', | |
128 | minsize=5, maxsize=10, | |
129 | loop=loop) | |
130 | await redis.set('my-key', 'value') | |
131 | val = await redis.get('my-key') | |
132 | print(val) | |
133 | redis.close() | |
134 | await redis.wait_closed() | |
135 | loop.run_until_complete(go()) | |
68 | ||
69 | asyncio.run(go()) | |
136 | 70 | # will print 'value' |
137 | 71 | |
138 | 72 | Requirements |
170 | 104 | |
171 | 105 | Changes |
172 | 106 | ------- |
107 | ||
108 | .. towncrier release notes start | |
109 | ||
110 | 1.3.1 (2019-12-02) | |
111 | ^^^^^^^^^^^^^^^^^^ | |
112 | Bugfixes | |
113 | ~~~~~~~~ | |
114 | ||
115 | - Fix transaction data decoding | |
116 | (see `#657 <https://github.com/aio-libs/aioredis/issues/657>`_); | |
117 | - Fix duplicate calls to ``pool.wait_closed()`` upon ``create_pool()`` exception. | |
118 | (see `#671 <https://github.com/aio-libs/aioredis/issues/671>`_); | |
119 | ||
120 | Deprecations and Removals | |
121 | ~~~~~~~~~~~~~~~~~~~~~~~~~ | |
122 | ||
123 | - Drop explicit loop requirement in API. | |
124 | Deprecate ``loop`` argument. | |
125 | Throw warning in Python 3.8+ if explicit ``loop`` is passed to methods. | |
126 | (see `#666 <https://github.com/aio-libs/aioredis/issues/666>`_); | |
127 | ||
128 | Misc | |
129 | ~~~~ | |
130 | ||
131 | - `#643 <https://github.com/aio-libs/aioredis/issues/643>`_, | |
132 | `#646 <https://github.com/aio-libs/aioredis/issues/646>`_, | |
133 | `#648 <https://github.com/aio-libs/aioredis/issues/648>`_; | |
134 | ||
135 | ||
136 | 1.3.0 (2019-09-24) | |
137 | ^^^^^^^^^^^^^^^^^^ | |
138 | Features | |
139 | ~~~~~~~~ | |
140 | ||
141 | - Added ``xdel`` and ``xtrim`` method which missed in ``commands/streams.py`` & also added unit test code for them | |
142 | (see `#438 <https://github.com/aio-libs/aioredis/issues/438>`_); | |
143 | - Add ``count`` argument to ``spop`` command | |
144 | (see `#485 <https://github.com/aio-libs/aioredis/issues/485>`_); | |
145 | - Add support for ``zpopmax`` and ``zpopmin`` redis commands | |
146 | (see `#550 <https://github.com/aio-libs/aioredis/issues/550>`_); | |
147 | - Add ``towncrier``: change notes are now stored in ``CHANGES.txt`` | |
148 | (see `#576 <https://github.com/aio-libs/aioredis/issues/576>`_); | |
149 | - Type hints for the library | |
150 | (see `#584 <https://github.com/aio-libs/aioredis/issues/584>`_); | |
151 | - A few additions to the sorted set commands: | |
152 | ||
153 | - the blocking pop commands: ``BZPOPMAX`` and ``BZPOPMIN`` | |
154 | ||
155 | - the ``CH`` and ``INCR`` options of the ``ZADD`` command | |
156 | ||
157 | (see `#618 <https://github.com/aio-libs/aioredis/issues/618>`_); | |
158 | - Added ``no_ack`` parameter to ``xread_group`` streams method in ``commands/streams.py`` | |
159 | (see `#625 <https://github.com/aio-libs/aioredis/issues/625>`_); | |
160 | ||
161 | Bugfixes | |
162 | ~~~~~~~~ | |
163 | ||
164 | - Fix for sensitive logging | |
165 | (see `#459 <https://github.com/aio-libs/aioredis/issues/459>`_); | |
166 | - Fix slow memory leak in ``wait_closed`` implementation | |
167 | (see `#498 <https://github.com/aio-libs/aioredis/issues/498>`_); | |
168 | - Fix handling of instances were Redis returns null fields for a stream message | |
169 | (see `#605 <https://github.com/aio-libs/aioredis/issues/605>`_); | |
170 | ||
171 | Improved Documentation | |
172 | ~~~~~~~~~~~~~~~~~~~~~~ | |
173 | ||
174 | - Rewrite "Getting started" documentation. | |
175 | (see `#641 <https://github.com/aio-libs/aioredis/issues/641>`_); | |
176 | ||
177 | Misc | |
178 | ~~~~ | |
179 | ||
180 | - `#585 <https://github.com/aio-libs/aioredis/issues/585>`_, | |
181 | `#611 <https://github.com/aio-libs/aioredis/issues/611>`_, | |
182 | `#612 <https://github.com/aio-libs/aioredis/issues/612>`_, | |
183 | `#619 <https://github.com/aio-libs/aioredis/issues/619>`_, | |
184 | `#620 <https://github.com/aio-libs/aioredis/issues/620>`_, | |
185 | `#642 <https://github.com/aio-libs/aioredis/issues/642>`_; | |
186 | ||
173 | 187 | |
174 | 188 | 1.2.0 (2018-10-24) |
175 | 189 | ^^^^^^^^^^^^^^^^^^ |
519 | 533 | * Fixed cancellation of wait_closed |
520 | 534 | (see `#118 <https://github.com/aio-libs/aioredis/issues/118>`_); |
521 | 535 | |
522 | * Fixed ``time()`` convertion to float | |
536 | * Fixed ``time()`` conversion to float | |
523 | 537 | (see `#126 <https://github.com/aio-libs/aioredis/issues/126>`_); |
524 | 538 | |
525 | 539 | * Fixed ``hmset()`` method to return bool instead of ``b'OK'`` |
53 | 53 | docs/_build/man/aioredis.1 |
54 | 54 | examples/commands.py |
55 | 55 | examples/connection.py |
56 | examples/iscan.py | |
57 | 56 | examples/pipeline.py |
58 | 57 | examples/pool.py |
59 | examples/pool2.py | |
60 | 58 | examples/pool_pubsub.py |
61 | 59 | examples/pubsub.py |
62 | 60 | examples/pubsub2.py |
64 | 62 | examples/sentinel.py |
65 | 63 | examples/transaction.py |
66 | 64 | examples/transaction2.py |
65 | examples/getting_started/00_connect.py | |
66 | examples/getting_started/01_decoding.py | |
67 | examples/getting_started/02_decoding.py | |
68 | examples/getting_started/03_multiexec.py | |
69 | examples/getting_started/04_pubsub.py | |
70 | examples/getting_started/05_pubsub.py | |
71 | examples/getting_started/06_sentinel.py | |
72 | tests/_testutils.py | |
67 | 73 | tests/coerced_keys_dict_test.py |
68 | 74 | tests/conftest.py |
69 | 75 | tests/connection_commands_test.py |
0 | 0 | .\" Man page generated from reStructuredText. |
1 | 1 | . |
2 | .TH "AIOREDIS" "1" "Oct 24, 2018" "1.2" "aioredis" | |
2 | .TH "AIOREDIS" "1" "Dec 02, 2019" "1.3" "aioredis" | |
3 | 3 | .SH NAME |
4 | 4 | aioredis \- aioredis Documentation |
5 | 5 | . |
78 | 78 | T{ |
79 | 79 | Sentinel support |
80 | 80 | T} T{ |
81 | Yes [1] | |
81 | Yes | |
82 | 82 | T} |
83 | 83 | _ |
84 | 84 | T{ |
96 | 96 | T{ |
97 | 97 | Tested CPython versions |
98 | 98 | T} T{ |
99 | \fI\%3.5, 3.6\fP [2] | |
99 | \fI\%3.5.3, 3.6, 3.7\fP [1] | |
100 | 100 | T} |
101 | 101 | _ |
102 | 102 | T{ |
103 | 103 | Tested PyPy3 versions |
104 | 104 | T} T{ |
105 | \fI\%5.9.0\fP | |
105 | \fI\%pypy3.5\-7.0 pypy3.6\-7.1.1\fP | |
106 | 106 | T} |
107 | 107 | _ |
108 | 108 | T{ |
109 | 109 | Tested for Redis server |
110 | 110 | T} T{ |
111 | \fI\%2.6, 2.8, 3.0, 3.2, 4.0\fP | |
111 | \fI\%2.6, 2.8, 3.0, 3.2, 4.0 5.0\fP | |
112 | 112 | T} |
113 | 113 | _ |
114 | 114 | T{ |
119 | 119 | _ |
120 | 120 | .TE |
121 | 121 | .IP [1] 5 |
122 | Sentinel support is available in master branch. | |
123 | This feature is not yet stable and may have some issues. | |
124 | .IP [2] 5 | |
125 | 122 | For Python 3.3, 3.4 support use aioredis v0.3. |
126 | 123 | .SH INSTALLATION |
127 | 124 | .sp |
150 | 147 | .INDENT 0.0 |
151 | 148 | .IP \(bu 2 |
152 | 149 | Issue Tracker: \fI\%https://github.com/aio\-libs/aioredis/issues\fP |
150 | .IP \(bu 2 | |
151 | Google Group: \fI\%https://groups.google.com/forum/#!forum/aio\-libs\fP | |
152 | .IP \(bu 2 | |
153 | Gitter: \fI\%https://gitter.im/aio\-libs/Lobby\fP | |
153 | 154 | .IP \(bu 2 |
154 | 155 | Source Code: \fI\%https://github.com/aio\-libs/aioredis\fP |
155 | 156 | .IP \(bu 2 |
169 | 170 | .ce 0 |
170 | 171 | .sp |
171 | 172 | .SH GETTING STARTED |
172 | .SS Commands Pipelining | |
173 | .sp | |
174 | Commands pipelining is built\-in. | |
175 | .sp | |
176 | Every command is sent to transport at\-once | |
177 | (ofcourse if no \fBTypeError\fP/\fBValueError\fP was raised) | |
178 | .sp | |
179 | When you making a call with \fBawait\fP / \fByield from\fP you will be waiting result, | |
180 | and then gather results. | |
181 | .sp | |
182 | Simple example show both cases (\fBget source code\fP): | |
183 | .INDENT 0.0 | |
184 | .INDENT 3.5 | |
185 | .sp | |
186 | .nf | |
187 | .ft C | |
188 | # No pipelining; | |
189 | async def wait_each_command(): | |
190 | val = await redis.get(\(aqfoo\(aq) # wait until \(gaval\(ga is available | |
191 | cnt = await redis.incr(\(aqbar\(aq) # wait until \(gacnt\(ga is available | |
192 | return val, cnt | |
193 | ||
194 | # Sending multiple commands and then gathering results | |
195 | async def pipelined(): | |
196 | fut1 = redis.get(\(aqfoo\(aq) # issue command and return future | |
197 | fut2 = redis.incr(\(aqbar\(aq) # issue command and return future | |
198 | # block until results are available | |
199 | val, cnt = await asyncio.gather(fut1, fut2) | |
200 | return val, cnt | |
201 | ||
173 | .SS Installation | |
174 | .INDENT 0.0 | |
175 | .INDENT 3.5 | |
176 | .sp | |
177 | .nf | |
178 | .ft C | |
179 | $ pip install aioredis | |
180 | .ft P | |
181 | .fi | |
182 | .UNINDENT | |
183 | .UNINDENT | |
184 | .sp | |
185 | This will install aioredis along with its dependencies: | |
186 | .INDENT 0.0 | |
187 | .IP \(bu 2 | |
188 | hiredis protocol parser; | |
189 | .IP \(bu 2 | |
190 | async\-timeout \-\-\- used in Sentinel client. | |
191 | .UNINDENT | |
192 | .SS Without dependencies | |
193 | .sp | |
194 | In some cases [1] you might need to install \fBaioredis\fP without \fBhiredis\fP, | |
195 | it is achievable with the following command: | |
196 | .INDENT 0.0 | |
197 | .INDENT 3.5 | |
198 | .sp | |
199 | .nf | |
200 | .ft C | |
201 | $ pip install \-\-no\-deps aioredis async\-timeout | |
202 | .ft P | |
203 | .fi | |
204 | .UNINDENT | |
205 | .UNINDENT | |
206 | .SS Installing latest version from Git | |
207 | .INDENT 0.0 | |
208 | .INDENT 3.5 | |
209 | .sp | |
210 | .nf | |
211 | .ft C | |
212 | $ pip install git+https://github.com/aio\-libs/aioredis@master#egg=aioredis | |
213 | .ft P | |
214 | .fi | |
215 | .UNINDENT | |
216 | .UNINDENT | |
217 | .SS Connecting | |
218 | .sp | |
219 | \fBget source code\fP | |
220 | .INDENT 0.0 | |
221 | .INDENT 3.5 | |
222 | .sp | |
223 | .nf | |
224 | .ft C | |
225 | import asyncio | |
226 | import aioredis | |
227 | ||
228 | ||
229 | async def main(): | |
230 | redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) | |
231 | await redis.set(\(aqmy\-key\(aq, \(aqvalue\(aq) | |
232 | value = await redis.get(\(aqmy\-key\(aq, encoding=\(aqutf\-8\(aq) | |
233 | print(value) | |
234 | ||
235 | redis.close() | |
236 | await redis.wait_closed() | |
237 | ||
238 | asyncio.run(main()) | |
239 | ||
240 | .ft P | |
241 | .fi | |
242 | .UNINDENT | |
243 | .UNINDENT | |
244 | .sp | |
245 | \fBaioredis.create_redis_pool()\fP creates a Redis client backed by a pool of | |
246 | connections. The only required argument is the address of Redis server. | |
247 | Redis server address can be either host and port tuple | |
248 | (ex: \fB(\(aqlocalhost\(aq, 6379)\fP), or a string which will be parsed into | |
249 | TCP or UNIX socket address (ex: \fB\(aqunix://var/run/redis.sock\(aq\fP, | |
250 | \fB\(aq//var/run/redis.sock\(aq\fP, \fBredis://redis\-host\-or\-ip:6379/1\fP). | |
251 | .sp | |
252 | Closing the client. Calling \fBredis.close()\fP and then \fBredis.wait_closed()\fP | |
253 | is strongly encouraged as this will methods will shutdown all open connections | |
254 | and cleanup resources. | |
255 | .sp | |
256 | See the commands reference for the full list of supported commands. | |
257 | .SS Connecting to specific DB | |
258 | .sp | |
259 | There are several ways you can specify database index to select on connection: | |
260 | .INDENT 0.0 | |
261 | .IP 1. 3 | |
262 | explicitly pass db index as \fBdb\fP argument: | |
263 | .INDENT 3.0 | |
264 | .INDENT 3.5 | |
265 | .sp | |
266 | .nf | |
267 | .ft C | |
268 | redis = await aioredis.create_redis_pool( | |
269 | \(aqredis://localhost\(aq, db=1) | |
270 | .ft P | |
271 | .fi | |
272 | .UNINDENT | |
273 | .UNINDENT | |
274 | .IP 2. 3 | |
275 | pass db index in URI as path component: | |
276 | .INDENT 3.0 | |
277 | .INDENT 3.5 | |
278 | .sp | |
279 | .nf | |
280 | .ft C | |
281 | redis = await aioredis.create_redis_pool( | |
282 | \(aqredis://localhost/2\(aq) | |
202 | 283 | .ft P |
203 | 284 | .fi |
204 | 285 | .UNINDENT |
205 | 286 | .UNINDENT |
206 | 287 | .sp |
207 | 288 | \fBNOTE:\fP |
208 | .INDENT 0.0 | |
209 | .INDENT 3.5 | |
210 | For convenience \fBaioredis\fP provides | |
211 | \fBpipeline()\fP | |
212 | method allowing to execute bulk of commands as one | |
213 | (\fBget source code\fP): | |
214 | .INDENT 0.0 | |
215 | .INDENT 3.5 | |
216 | .INDENT 0.0 | |
217 | .INDENT 3.5 | |
218 | .sp | |
219 | .nf | |
220 | .ft C | |
221 | # Explicit pipeline | |
222 | async def explicit_pipeline(): | |
223 | pipe = redis.pipeline() | |
224 | fut1 = pipe.get(\(aqfoo\(aq) | |
225 | fut2 = pipe.incr(\(aqbar\(aq) | |
226 | result = await pipe.execute() | |
227 | val, cnt = await asyncio.gather(fut1, fut2) | |
228 | assert result == [val, cnt] | |
229 | return val, cnt | |
230 | ||
231 | .ft P | |
232 | .fi | |
233 | .UNINDENT | |
234 | .UNINDENT | |
235 | .UNINDENT | |
236 | .UNINDENT | |
289 | .INDENT 3.0 | |
290 | .INDENT 3.5 | |
291 | DB index specified in URI will take precedence over | |
292 | \fBdb\fP keyword argument. | |
293 | .UNINDENT | |
294 | .UNINDENT | |
295 | .IP 3. 3 | |
296 | call \fBselect()\fP method: | |
297 | .INDENT 3.0 | |
298 | .INDENT 3.5 | |
299 | .sp | |
300 | .nf | |
301 | .ft C | |
302 | redis = await aioredis.create_redis_pool( | |
303 | \(aqredis://localhost/\(aq) | |
304 | await redis.select(3) | |
305 | .ft P | |
306 | .fi | |
307 | .UNINDENT | |
308 | .UNINDENT | |
309 | .UNINDENT | |
310 | .SS Connecting to password\-protected Redis instance | |
311 | .sp | |
312 | The password can be specified either in keyword argument or in address URI: | |
313 | .INDENT 0.0 | |
314 | .INDENT 3.5 | |
315 | .sp | |
316 | .nf | |
317 | .ft C | |
318 | redis = await aioredis.create_redis_pool( | |
319 | \(aqredis://localhost\(aq, password=\(aqsEcRet\(aq) | |
320 | ||
321 | redis = await aioredis.create_redis_pool( | |
322 | \(aqredis://:sEcRet@localhost/\(aq) | |
323 | ||
324 | redis = await aioredis.create_redis_pool( | |
325 | \(aqredis://localhost/?password=sEcRet\(aq) | |
326 | .ft P | |
327 | .fi | |
328 | .UNINDENT | |
329 | .UNINDENT | |
330 | .sp | |
331 | \fBNOTE:\fP | |
332 | .INDENT 0.0 | |
333 | .INDENT 3.5 | |
334 | Password specified in URI will take precedence over password keyword. | |
335 | .sp | |
336 | Also specifying both password as authentication component and | |
337 | query parameter in URI is forbidden. | |
338 | .INDENT 0.0 | |
339 | .INDENT 3.5 | |
340 | .sp | |
341 | .nf | |
342 | .ft C | |
343 | # This will cause assertion error | |
344 | await aioredis.create_redis_pool( | |
345 | \(aqredis://:sEcRet@localhost/?password=SeCreT\(aq) | |
346 | .ft P | |
347 | .fi | |
348 | .UNINDENT | |
349 | .UNINDENT | |
350 | .UNINDENT | |
351 | .UNINDENT | |
352 | .SS Result messages decoding | |
353 | .sp | |
354 | By default \fBaioredis\fP will return \fI\%bytes\fP for most Redis | |
355 | commands that return string replies. Redis error replies are known to be | |
356 | valid UTF\-8 strings so error messages are decoded automatically. | |
357 | .sp | |
358 | If you know that data in Redis is valid string you can tell \fBaioredis\fP | |
359 | to decode result by passing keyword\-only argument \fBencoding\fP | |
360 | in a command call: | |
361 | .sp | |
362 | \fBget source code\fP | |
363 | .INDENT 0.0 | |
364 | .INDENT 3.5 | |
365 | .sp | |
366 | .nf | |
367 | .ft C | |
368 | import asyncio | |
369 | import aioredis | |
370 | ||
371 | ||
372 | async def main(): | |
373 | redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) | |
374 | await redis.set(\(aqkey\(aq, \(aqstring\-value\(aq) | |
375 | bin_value = await redis.get(\(aqkey\(aq) | |
376 | assert bin_value == b\(aqstring\-value\(aq | |
377 | ||
378 | str_value = await redis.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) | |
379 | assert str_value == \(aqstring\-value\(aq | |
380 | ||
381 | redis.close() | |
382 | await redis.wait_closed() | |
383 | ||
384 | asyncio.run(main()) | |
385 | ||
386 | .ft P | |
387 | .fi | |
388 | .UNINDENT | |
389 | .UNINDENT | |
390 | .sp | |
391 | \fBaioredis\fP can decode messages for all Redis data types like | |
392 | lists, hashes, sorted sets, etc: | |
393 | .sp | |
394 | \fBget source code\fP | |
395 | .INDENT 0.0 | |
396 | .INDENT 3.5 | |
397 | .sp | |
398 | .nf | |
399 | .ft C | |
400 | import asyncio | |
401 | import aioredis | |
402 | ||
403 | ||
404 | async def main(): | |
405 | redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) | |
406 | ||
407 | await redis.hmset_dict(\(aqhash\(aq, | |
408 | key1=\(aqvalue1\(aq, | |
409 | key2=\(aqvalue2\(aq, | |
410 | key3=123) | |
411 | ||
412 | result = await redis.hgetall(\(aqhash\(aq, encoding=\(aqutf\-8\(aq) | |
413 | assert result == { | |
414 | \(aqkey1\(aq: \(aqvalue1\(aq, | |
415 | \(aqkey2\(aq: \(aqvalue2\(aq, | |
416 | \(aqkey3\(aq: \(aq123\(aq, # note that Redis returns int as string | |
417 | } | |
418 | ||
419 | redis.close() | |
420 | await redis.wait_closed() | |
421 | ||
422 | asyncio.run(main()) | |
423 | ||
424 | .ft P | |
425 | .fi | |
237 | 426 | .UNINDENT |
238 | 427 | .UNINDENT |
239 | 428 | .SS Multi/Exec transactions |
240 | 429 | .sp |
241 | \fBaioredis\fP provides several ways for executing transactions: | |
242 | .INDENT 0.0 | |
243 | .IP \(bu 2 | |
244 | when using raw connection you can issue \fBMulti\fP/\fBExec\fP commands | |
245 | manually; | |
246 | .IP \(bu 2 | |
247 | when using \fBaioredis.Redis\fP instance you can use | |
248 | \fBmulti_exec()\fP transaction pipeline. | |
430 | \fBget source code\fP | |
431 | .INDENT 0.0 | |
432 | .INDENT 3.5 | |
433 | .sp | |
434 | .nf | |
435 | .ft C | |
436 | import asyncio | |
437 | import aioredis | |
438 | ||
439 | ||
440 | async def main(): | |
441 | redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) | |
442 | ||
443 | tr = redis.multi_exec() | |
444 | tr.set(\(aqkey1\(aq, \(aqvalue1\(aq) | |
445 | tr.set(\(aqkey2\(aq, \(aqvalue2\(aq) | |
446 | ok1, ok2 = await tr.execute() | |
447 | assert ok1 | |
448 | assert ok2 | |
449 | ||
450 | asyncio.run(main()) | |
451 | ||
452 | .ft P | |
453 | .fi | |
454 | .UNINDENT | |
249 | 455 | .UNINDENT |
250 | 456 | .sp |
251 | 457 | \fBmulti_exec()\fP method creates and returns new |
252 | 458 | \fBMultiExec\fP object which is used for buffering commands and |
253 | 459 | then executing them inside MULTI/EXEC block. |
254 | 460 | .sp |
255 | Here is a simple example | |
256 | (\fBget source code\fP): | |
257 | .INDENT 0.0 | |
258 | .INDENT 3.5 | |
259 | .sp | |
260 | .nf | |
261 | .ft C | |
262 | async def transaction(): | |
263 | tr = redis.multi_exec() | |
264 | future1 = tr.set(\(aqfoo\(aq, \(aq123\(aq) | |
265 | future2 = tr.set(\(aqbar\(aq, \(aq321\(aq) | |
266 | result = await tr.execute() | |
267 | assert result == await asyncio.gather(future1, future2) | |
268 | return result | |
269 | ||
270 | .ft P | |
271 | .fi | |
272 | .UNINDENT | |
273 | .UNINDENT | |
274 | .sp | |
275 | As you can notice \fBawait\fP is \fBonly\fP used at line 5 with \fBtr.execute\fP | |
276 | and \fBnot with\fP \fBtr.set(...)\fP calls. | |
277 | .sp | |
278 | 461 | \fBWARNING:\fP |
279 | 462 | .INDENT 0.0 |
280 | 463 | .INDENT 3.5 |
299 | 482 | .sp |
300 | 483 | \fBaioredis\fP provides support for Redis Publish/Subscribe messaging. |
301 | 484 | .sp |
302 | To switch connection to subscribe mode you must execute \fBsubscribe\fP command | |
303 | by yield\(aqing from \fBsubscribe()\fP it returns a list of | |
304 | \fBChannel\fP objects representing subscribed channels. | |
305 | .sp | |
306 | As soon as connection is switched to subscribed mode the channel will receive | |
307 | and store messages | |
485 | To start listening for messages you must call either | |
486 | \fBsubscribe()\fP or | |
487 | \fBpsubscribe()\fP method. | |
488 | Both methods return list of \fBChannel\fP objects representing | |
489 | subscribed channels. | |
490 | .sp | |
491 | Right after that the channel will receive and store messages | |
308 | 492 | (the \fBChannel\fP object is basically a wrapper around \fI\%asyncio.Queue\fP). |
309 | 493 | To read messages from channel you need to use \fBget()\fP |
310 | 494 | or \fBget_json()\fP coroutines. |
311 | 495 | .sp |
312 | \fBNOTE:\fP | |
313 | .INDENT 0.0 | |
314 | .INDENT 3.5 | |
315 | In Pub/Sub mode redis connection can only receive messages or issue | |
316 | (P)SUBSCRIBE / (P)UNSUBSCRIBE commands. | |
317 | .UNINDENT | |
318 | .UNINDENT | |
319 | .sp | |
320 | Pub/Sub example (\fBget source code\fP): | |
321 | .INDENT 0.0 | |
322 | .INDENT 3.5 | |
323 | .sp | |
324 | .nf | |
325 | .ft C | |
326 | sub = await aioredis.create_redis( | |
327 | \(aqredis://localhost\(aq) | |
328 | ||
329 | ch1, ch2 = await sub.subscribe(\(aqchannel:1\(aq, \(aqchannel:2\(aq) | |
330 | assert isinstance(ch1, aioredis.Channel) | |
331 | assert isinstance(ch2, aioredis.Channel) | |
332 | ||
333 | async def async_reader(channel): | |
334 | while await channel.wait_message(): | |
335 | msg = await channel.get(encoding=\(aqutf\-8\(aq) | |
336 | # ... process message ... | |
337 | print("message in {}: {}".format(channel.name, msg)) | |
338 | ||
339 | tsk1 = asyncio.ensure_future(async_reader(ch1)) | |
340 | ||
341 | # Or alternatively: | |
342 | ||
343 | async def async_reader2(channel): | |
344 | while True: | |
345 | msg = await channel.get(encoding=\(aqutf\-8\(aq) | |
346 | if msg is None: | |
347 | break | |
348 | # ... process message ... | |
349 | print("message in {}: {}".format(channel.name, msg)) | |
350 | ||
351 | tsk2 = asyncio.ensure_future(async_reader2(ch2)) | |
352 | ||
353 | .ft P | |
354 | .fi | |
355 | .UNINDENT | |
356 | .UNINDENT | |
357 | .sp | |
358 | Pub/Sub example (\fBget source code\fP): | |
359 | .INDENT 0.0 | |
360 | .INDENT 3.5 | |
361 | .sp | |
362 | .nf | |
363 | .ft C | |
364 | async def reader(channel): | |
365 | while (await channel.wait_message()): | |
366 | msg = await channel.get(encoding=\(aqutf\-8\(aq) | |
367 | # ... process message ... | |
368 | print("message in {}: {}".format(channel.name, msg)) | |
369 | ||
370 | if msg == STOPWORD: | |
371 | return | |
372 | ||
373 | with await pool as conn: | |
374 | await conn.execute_pubsub(\(aqsubscribe\(aq, \(aqchannel:1\(aq) | |
375 | channel = conn.pubsub_channels[\(aqchannel:1\(aq] | |
376 | await reader(channel) # wait for reader to complete | |
377 | await conn.execute_pubsub(\(aqunsubscribe\(aq, \(aqchannel:1\(aq) | |
378 | ||
379 | # Explicit connection usage | |
380 | conn = await pool.acquire() | |
381 | try: | |
382 | await conn.execute_pubsub(\(aqsubscribe\(aq, \(aqchannel:1\(aq) | |
383 | channel = conn.pubsub_channels[\(aqchannel:1\(aq] | |
384 | await reader(channel) # wait for reader to complete | |
385 | await conn.execute_pubsub(\(aqunsubscribe\(aq, \(aqchannel:1\(aq) | |
386 | finally: | |
387 | pool.release(conn) | |
388 | ||
389 | .ft P | |
390 | .fi | |
391 | .UNINDENT | |
392 | .UNINDENT | |
393 | .SS Python 3.5 \fBasync with\fP / \fBasync for\fP support | |
394 | .sp | |
395 | \fBaioredis\fP is compatible with \fI\%PEP 492\fP\&. | |
396 | .sp | |
397 | \fBPool\fP can be used with \fI\%async with\fP | |
398 | (\fBget source code\fP): | |
399 | .INDENT 0.0 | |
400 | .INDENT 3.5 | |
401 | .sp | |
402 | .nf | |
403 | .ft C | |
404 | pool = await aioredis.create_pool( | |
405 | \(aqredis://localhost\(aq) | |
406 | async with pool.get() as conn: | |
407 | value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
408 | print(\(aqraw value:\(aq, value) | |
409 | ||
410 | .ft P | |
411 | .fi | |
412 | .UNINDENT | |
413 | .UNINDENT | |
414 | .sp | |
415 | It also can be used with \fBawait\fP: | |
416 | .INDENT 0.0 | |
417 | .INDENT 3.5 | |
418 | .sp | |
419 | .nf | |
420 | .ft C | |
421 | pool = await aioredis.create_pool( | |
422 | \(aqredis://localhost\(aq) | |
423 | # This is exactly the same as: | |
424 | # with (yield from pool) as conn: | |
425 | with (await pool) as conn: | |
426 | value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
427 | print(\(aqraw value:\(aq, value) | |
428 | ||
429 | .ft P | |
430 | .fi | |
431 | .UNINDENT | |
432 | .UNINDENT | |
433 | .sp | |
434 | New \fBscan\fP\-family commands added with support of \fI\%async for\fP | |
435 | (\fBget source code\fP): | |
436 | .INDENT 0.0 | |
437 | .INDENT 3.5 | |
438 | .sp | |
439 | .nf | |
440 | .ft C | |
441 | redis = await aioredis.create_redis( | |
442 | \(aqredis://localhost\(aq) | |
443 | ||
444 | async for key in redis.iscan(match=\(aqsomething*\(aq): | |
445 | print(\(aqMatched:\(aq, key) | |
446 | ||
447 | async for name, val in redis.ihscan(key, match=\(aqsomething*\(aq): | |
448 | print(\(aqMatched:\(aq, name, \(aq\->\(aq, val) | |
449 | ||
450 | async for val in redis.isscan(key, match=\(aqsomething*\(aq): | |
451 | print(\(aqMatched:\(aq, val) | |
452 | ||
453 | async for val, score in redis.izscan(key, match=\(aqsomething*\(aq): | |
454 | print(\(aqMatched:\(aq, val, \(aq:\(aq, score) | |
455 | ||
456 | .ft P | |
457 | .fi | |
458 | .UNINDENT | |
459 | .UNINDENT | |
460 | .SS SSL/TLS support | |
461 | .sp | |
462 | Though Redis server \fI\%does not support data encryption\fP | |
463 | it is still possible to setup Redis server behind SSL proxy. For such cases | |
464 | \fBaioredis\fP library support secure connections through \fI\%asyncio\fP | |
465 | SSL support. See \fI\%BaseEventLoop.create_connection\fP for details. | |
466 | .SH MIGRATING FROM V0.3 TO V1.0 | |
467 | .SS API changes and backward incompatible changes: | |
468 | .INDENT 0.0 | |
469 | .IP \(bu 2 | |
470 | \fI\%aioredis.create_pool\fP | |
471 | .IP \(bu 2 | |
472 | \fI\%aioredis.create_reconnecting_redis\fP | |
473 | .IP \(bu 2 | |
474 | \fI\%aioredis.Redis\fP | |
475 | .IP \(bu 2 | |
476 | \fI\%Blocking operations and connection sharing\fP | |
477 | .IP \(bu 2 | |
478 | \fI\%Sorted set commands return values\fP | |
479 | .IP \(bu 2 | |
480 | \fI\%Hash hscan command now returns list of tuples\fP | |
481 | .UNINDENT | |
496 | Example subscribing and reading channels: | |
497 | .sp | |
498 | \fBget source code\fP | |
499 | .INDENT 0.0 | |
500 | .INDENT 3.5 | |
501 | .sp | |
502 | .nf | |
503 | .ft C | |
504 | import asyncio | |
505 | import aioredis | |
506 | ||
507 | ||
508 | async def main(): | |
509 | redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) | |
510 | ||
511 | ch1, ch2 = await redis.subscribe(\(aqchannel:1\(aq, \(aqchannel:2\(aq) | |
512 | assert isinstance(ch1, aioredis.Channel) | |
513 | assert isinstance(ch2, aioredis.Channel) | |
514 | ||
515 | async def reader(channel): | |
516 | async for message in channel.iter(): | |
517 | print("Got message:", message) | |
518 | asyncio.get_running_loop().create_task(reader(ch1)) | |
519 | asyncio.get_running_loop().create_task(reader(ch2)) | |
520 | ||
521 | await redis.publish(\(aqchannel:1\(aq, \(aqHello\(aq) | |
522 | await redis.publish(\(aqchannel:2\(aq, \(aqWorld\(aq) | |
523 | ||
524 | redis.close() | |
525 | await redis.wait_closed() | |
526 | ||
527 | asyncio.run(main()) | |
528 | ||
529 | .ft P | |
530 | .fi | |
531 | .UNINDENT | |
532 | .UNINDENT | |
533 | .sp | |
534 | Subscribing and reading patterns: | |
535 | .sp | |
536 | \fBget source code\fP | |
537 | .INDENT 0.0 | |
538 | .INDENT 3.5 | |
539 | .sp | |
540 | .nf | |
541 | .ft C | |
542 | import asyncio | |
543 | import aioredis | |
544 | ||
545 | ||
546 | async def main(): | |
547 | redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) | |
548 | ||
549 | ch, = await redis.psubscribe(\(aqchannel:*\(aq) | |
550 | assert isinstance(ch, aioredis.Channel) | |
551 | ||
552 | async def reader(channel): | |
553 | async for ch, message in channel.iter(): | |
554 | print("Got message in channel:", ch, ":", message) | |
555 | asyncio.get_running_loop().create_task(reader(ch)) | |
556 | ||
557 | await redis.publish(\(aqchannel:1\(aq, \(aqHello\(aq) | |
558 | await redis.publish(\(aqchannel:2\(aq, \(aqWorld\(aq) | |
559 | ||
560 | redis.close() | |
561 | await redis.wait_closed() | |
562 | ||
563 | asyncio.run(main()) | |
564 | ||
565 | .ft P | |
566 | .fi | |
567 | .UNINDENT | |
568 | .UNINDENT | |
569 | .SS Sentinel client | |
570 | .sp | |
571 | \fBget source code\fP | |
572 | .INDENT 0.0 | |
573 | .INDENT 3.5 | |
574 | .sp | |
575 | .nf | |
576 | .ft C | |
577 | import asyncio | |
578 | import aioredis | |
579 | ||
580 | ||
581 | async def main(): | |
582 | sentinel = await aioredis.create_sentinel( | |
583 | [\(aqredis://localhost:26379\(aq, \(aqredis://sentinel2:26379\(aq]) | |
584 | redis = sentinel.master_for(\(aqmymaster\(aq) | |
585 | ||
586 | ok = await redis.set(\(aqkey\(aq, \(aqvalue\(aq) | |
587 | assert ok | |
588 | val = await redis.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) | |
589 | assert val == \(aqvalue\(aq | |
590 | ||
591 | asyncio.run(main()) | |
592 | ||
593 | .ft P | |
594 | .fi | |
595 | .UNINDENT | |
596 | .UNINDENT | |
597 | .sp | |
598 | Sentinel client requires a list of Redis Sentinel addresses to connect to | |
599 | and start discovering services. | |
600 | .sp | |
601 | Calling \fBmaster_for()\fP or | |
602 | \fBslave_for()\fP methods will return | |
603 | Redis clients connected to specified services monitored by Sentinel. | |
604 | .sp | |
605 | Sentinel client will detect failover and reconnect Redis clients automatically. | |
606 | .sp | |
607 | See detailed reference here | |
482 | 608 | |
483 | 609 | .sp |
484 | 610 | .ce |
486 | 612 | |
487 | 613 | .ce 0 |
488 | 614 | .sp |
489 | .SS aioredis.create_pool | |
490 | .sp | |
491 | \fBcreate_pool()\fP now returns \fBConnectionsPool\fP | |
492 | instead of \fBRedisPool\fP\&. | |
493 | .sp | |
494 | This means that pool now operates with \fBRedisConnection\fP | |
495 | objects and not \fBRedis\fP\&. | |
496 | .TS | |
497 | center; | |
498 | |l|l|. | |
499 | _ | |
500 | T{ | |
501 | v0.3 | |
502 | T} T{ | |
503 | .INDENT 0.0 | |
504 | .INDENT 3.5 | |
505 | .sp | |
506 | .nf | |
507 | .ft C | |
508 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
509 | ||
510 | with await pool as redis: | |
511 | # calling methods of Redis class | |
512 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
513 | .ft P | |
514 | .fi | |
515 | .UNINDENT | |
516 | .UNINDENT | |
517 | T} | |
518 | _ | |
519 | T{ | |
520 | v1.0 | |
521 | T} T{ | |
522 | .INDENT 0.0 | |
523 | .INDENT 3.5 | |
524 | .sp | |
525 | .nf | |
526 | .ft C | |
527 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
528 | ||
529 | with await pool as conn: | |
530 | # calling conn.lpush will raise AttributeError exception | |
531 | await conn.execute(\(aqlpush\(aq, \(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
532 | .ft P | |
533 | .fi | |
534 | .UNINDENT | |
535 | .UNINDENT | |
536 | T} | |
537 | _ | |
538 | .TE | |
539 | .SS aioredis.create_reconnecting_redis | |
540 | .sp | |
541 | \fBcreate_reconnecting_redis()\fP has been dropped. | |
542 | .sp | |
543 | \fBcreate_redis_pool()\fP can be used instead of former function. | |
544 | .TS | |
545 | center; | |
546 | |l|l|. | |
547 | _ | |
548 | T{ | |
549 | v0.3 | |
550 | T} T{ | |
551 | .INDENT 0.0 | |
552 | .INDENT 3.5 | |
553 | .sp | |
554 | .nf | |
555 | .ft C | |
556 | redis = await aioredis.create_reconnecting_redis( | |
557 | (\(aqlocalhost\(aq, 6379)) | |
558 | ||
559 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
560 | .ft P | |
561 | .fi | |
562 | .UNINDENT | |
563 | .UNINDENT | |
564 | T} | |
565 | _ | |
566 | T{ | |
567 | v1.0 | |
568 | T} T{ | |
569 | .INDENT 0.0 | |
570 | .INDENT 3.5 | |
571 | .sp | |
572 | .nf | |
573 | .ft C | |
574 | redis = await aioredis.create_redis_pool( | |
575 | (\(aqlocalhost\(aq, 6379)) | |
576 | ||
577 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
578 | .ft P | |
579 | .fi | |
580 | .UNINDENT | |
581 | .UNINDENT | |
582 | T} | |
583 | _ | |
584 | .TE | |
585 | .sp | |
586 | \fBcreate_redis_pool\fP returns \fBRedis\fP initialized with | |
587 | \fBConnectionsPool\fP which is responsible for reconnecting to server. | |
588 | .sp | |
589 | Also \fBcreate_reconnecting_redis\fP was patching the \fBRedisConnection\fP and | |
590 | breaking \fBclosed\fP property (it was always \fBTrue\fP). | |
591 | .SS aioredis.Redis | |
592 | .sp | |
593 | \fBRedis\fP class now operates with objects implementing | |
594 | \fBaioredis.abc.AbcConnection\fP interface. | |
595 | \fBRedisConnection\fP and \fBConnectionsPool\fP are | |
596 | both implementing \fBAbcConnection\fP so it is become possible to use same API | |
597 | when working with either single connection or connections pool. | |
598 | .TS | |
599 | center; | |
600 | |l|l|. | |
601 | _ | |
602 | T{ | |
603 | v0.3 | |
604 | T} T{ | |
605 | .INDENT 0.0 | |
606 | .INDENT 3.5 | |
607 | .sp | |
608 | .nf | |
609 | .ft C | |
610 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
611 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
612 | ||
613 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
614 | redis = await pool.acquire() # get Redis object | |
615 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
616 | .ft P | |
617 | .fi | |
618 | .UNINDENT | |
619 | .UNINDENT | |
620 | T} | |
621 | _ | |
622 | T{ | |
623 | v1.0 | |
624 | T} T{ | |
625 | .INDENT 0.0 | |
626 | .INDENT 3.5 | |
627 | .sp | |
628 | .nf | |
629 | .ft C | |
630 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
631 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
632 | ||
633 | redis = await aioredis.create_redis_pool((\(aqlocalhost\(aq, 6379)) | |
634 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
635 | .ft P | |
636 | .fi | |
637 | .UNINDENT | |
638 | .UNINDENT | |
639 | T} | |
640 | _ | |
641 | .TE | |
642 | .SS Blocking operations and connection sharing | |
643 | .sp | |
644 | Current implementation of \fBConnectionsPool\fP by default \fBexecutes | |
645 | every command on random connection\fP\&. The \fIPros\fP of this is that it allowed | |
646 | implementing \fBAbcConnection\fP interface and hide pool inside \fBRedis\fP class, | |
647 | and also keep pipelining feature (like RedisConnection.execute). | |
648 | The \fICons\fP of this is that \fBdifferent tasks may use same connection and block | |
649 | it\fP with some long\-running command. | |
650 | .sp | |
651 | We can call it \fBShared Mode\fP \-\-\- commands are sent to random connections | |
652 | in pool without need to lock [connection]: | |
653 | .INDENT 0.0 | |
654 | .INDENT 3.5 | |
655 | .sp | |
656 | .nf | |
657 | .ft C | |
658 | redis = await aioredis.create_redis_pool( | |
659 | (\(aqlocalhost\(aq, 6379), | |
660 | minsize=1, | |
661 | maxsize=1) | |
662 | ||
663 | async def task(): | |
664 | # Shared mode | |
665 | await redis.set(\(aqkey\(aq, \(aqval\(aq) | |
666 | ||
667 | asyncio.ensure_future(task()) | |
668 | asyncio.ensure_future(task()) | |
669 | # Both tasks will send commands through same connection | |
670 | # without acquiring (locking) it first. | |
671 | .ft P | |
672 | .fi | |
673 | .UNINDENT | |
674 | .UNINDENT | |
675 | .sp | |
676 | Blocking operations (like \fBblpop\fP, \fBbrpop\fP or long\-running LUA scripts) | |
677 | in \fBshared mode\fP will block connection and thus may lead to whole | |
678 | program malfunction. | |
679 | .sp | |
680 | This \fIblocking\fP issue can be easily solved by using exclusive connection | |
681 | for such operations: | |
682 | .INDENT 0.0 | |
683 | .INDENT 3.5 | |
684 | .sp | |
685 | .nf | |
686 | .ft C | |
687 | redis = await aioredis.create_redis_pool( | |
688 | (\(aqlocalhost\(aq, 6379), | |
689 | minsize=1, | |
690 | maxsize=1) | |
691 | ||
692 | async def task(): | |
693 | # Exclusive mode | |
694 | with await redis as r: | |
695 | await r.set(\(aqkey\(aq, \(aqval\(aq) | |
696 | asyncio.ensure_future(task()) | |
697 | asyncio.ensure_future(task()) | |
698 | # Both tasks will first acquire connection. | |
699 | .ft P | |
700 | .fi | |
701 | .UNINDENT | |
702 | .UNINDENT | |
703 | .sp | |
704 | We can call this \fBExclusive Mode\fP \-\-\- context manager is used to | |
705 | acquire (lock) exclusive connection from pool and send all commands through it. | |
706 | .sp | |
707 | \fBNOTE:\fP | |
708 | .INDENT 0.0 | |
709 | .INDENT 3.5 | |
710 | This technique is similar to v0.3 pool usage: | |
711 | .INDENT 0.0 | |
712 | .INDENT 3.5 | |
713 | .sp | |
714 | .nf | |
715 | .ft C | |
716 | # in aioredis v0.3 | |
717 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
718 | with await pool as redis: | |
719 | # Redis is bound to exclusive connection | |
720 | redis.set(\(aqkey\(aq, \(aqval\(aq) | |
721 | .ft P | |
722 | .fi | |
723 | .UNINDENT | |
724 | .UNINDENT | |
725 | .UNINDENT | |
726 | .UNINDENT | |
727 | .SS Sorted set commands return values | |
728 | .sp | |
729 | Sorted set commands (like \fBzrange\fP, \fBzrevrange\fP and others) that accept | |
730 | \fBwithscores\fP argument now \fBreturn list of tuples\fP instead of plain list. | |
731 | .TS | |
732 | center; | |
733 | |l|l|. | |
734 | _ | |
735 | T{ | |
736 | v0.3 | |
737 | T} T{ | |
738 | .INDENT 0.0 | |
739 | .INDENT 3.5 | |
740 | .sp | |
741 | .nf | |
742 | .ft C | |
743 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
744 | await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) | |
745 | res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) | |
746 | assert res == [b\(aqone\(aq, 1, b\(aqtwo\(aq, 2] | |
747 | ||
748 | # not the easiest way to make a dict | |
749 | it = iter(res) | |
750 | assert dict(zip(it, it)) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} | |
751 | .ft P | |
752 | .fi | |
753 | .UNINDENT | |
754 | .UNINDENT | |
755 | T} | |
756 | _ | |
757 | T{ | |
758 | v1.0 | |
759 | T} T{ | |
760 | .INDENT 0.0 | |
761 | .INDENT 3.5 | |
762 | .sp | |
763 | .nf | |
764 | .ft C | |
765 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
766 | await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) | |
767 | res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) | |
768 | assert res == [(b\(aqone\(aq, 1), (b\(aqtwo\(aq, 2)] | |
769 | ||
770 | # now it\(aqs easier to make a dict of it | |
771 | assert dict(res) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} | |
772 | .ft P | |
773 | .fi | |
774 | .UNINDENT | |
775 | .UNINDENT | |
776 | T} | |
777 | _ | |
778 | .TE | |
779 | .SS Hash \fBhscan\fP command now returns list of tuples | |
780 | .sp | |
781 | \fBhscan\fP updated to return a list of tuples instead of plain | |
782 | mixed key/value list. | |
783 | .TS | |
784 | center; | |
785 | |l|l|. | |
786 | _ | |
787 | T{ | |
788 | v0.3 | |
789 | T} T{ | |
790 | .INDENT 0.0 | |
791 | .INDENT 3.5 | |
792 | .sp | |
793 | .nf | |
794 | .ft C | |
795 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
796 | await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) | |
797 | cur, data = await redis.hscan(\(aqhash\(aq) | |
798 | assert data == [b\(aqone\(aq, b\(aq1\(aq, b\(aqtwo\(aq, b\(aq2\(aq] | |
799 | ||
800 | # not the easiest way to make a dict | |
801 | it = iter(data) | |
802 | assert dict(zip(it, it)) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} | |
803 | .ft P | |
804 | .fi | |
805 | .UNINDENT | |
806 | .UNINDENT | |
807 | T} | |
808 | _ | |
809 | T{ | |
810 | v1.0 | |
811 | T} T{ | |
812 | .INDENT 0.0 | |
813 | .INDENT 3.5 | |
814 | .sp | |
815 | .nf | |
816 | .ft C | |
817 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
818 | await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) | |
819 | cur, data = await redis.hscan(\(aqhash\(aq) | |
820 | assert data == [(b\(aqone\(aq, b\(aq1\(aq), (b\(aqtwo\(aq, b\(aq2\(aq)] | |
821 | ||
822 | # now it\(aqs easier to make a dict of it | |
823 | assert dict(data) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} | |
824 | .ft P | |
825 | .fi | |
826 | .UNINDENT | |
827 | .UNINDENT | |
828 | T} | |
829 | _ | |
830 | .TE | |
615 | .IP [1] 5 | |
616 | Celery hiredis issues | |
617 | (\fI\%#197\fP, | |
618 | \fI\%#317\fP) | |
831 | 619 | .SH AIOREDIS --- API REFERENCE |
832 | 620 | .SS Connection |
833 | 621 | .sp |
845 | 633 | import aioredis |
846 | 634 | |
847 | 635 | async def connect_uri(): |
848 | conn = await aioredis\&.create_connection( | |
636 | conn = await aioredis.create_connection( | |
849 | 637 | \(aqredis://localhost/0\(aq) |
850 | val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) | |
638 | val = await conn.execute(\(aqGET\(aq, \(aqmy\-key\(aq) | |
851 | 639 | |
852 | 640 | async def connect_tcp(): |
853 | conn = await aioredis\&.create_connection( | |
641 | conn = await aioredis.create_connection( | |
854 | 642 | (\(aqlocalhost\(aq, 6379)) |
855 | val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) | |
643 | val = await conn.execute(\(aqGET\(aq, \(aqmy\-key\(aq) | |
856 | 644 | |
857 | 645 | async def connect_unixsocket(): |
858 | conn = await aioredis\&.create_connection( | |
646 | conn = await aioredis.create_connection( | |
859 | 647 | \(aq/path/to/redis/socket\(aq) |
860 | 648 | # or uri \(aqunix:///path/to/redis/socket?db=1\(aq |
861 | val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) | |
862 | ||
863 | asyncio\&.get_event_loop()\&.run_until_complete(connect_tcp()) | |
864 | asyncio\&.get_event_loop()\&.run_until_complete(connect_unixsocket()) | |
865 | .ft P | |
866 | .fi | |
867 | .UNINDENT | |
868 | .UNINDENT | |
869 | .INDENT 0.0 | |
870 | .TP | |
871 | .B coroutine aioredis.create_connection(address, *, db=0, password=None, ssl=None, encoding=None, parser=None, loop=None, timeout=None) | |
649 | val = await conn.execute(\(aqGET\(aq, \(aqmy\-key\(aq) | |
650 | ||
651 | asyncio.get_event_loop().run_until_complete(connect_tcp()) | |
652 | asyncio.get_event_loop().run_until_complete(connect_unixsocket()) | |
653 | .ft P | |
654 | .fi | |
655 | .UNINDENT | |
656 | .UNINDENT | |
657 | .INDENT 0.0 | |
658 | .TP | |
659 | .B coroutine aioredis.create_connection(address, *, db=0, password=None, ssl=None, encoding=None, parser=None, timeout=None, connection_cls=None) | |
872 | 660 | Creates Redis connection. |
873 | 661 | .sp |
874 | 662 | Changed in version v0.3.1: \fBtimeout\fP argument added. |
875 | 663 | |
876 | 664 | .sp |
877 | 665 | Changed in version v1.0: \fBparser\fP argument added. |
666 | ||
667 | .sp | |
668 | Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. | |
878 | 669 | |
879 | 670 | .INDENT 7.0 |
880 | 671 | .TP |
909 | 700 | \fBparser\fP (\fIcallable\fP\fI or \fP\fI\%None\fP) \-\- Protocol parser class. Can be used to set custom protocol |
910 | 701 | reader; expected same interface as \fBhiredis.Reader\fP\&. |
911 | 702 | .IP \(bu 2 |
912 | \fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance | |
913 | (uses \fI\%asyncio.get_event_loop()\fP if not specified). | |
914 | .IP \(bu 2 | |
915 | 703 | \fBtimeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) \-\- Max time to open a connection, otherwise |
916 | 704 | raise \fI\%asyncio.TimeoutError\fP exception. |
917 | 705 | \fBNone\fP by default |
706 | .IP \(bu 2 | |
707 | \fBconnection_cls\fP (\fBabc.AbcConnection\fP or None) \-\- Custom connection class. \fBNone\fP by default. | |
918 | 708 | .UNINDENT |
919 | 709 | .TP |
920 | 710 | .B Returns |
1023 | 813 | .sp |
1024 | 814 | .nf |
1025 | 815 | .ft C |
1026 | >>> ch1 = Channel(\(aqA\(aq, is_pattern=False, loop=loop) | |
816 | >>> ch1 = Channel(\(aqA\(aq, is_pattern=False) | |
1027 | 817 | >>> await conn.execute_pubsub(\(aqsubscribe\(aq, ch1) |
1028 | 818 | [[b\(aqsubscribe\(aq, b\(aqA\(aq, 1]] |
1029 | 819 | .ft P |
1132 | 922 | import aioredis |
1133 | 923 | |
1134 | 924 | async def sample_pool(): |
1135 | pool = await aioredis\&.create_pool(\(aqredis://localhost\(aq) | |
1136 | val = await pool\&.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
1137 | .ft P | |
1138 | .fi | |
1139 | .UNINDENT | |
1140 | .UNINDENT | |
1141 | .INDENT 0.0 | |
1142 | .TP | |
1143 | .B aioredis.create_pool(address, *, db=0, password=None, ssl=None, encoding=None, minsize=1, maxsize=10, parser=None, loop=None, create_connection_timeout=None, pool_cls=None, connection_cls=None) | |
925 | pool = await aioredis.create_pool(\(aqredis://localhost\(aq) | |
926 | val = await pool.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
927 | .ft P | |
928 | .fi | |
929 | .UNINDENT | |
930 | .UNINDENT | |
931 | .INDENT 0.0 | |
932 | .TP | |
933 | .B aioredis.create_pool(address, *, db=0, password=None, ssl=None, encoding=None, minsize=1, maxsize=10, parser=None, create_connection_timeout=None, pool_cls=None, connection_cls=None) | |
1144 | 934 | A \fI\%coroutine\fP that instantiates a pool of |
1145 | 935 | \fI\%RedisConnection\fP\&. |
1146 | 936 | .sp |
1157 | 947 | |
1158 | 948 | .sp |
1159 | 949 | New in version v1.0: \fBparser\fP, \fBpool_cls\fP and \fBconnection_cls\fP arguments added. |
950 | ||
951 | .sp | |
952 | Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. | |
1160 | 953 | |
1161 | 954 | .INDENT 7.0 |
1162 | 955 | .TP |
1196 | 989 | .IP \(bu 2 |
1197 | 990 | \fBparser\fP (\fIcallable\fP\fI or \fP\fI\%None\fP) \-\- Protocol parser class. Can be used to set custom protocol |
1198 | 991 | reader; expected same interface as \fBhiredis.Reader\fP\&. |
1199 | .IP \(bu 2 | |
1200 | \fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance | |
1201 | (uses \fI\%asyncio.get_event_loop()\fP if not specified). | |
1202 | 992 | .IP \(bu 2 |
1203 | 993 | \fBcreate_connection_timeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) \-\- Max time to open a connection, |
1204 | 994 | otherwise raise an \fI\%asyncio.TimeoutError\fP\&. \fBNone\fP by default. |
1377 | 1167 | Wait until pool gets closed (when all connections are closed). |
1378 | 1168 | .sp |
1379 | 1169 | New in version v0.2.8. |
1380 | ||
1381 | .UNINDENT | |
1382 | .UNINDENT | |
1383 | ||
1384 | .sp | |
1385 | .ce | |
1386 | ---- | |
1387 | ||
1388 | .ce 0 | |
1389 | .sp | |
1390 | .SS Pub/Sub Channel object | |
1391 | .sp | |
1392 | \fIChannel\fP object is a wrapper around queue for storing received pub/sub messages. | |
1393 | .INDENT 0.0 | |
1394 | .TP | |
1395 | .B class aioredis.Channel(name, is_pattern, loop=None) | |
1396 | Bases: \fBabc.AbcChannel\fP | |
1397 | .sp | |
1398 | Object representing Pub/Sub messages queue. | |
1399 | It\(aqs basically a wrapper around \fI\%asyncio.Queue\fP\&. | |
1400 | .INDENT 7.0 | |
1401 | .TP | |
1402 | .B name | |
1403 | Holds encoded channel/pattern name. | |
1404 | .UNINDENT | |
1405 | .INDENT 7.0 | |
1406 | .TP | |
1407 | .B is_pattern | |
1408 | Set to True for pattern channels. | |
1409 | .UNINDENT | |
1410 | .INDENT 7.0 | |
1411 | .TP | |
1412 | .B is_active | |
1413 | Set to True if there are messages in queue and connection is still | |
1414 | subscribed to this channel. | |
1415 | .UNINDENT | |
1416 | .INDENT 7.0 | |
1417 | .TP | |
1418 | .B coroutine get(*, encoding=None, decoder=None) | |
1419 | Coroutine that waits for and returns a message. | |
1420 | .sp | |
1421 | Return value is message received or \fBNone\fP signifying that channel has | |
1422 | been unsubscribed and no more messages will be received. | |
1423 | .INDENT 7.0 | |
1424 | .TP | |
1425 | .B Parameters | |
1426 | .INDENT 7.0 | |
1427 | .IP \(bu 2 | |
1428 | \fBencoding\fP (\fI\%str\fP) \-\- If not None used to decode resulting bytes message. | |
1429 | .IP \(bu 2 | |
1430 | \fBdecoder\fP (\fIcallable\fP) \-\- If specified used to decode message, | |
1431 | ex. \fI\%json.loads()\fP | |
1432 | .UNINDENT | |
1433 | .TP | |
1434 | .B Raises | |
1435 | \fBaioredis.ChannelClosedError\fP \-\- If channel is unsubscribed and | |
1436 | has no more messages. | |
1437 | .UNINDENT | |
1438 | .UNINDENT | |
1439 | .INDENT 7.0 | |
1440 | .TP | |
1441 | .B get_json(*, encoding="utf\-8") | |
1442 | Shortcut to \fBget(encoding="utf\-8", decoder=json.loads)\fP | |
1443 | .UNINDENT | |
1444 | .INDENT 7.0 | |
1445 | .TP | |
1446 | .B coroutine wait_message() | |
1447 | Waits for message to become available in channel | |
1448 | or channel is closed (unsubscribed). | |
1449 | .sp | |
1450 | Main idea is to use it in loops: | |
1451 | .sp | |
1452 | .nf | |
1453 | .ft C | |
1454 | >>> ch = redis.channels[\(aqchannel:1\(aq] | |
1455 | >>> while await ch.wait_message(): | |
1456 | \&... msg = await ch.get() | |
1457 | .ft P | |
1458 | .fi | |
1459 | .INDENT 7.0 | |
1460 | .TP | |
1461 | .B Return type | |
1462 | \fI\%bool\fP | |
1463 | .UNINDENT | |
1464 | .UNINDENT | |
1465 | .INDENT 7.0 | |
1466 | .TP | |
1467 | .B coroutine async\-for iter(*, encoding=None, decoder=None) | |
1468 | Same as \fI\%get()\fP method but it is a native coroutine. | |
1469 | .sp | |
1470 | Usage example: | |
1471 | .INDENT 7.0 | |
1472 | .INDENT 3.5 | |
1473 | .sp | |
1474 | .nf | |
1475 | .ft C | |
1476 | >>> async for msg in ch.iter(): | |
1477 | \&... print(msg) | |
1478 | .ft P | |
1479 | .fi | |
1480 | .UNINDENT | |
1481 | .UNINDENT | |
1482 | .sp | |
1483 | New in version 0.2.5: Available for Python 3.5 only | |
1484 | 1170 | |
1485 | 1171 | .UNINDENT |
1486 | 1172 | .UNINDENT |
1724 | 1410 | |
1725 | 1411 | .ce 0 |
1726 | 1412 | .sp |
1413 | .SS Pub/Sub Channel object | |
1414 | .sp | |
1415 | \fIChannel\fP object is a wrapper around queue for storing received pub/sub messages. | |
1416 | .INDENT 0.0 | |
1417 | .TP | |
1418 | .B class aioredis.Channel(name, is_pattern) | |
1419 | Bases: \fBabc.AbcChannel\fP | |
1420 | .sp | |
1421 | Object representing Pub/Sub messages queue. | |
1422 | It\(aqs basically a wrapper around \fI\%asyncio.Queue\fP\&. | |
1423 | .INDENT 7.0 | |
1424 | .TP | |
1425 | .B name | |
1426 | Holds encoded channel/pattern name. | |
1427 | .UNINDENT | |
1428 | .INDENT 7.0 | |
1429 | .TP | |
1430 | .B is_pattern | |
1431 | Set to True for pattern channels. | |
1432 | .UNINDENT | |
1433 | .INDENT 7.0 | |
1434 | .TP | |
1435 | .B is_active | |
1436 | Set to True if there are messages in queue and connection is still | |
1437 | subscribed to this channel. | |
1438 | .UNINDENT | |
1439 | .INDENT 7.0 | |
1440 | .TP | |
1441 | .B coroutine get(*, encoding=None, decoder=None) | |
1442 | Coroutine that waits for and returns a message. | |
1443 | .sp | |
1444 | Return value is message received or \fBNone\fP signifying that channel has | |
1445 | been unsubscribed and no more messages will be received. | |
1446 | .INDENT 7.0 | |
1447 | .TP | |
1448 | .B Parameters | |
1449 | .INDENT 7.0 | |
1450 | .IP \(bu 2 | |
1451 | \fBencoding\fP (\fI\%str\fP) \-\- If not None used to decode resulting bytes message. | |
1452 | .IP \(bu 2 | |
1453 | \fBdecoder\fP (\fIcallable\fP) \-\- If specified used to decode message, | |
1454 | ex. \fI\%json.loads()\fP | |
1455 | .UNINDENT | |
1456 | .TP | |
1457 | .B Raises | |
1458 | \fBaioredis.ChannelClosedError\fP \-\- If channel is unsubscribed and | |
1459 | has no more messages. | |
1460 | .UNINDENT | |
1461 | .UNINDENT | |
1462 | .INDENT 7.0 | |
1463 | .TP | |
1464 | .B get_json(*, encoding="utf\-8") | |
1465 | Shortcut to \fBget(encoding="utf\-8", decoder=json.loads)\fP | |
1466 | .UNINDENT | |
1467 | .INDENT 7.0 | |
1468 | .TP | |
1469 | .B coroutine wait_message() | |
1470 | Waits for message to become available in channel | |
1471 | or channel is closed (unsubscribed). | |
1472 | .sp | |
1473 | Main idea is to use it in loops: | |
1474 | .sp | |
1475 | .nf | |
1476 | .ft C | |
1477 | >>> ch = redis.channels[\(aqchannel:1\(aq] | |
1478 | >>> while await ch.wait_message(): | |
1479 | \&... msg = await ch.get() | |
1480 | .ft P | |
1481 | .fi | |
1482 | .INDENT 7.0 | |
1483 | .TP | |
1484 | .B Return type | |
1485 | \fI\%bool\fP | |
1486 | .UNINDENT | |
1487 | .UNINDENT | |
1488 | .INDENT 7.0 | |
1489 | .TP | |
1490 | .B coroutine async\-for iter(*, encoding=None, decoder=None) | |
1491 | Same as \fI\%get()\fP method but it is a native coroutine. | |
1492 | .sp | |
1493 | Usage example: | |
1494 | .INDENT 7.0 | |
1495 | .INDENT 3.5 | |
1496 | .sp | |
1497 | .nf | |
1498 | .ft C | |
1499 | >>> async for msg in ch.iter(): | |
1500 | \&... print(msg) | |
1501 | .ft P | |
1502 | .fi | |
1503 | .UNINDENT | |
1504 | .UNINDENT | |
1505 | .sp | |
1506 | New in version 0.2.5: Available for Python 3.5 only | |
1507 | ||
1508 | .UNINDENT | |
1509 | .UNINDENT | |
1510 | ||
1511 | .sp | |
1512 | .ce | |
1513 | ---- | |
1514 | ||
1515 | .ce 0 | |
1516 | .sp | |
1727 | 1517 | .SS Commands Interface |
1728 | 1518 | .sp |
1729 | 1519 | The library provides high\-level API implementing simple interface |
1739 | 1529 | |
1740 | 1530 | # Create Redis client bound to single non\-reconnecting connection. |
1741 | 1531 | async def single_connection(): |
1742 | redis = await aioredis\&.create_redis( | |
1532 | redis = await aioredis.create_redis( | |
1743 | 1533 | \(aqredis://localhost\(aq) |
1744 | val = await redis\&.get(\(aqmy\-key\(aq) | |
1534 | val = await redis.get(\(aqmy\-key\(aq) | |
1745 | 1535 | |
1746 | 1536 | # Create Redis client bound to connections pool. |
1747 | 1537 | async def pool_of_connections(): |
1748 | redis = await aioredis\&.create_redis_pool( | |
1538 | redis = await aioredis.create_redis_pool( | |
1749 | 1539 | \(aqredis://localhost\(aq) |
1750 | val = await redis\&.get(\(aqmy\-key\(aq) | |
1540 | val = await redis.get(\(aqmy\-key\(aq) | |
1751 | 1541 | |
1752 | 1542 | # we can also use pub/sub as underlying pool |
1753 | 1543 | # has several free connections: |
1754 | ch1, ch2 = await redis\&.subscribe(\(aqchan:1\(aq, \(aqchan:2\(aq) | |
1544 | ch1, ch2 = await redis.subscribe(\(aqchan:1\(aq, \(aqchan:2\(aq) | |
1755 | 1545 | # publish using free connection |
1756 | await redis\&.publish(\(aqchan:1\(aq, \(aqHello\(aq) | |
1757 | await ch1\&.get() | |
1546 | await redis.publish(\(aqchan:1\(aq, \(aqHello\(aq) | |
1547 | await ch1.get() | |
1758 | 1548 | .ft P |
1759 | 1549 | .fi |
1760 | 1550 | .UNINDENT |
1764 | 1554 | see commands mixins reference\&. |
1765 | 1555 | .INDENT 0.0 |
1766 | 1556 | .TP |
1767 | .B coroutine aioredis.create_redis(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, parser=None, timeout=None, connection_cls=None, loop=None) | |
1557 | .B coroutine aioredis.create_redis(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, parser=None, timeout=None, connection_cls=None) | |
1768 | 1558 | This \fI\%coroutine\fP creates high\-level Redis |
1769 | 1559 | interface instance bound to single Redis connection |
1770 | 1560 | (without auto\-reconnect). |
1771 | 1561 | .sp |
1772 | 1562 | New in version v1.0: \fBparser\fP, \fBtimeout\fP and \fBconnection_cls\fP arguments added. |
1563 | ||
1564 | .sp | |
1565 | Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. | |
1773 | 1566 | |
1774 | 1567 | .sp |
1775 | 1568 | See also \fI\%RedisConnection\fP for parameters description. |
1806 | 1599 | \fBconnection_cls\fP (\fIaioredis.abc.AbcConnection\fP) \-\- Can be used to instantiate custom |
1807 | 1600 | connection class. This argument \fBmust be\fP a subclass of |
1808 | 1601 | \fBAbcConnection\fP\&. |
1809 | .IP \(bu 2 | |
1810 | \fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance | |
1811 | (uses \fI\%asyncio.get_event_loop()\fP if not specified). | |
1812 | 1602 | .UNINDENT |
1813 | 1603 | .TP |
1814 | 1604 | .B Returns |
1818 | 1608 | .UNINDENT |
1819 | 1609 | .INDENT 0.0 |
1820 | 1610 | .TP |
1821 | .B coroutine aioredis.create_redis_pool(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, minsize=1, maxsize=10, parser=None, timeout=None, pool_cls=None, connection_cls=None, loop=None) | |
1611 | .B coroutine aioredis.create_redis_pool(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, minsize=1, maxsize=10, parser=None, timeout=None, pool_cls=None, connection_cls=None) | |
1822 | 1612 | This \fI\%coroutine\fP creates high\-level Redis client instance
1823 | 1613 | bound to connections pool (this allows auto\-reconnect and simple pub/sub |
1824 | 1614 | use). |
1827 | 1617 | .sp |
1828 | 1618 | Changed in version v1.0: \fBparser\fP, \fBtimeout\fP, \fBpool_cls\fP and \fBconnection_cls\fP |
1829 | 1619 | arguments added. |
1620 | ||
1621 | .sp | |
1622 | Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. | |
1830 | 1623 | |
1831 | 1624 | .INDENT 7.0 |
1832 | 1625 | .TP |
1870 | 1663 | \fBconnection_cls\fP (\fIaioredis.abc.AbcConnection\fP) \-\- Can be used to make pool instantiate custom |
1871 | 1664 | connection classes. This argument \fBmust be\fP a subclass of |
1872 | 1665 | \fBAbcConnection\fP\&. |
1873 | .IP \(bu 2 | |
1874 | \fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance | |
1875 | (uses \fI\%asyncio.get_event_loop()\fP if not specified). | |
1876 | 1666 | .UNINDENT |
1877 | 1667 | .TP |
1878 | 1668 | .B Returns |
1901 | 1691 | .UNINDENT |
1902 | 1692 | .INDENT 7.0 |
1903 | 1693 | .TP |
1904 | .B address | |
1694 | .B property address | |
1905 | 1695 | Redis connection address (if applicable). |
1906 | 1696 | .UNINDENT |
1907 | 1697 | .INDENT 7.0 |
1918 | 1708 | .UNINDENT |
1919 | 1709 | .INDENT 7.0 |
1920 | 1710 | .TP |
1921 | .B closed | |
1711 | .B property closed | |
1922 | 1712 | True if connection is closed. |
1923 | 1713 | .UNINDENT |
1924 | 1714 | .INDENT 7.0 |
1925 | 1715 | .TP |
1926 | .B connection | |
1716 | .B property connection | |
1927 | 1717 | Either \fBaioredis.RedisConnection\fP, |
1928 | 1718 | or \fBaioredis.ConnectionsPool\fP instance. |
1929 | 1719 | .UNINDENT |
1930 | 1720 | .INDENT 7.0 |
1931 | 1721 | .TP |
1932 | .B db | |
1722 | .B property db | |
1933 | 1723 | Currently selected db index. |
1934 | 1724 | .UNINDENT |
1935 | 1725 | .INDENT 7.0 |
1939 | 1729 | .UNINDENT |
1940 | 1730 | .INDENT 7.0 |
1941 | 1731 | .TP |
1942 | .B encoding | |
1732 | .B property encoding | |
1943 | 1733 | Current set codec or None. |
1944 | 1734 | .UNINDENT |
1945 | 1735 | .INDENT 7.0 |
1946 | 1736 | .TP |
1947 | .B in_transaction | |
1737 | .B property in_transaction | |
1948 | 1738 | Set to True when MULTI command was issued. |
1949 | 1739 | .UNINDENT |
1950 | 1740 | .INDENT 7.0 |
1962 | 1752 | .INDENT 7.0 |
1963 | 1753 | .TP |
1964 | 1754 | .B select(db) |
1965 | Change the selected database for the current connection. | |
1966 | .sp | |
1967 | This method wraps call to \fBaioredis.RedisConnection.select()\fP | |
1968 | .UNINDENT | |
1969 | .INDENT 7.0 | |
1970 | .TP | |
1971 | .B coroutine wait_closed() | |
1972 | Coroutine waiting until underlying connections are closed. | |
1755 | Change the selected database. | |
1973 | 1756 | .UNINDENT |
1974 | 1757 | .UNINDENT |
1975 | 1758 | .SS Generic commands |
2532 | 2315 | .UNINDENT |
2533 | 2316 | .INDENT 7.0 |
2534 | 2317 | .TP |
2535 | .B mset(key, value, *pairs) | |
2536 | Set multiple keys to multiple values. | |
2318 | .B mset(*args) | |
2319 | Set multiple keys to multiple values or unpack dict to keys & values. | |
2537 | 2320 | .INDENT 7.0 |
2538 | 2321 | .TP |
2539 | 2322 | .B Raises |
2540 | \fI\%TypeError\fP \-\- if len of pairs is not an even number | |
2323 | .INDENT 7.0 | |
2324 | .IP \(bu 2 | |
2325 | \fI\%TypeError\fP \-\- if len of args is not an even number | |
2326 | .IP \(bu 2 | |
2327 | \fI\%TypeError\fP \-\- if len of args equals 1 and it is not a dict | |
2328 | .UNINDENT | |
2541 | 2329 | .UNINDENT |
2542 | 2330 | .UNINDENT |
2543 | 2331 | .INDENT 7.0 |
2998 | 2786 | .UNINDENT |
2999 | 2787 | .INDENT 7.0 |
3000 | 2788 | .TP |
3001 | .B spop(key, *, encoding=<object object>) | |
3002 | Remove and return a random member from a set. | |
2789 | .B spop(key, count=None, *, encoding=<object object>) | |
2790 | Remove and return one or multiple random members from a set. | |
3003 | 2791 | .UNINDENT |
3004 | 2792 | .INDENT 7.0 |
3005 | 2793 | .TP |
3036 | 2824 | For commands details see: \fI\%http://redis.io/commands/#sorted_set\fP |
3037 | 2825 | .INDENT 7.0 |
3038 | 2826 | .TP |
2827 | .B bzpopmax(key, *keys, timeout=0, encoding=<object object>) | |
2828 | Remove and get an element with the highest score in the sorted set, | |
2829 | or block until one is available. | |
2830 | .INDENT 7.0 | |
2831 | .TP | |
2832 | .B Raises | |
2833 | .INDENT 7.0 | |
2834 | .IP \(bu 2 | |
2835 | \fI\%TypeError\fP \-\- if timeout is not int | |
2836 | .IP \(bu 2 | |
2837 | \fI\%ValueError\fP \-\- if timeout is less than 0 | |
2838 | .UNINDENT | |
2839 | .UNINDENT | |
2840 | .UNINDENT | |
2841 | .INDENT 7.0 | |
2842 | .TP | |
2843 | .B bzpopmin(key, *keys, timeout=0, encoding=<object object>) | |
2844 | Remove and get an element with the lowest score in the sorted set, | |
2845 | or block until one is available. | |
2846 | .INDENT 7.0 | |
2847 | .TP | |
2848 | .B Raises | |
2849 | .INDENT 7.0 | |
2850 | .IP \(bu 2 | |
2851 | \fI\%TypeError\fP \-\- if timeout is not int | |
2852 | .IP \(bu 2 | |
2853 | \fI\%ValueError\fP \-\- if timeout is less than 0 | |
2854 | .UNINDENT | |
2855 | .UNINDENT | |
2856 | .UNINDENT | |
2857 | .INDENT 7.0 | |
2858 | .TP | |
3039 | 2859 | .B izscan(key, *, match=None, count=None) |
3040 | 2860 | Incrementally iterate sorted set items using async for. |
3041 | 2861 | .sp |
3050 | 2870 | .UNINDENT |
3051 | 2871 | .INDENT 7.0 |
3052 | 2872 | .TP |
3053 | .B zadd(key, score, member, *pairs, exist=None) | |
2873 | .B zadd(key, score, member, *pairs, exist=None, changed=False, incr=False) | |
3054 | 2874 | Add one or more members to a sorted set or update its score. |
3055 | 2875 | .INDENT 7.0 |
3056 | 2876 | .TP |
3123 | 2943 | .UNINDENT |
3124 | 2944 | .INDENT 7.0 |
3125 | 2945 | .TP |
2946 | .B zpopmax(key, count=None, *, encoding=<object object>) | |
2947 | Removes and returns up to count members with the highest scores | |
2948 | in the sorted set stored at key. | |
2949 | .INDENT 7.0 | |
2950 | .TP | |
2951 | .B Raises | |
2952 | \fI\%TypeError\fP \-\- if count is not int | |
2953 | .UNINDENT | |
2954 | .UNINDENT | |
2955 | .INDENT 7.0 | |
2956 | .TP | |
2957 | .B zpopmin(key, count=None, *, encoding=<object object>) | |
2958 | Removes and returns up to count members with the lowest scores | |
2959 | in the sorted set stored at key. | |
2960 | .INDENT 7.0 | |
2961 | .TP | |
2962 | .B Raises | |
2963 | \fI\%TypeError\fP \-\- if count is not int | |
2964 | .UNINDENT | |
2965 | .UNINDENT | |
2966 | .INDENT 7.0 | |
2967 | .TP | |
3126 | 2968 | .B zrange(key, start=0, stop=\-1, withscores=False, encoding=<object object>) |
3127 | 2969 | Return a range of members in a sorted set, by index. |
3128 | 2970 | .INDENT 7.0 |
3501 | 3343 | .UNINDENT |
3502 | 3344 | .INDENT 7.0 |
3503 | 3345 | .TP |
3504 | .B slaveof(host=<object object>, port=None) | |
3346 | .B slaveof(host, port=None) | |
3505 | 3347 | Make the server a slave of another instance, |
3506 | 3348 | or promote it as master. |
3507 | 3349 | .sp |
3648 | 3490 | .UNINDENT |
3649 | 3491 | .INDENT 0.0 |
3650 | 3492 | .TP |
3651 | .B class aioredis.commands.Pipeline(connection, commands_factory=lambda conn: conn, *, loop=None) | |
3493 | .B class aioredis.commands.Pipeline(connection, commands_factory=lambda conn: conn) | |
3652 | 3494 | Commands pipeline. |
3653 | 3495 | .sp |
3654 | 3496 | Buffers commands for execution in bulk. |
3655 | 3497 | .sp |
3656 | 3498 | This class implements \fI__getattr__\fP method allowing to call methods |
3657 | 3499 | on instance created with \fBcommands_factory\fP\&. |
3500 | .sp | |
3501 | Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. | |
3502 | ||
3658 | 3503 | .INDENT 7.0 |
3659 | 3504 | .TP |
3660 | 3505 | .B Parameters |
3663 | 3508 | \fBconnection\fP (\fIaioredis.RedisConnection\fP) \-\- Redis connection |
3664 | 3509 | .IP \(bu 2 |
3665 | 3510 | \fBcommands_factory\fP (\fIcallable\fP) \-\- Commands factory to get methods from. |
3666 | .IP \(bu 2 | |
3667 | \fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance | |
3668 | (uses \fI\%asyncio.get_event_loop()\fP if not specified). | |
3669 | 3511 | .UNINDENT |
3670 | 3512 | .UNINDENT |
3671 | 3513 | .INDENT 7.0 |
3692 | 3534 | .UNINDENT |
3693 | 3535 | .INDENT 0.0 |
3694 | 3536 | .TP |
3695 | .B class aioredis.commands.MultiExec(connection, commands_factory=lambda conn: conn, *, loop=None) | |
3537 | .B class aioredis.commands.MultiExec(connection, commands_factory=lambda conn: conn) | |
3696 | 3538 | Bases: \fI\%Pipeline\fP\&. |
3697 | 3539 | .sp |
3698 | 3540 | Multi/Exec pipeline wrapper. |
3699 | 3541 | .sp |
3700 | 3542 | See \fI\%Pipeline\fP for parameters description. |
3543 | .sp | |
3544 | Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. | |
3545 | ||
3701 | 3546 | .INDENT 7.0 |
3702 | 3547 | .TP |
3703 | 3548 | .B coroutine execute(*, return_exceptions=False) |
3956 | 3801 | .UNINDENT |
3957 | 3802 | .INDENT 7.0 |
3958 | 3803 | .TP |
3959 | .B slaveof(host=<object object>, port=None) | |
3804 | .B slaveof(host, port=None) | |
3960 | 3805 | Make the server a slave of another instance, |
3961 | 3806 | or promote it as master. |
3962 | 3807 | .sp |
4003 | 3848 | For commands details see: \fI\%http://redis.io/commands/#pubsub\fP |
4004 | 3849 | .INDENT 7.0 |
4005 | 3850 | .TP |
4006 | .B channels | |
3851 | .B property channels | |
4007 | 3852 | Returns read\-only channels dict. |
4008 | 3853 | .sp |
4009 | 3854 | See \fBpubsub_channels\fP |
4010 | 3855 | .UNINDENT |
4011 | 3856 | .INDENT 7.0 |
4012 | 3857 | .TP |
4013 | .B in_pubsub | |
3858 | .B property in_pubsub | |
4014 | 3859 | Indicates that connection is in PUB/SUB mode. |
4015 | 3860 | .sp |
4016 | 3861 | Provides the number of subscribed channels. |
4017 | 3862 | .UNINDENT |
4018 | 3863 | .INDENT 7.0 |
4019 | 3864 | .TP |
4020 | .B patterns | |
3865 | .B property patterns | |
4021 | 3866 | Returns read\-only patterns dict. |
4022 | 3867 | .sp |
4023 | 3868 | See \fBpubsub_patterns\fP |
4090 | 3935 | \fBWARNING:\fP |
4091 | 3936 | .INDENT 0.0 |
4092 | 3937 | .INDENT 3.5 |
4093 | Current release (1.2.0) of the library \fBdoes not support\fP | |
3938 | Current release (1.3.0) of the library \fBdoes not support\fP | |
4094 | 3939 | \fI\%Redis Cluster\fP in a full manner. |
4095 | 3940 | It provides only several API methods which may be changed in future. |
4096 | 3941 | .UNINDENT |
4101 | 3946 | .B class aioredis.commands.StreamCommandsMixin |
4102 | 3947 | Stream commands mixin |
4103 | 3948 | .sp |
4104 | Streams are under development in Redis and | |
4105 | not currently released. | |
3949 | Streams are available in Redis since v5.0 | |
4106 | 3950 | .INDENT 7.0 |
4107 | 3951 | .TP |
4108 | 3952 | .B xack(stream, group_name, id, *ids) |
4120 | 3964 | .UNINDENT |
4121 | 3965 | .INDENT 7.0 |
4122 | 3966 | .TP |
4123 | .B xgroup_create(stream, group_name, latest_id=\(aq$\(aq) | |
3967 | .B xdel(stream, id) | |
3968 | Removes the specified entries(IDs) from a stream | |
3969 | .UNINDENT | |
3970 | .INDENT 7.0 | |
3971 | .TP | |
3972 | .B xgroup_create(stream, group_name, latest_id=\(aq$\(aq, mkstream=False) | |
4124 | 3973 | Create a consumer group |
4125 | 3974 | .UNINDENT |
4126 | 3975 | .INDENT 7.0 |
4164 | 4013 | .TP |
4165 | 4014 | .B xinfo_stream(stream) |
4166 | 4015 | Retrieve information about the given stream. |
4016 | .UNINDENT | |
4017 | .INDENT 7.0 | |
4018 | .TP | |
4019 | .B xlen(stream) | |
4020 | Returns the number of entries inside a stream | |
4167 | 4021 | .UNINDENT |
4168 | 4022 | .INDENT 7.0 |
4169 | 4023 | .TP |
4198 | 4052 | .UNINDENT |
4199 | 4053 | .INDENT 7.0 |
4200 | 4054 | .TP |
4201 | .B xread_group(group_name, consumer_name, streams, timeout=0, count=None, latest_ids=None) | |
4055 | .B xread_group(group_name, consumer_name, streams, timeout=0, count=None, latest_ids=None, no_ack=False) | |
4202 | 4056 | Perform a blocking read on the given stream as part of a consumer group |
4203 | 4057 | .INDENT 7.0 |
4204 | 4058 | .TP |
4212 | 4066 | .B xrevrange(stream, start=\(aq+\(aq, stop=\(aq\-\(aq, count=None) |
4213 | 4067 | Retrieve messages from a stream in reverse order. |
4214 | 4068 | .UNINDENT |
4069 | .INDENT 7.0 | |
4070 | .TP | |
4071 | .B xtrim(stream, max_len, exact_len=False) | |
4072 | trims the stream to a given number of items, evicting older items | |
4073 | .UNINDENT | |
4215 | 4074 | .UNINDENT |
4216 | 4075 | .SH AIOREDIS.ABC --- INTERFACES REFERENCE |
4217 | 4076 | .sp |
4225 | 4084 | Abstract connection interface. |
4226 | 4085 | .INDENT 7.0 |
4227 | 4086 | .TP |
4228 | .B address | |
4087 | .B abstract property address | |
4229 | 4088 | Connection address. |
4230 | 4089 | .UNINDENT |
4231 | 4090 | .INDENT 7.0 |
4232 | 4091 | .TP |
4233 | .B close() | |
4092 | .B abstract close() | |
4234 | 4093 | Perform connection(s) close and resources cleanup. |
4235 | 4094 | .UNINDENT |
4236 | 4095 | .INDENT 7.0 |
4237 | 4096 | .TP |
4238 | .B closed | |
4097 | .B abstract property closed | |
4239 | 4098 | Flag indicating if connection is closing or already closed. |
4240 | 4099 | .UNINDENT |
4241 | 4100 | .INDENT 7.0 |
4242 | 4101 | .TP |
4243 | .B db | |
4102 | .B abstract property db | |
4244 | 4103 | Current selected DB index. |
4245 | 4104 | .UNINDENT |
4246 | 4105 | .INDENT 7.0 |
4247 | 4106 | .TP |
4248 | .B encoding | |
4107 | .B abstract property encoding | |
4249 | 4108 | Current set connection codec. |
4250 | 4109 | .UNINDENT |
4251 | 4110 | .INDENT 7.0 |
4252 | 4111 | .TP |
4253 | .B execute(command, *args, **kwargs) | |
4112 | .B abstract execute(command, *args, **kwargs) | |
4254 | 4113 | Execute redis command. |
4255 | 4114 | .UNINDENT |
4256 | 4115 | .INDENT 7.0 |
4257 | 4116 | .TP |
4258 | .B execute_pubsub(command, *args, **kwargs) | |
4117 | .B abstract execute_pubsub(command, *args, **kwargs) | |
4259 | 4118 | Execute Redis (p)subscribe/(p)unsubscribe commands. |
4260 | 4119 | .UNINDENT |
4261 | 4120 | .INDENT 7.0 |
4262 | 4121 | .TP |
4263 | .B in_pubsub | |
4122 | .B abstract property in_pubsub | |
4264 | 4123 | Returns number of subscribed channels. |
4265 | 4124 | .sp |
4266 | 4125 | Can be tested as bool indicating Pub/Sub mode state. |
4267 | 4126 | .UNINDENT |
4268 | 4127 | .INDENT 7.0 |
4269 | 4128 | .TP |
4270 | .B pubsub_channels | |
4129 | .B abstract property pubsub_channels | |
4271 | 4130 | Read\-only channels dict. |
4272 | 4131 | .UNINDENT |
4273 | 4132 | .INDENT 7.0 |
4274 | 4133 | .TP |
4275 | .B pubsub_patterns | |
4134 | .B abstract property pubsub_patterns | |
4276 | 4135 | Read\-only patterns dict. |
4277 | .UNINDENT | |
4278 | .INDENT 7.0 | |
4279 | .TP | |
4280 | .B coroutine wait_closed() | |
4281 | Coroutine waiting until all resources are closed/released/cleaned up. | |
4282 | 4136 | .UNINDENT |
4283 | 4137 | .UNINDENT |
4284 | 4138 | .INDENT 0.0 |
4292 | 4146 | for executing Redis commands. |
4293 | 4147 | .INDENT 7.0 |
4294 | 4148 | .TP |
4295 | .B coroutine acquire() | |
4296 | Acquires connection from pool. | |
4297 | .UNINDENT | |
4298 | .INDENT 7.0 | |
4299 | .TP | |
4300 | .B address | |
4149 | .B abstract property address | |
4301 | 4150 | Connection address or None. |
4302 | 4151 | .UNINDENT |
4303 | 4152 | .INDENT 7.0 |
4304 | 4153 | .TP |
4305 | .B get_connection() | |
4154 | .B abstract get_connection(command, args=()) | |
4306 | 4155 | Gets free connection from pool in a sync way. |
4307 | 4156 | .sp |
4308 | 4157 | If no connection available — returns None. |
4309 | 4158 | .UNINDENT |
4310 | 4159 | .INDENT 7.0 |
4311 | 4160 | .TP |
4312 | .B release(conn) | |
4161 | .B abstract release(conn) | |
4313 | 4162 | Releases connection to pool. |
4314 | 4163 | .INDENT 7.0 |
4315 | 4164 | .TP |
4326 | 4175 | Abstract Pub/Sub Channel interface. |
4327 | 4176 | .INDENT 7.0 |
4328 | 4177 | .TP |
4329 | .B close(exc=None) | |
4178 | .B abstract close(exc=None) | |
4330 | 4179 | Marks Channel as closed, no more messages will be sent to it. |
4331 | 4180 | .sp |
4332 | 4181 | Called by RedisConnection when channel is unsubscribed |
4334 | 4183 | .UNINDENT |
4335 | 4184 | .INDENT 7.0 |
4336 | 4185 | .TP |
4337 | .B coroutine get() | |
4338 | Wait and return new message. | |
4339 | .sp | |
4340 | Will raise \fBChannelClosedError\fP if channel is not active. | |
4341 | .UNINDENT | |
4342 | .INDENT 7.0 | |
4343 | .TP | |
4344 | .B is_active | |
4186 | .B abstract property is_active | |
4345 | 4187 | Flag indicating that channel has unreceived messages |
4346 | 4188 | and not marked as closed. |
4347 | 4189 | .UNINDENT |
4348 | 4190 | .INDENT 7.0 |
4349 | 4191 | .TP |
4350 | .B is_pattern | |
4192 | .B abstract property is_pattern | |
4351 | 4193 | Boolean flag indicating if channel is pattern channel. |
4352 | 4194 | .UNINDENT |
4353 | 4195 | .INDENT 7.0 |
4354 | 4196 | .TP |
4355 | .B name | |
4197 | .B abstract property name | |
4356 | 4198 | Encoded channel name or pattern. |
4357 | 4199 | .UNINDENT |
4358 | 4200 | .INDENT 7.0 |
4359 | 4201 | .TP |
4360 | .B put_nowait(data) | |
4202 | .B abstract put_nowait(data) | |
4361 | 4203 | Send data to channel. |
4362 | 4204 | .sp |
4363 | 4205 | Called by RedisConnection when new message received. |
4385 | 4227 | .ft C |
4386 | 4228 | >>> from aioredis.pubsub import Receiver |
4387 | 4229 | >>> from aioredis.abc import AbcChannel |
4388 | >>> mpsc = Receiver(loop=loop) | |
4230 | >>> mpsc = Receiver() | |
4389 | 4231 | >>> async def reader(mpsc): |
4390 | 4232 | \&... async for channel, msg in mpsc.iter(): |
4391 | 4233 | \&... assert isinstance(channel, AbcChannel) |
4435 | 4277 | .UNINDENT |
4436 | 4278 | .INDENT 7.0 |
4437 | 4279 | .TP |
4438 | .B channels | |
4280 | .B property channels | |
4439 | 4281 | Read\-only channels dict. |
4440 | 4282 | .UNINDENT |
4441 | 4283 | .INDENT 7.0 |
4445 | 4287 | .UNINDENT |
4446 | 4288 | .INDENT 7.0 |
4447 | 4289 | .TP |
4448 | .B coroutine get(*, encoding=None, decoder=None) | |
4449 | Wait for and return pub/sub message from one of channels. | |
4450 | .sp | |
4451 | Return value is either: | |
4452 | .INDENT 7.0 | |
4453 | .IP \(bu 2 | |
4454 | tuple of two elements: channel & message; | |
4455 | .IP \(bu 2 | |
4456 | tuple of three elements: pattern channel, (target channel & message); | |
4457 | .IP \(bu 2 | |
4458 | or None in case Receiver is not active or has just been stopped. | |
4459 | .UNINDENT | |
4460 | .INDENT 7.0 | |
4461 | .TP | |
4462 | .B Raises | |
4463 | \fBaioredis.ChannelClosedError\fP \-\- If listener is stopped | |
4464 | and all messages have been received. | |
4465 | .UNINDENT | |
4466 | .UNINDENT | |
4467 | .INDENT 7.0 | |
4468 | .TP | |
4469 | .B is_active | |
4290 | .B property is_active | |
4470 | 4291 | Returns True if listener has any active subscription. |
4471 | 4292 | .UNINDENT |
4472 | 4293 | .INDENT 7.0 |
4493 | 4314 | .UNINDENT |
4494 | 4315 | .INDENT 7.0 |
4495 | 4316 | .TP |
4496 | .B patterns | |
4317 | .B property patterns | |
4497 | 4318 | Read\-only patterns dict. |
4498 | 4319 | .UNINDENT |
4499 | 4320 | .INDENT 7.0 |
4504 | 4325 | All new messages after this call will be ignored, |
4505 | 4326 | so you must call unsubscribe before stopping this listener. |
4506 | 4327 | .UNINDENT |
4507 | .INDENT 7.0 | |
4508 | .TP | |
4509 | .B coroutine wait_message() | |
4510 | Blocks until new message appear. | |
4511 | .UNINDENT | |
4512 | 4328 | .UNINDENT |
4513 | 4329 | .INDENT 0.0 |
4514 | 4330 | .TP |
4534 | 4350 | .ft C |
4535 | 4351 | import aioredis |
4536 | 4352 | |
4537 | sentinel = await aioredis\&.create_sentinel( | |
4353 | sentinel = await aioredis.create_sentinel( | |
4538 | 4354 | [(\(aqsentinel.host1\(aq, 26379), (\(aqsentinel.host2\(aq, 26379)]) |
4539 | 4355 | |
4540 | redis = sentinel\&.master_for(\(aqmymaster\(aq) | |
4541 | assert await redis\&.set(\(aqkey\(aq, \(aqvalue\(aq) | |
4542 | assert await redis\&.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) == \(aqvalue\(aq | |
4356 | redis = sentinel.master_for(\(aqmymaster\(aq) | |
4357 | assert await redis.set(\(aqkey\(aq, \(aqvalue\(aq) | |
4358 | assert await redis.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) == \(aqvalue\(aq | |
4543 | 4359 | |
4544 | 4360 | # redis client will reconnect/reconfigure automatically |
4545 | 4361 | # by sentinel client instance |
4550 | 4366 | .SS \fBRedisSentinel\fP |
4551 | 4367 | .INDENT 0.0 |
4552 | 4368 | .TP |
4553 | .B coroutine aioredis.sentinel.create_sentinel(sentinels, *, db=None, password=None, encoding=None, minsize=1, maxsize=10, ssl=None, parser=None, loop=None) | |
4369 | .B coroutine aioredis.sentinel.create_sentinel(sentinels, *, db=None, password=None, encoding=None, minsize=1, maxsize=10, ssl=None, parser=None) | |
4554 | 4370 | Creates Redis Sentinel client. |
4371 | .sp | |
4372 | Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. | |
4373 | ||
4555 | 4374 | .INDENT 7.0 |
4556 | 4375 | .TP |
4557 | 4376 | .B Parameters |
4578 | 4397 | .IP \(bu 2 |
4579 | 4398 | \fBparser\fP (\fIcallable\fP\fI or \fP\fI\%None\fP) \-\- Protocol parser class. Can be used to set custom protocol |
4580 | 4399 | reader; expected same interface as \fBhiredis.Reader\fP\&. |
4581 | .IP \(bu 2 | |
4582 | \fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance | |
4583 | (uses \fI\%asyncio.get_event_loop()\fP if not specified). | |
4584 | 4400 | .UNINDENT |
4585 | 4401 | .TP |
4586 | 4402 | .B Return type |
4896 | 4712 | (see for more). |
4897 | 4713 | .sp |
4898 | 4714 | Every example is a correct python program that can be executed. |
4899 | .SS Low\-level connection usage example | |
4900 | .sp | |
4901 | \fBget source code\fP | |
4902 | .INDENT 0.0 | |
4903 | .INDENT 3.5 | |
4904 | .sp | |
4905 | .nf | |
4906 | .ft C | |
4907 | import asyncio | |
4908 | import aioredis | |
4909 | ||
4910 | ||
4911 | async def main(): | |
4912 | conn = await aioredis.create_connection( | |
4913 | \(aqredis://localhost\(aq, encoding=\(aqutf\-8\(aq) | |
4914 | ||
4915 | ok = await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqsome value\(aq) | |
4916 | assert ok == \(aqOK\(aq, ok | |
4917 | ||
4918 | str_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
4919 | raw_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq, encoding=None) | |
4920 | assert str_value == \(aqsome value\(aq | |
4921 | assert raw_value == b\(aqsome value\(aq | |
4922 | ||
4923 | print(\(aqstr value:\(aq, str_value) | |
4924 | print(\(aqraw value:\(aq, raw_value) | |
4925 | ||
4926 | # optionally close connection | |
4927 | conn.close() | |
4928 | await conn.wait_closed() | |
4929 | ||
4930 | ||
4931 | if __name__ == \(aq__main__\(aq: | |
4932 | asyncio.get_event_loop().run_until_complete(main()) | |
4933 | ||
4934 | .ft P | |
4935 | .fi | |
4936 | .UNINDENT | |
4937 | .UNINDENT | |
4938 | .SS Connections pool example | |
4939 | .sp | |
4940 | \fBget source code\fP | |
4941 | .INDENT 0.0 | |
4942 | .INDENT 3.5 | |
4943 | .sp | |
4944 | .nf | |
4945 | .ft C | |
4946 | import asyncio | |
4947 | import aioredis | |
4948 | ||
4949 | ||
4950 | async def main(): | |
4951 | pool = await aioredis.create_pool( | |
4952 | \(aqredis://localhost\(aq, | |
4953 | minsize=5, maxsize=10) | |
4954 | with await pool as conn: # low\-level redis connection | |
4955 | await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqvalue\(aq) | |
4956 | val = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
4957 | print(\(aqraw value:\(aq, val) | |
4958 | pool.close() | |
4959 | await pool.wait_closed() # closing all open connections | |
4960 | ||
4961 | ||
4962 | if __name__ == \(aq__main__\(aq: | |
4963 | asyncio.get_event_loop().run_until_complete(main()) | |
4964 | ||
4965 | .ft P | |
4966 | .fi | |
4967 | .UNINDENT | |
4968 | .UNINDENT | |
4969 | 4715 | .SS Commands example |
4970 | 4716 | .sp |
4971 | 4717 | \fBget source code\fP |
5005 | 4751 | |
5006 | 4752 | |
5007 | 4753 | if __name__ == \(aq__main__\(aq: |
5008 | asyncio.get_event_loop().run_until_complete(main()) | |
5009 | asyncio.get_event_loop().run_until_complete(redis_pool()) | |
4754 | asyncio.run(main()) | |
4755 | asyncio.run(redis_pool()) | |
5010 | 4756 | |
5011 | 4757 | .ft P |
5012 | 4758 | .fi |
5041 | 4787 | |
5042 | 4788 | |
5043 | 4789 | if __name__ == \(aq__main__\(aq: |
5044 | asyncio.get_event_loop().run_until_complete(main()) | |
4790 | asyncio.run(main()) | |
5045 | 4791 | |
5046 | 4792 | .ft P |
5047 | 4793 | .fi |
5085 | 4831 | |
5086 | 4832 | |
5087 | 4833 | if __name__ == \(aq__main__\(aq: |
5088 | asyncio.get_event_loop().run_until_complete(main()) | |
4834 | asyncio.run(main()) | |
5089 | 4835 | |
5090 | 4836 | .ft P |
5091 | 4837 | .fi |
5121 | 4867 | if __name__ == \(aq__main__\(aq: |
5122 | 4868 | import os |
5123 | 4869 | if \(aqredis_version:2.6\(aq not in os.environ.get(\(aqREDIS_VERSION\(aq, \(aq\(aq): |
5124 | asyncio.get_event_loop().run_until_complete(main()) | |
4870 | asyncio.run(main()) | |
5125 | 4871 | |
5126 | 4872 | .ft P |
5127 | 4873 | .fi |
5153 | 4899 | |
5154 | 4900 | |
5155 | 4901 | if __name__ == \(aq__main__\(aq: |
5156 | asyncio.get_event_loop().run_until_complete(main()) | |
4902 | asyncio.run(main()) | |
4903 | ||
4904 | .ft P | |
4905 | .fi | |
4906 | .UNINDENT | |
4907 | .UNINDENT | |
4908 | .SS Low\-level connection usage example | |
4909 | .sp | |
4910 | \fBget source code\fP | |
4911 | .INDENT 0.0 | |
4912 | .INDENT 3.5 | |
4913 | .sp | |
4914 | .nf | |
4915 | .ft C | |
4916 | import asyncio | |
4917 | import aioredis | |
4918 | ||
4919 | ||
4920 | async def main(): | |
4921 | conn = await aioredis.create_connection( | |
4922 | \(aqredis://localhost\(aq, encoding=\(aqutf\-8\(aq) | |
4923 | ||
4924 | ok = await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqsome value\(aq) | |
4925 | assert ok == \(aqOK\(aq, ok | |
4926 | ||
4927 | str_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
4928 | raw_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq, encoding=None) | |
4929 | assert str_value == \(aqsome value\(aq | |
4930 | assert raw_value == b\(aqsome value\(aq | |
4931 | ||
4932 | print(\(aqstr value:\(aq, str_value) | |
4933 | print(\(aqraw value:\(aq, raw_value) | |
4934 | ||
4935 | # optionally close connection | |
4936 | conn.close() | |
4937 | await conn.wait_closed() | |
4938 | ||
4939 | ||
4940 | if __name__ == \(aq__main__\(aq: | |
4941 | asyncio.run(main()) | |
4942 | ||
4943 | .ft P | |
4944 | .fi | |
4945 | .UNINDENT | |
4946 | .UNINDENT | |
4947 | .SS Connections pool example | |
4948 | .sp | |
4949 | \fBget source code\fP | |
4950 | .INDENT 0.0 | |
4951 | .INDENT 3.5 | |
4952 | .sp | |
4953 | .nf | |
4954 | .ft C | |
4955 | import asyncio | |
4956 | import aioredis | |
4957 | ||
4958 | ||
4959 | async def main(): | |
4960 | pool = await aioredis.create_pool( | |
4961 | \(aqredis://localhost\(aq, | |
4962 | minsize=5, maxsize=10) | |
4963 | with await pool as conn: # low\-level redis connection | |
4964 | await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqvalue\(aq) | |
4965 | val = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) | |
4966 | print(\(aqraw value:\(aq, val) | |
4967 | pool.close() | |
4968 | await pool.wait_closed() # closing all open connections | |
4969 | ||
4970 | ||
4971 | if __name__ == \(aq__main__\(aq: | |
4972 | asyncio.run(main()) | |
5157 | 4973 | |
5158 | 4974 | .ft P |
5159 | 4975 | .fi |
5198 | 5014 | \fBflake8\fP for code linting; |
5199 | 5015 | .IP \(bu 2 |
5200 | 5016 | and few other packages. |
5017 | .UNINDENT | |
5018 | .sp | |
5019 | Make sure you have provided a \fBtowncrier\fP note. | |
5020 | Just add short description running following commands: | |
5021 | .INDENT 0.0 | |
5022 | .INDENT 3.5 | |
5023 | .sp | |
5024 | .nf | |
5025 | .ft C | |
5026 | $ echo "Short description" > CHANGES/filename.type | |
5027 | .ft P | |
5028 | .fi | |
5029 | .UNINDENT | |
5030 | .UNINDENT | |
5031 | .sp | |
5032 | This will create new file in \fBCHANGES\fP directory. | |
5033 | Filename should consist of the ticket ID or other unique identifier. | |
5034 | Five default types are: | |
5035 | .INDENT 0.0 | |
5036 | .IP \(bu 2 | |
5037 | \&.feature \- signifying new feature | |
5038 | .IP \(bu 2 | |
5039 | \&.bugfix \- signifying a bug fix | |
5040 | .IP \(bu 2 | |
5041 | \&.doc \- documentation improvement | |
5042 | .IP \(bu 2 | |
5043 | \&.removal \- deprecation or removal of public API | |
5044 | .IP \(bu 2 | |
5045 | \&.misc \- a ticket has been closed, but not in interest of users | |
5046 | .UNINDENT | |
5047 | .sp | |
5048 | You can check if everything is correct by typing: | |
5049 | .INDENT 0.0 | |
5050 | .INDENT 3.5 | |
5051 | .sp | |
5052 | .nf | |
5053 | .ft C | |
5054 | $ towncrier \-\-draft | |
5055 | .ft P | |
5056 | .fi | |
5057 | .UNINDENT | |
5058 | .UNINDENT | |
5059 | .sp | |
5060 | To produce the news file: | |
5061 | .INDENT 0.0 | |
5062 | .INDENT 3.5 | |
5063 | .sp | |
5064 | .nf | |
5065 | .ft C | |
5066 | $ towncrier | |
5067 | .ft P | |
5068 | .fi | |
5069 | .UNINDENT | |
5201 | 5070 | .UNINDENT |
5202 | 5071 | .SS Code style |
5203 | 5072 | .sp |
5225 | 5094 | # will run tests in a verbose mode |
5226 | 5095 | $ make test |
5227 | 5096 | # or |
5228 | $ py.test | |
5097 | $ pytest | |
5098 | ||
5099 | # or with particular Redis server | |
5100 | $ pytest \-\-redis\-server=/usr/local/bin/redis\-server tests/errors_test.py | |
5229 | 5101 | |
5230 | 5102 | # will run tests with coverage report |
5231 | 5103 | $ make cov |
5232 | 5104 | # or |
5233 | $ py.test \-\-cov | |
5105 | $ pytest \-\-cov | |
5234 | 5106 | .ft P |
5235 | 5107 | .fi |
5236 | 5108 | .UNINDENT |
5266 | 5138 | .sp |
5267 | 5139 | .nf |
5268 | 5140 | .ft C |
5269 | $ py.test \-\-redis\-server=/path/to/custom/redis\-server | |
5141 | $ pytest \-\-redis\-server=/path/to/custom/redis\-server | |
5270 | 5142 | .ft P |
5271 | 5143 | .fi |
5272 | 5144 | .UNINDENT |
5280 | 5152 | .nf |
5281 | 5153 | .ft C |
5282 | 5154 | $ pip install uvloop |
5283 | $ py.test \-\-uvloop | |
5155 | $ pytest \-\-uvloop | |
5284 | 5156 | .ft P |
5285 | 5157 | .fi |
5286 | 5158 | .UNINDENT |
5298 | 5170 | \fBaioredis\fP uses pytest tool. |
5299 | 5171 | .sp |
5300 | 5172 | Tests are located under \fB/tests\fP directory. |
5301 | .sp | |
5302 | Pure Python 3.5 tests (ie the ones using \fBasync\fP/\fBawait\fP syntax) must be | |
5303 | prefixed with \fBpy35_\fP, for instance see: | |
5304 | .INDENT 0.0 | |
5305 | .INDENT 3.5 | |
5306 | .sp | |
5307 | .nf | |
5308 | .ft C | |
5309 | tests/py35_generic_commands_tests.py | |
5310 | tests/py35_pool_test.py | |
5311 | .ft P | |
5312 | .fi | |
5313 | .UNINDENT | |
5314 | .UNINDENT | |
5315 | 5173 | .SS Fixtures |
5316 | 5174 | .sp |
5317 | 5175 | There is a number of fixtures that can be used to write tests: |
5439 | 5297 | \fI\%tuple\fP |
5440 | 5298 | .UNINDENT |
5441 | 5299 | .UNINDENT |
5442 | .SS Helpers | |
5443 | .sp | |
5444 | \fBaioredis\fP also updates pytest\(aqs namespace with several helpers. | |
5445 | .INDENT 0.0 | |
5446 | .TP | |
5447 | .B pytest.redis_version(*version, reason) | |
5300 | .SS \fBredis_version\fP tests helper | |
5301 | .sp | |
5302 | In \fBtests\fP directory there is a \fB_testutils\fP module with a simple | |
5303 | helper \-\-\- \fBredis_version()\fP \-\-\- a function that add a pytest mark to a test | |
5304 | allowing to run it with requested Redis server versions. | |
5305 | .INDENT 0.0 | |
5306 | .TP | |
5307 | .B _testutils.redis_version(*version, reason) | |
5448 | 5308 | Marks test with minimum redis version to run. |
5449 | 5309 | .sp |
5450 | 5310 | Example: |
5453 | 5313 | .sp |
5454 | 5314 | .nf |
5455 | 5315 | .ft C |
5456 | @pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") | |
5316 | from _testutil import redis_version | |
5317 | ||
5318 | @redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") | |
5457 | 5319 | def test_hstrlen(redis): |
5458 | 5320 | pass |
5459 | 5321 | .ft P |
5461 | 5323 | .UNINDENT |
5462 | 5324 | .UNINDENT |
5463 | 5325 | .UNINDENT |
5464 | .INDENT 0.0 | |
5465 | .TP | |
5466 | .B pytest.logs(logger, level=None) | |
5467 | Adopted version of \fI\%unittest.TestCase.assertEqual()\fP, | |
5468 | see it for details. | |
5469 | .sp | |
5470 | Example: | |
5471 | .INDENT 7.0 | |
5472 | .INDENT 3.5 | |
5473 | .sp | |
5474 | .nf | |
5475 | .ft C | |
5476 | def test_logs(create_connection, server): | |
5477 | with pytest.logs(\(aqaioredis\(aq, \(aqDEBUG\(aq) as cm: | |
5478 | conn yield from create_connection(server.tcp_address) | |
5479 | assert cm.output[0].startswith( | |
5480 | \(aqDEBUG:aioredis:Creating tcp connection\(aq) | |
5481 | .ft P | |
5482 | .fi | |
5483 | .UNINDENT | |
5484 | .UNINDENT | |
5485 | .UNINDENT | |
5486 | .INDENT 0.0 | |
5487 | .TP | |
5488 | .B pytest.assert_almost_equal(first, second, places=None, msg=None, delta=None) | |
5489 | Adopted version of \fI\%unittest.TestCase.assertAlmostEqual()\fP\&. | |
5490 | .UNINDENT | |
5491 | .INDENT 0.0 | |
5492 | .TP | |
5493 | .B pytest.raises_regex(exc_type, message) | |
5494 | Adopted version of \fI\%unittest.TestCase.assertRaisesRegex()\fP\&. | |
5495 | .UNINDENT | |
5326 | .SH MIGRATING FROM V0.3 TO V1.0 | |
5327 | .SS API changes and backward incompatible changes: | |
5328 | .INDENT 0.0 | |
5329 | .IP \(bu 2 | |
5330 | \fI\%aioredis.create_pool\fP | |
5331 | .IP \(bu 2 | |
5332 | \fI\%aioredis.create_reconnecting_redis\fP | |
5333 | .IP \(bu 2 | |
5334 | \fI\%aioredis.Redis\fP | |
5335 | .IP \(bu 2 | |
5336 | \fI\%Blocking operations and connection sharing\fP | |
5337 | .IP \(bu 2 | |
5338 | \fI\%Sorted set commands return values\fP | |
5339 | .IP \(bu 2 | |
5340 | \fI\%Hash hscan command now returns list of tuples\fP | |
5341 | .UNINDENT | |
5342 | ||
5343 | .sp | |
5344 | .ce | |
5345 | ---- | |
5346 | ||
5347 | .ce 0 | |
5348 | .sp | |
5349 | .SS aioredis.create_pool | |
5350 | .sp | |
5351 | \fBcreate_pool()\fP now returns \fBConnectionsPool\fP | |
5352 | instead of \fBRedisPool\fP\&. | |
5353 | .sp | |
5354 | This means that pool now operates with \fBRedisConnection\fP | |
5355 | objects and not \fBRedis\fP\&. | |
5356 | .TS | |
5357 | center; | |
5358 | |l|l|. | |
5359 | _ | |
5360 | T{ | |
5361 | v0.3 | |
5362 | T} T{ | |
5363 | .INDENT 0.0 | |
5364 | .INDENT 3.5 | |
5365 | .sp | |
5366 | .nf | |
5367 | .ft C | |
5368 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
5369 | ||
5370 | with await pool as redis: | |
5371 | # calling methods of Redis class | |
5372 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5373 | .ft P | |
5374 | .fi | |
5375 | .UNINDENT | |
5376 | .UNINDENT | |
5377 | T} | |
5378 | _ | |
5379 | T{ | |
5380 | v1.0 | |
5381 | T} T{ | |
5382 | .INDENT 0.0 | |
5383 | .INDENT 3.5 | |
5384 | .sp | |
5385 | .nf | |
5386 | .ft C | |
5387 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
5388 | ||
5389 | with await pool as conn: | |
5390 | # calling conn.lpush will raise AttributeError exception | |
5391 | await conn.execute(\(aqlpush\(aq, \(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5392 | .ft P | |
5393 | .fi | |
5394 | .UNINDENT | |
5395 | .UNINDENT | |
5396 | T} | |
5397 | _ | |
5398 | .TE | |
5399 | .SS aioredis.create_reconnecting_redis | |
5400 | .sp | |
5401 | \fBcreate_reconnecting_redis()\fP has been dropped. | |
5402 | .sp | |
5403 | \fBcreate_redis_pool()\fP can be used instead of former function. | |
5404 | .TS | |
5405 | center; | |
5406 | |l|l|. | |
5407 | _ | |
5408 | T{ | |
5409 | v0.3 | |
5410 | T} T{ | |
5411 | .INDENT 0.0 | |
5412 | .INDENT 3.5 | |
5413 | .sp | |
5414 | .nf | |
5415 | .ft C | |
5416 | redis = await aioredis.create_reconnecting_redis( | |
5417 | (\(aqlocalhost\(aq, 6379)) | |
5418 | ||
5419 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5420 | .ft P | |
5421 | .fi | |
5422 | .UNINDENT | |
5423 | .UNINDENT | |
5424 | T} | |
5425 | _ | |
5426 | T{ | |
5427 | v1.0 | |
5428 | T} T{ | |
5429 | .INDENT 0.0 | |
5430 | .INDENT 3.5 | |
5431 | .sp | |
5432 | .nf | |
5433 | .ft C | |
5434 | redis = await aioredis.create_redis_pool( | |
5435 | (\(aqlocalhost\(aq, 6379)) | |
5436 | ||
5437 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5438 | .ft P | |
5439 | .fi | |
5440 | .UNINDENT | |
5441 | .UNINDENT | |
5442 | T} | |
5443 | _ | |
5444 | .TE | |
5445 | .sp | |
5446 | \fBcreate_redis_pool\fP returns \fBRedis\fP initialized with | |
5447 | \fBConnectionsPool\fP which is responsible for reconnecting to server. | |
5448 | .sp | |
5449 | Also \fBcreate_reconnecting_redis\fP was patching the \fBRedisConnection\fP and | |
5450 | breaking \fBclosed\fP property (it was always \fBTrue\fP). | |
5451 | .SS aioredis.Redis | |
5452 | .sp | |
5453 | \fBRedis\fP class now operates with objects implementing | |
5454 | \fBaioredis.abc.AbcConnection\fP interface. | |
5455 | \fBRedisConnection\fP and \fBConnectionsPool\fP are | |
5456 | both implementing \fBAbcConnection\fP so it becomes possible to use the same API | |
5457 | when working with either single connection or connections pool. | |
5458 | .TS | |
5459 | center; | |
5460 | |l|l|. | |
5461 | _ | |
5462 | T{ | |
5463 | v0.3 | |
5464 | T} T{ | |
5465 | .INDENT 0.0 | |
5466 | .INDENT 3.5 | |
5467 | .sp | |
5468 | .nf | |
5469 | .ft C | |
5470 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
5471 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5472 | ||
5473 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
5474 | redis = await pool.acquire() # get Redis object | |
5475 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5476 | .ft P | |
5477 | .fi | |
5478 | .UNINDENT | |
5479 | .UNINDENT | |
5480 | T} | |
5481 | _ | |
5482 | T{ | |
5483 | v1.0 | |
5484 | T} T{ | |
5485 | .INDENT 0.0 | |
5486 | .INDENT 3.5 | |
5487 | .sp | |
5488 | .nf | |
5489 | .ft C | |
5490 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
5491 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5492 | ||
5493 | redis = await aioredis.create_redis_pool((\(aqlocalhost\(aq, 6379)) | |
5494 | await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) | |
5495 | .ft P | |
5496 | .fi | |
5497 | .UNINDENT | |
5498 | .UNINDENT | |
5499 | T} | |
5500 | _ | |
5501 | .TE | |
5502 | .SS Blocking operations and connection sharing | |
5503 | .sp | |
5504 | Current implementation of \fBConnectionsPool\fP by default \fBexecutes | |
5505 | every command on a random connection\fP\&. The \fIPros\fP of this is that it allowed | |
5506 | implementing \fBAbcConnection\fP interface and hide pool inside \fBRedis\fP class, | |
5507 | and also keep pipelining feature (like RedisConnection.execute). | |
5508 | The \fICons\fP of this is that \fBdifferent tasks may use same connection and block | |
5509 | it\fP with some long\-running command. | |
5510 | .sp | |
5511 | We can call it \fBShared Mode\fP \-\-\- commands are sent to random connections | |
5512 | in pool without need to lock [connection]: | |
5513 | .INDENT 0.0 | |
5514 | .INDENT 3.5 | |
5515 | .sp | |
5516 | .nf | |
5517 | .ft C | |
5518 | redis = await aioredis.create_redis_pool( | |
5519 | (\(aqlocalhost\(aq, 6379), | |
5520 | minsize=1, | |
5521 | maxsize=1) | |
5522 | ||
5523 | async def task(): | |
5524 | # Shared mode | |
5525 | await redis.set(\(aqkey\(aq, \(aqval\(aq) | |
5526 | ||
5527 | asyncio.ensure_future(task()) | |
5528 | asyncio.ensure_future(task()) | |
5529 | # Both tasks will send commands through same connection | |
5530 | # without acquiring (locking) it first. | |
5531 | .ft P | |
5532 | .fi | |
5533 | .UNINDENT | |
5534 | .UNINDENT | |
5535 | .sp | |
5536 | Blocking operations (like \fBblpop\fP, \fBbrpop\fP or long\-running LUA scripts) | |
5537 | in \fBshared mode\fP will block connection and thus may lead to whole | |
5538 | program malfunction. | |
5539 | .sp | |
5540 | This \fIblocking\fP issue can be easily solved by using exclusive connection | |
5541 | for such operations: | |
5542 | .INDENT 0.0 | |
5543 | .INDENT 3.5 | |
5544 | .sp | |
5545 | .nf | |
5546 | .ft C | |
5547 | redis = await aioredis.create_redis_pool( | |
5548 | (\(aqlocalhost\(aq, 6379), | |
5549 | minsize=1, | |
5550 | maxsize=1) | |
5551 | ||
5552 | async def task(): | |
5553 | # Exclusive mode | |
5554 | with await redis as r: | |
5555 | await r.set(\(aqkey\(aq, \(aqval\(aq) | |
5556 | asyncio.ensure_future(task()) | |
5557 | asyncio.ensure_future(task()) | |
5558 | # Both tasks will first acquire connection. | |
5559 | .ft P | |
5560 | .fi | |
5561 | .UNINDENT | |
5562 | .UNINDENT | |
5563 | .sp | |
5564 | We can call this \fBExclusive Mode\fP \-\-\- context manager is used to | |
5565 | acquire (lock) exclusive connection from pool and send all commands through it. | |
5566 | .sp | |
5567 | \fBNOTE:\fP | |
5568 | .INDENT 0.0 | |
5569 | .INDENT 3.5 | |
5570 | This technique is similar to v0.3 pool usage: | |
5571 | .INDENT 0.0 | |
5572 | .INDENT 3.5 | |
5573 | .sp | |
5574 | .nf | |
5575 | .ft C | |
5576 | # in aioredis v0.3 | |
5577 | pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) | |
5578 | with await pool as redis: | |
5579 | # Redis is bound to exclusive connection | |
5580 | redis.set(\(aqkey\(aq, \(aqval\(aq) | |
5581 | .ft P | |
5582 | .fi | |
5583 | .UNINDENT | |
5584 | .UNINDENT | |
5585 | .UNINDENT | |
5586 | .UNINDENT | |
5587 | .SS Sorted set commands return values | |
5588 | .sp | |
5589 | Sorted set commands (like \fBzrange\fP, \fBzrevrange\fP and others) that accept | |
5590 | \fBwithscores\fP argument now \fBreturn list of tuples\fP instead of plain list. | |
5591 | .TS | |
5592 | center; | |
5593 | |l|l|. | |
5594 | _ | |
5595 | T{ | |
5596 | v0.3 | |
5597 | T} T{ | |
5598 | .INDENT 0.0 | |
5599 | .INDENT 3.5 | |
5600 | .sp | |
5601 | .nf | |
5602 | .ft C | |
5603 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
5604 | await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) | |
5605 | res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) | |
5606 | assert res == [b\(aqone\(aq, 1, b\(aqtwo\(aq, 2] | |
5607 | ||
5608 | # not an easy way to make a dict | |
5609 | it = iter(res) | |
5610 | assert dict(zip(it, it)) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} | |
5611 | .ft P | |
5612 | .fi | |
5613 | .UNINDENT | |
5614 | .UNINDENT | |
5615 | T} | |
5616 | _ | |
5617 | T{ | |
5618 | v1.0 | |
5619 | T} T{ | |
5620 | .INDENT 0.0 | |
5621 | .INDENT 3.5 | |
5622 | .sp | |
5623 | .nf | |
5624 | .ft C | |
5625 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
5626 | await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) | |
5627 | res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) | |
5628 | assert res == [(b\(aqone\(aq, 1), (b\(aqtwo\(aq, 2)] | |
5629 | ||
5630 | # now it\(aqs easier to make a dict of it | |
5631 | assert dict(res) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} | |
5632 | .ft P | |
5633 | .fi | |
5634 | .UNINDENT | |
5635 | .UNINDENT | |
5636 | T} | |
5637 | _ | |
5638 | .TE | |
5639 | .SS Hash \fBhscan\fP command now returns list of tuples | |
5640 | .sp | |
5641 | \fBhscan\fP updated to return a list of tuples instead of plain | |
5642 | mixed key/value list. | |
5643 | .TS | |
5644 | center; | |
5645 | |l|l|. | |
5646 | _ | |
5647 | T{ | |
5648 | v0.3 | |
5649 | T} T{ | |
5650 | .INDENT 0.0 | |
5651 | .INDENT 3.5 | |
5652 | .sp | |
5653 | .nf | |
5654 | .ft C | |
5655 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
5656 | await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) | |
5657 | cur, data = await redis.hscan(\(aqhash\(aq) | |
5658 | assert data == [b\(aqone\(aq, b\(aq1\(aq, b\(aqtwo\(aq, b\(aq2\(aq] | |
5659 | ||
5660 | # not an easy way to make a dict | |
5661 | it = iter(data) | |
5662 | assert dict(zip(it, it)) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} | |
5663 | .ft P | |
5664 | .fi | |
5665 | .UNINDENT | |
5666 | .UNINDENT | |
5667 | T} | |
5668 | _ | |
5669 | T{ | |
5670 | v1.0 | |
5671 | T} T{ | |
5672 | .INDENT 0.0 | |
5673 | .INDENT 3.5 | |
5674 | .sp | |
5675 | .nf | |
5676 | .ft C | |
5677 | redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) | |
5678 | await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) | |
5679 | cur, data = await redis.hscan(\(aqhash\(aq) | |
5680 | assert data == [(b\(aqone\(aq, b\(aq1\(aq), (b\(aqtwo\(aq, b\(aq2\(aq)] | |
5681 | ||
5682 | # now it\(aqs easier to make a dict of it | |
5683 | assert dict(data) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} | |
5684 | .ft P | |
5685 | .fi | |
5686 | .UNINDENT | |
5687 | .UNINDENT | |
5688 | T} | |
5689 | _ | |
5690 | .TE | |
5496 | 5691 | .SH RELEASES |
5692 | .SS 1.3.1 (2019\-12\-02) | |
5693 | .SS Bugfixes | |
5694 | .INDENT 0.0 | |
5695 | .IP \(bu 2 | |
5696 | Fix transaction data decoding | |
5697 | (see \fI\%#657\fP); | |
5698 | .IP \(bu 2 | |
5699 | Fix duplicate calls to \fBpool.wait_closed()\fP upon \fBcreate_pool()\fP exception. | |
5700 | (see \fI\%#671\fP); | |
5701 | .UNINDENT | |
5702 | .SS Deprecations and Removals | |
5703 | .INDENT 0.0 | |
5704 | .IP \(bu 2 | |
5705 | Drop explicit loop requirement in API. | |
5706 | Deprecate \fBloop\fP argument. | |
5707 | Throw warning in Python 3.8+ if explicit \fBloop\fP is passed to methods. | |
5708 | (see \fI\%#666\fP); | |
5709 | .UNINDENT | |
5710 | .SS Misc | |
5711 | .INDENT 0.0 | |
5712 | .IP \(bu 2 | |
5713 | \fI\%#643\fP, | |
5714 | \fI\%#646\fP, | |
5715 | \fI\%#648\fP; | |
5716 | .UNINDENT | |
5717 | .SS 1.3.0 (2019\-09\-24) | |
5718 | .SS Features | |
5719 | .INDENT 0.0 | |
5720 | .IP \(bu 2 | |
5721 | Added \fBxdel\fP and \fBxtrim\fP method which missed in \fBcommands/streams.py\fP & also added unit test code for them | |
5722 | (see \fI\%#438\fP); | |
5723 | .IP \(bu 2 | |
5724 | Add \fBcount\fP argument to \fBspop\fP command | |
5725 | (see \fI\%#485\fP); | |
5726 | .IP \(bu 2 | |
5727 | Add support for \fBzpopmax\fP and \fBzpopmin\fP redis commands | |
5728 | (see \fI\%#550\fP); | |
5729 | .IP \(bu 2 | |
5730 | Add \fBtowncrier\fP: change notes are now stored in \fBCHANGES.txt\fP | |
5731 | (see \fI\%#576\fP); | |
5732 | .IP \(bu 2 | |
5733 | Type hints for the library | |
5734 | (see \fI\%#584\fP); | |
5735 | .IP \(bu 2 | |
5736 | A few additions to the sorted set commands: | |
5737 | .INDENT 2.0 | |
5738 | .IP \(bu 2 | |
5739 | the blocking pop commands: \fBBZPOPMAX\fP and \fBBZPOPMIN\fP | |
5740 | .IP \(bu 2 | |
5741 | the \fBCH\fP and \fBINCR\fP options of the \fBZADD\fP command | |
5742 | .UNINDENT | |
5743 | .sp | |
5744 | (see \fI\%#618\fP); | |
5745 | .IP \(bu 2 | |
5746 | Added \fBno_ack\fP parameter to \fBxread_group\fP streams method in \fBcommands/streams.py\fP | |
5747 | (see \fI\%#625\fP); | |
5748 | .UNINDENT | |
5749 | .SS Bugfixes | |
5750 | .INDENT 0.0 | |
5751 | .IP \(bu 2 | |
5752 | Fix for sensitive logging | |
5753 | (see \fI\%#459\fP); | |
5754 | .IP \(bu 2 | |
5755 | Fix slow memory leak in \fBwait_closed\fP implementation | |
5756 | (see \fI\%#498\fP); | |
5757 | .IP \(bu 2 | |
5758 | Fix handling of instances where Redis returns null fields for a stream message | |
5759 | (see \fI\%#605\fP); | |
5760 | .UNINDENT | |
5761 | .SS Improved Documentation | |
5762 | .INDENT 0.0 | |
5763 | .IP \(bu 2 | |
5764 | Rewrite "Getting started" documentation. | |
5765 | (see \fI\%#641\fP); | |
5766 | .UNINDENT | |
5767 | .SS Misc | |
5768 | .INDENT 0.0 | |
5769 | .IP \(bu 2 | |
5770 | \fI\%#585\fP, | |
5771 | \fI\%#611\fP, | |
5772 | \fI\%#612\fP, | |
5773 | \fI\%#619\fP, | |
5774 | \fI\%#620\fP, | |
5775 | \fI\%#642\fP; | |
5776 | .UNINDENT | |
5497 | 5777 | .SS 1.2.0 (2018\-10\-24) |
5498 | 5778 | .sp |
5499 | 5779 | \fBNEW\fP: |
5857 | 6137 | Fixed cancellation of wait_closed |
5858 | 6138 | (see \fI\%#118\fP); |
5859 | 6139 | .IP \(bu 2 |
5860 | Fixed \fBtime()\fP convertion to float | |
6140 | Fixed \fBtime()\fP conversion to float | |
5861 | 6141 | (see \fI\%#126\fP); |
5862 | 6142 | .IP \(bu 2 |
5863 | 6143 | Fixed \fBhmset()\fP method to return bool instead of \fBb\(aqOK\(aq\fP |
6155 | 6435 | .SH AUTHOR |
6156 | 6436 | Alexey Popravka |
6157 | 6437 | .SH COPYRIGHT |
6158 | 2014-2018, Alexey Popravka | |
6438 | 2014-2019, Alexey Popravka | |
6159 | 6439 | .\" Generated by docutils manpage writer. |
6160 | 6440 | . |
41 | 41 | |
42 | 42 | |
43 | 43 | .. cofunction:: create_connection(address, \*, db=0, password=None, ssl=None,\ |
44 | encoding=None, parser=None, loop=None,\ | |
45 | timeout=None) | |
44 | encoding=None, parser=None,\ | |
45 | timeout=None, connection_cls=None) | |
46 | 46 | |
47 | 47 | Creates Redis connection. |
48 | 48 | |
51 | 51 | |
52 | 52 | .. versionchanged:: v1.0 |
53 | 53 | ``parser`` argument added. |
54 | ||
55 | .. deprecated:: v1.3.1 | |
56 | ``loop`` argument deprecated for Python 3.8 compatibility. | |
54 | 57 | |
55 | 58 | :param address: An address where to connect. |
56 | 59 | Can be one of the following: |
79 | 82 | :param parser: Protocol parser class. Can be used to set custom protocol |
80 | 83 | reader; expected same interface as :class:`hiredis.Reader`. |
81 | 84 | :type parser: callable or None |
82 | ||
83 | :param loop: An optional *event loop* instance | |
84 | (uses :func:`asyncio.get_event_loop` if not specified). | |
85 | :type loop: :ref:`EventLoop<asyncio-event-loop>` | |
86 | 85 | |
87 | 86 | :param timeout: Max time to open a connection, otherwise |
88 | 87 | raise :exc:`asyncio.TimeoutError` exception. |
89 | 88 | ``None`` by default |
90 | 89 | :type timeout: float greater than 0 or None |
90 | ||
91 | :param connection_cls: Custom connection class. ``None`` by default. | |
92 | :type connection_cls: :class:`abc.AbcConnection` or None | |
91 | 93 | |
92 | 94 | :return: :class:`RedisConnection` instance. |
93 | 95 | |
170 | 172 | Method also accept :class:`aioredis.Channel` instances as command |
171 | 173 | arguments:: |
172 | 174 | |
173 | >>> ch1 = Channel('A', is_pattern=False, loop=loop) | |
175 | >>> ch1 = Channel('A', is_pattern=False) | |
174 | 176 | >>> await conn.execute_pubsub('subscribe', ch1) |
175 | 177 | [[b'subscribe', b'A', 1]] |
176 | 178 | |
251 | 253 | |
252 | 254 | .. function:: create_pool(address, \*, db=0, password=None, ssl=None, \ |
253 | 255 | encoding=None, minsize=1, maxsize=10, \ |
254 | parser=None, loop=None, \ | |
256 | parser=None, \ | |
255 | 257 | create_connection_timeout=None, \ |
256 | 258 | pool_cls=None, connection_cls=None) |
257 | 259 | |
275 | 277 | |
276 | 278 | .. versionadded:: v1.0 |
277 | 279 | ``parser``, ``pool_cls`` and ``connection_cls`` arguments added. |
280 | ||
281 | .. deprecated:: v1.3.1 | |
282 | ``loop`` argument deprecated for Python 3.8 compatibility. | |
278 | 283 | |
279 | 284 | :param address: An address where to connect. |
280 | 285 | Can be one of the following: |
309 | 314 | :param parser: Protocol parser class. Can be used to set custom protocol |
310 | 315 | reader; expected same interface as :class:`hiredis.Reader`. |
311 | 316 | :type parser: callable or None |
312 | ||
313 | :param loop: An optional *event loop* instance | |
314 | (uses :func:`asyncio.get_event_loop` if not specified). | |
315 | :type loop: :ref:`EventLoop<asyncio-event-loop>` | |
316 | 317 | |
317 | 318 | :param create_connection_timeout: Max time to open a connection, |
318 | 319 | otherwise raise an :exc:`asyncio.TimeoutError`. ``None`` by default. |
448 | 449 | Wait until pool gets closed (when all connections are closed). |
449 | 450 | |
450 | 451 | .. versionadded:: v0.2.8 |
451 | ||
452 | ||
453 | ---- | |
454 | ||
455 | .. _aioredis-channel: | |
456 | ||
457 | Pub/Sub Channel object | |
458 | ---------------------- | |
459 | ||
460 | `Channel` object is a wrapper around queue for storing received pub/sub messages. | |
461 | ||
462 | ||
463 | .. class:: Channel(name, is_pattern, loop=None) | |
464 | ||
465 | Bases: :class:`abc.AbcChannel` | |
466 | ||
467 | Object representing Pub/Sub messages queue. | |
468 | It's basically a wrapper around :class:`asyncio.Queue`. | |
469 | ||
470 | .. attribute:: name | |
471 | ||
472 | Holds encoded channel/pattern name. | |
473 | ||
474 | .. attribute:: is_pattern | |
475 | ||
476 | Set to True for pattern channels. | |
477 | ||
478 | .. attribute:: is_active | |
479 | ||
480 | Set to True if there are messages in queue and connection is still | |
481 | subscribed to this channel. | |
482 | ||
483 | .. comethod:: get(\*, encoding=None, decoder=None) | |
484 | ||
485 | Coroutine that waits for and returns a message. | |
486 | ||
487 | Return value is message received or ``None`` signifying that channel has | |
488 | been unsubscribed and no more messages will be received. | |
489 | ||
490 | :param str encoding: If not None used to decode resulting bytes message. | |
491 | ||
492 | :param callable decoder: If specified used to decode message, | |
493 | ex. :func:`json.loads()` | |
494 | ||
495 | :raise aioredis.ChannelClosedError: If channel is unsubscribed and | |
496 | has no more messages. | |
497 | ||
498 | .. method:: get_json(\*, encoding="utf-8") | |
499 | ||
500 | Shortcut to ``get(encoding="utf-8", decoder=json.loads)`` | |
501 | ||
502 | .. comethod:: wait_message() | |
503 | ||
504 | Waits for message to become available in channel | |
505 | or channel is closed (unsubscribed). | |
506 | ||
507 | Main idea is to use it in loops: | |
508 | ||
509 | >>> ch = redis.channels['channel:1'] | |
510 | >>> while await ch.wait_message(): | |
511 | ... msg = await ch.get() | |
512 | ||
513 | :rtype: bool | |
514 | ||
515 | .. comethod:: iter(, \*, encoding=None, decoder=None) | |
516 | :async-for: | |
517 | :coroutine: | |
518 | ||
519 | Same as :meth:`~.get` method but it is a native coroutine. | |
520 | ||
521 | Usage example:: | |
522 | ||
523 | >>> async for msg in ch.iter(): | |
524 | ... print(msg) | |
525 | ||
526 | .. versionadded:: 0.2.5 | |
527 | Available for Python 3.5 only | |
528 | 452 | |
529 | 453 | ---- |
530 | 454 | |
669 | 593 | MasterReplyError |
670 | 594 | SlaveReplyError |
671 | 595 | |
596 | ||
597 | ---- | |
598 | ||
599 | .. _aioredis-channel: | |
600 | ||
601 | Pub/Sub Channel object | |
602 | ---------------------- | |
603 | ||
604 | `Channel` object is a wrapper around queue for storing received pub/sub messages. | |
605 | ||
606 | ||
607 | .. class:: Channel(name, is_pattern) | |
608 | ||
609 | Bases: :class:`abc.AbcChannel` | |
610 | ||
611 | Object representing Pub/Sub messages queue. | |
612 | It's basically a wrapper around :class:`asyncio.Queue`. | |
613 | ||
614 | .. attribute:: name | |
615 | ||
616 | Holds encoded channel/pattern name. | |
617 | ||
618 | .. attribute:: is_pattern | |
619 | ||
620 | Set to True for pattern channels. | |
621 | ||
622 | .. attribute:: is_active | |
623 | ||
624 | Set to True if there are messages in queue and connection is still | |
625 | subscribed to this channel. | |
626 | ||
627 | .. comethod:: get(\*, encoding=None, decoder=None) | |
628 | ||
629 | Coroutine that waits for and returns a message. | |
630 | ||
631 | Return value is message received or ``None`` signifying that channel has | |
632 | been unsubscribed and no more messages will be received. | |
633 | ||
634 | :param str encoding: If not None used to decode resulting bytes message. | |
635 | ||
636 | :param callable decoder: If specified used to decode message, | |
637 | ex. :func:`json.loads()` | |
638 | ||
639 | :raise aioredis.ChannelClosedError: If channel is unsubscribed and | |
640 | has no more messages. | |
641 | ||
642 | .. method:: get_json(\*, encoding="utf-8") | |
643 | ||
644 | Shortcut to ``get(encoding="utf-8", decoder=json.loads)`` | |
645 | ||
646 | .. comethod:: wait_message() | |
647 | ||
648 | Waits for message to become available in channel | |
649 | or channel is closed (unsubscribed). | |
650 | ||
651 | Main idea is to use it in loops: | |
652 | ||
653 | >>> ch = redis.channels['channel:1'] | |
654 | >>> while await ch.wait_message(): | |
655 | ... msg = await ch.get() | |
656 | ||
657 | :rtype: bool | |
658 | ||
659 | .. comethod:: iter(, \*, encoding=None, decoder=None) | |
660 | :async-for: | |
661 | :coroutine: | |
662 | ||
663 | Same as :meth:`~.get` method but it is a native coroutine. | |
664 | ||
665 | Usage example:: | |
666 | ||
667 | >>> async for msg in ch.iter(): | |
668 | ... print(msg) | |
669 | ||
670 | .. versionadded:: 0.2.5 | |
671 | Available for Python 3.5 only | |
672 | ||
673 | ||
672 | 674 | ---- |
673 | 675 | |
674 | 676 | .. _aioredis-redis: |
711 | 713 | .. cofunction:: create_redis(address, \*, db=0, password=None, ssl=None,\ |
712 | 714 | encoding=None, commands_factory=Redis,\ |
713 | 715 | parser=None, timeout=None,\ |
714 | connection_cls=None, loop=None) | |
716 | connection_cls=None) | |
715 | 717 | |
716 | 718 | This :ref:`coroutine<coroutine>` creates high-level Redis |
717 | 719 | interface instance bound to single Redis connection |
719 | 721 | |
720 | 722 | .. versionadded:: v1.0 |
721 | 723 | ``parser``, ``timeout`` and ``connection_cls`` arguments added. |
724 | ||
725 | .. deprecated:: v1.3.1 | |
726 | ``loop`` argument deprecated for Python 3.8 compatibility. | |
722 | 727 | |
723 | 728 | See also :class:`~aioredis.RedisConnection` for parameters description. |
724 | 729 | |
759 | 764 | :class:`~aioredis.abc.AbcConnection`. |
760 | 765 | :type connection_cls: aioredis.abc.AbcConnection |
761 | 766 | |
762 | :param loop: An optional *event loop* instance | |
763 | (uses :func:`asyncio.get_event_loop` if not specified). | |
764 | :type loop: :ref:`EventLoop<asyncio-event-loop>` | |
765 | ||
766 | 767 | :returns: Redis client (result of ``commands_factory`` call), |
767 | 768 | :class:`Redis` by default. |
768 | 769 | |
772 | 773 | minsize=1, maxsize=10,\ |
773 | 774 | parser=None, timeout=None,\ |
774 | 775 | pool_cls=None, connection_cls=None,\ |
775 | loop=None) | |
776 | ) | |
776 | 777 | |
777 | 778 | This :ref:`coroutine<coroutine>` create high-level Redis client instance |
778 | 779 | bound to connections pool (this allows auto-reconnect and simple pub/sub |
783 | 784 | .. versionchanged:: v1.0 |
784 | 785 | ``parser``, ``timeout``, ``pool_cls`` and ``connection_cls`` |
785 | 786 | arguments added. |
787 | ||
788 | .. deprecated:: v1.3.1 | |
789 | ``loop`` argument deprecated for Python 3.8 compatibility. | |
786 | 790 | |
787 | 791 | :param address: An address where to connect. Can be a (host, port) tuple, |
788 | 792 | unix domain socket path string or a Redis URI string. |
830 | 834 | :class:`~aioredis.abc.AbcConnection`. |
831 | 835 | :type connection_cls: aioredis.abc.AbcConnection |
832 | 836 | |
833 | :param loop: An optional *event loop* instance | |
834 | (uses :func:`asyncio.get_event_loop` if not specified). | |
835 | :type loop: :ref:`EventLoop<asyncio-event-loop>` | |
836 | ||
837 | 837 | :returns: Redis client (result of ``commands_factory`` call), |
838 | 838 | :class:`Redis` by default. |
22 | 22 | * ``flake8`` for code linting; |
23 | 23 | * and few other packages. |
24 | 24 | |
25 | Make sure you have provided a ``towncrier`` note. | |
26 | Just add short description running following commands:: | |
27 | ||
28 | $ echo "Short description" > CHANGES/filename.type | |
29 | ||
30 | This will create new file in ``CHANGES`` directory. | |
31 | Filename should consist of the ticket ID or other unique identifier. | |
32 | Five default types are: | |
33 | ||
34 | * .feature - signifying new feature | |
35 | * .bugfix - signifying a bug fix | |
36 | * .doc - documentation improvement | |
37 | * .removal - deprecation or removal of public API | |
38 | * .misc - a ticket has been closed, but not in interest of users | |
39 | ||
40 | You can check if everything is correct by typing:: | |
41 | ||
42 | $ towncrier --draft | |
43 | ||
44 | To produce the news file:: | |
45 | ||
46 | $ towncrier | |
47 | ||
25 | 48 | Code style |
26 | 49 | ---------- |
27 | 50 | |
40 | 63 | # will run tests in a verbose mode |
41 | 64 | $ make test |
42 | 65 | # or |
43 | $ py.test | |
66 | $ pytest | |
67 | ||
68 | # or with particular Redis server | |
69 | $ pytest --redis-server=/usr/local/bin/redis-server tests/errors_test.py | |
44 | 70 | |
45 | 71 | # will run tests with coverage report |
46 | 72 | $ make cov |
47 | 73 | # or |
48 | $ py.test --cov | |
49 | ||
74 | $ pytest --cov | |
50 | 75 | |
51 | 76 | SSL tests |
52 | 77 | ~~~~~~~~~ |
68 | 93 | To run tests against different redises use ``--redis-server`` command line |
69 | 94 | option:: |
70 | 95 | |
71 | $ py.test --redis-server=/path/to/custom/redis-server | |
96 | $ pytest --redis-server=/path/to/custom/redis-server | |
72 | 97 | |
73 | 98 | UVLoop |
74 | 99 | ~~~~~~ |
76 | 101 | To run tests with :term:`uvloop`:: |
77 | 102 | |
78 | 103 | $ pip install uvloop |
79 | $ py.test --uvloop | |
104 | $ pytest --uvloop | |
80 | 105 | |
81 | 106 | .. note:: Until Python 3.5.2 EventLoop has no ``create_future`` method |
82 | 107 | so aioredis won't benefit from uvloop's futures. |
88 | 113 | :mod:`aioredis` uses :term:`pytest` tool. |
89 | 114 | |
90 | 115 | Tests are located under ``/tests`` directory. |
91 | ||
92 | Pure Python 3.5 tests (ie the ones using ``async``/``await`` syntax) must be | |
93 | prefixed with ``py35_``, for instance see:: | |
94 | ||
95 | tests/py35_generic_commands_tests.py | |
96 | tests/py35_pool_test.py | |
97 | 116 | |
98 | 117 | |
99 | 118 | Fixtures |
186 | 205 | :rtype: tuple |
187 | 206 | |
188 | 207 | |
189 | Helpers | |
190 | ~~~~~~~ | |
191 | ||
192 | :mod:`aioredis` also updates :term:`pytest`'s namespace with several helpers. | |
193 | ||
194 | .. function:: pytest.redis_version(\*version, reason) | |
208 | ``redis_version`` tests helper | |
209 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
210 | ||
211 | In ``tests`` directory there is a :mod:`_testutils` module with a simple | |
212 | helper --- :func:`redis_version` --- a function that adds a pytest mark to a test | |
213 | allowing to run it with requested Redis server versions. | |
214 | ||
215 | .. function:: _testutils.redis_version(\*version, reason) | |
195 | 216 | |
196 | 217 | Marks test with minimum redis version to run. |
197 | 218 | |
199 | 220 | |
200 | 221 | .. code-block:: python |
201 | 222 | |
202 | @pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") | |
223 | from _testutils import redis_version | |
224 | ||
225 | @redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") | |
203 | 226 | def test_hstrlen(redis): |
204 | 227 | pass |
205 | ||
206 | ||
207 | .. function:: pytest.logs(logger, level=None) | |
208 | ||
209 | Adopted version of :meth:`unittest.TestCase.assertEqual`, | |
210 | see it for details. | |
211 | ||
212 | Example: | |
213 | ||
214 | .. code-block:: python | |
215 | ||
216 | def test_logs(create_connection, server): | |
217 | with pytest.logs('aioredis', 'DEBUG') as cm: | |
218 | conn yield from create_connection(server.tcp_address) | |
219 | assert cm.output[0].startswith( | |
220 | 'DEBUG:aioredis:Creating tcp connection') | |
221 | ||
222 | ||
223 | .. function:: pytest.assert_almost_equal(first, second, places=None, \ | |
224 | msg=None, delta=None) | |
225 | ||
226 | Adopted version of :meth:`unittest.TestCase.assertAlmostEqual`. | |
227 | ||
228 | ||
229 | .. function:: pytest.raises_regex(exc_type, message) | |
230 | ||
231 | Adopted version of :meth:`unittest.TestCase.assertRaisesRegex`. |
5 | 5 | (see for more). |
6 | 6 | |
7 | 7 | Every example is a correct python program that can be executed. |
8 | ||
9 | .. _aioredis-examples-simple: | |
10 | ||
11 | Low-level connection usage example | |
12 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
13 | ||
14 | :download:`get source code<../examples/connection.py>` | |
15 | ||
16 | .. literalinclude:: ../examples/connection.py | |
17 | ||
18 | ||
19 | Connections pool example | |
20 | ~~~~~~~~~~~~~~~~~~~~~~~~ | |
21 | ||
22 | :download:`get source code<../examples/pool.py>` | |
23 | ||
24 | .. literalinclude:: ../examples/pool.py | |
25 | 8 | |
26 | 9 | |
27 | 10 | Commands example |
62 | 45 | :download:`get source code<../examples/sentinel.py>` |
63 | 46 | |
64 | 47 | .. literalinclude:: ../examples/sentinel.py |
48 | ||
49 | .. _aioredis-examples-simple: | |
50 | ||
51 | Low-level connection usage example | |
52 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
53 | ||
54 | :download:`get source code<../examples/connection.py>` | |
55 | ||
56 | .. literalinclude:: ../examples/connection.py | |
57 | ||
58 | ||
59 | Connections pool example | |
60 | ~~~~~~~~~~~~~~~~~~~~~~~~ | |
61 | ||
62 | :download:`get source code<../examples/pool.py>` | |
63 | ||
64 | .. literalinclude:: ../examples/pool.py |
21 | 21 | Connections Pool Yes |
22 | 22 | Pipelining support Yes |
23 | 23 | Pub/Sub support Yes |
24 | Sentinel support Yes [1]_ | |
24 | Sentinel support Yes | |
25 | 25 | Redis Cluster support WIP |
26 | 26 | Trollius (python 2.7) No |
27 | Tested CPython versions `3.5, 3.6 <travis_>`_ [2]_ | |
28 | Tested PyPy3 versions `5.9.0 <travis_>`_ | |
29 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 <travis_>`_ | |
27 | Tested CPython versions `3.5.3, 3.6, 3.7 <travis_>`_ [1]_ | |
28 | Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 <travis_>`_ | |
29 | Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 <travis_>`_ | |
30 | 30 | Support for dev Redis server through low-level API |
31 | 31 | ================================ ============================== |
32 | 32 | |
33 | .. [1] Sentinel support is available in master branch. | |
34 | This feature is not yet stable and may have some issues. | |
35 | ||
36 | .. [2] For Python 3.3, 3.4 support use aioredis v0.3. | |
33 | .. [1] For Python 3.3, 3.4 support use aioredis v0.3. | |
37 | 34 | |
38 | 35 | Installation |
39 | 36 | ------------ |
57 | 54 | ---------- |
58 | 55 | |
59 | 56 | - Issue Tracker: https://github.com/aio-libs/aioredis/issues |
57 | - Google Group: https://groups.google.com/forum/#!forum/aio-libs | |
58 | - Gitter: https://gitter.im/aio-libs/Lobby | |
60 | 59 | - Source Code: https://github.com/aio-libs/aioredis |
61 | 60 | - Contributor's guide: :doc:`devel` |
62 | 61 | |
77 | 76 | :maxdepth: 3 |
78 | 77 | |
79 | 78 | start |
80 | migration | |
81 | 79 | api_reference |
82 | 80 | mixins |
83 | 81 | abc |
85 | 83 | sentinel |
86 | 84 | examples |
87 | 85 | devel |
86 | migration | |
88 | 87 | releases |
89 | 88 | glossary |
89 | ||
90 | .. :: | |
91 | todo insert after start | |
92 | advanced | |
90 | 93 | |
91 | 94 | Indices and tables |
92 | 95 | ================== |
96 | 99 | * :ref:`search` |
97 | 100 | |
98 | 101 | .. _MIT license: https://github.com/aio-libs/aioredis/blob/master/LICENSE |
99 | .. _travis: https://travis-ci.org/aio-libs/aioredis | |
102 | .. _travis: https://travis-ci.com/aio-libs/aioredis |
181 | 181 | | | | |
182 | 182 | | | redis = await aioredis.create_redis(('localhost', 6379)) | |
183 | 183 | | | await redis.zadd('zset-key', 1, 'one', 2, 'two') | |
184 | | | res = await redis.zrage('zset-key', withscores=True) | | |
184 | | | res = await redis.zrange('zset-key', withscores=True) | | |
185 | 185 | | | assert res == [b'one', 1, b'two', 2] | |
186 | 186 | | | | |
187 | | | # not an esiest way to make a dict | | |
187 | | | # not an easy way to make a dict | | |
188 | 188 | | | it = iter(res) | |
189 | 189 | | | assert dict(zip(it, it)) == {b'one': 1, b'two': 2} | |
190 | 190 | | | | |
194 | 194 | | | | |
195 | 195 | | | redis = await aioredis.create_redis(('localhost', 6379)) | |
196 | 196 | | | await redis.zadd('zset-key', 1, 'one', 2, 'two') | |
197 | | | res = await redis.zrage('zset-key', withscores=True) | | |
197 | | | res = await redis.zrange('zset-key', withscores=True) | | |
198 | 198 | | | assert res == [(b'one', 1), (b'two', 2)] | |
199 | 199 | | | | |
200 | 200 | | | # now it's easier to make a dict of it | |
218 | 218 | | | cur, data = await redis.hscan('hash') | |
219 | 219 | | | assert data == [b'one', b'1', b'two', b'2'] | |
220 | 220 | | | | |
221 | | | # not an esiest way to make a dict | | |
221 | | | # not the easiest way to make a dict | |
222 | 222 | | | it = iter(data) | |
223 | 223 | | | assert dict(zip(it, it)) == {b'one': b'1', b'two': b'2'} | |
224 | 224 | | | | |
118 | 118 | .. autoclass:: TransactionsCommandsMixin |
119 | 119 | :members: |
120 | 120 | |
121 | .. class:: Pipeline(connection, commands_factory=lambda conn: conn, \*,\ | |
122 | loop=None) | |
121 | .. class:: Pipeline(connection, commands_factory=lambda conn: conn) | |
123 | 122 | |
124 | 123 | Commands pipeline. |
125 | 124 | |
128 | 127 | This class implements `__getattr__` method allowing to call methods |
129 | 128 | on instance created with ``commands_factory``. |
130 | 129 | |
130 | .. deprecated:: v1.3.1 | |
131 | ``loop`` argument deprecated for Python 3.8 compatibility. | |
132 | ||
131 | 133 | :param connection: Redis connection |
132 | 134 | :type connection: aioredis.RedisConnection |
133 | 135 | |
134 | 136 | :param callable commands_factory: Commands factory to get methods from. |
135 | ||
136 | :param loop: An optional *event loop* instance | |
137 | (uses :func:`asyncio.get_event_loop` if not specified). | |
138 | :type loop: :ref:`EventLoop<asyncio-event-loop>` | |
139 | 137 | |
140 | 138 | .. comethod:: execute(\*, return_exceptions=False) |
141 | 139 | |
153 | 151 | |
154 | 152 | :raise aioredis.PipelineError: Raised when any command caused error. |
155 | 153 | |
156 | .. class:: MultiExec(connection, commands_factory=lambda conn: conn, \*,\ | |
157 | loop=None) | |
154 | .. class:: MultiExec(connection, commands_factory=lambda conn: conn) | |
158 | 155 | |
159 | 156 | Bases: :class:`~Pipeline`. |
160 | 157 | |
161 | 158 | Multi/Exec pipeline wrapper. |
162 | 159 | |
163 | 160 | See :class:`~Pipeline` for parameters description. |
161 | ||
162 | .. deprecated:: v1.3.1 | |
163 | ``loop`` argument deprecated for Python 3.8 compatibility. | |
164 | 164 | |
165 | 165 | .. comethod:: execute(\*, return_exceptions=False) |
166 | 166 |
27 | 27 | .. corofunction:: create_sentinel(sentinels, \*, db=None, password=None,\ |
28 | 28 | encoding=None, minsize=1, maxsize=10,\ |
29 | 29 | ssl=None, parser=None,\ |
30 | loop=None) | |
30 | ) | |
31 | 31 | |
32 | 32 | Creates Redis Sentinel client. |
33 | ||
34 | .. deprecated:: v1.3.1 | |
35 | ``loop`` argument deprecated for Python 3.8 compatibility. | |
33 | 36 | |
34 | 37 | :param sentinels: A list of Sentinel node addresses. |
35 | 38 | :type sentinels: list[tuple] |
57 | 60 | :param parser: Protocol parser class. Can be used to set custom protocol |
58 | 61 | reader; expected same interface as :class:`hiredis.Reader`. |
59 | 62 | :type parser: callable or None |
60 | ||
61 | :param loop: An optional *event loop* instance | |
62 | (uses :func:`asyncio.get_event_loop` if not specified). | |
63 | :type loop: :ref:`EventLoop<asyncio-event-loop>` | |
64 | 63 | |
65 | 64 | :rtype: RedisSentinel |
66 | 65 |
3 | 3 | Getting started |
4 | 4 | =============== |
5 | 5 | |
6 | ||
7 | Commands Pipelining | |
8 | ------------------- | |
9 | ||
10 | Commands pipelining is built-in. | |
11 | ||
12 | Every command is sent to transport at-once | |
13 | (ofcourse if no ``TypeError``/``ValueError`` was raised) | |
14 | ||
15 | When you making a call with ``await`` / ``yield from`` you will be waiting result, | |
16 | and then gather results. | |
17 | ||
18 | Simple example show both cases (:download:`get source code<../examples/pipeline.py>`): | |
19 | ||
20 | .. literalinclude:: ../examples/pipeline.py | |
21 | :language: python3 | |
22 | :lines: 9-21 | |
23 | :dedent: 4 | |
6 | Installation | |
7 | ------------ | |
8 | ||
9 | .. code-block:: bash | |
10 | ||
11 | $ pip install aioredis | |
12 | ||
13 | This will install aioredis along with its dependencies: | |
14 | ||
15 | * hiredis protocol parser; | |
16 | ||
17 | * async-timeout --- used in Sentinel client. | |
18 | ||
19 | Without dependencies | |
20 | ~~~~~~~~~~~~~~~~~~~~ | |
21 | ||
22 | In some cases [1]_ you might need to install :mod:`aioredis` without ``hiredis``, | |
23 | it is achievable with the following command: | |
24 | ||
25 | .. code-block:: bash | |
26 | ||
27 | $ pip install --no-deps aioredis async-timeout | |
28 | ||
29 | Installing latest version from Git | |
30 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
31 | ||
32 | .. code-block:: bash | |
33 | ||
34 | $ pip install git+https://github.com/aio-libs/aioredis@master#egg=aioredis | |
35 | ||
36 | Connecting | |
37 | ---------- | |
38 | ||
39 | :download:`get source code<../examples/getting_started/00_connect.py>` | |
40 | ||
41 | .. literalinclude:: ../examples/getting_started/00_connect.py | |
42 | :language: python3 | |
43 | ||
44 | :func:`aioredis.create_redis_pool` creates a Redis client backed by a pool of | |
45 | connections. The only required argument is the address of Redis server. | |
46 | Redis server address can be either host and port tuple | |
47 | (ex: ``('localhost', 6379)``), or a string which will be parsed into | |
48 | TCP or UNIX socket address (ex: ``'unix://var/run/redis.sock'``, | |
49 | ``'//var/run/redis.sock'``, ``redis://redis-host-or-ip:6379/1``). | |
50 | ||
51 | Closing the client. Calling ``redis.close()`` and then ``redis.wait_closed()`` | |
52 | is strongly encouraged as these methods will shut down all open connections | |
53 | and cleanup resources. | |
54 | ||
55 | See the :doc:`commands reference </mixins>` for the full list of supported commands. | |
56 | ||
57 | Connecting to specific DB | |
58 | ~~~~~~~~~~~~~~~~~~~~~~~~~ | |
59 | ||
60 | There are several ways you can specify database index to select on connection: | |
61 | ||
62 | #. explicitly pass db index as ``db`` argument: | |
63 | ||
64 | .. code-block:: python | |
65 | ||
66 | redis = await aioredis.create_redis_pool( | |
67 | 'redis://localhost', db=1) | |
68 | ||
69 | #. pass db index in URI as path component: | |
70 | ||
71 | .. code-block:: python | |
72 | ||
73 | redis = await aioredis.create_redis_pool( | |
74 | 'redis://localhost/2') | |
75 | ||
76 | .. note:: | |
77 | ||
78 | DB index specified in URI will take precedence over | |
79 | ``db`` keyword argument. | |
80 | ||
81 | #. call :meth:`~aioredis.Redis.select` method: | |
82 | ||
83 | .. code-block:: python | |
84 | ||
85 | redis = await aioredis.create_redis_pool( | |
86 | 'redis://localhost/') | |
87 | await redis.select(3) | |
88 | ||
89 | ||
90 | Connecting to password-protected Redis instance | |
91 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
92 | ||
93 | The password can be specified either in keyword argument or in address URI: | |
94 | ||
95 | .. code-block:: python | |
96 | ||
97 | redis = await aioredis.create_redis_pool( | |
98 | 'redis://localhost', password='sEcRet') | |
99 | ||
100 | redis = await aioredis.create_redis_pool( | |
101 | 'redis://:sEcRet@localhost/') | |
102 | ||
103 | redis = await aioredis.create_redis_pool( | |
104 | 'redis://localhost/?password=sEcRet') | |
24 | 105 | |
25 | 106 | .. note:: |
26 | ||
27 | For convenience :mod:`aioredis` provides | |
28 | :meth:`~TransactionsCommandsMixin.pipeline` | |
29 | method allowing to execute bulk of commands as one | |
30 | (:download:`get source code<../examples/pipeline.py>`): | |
31 | ||
32 | .. literalinclude:: ../examples/pipeline.py | |
33 | :language: python3 | |
34 | :lines: 23-31 | |
35 | :dedent: 4 | |
107 | Password specified in URI will take precedence over password keyword. | |
108 | ||
109 | Also, specifying the password both as the authentication component and | |
110 | as a query parameter in the URI is forbidden. | |
111 | ||
112 | .. code-block:: python | |
113 | ||
114 | # This will cause assertion error | |
115 | await aioredis.create_redis_pool( | |
116 | 'redis://:sEcRet@localhost/?password=SeCreT') | |
117 | ||
118 | Result messages decoding | |
119 | ------------------------ | |
120 | ||
121 | By default :mod:`aioredis` will return :class:`bytes` for most Redis | |
122 | commands that return string replies. Redis error replies are known to be | |
123 | valid UTF-8 strings so error messages are decoded automatically. | |
124 | ||
125 | If you know that data in Redis is valid string you can tell :mod:`aioredis` | |
126 | to decode result by passing keyword-only argument ``encoding`` | |
127 | in a command call: | |
128 | ||
129 | :download:`get source code<../examples/getting_started/01_decoding.py>` | |
130 | ||
131 | .. literalinclude:: ../examples/getting_started/01_decoding.py | |
132 | :language: python3 | |
133 | ||
134 | ||
135 | :mod:`aioredis` can decode messages for all Redis data types like | |
136 | lists, hashes, sorted sets, etc: | |
137 | ||
138 | :download:`get source code<../examples/getting_started/02_decoding.py>` | |
139 | ||
140 | .. literalinclude:: ../examples/getting_started/02_decoding.py | |
141 | :language: python3 | |
36 | 142 | |
37 | 143 | |
38 | 144 | Multi/Exec transactions |
39 | 145 | ----------------------- |
40 | 146 | |
41 | :mod:`aioredis` provides several ways for executing transactions: | |
42 | ||
43 | * when using raw connection you can issue ``Multi``/``Exec`` commands | |
44 | manually; | |
45 | ||
46 | * when using :class:`aioredis.Redis` instance you can use | |
47 | :meth:`~TransactionsCommandsMixin.multi_exec` transaction pipeline. | |
147 | :download:`get source code<../examples/getting_started/03_multiexec.py>` | |
148 | ||
149 | .. literalinclude:: ../examples/getting_started/03_multiexec.py | |
150 | :language: python3 | |
48 | 151 | |
49 | 152 | :meth:`~TransactionsCommandsMixin.multi_exec` method creates and returns new |
50 | 153 | :class:`~aioredis.commands.MultiExec` object which is used for buffering commands and |
51 | 154 | then executing them inside MULTI/EXEC block. |
52 | 155 | |
53 | Here is a simple example | |
54 | (:download:`get source code<../examples/transaction2.py>`): | |
55 | ||
56 | .. literalinclude:: ../examples/transaction2.py | |
57 | :language: python3 | |
58 | :lines: 9-15 | |
59 | :linenos: | |
60 | :emphasize-lines: 5 | |
61 | :dedent: 4 | |
62 | ||
63 | As you can notice ``await`` is **only** used at line 5 with ``tr.execute`` | |
64 | and **not with** ``tr.set(...)`` calls. | |
65 | ||
66 | 156 | .. warning:: |
67 | 157 | |
68 | 158 | It is very important not to ``await`` buffered command |
79 | 169 | |
80 | 170 | :mod:`aioredis` provides support for Redis Publish/Subscribe messaging. |
81 | 171 | |
82 | To switch connection to subscribe mode you must execute ``subscribe`` command | |
83 | by yield'ing from :meth:`~PubSubCommandsMixin.subscribe` it returns a list of | |
84 | :class:`~aioredis.Channel` objects representing subscribed channels. | |
85 | ||
86 | As soon as connection is switched to subscribed mode the channel will receive | |
87 | and store messages | |
172 | To start listening for messages you must call either | |
173 | :meth:`~PubSubCommandsMixin.subscribe` or | |
174 | :meth:`~PubSubCommandsMixin.psubscribe` method. | |
175 | Both methods return a list of :class:`~aioredis.Channel` objects representing | |
176 | subscribed channels. | |
177 | ||
178 | Right after that the channel will receive and store messages | |
88 | 179 | (the ``Channel`` object is basically a wrapper around :class:`asyncio.Queue`). |
89 | 180 | To read messages from channel you need to use :meth:`~aioredis.Channel.get` |
90 | 181 | or :meth:`~aioredis.Channel.get_json` coroutines. |
91 | 182 | |
92 | .. note:: | |
93 | In Pub/Sub mode redis connection can only receive messages or issue | |
94 | (P)SUBSCRIBE / (P)UNSUBSCRIBE commands. | |
95 | ||
96 | Pub/Sub example (:download:`get source code<../examples/pubsub2.py>`): | |
97 | ||
98 | .. literalinclude:: ../examples/pubsub2.py | |
99 | :language: python3 | |
100 | :lines: 6-31 | |
101 | :dedent: 4 | |
102 | ||
103 | .. .. warning:: | |
104 | Using Pub/Sub mode with :class:`~aioredis.Pool` is possible but | |
105 | only within ``with`` block or by explicitly ``acquiring/releasing`` | |
106 | connection. See example below. | |
107 | ||
108 | Pub/Sub example (:download:`get source code<../examples/pool_pubsub.py>`): | |
109 | ||
110 | .. literalinclude:: ../examples/pool_pubsub.py | |
111 | :language: python3 | |
112 | :lines: 13-36 | |
113 | :dedent: 4 | |
114 | ||
115 | ||
116 | Python 3.5 ``async with`` / ``async for`` support | |
117 | ------------------------------------------------- | |
118 | ||
119 | :mod:`aioredis` is compatible with :pep:`492`. | |
120 | ||
121 | :class:`~aioredis.Pool` can be used with :ref:`async with<async with>` | |
122 | (:download:`get source code<../examples/pool2.py>`): | |
123 | ||
124 | .. literalinclude:: ../examples/pool2.py | |
125 | :language: python3 | |
126 | :lines: 7-8,20-22 | |
127 | :dedent: 4 | |
128 | ||
129 | ||
130 | It also can be used with ``await``: | |
131 | ||
132 | .. literalinclude:: ../examples/pool2.py | |
133 | :language: python3 | |
134 | :lines: 7-8,26-30 | |
135 | :dedent: 4 | |
136 | ||
137 | ||
138 | New ``scan``-family commands added with support of :ref:`async for<async for>` | |
139 | (:download:`get source code<../examples/iscan.py>`): | |
140 | ||
141 | .. literalinclude:: ../examples/iscan.py | |
142 | :language: python3 | |
143 | :lines: 7-9,29-31,34-36,39-41,44-45 | |
144 | :dedent: 4 | |
145 | ||
146 | ||
147 | SSL/TLS support | |
183 | Example subscribing and reading channels: | |
184 | ||
185 | :download:`get source code<../examples/getting_started/04_pubsub.py>` | |
186 | ||
187 | .. literalinclude:: ../examples/getting_started/04_pubsub.py | |
188 | :language: python3 | |
189 | ||
190 | Subscribing and reading patterns: | |
191 | ||
192 | :download:`get source code<../examples/getting_started/05_pubsub.py>` | |
193 | ||
194 | .. literalinclude:: ../examples/getting_started/05_pubsub.py | |
195 | :language: python3 | |
196 | ||
197 | Sentinel client | |
148 | 198 | --------------- |
149 | 199 | |
150 | Though Redis server `does not support data encryption <data_encryption_>`_ | |
151 | it is still possible to setup Redis server behind SSL proxy. For such cases | |
152 | :mod:`aioredis` library support secure connections through :mod:`asyncio` | |
153 | SSL support. See `BaseEventLoop.create_connection`_ for details. | |
154 | ||
155 | .. _data_encryption: http://redis.io/topics/security#data-encryption-support | |
156 | .. _BaseEventLoop.create_connection: https://docs.python.org/3/library/asyncio-eventloop.html#creating-connections | |
200 | :download:`get source code<../examples/getting_started/06_sentinel.py>` | |
201 | ||
202 | .. literalinclude:: ../examples/getting_started/06_sentinel.py | |
203 | :language: python3 | |
204 | ||
205 | Sentinel client requires a list of Redis Sentinel addresses to connect to | |
206 | and start discovering services. | |
207 | ||
208 | Calling :meth:`~aioredis.sentinel.SentinelPool.master_for` or | |
209 | :meth:`~aioredis.sentinel.SentinelPool.slave_for` methods will return | |
210 | Redis clients connected to specified services monitored by Sentinel. | |
211 | ||
212 | Sentinel client will detect failover and reconnect Redis clients automatically. | |
213 | ||
214 | See detailed reference :doc:`here <sentinel>` | |
215 | ||
216 | ---- | |
217 | ||
218 | .. [1] | |
219 | Celery hiredis issues | |
220 | (`#197 <https://github.com/aio-libs/aioredis/issues/197>`_, | |
221 | `#317 <https://github.com/aio-libs/aioredis/pull/317>`_) |
28 | 28 | |
29 | 29 | |
30 | 30 | if __name__ == '__main__': |
31 | asyncio.get_event_loop().run_until_complete(main()) | |
32 | asyncio.get_event_loop().run_until_complete(redis_pool()) | |
31 | asyncio.run(main()) | |
32 | asyncio.run(redis_pool()) |
22 | 22 | |
23 | 23 | |
24 | 24 | if __name__ == '__main__': |
25 | asyncio.get_event_loop().run_until_complete(main()) | |
25 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | redis = await aioredis.create_redis_pool('redis://localhost') | |
6 | await redis.set('my-key', 'value') | |
7 | value = await redis.get('my-key', encoding='utf-8') | |
8 | print(value) | |
9 | ||
10 | redis.close() | |
11 | await redis.wait_closed() | |
12 | ||
13 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | redis = await aioredis.create_redis_pool('redis://localhost') | |
6 | await redis.set('key', 'string-value') | |
7 | bin_value = await redis.get('key') | |
8 | assert bin_value == b'string-value' | |
9 | ||
10 | str_value = await redis.get('key', encoding='utf-8') | |
11 | assert str_value == 'string-value' | |
12 | ||
13 | redis.close() | |
14 | await redis.wait_closed() | |
15 | ||
16 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | redis = await aioredis.create_redis_pool('redis://localhost') | |
6 | ||
7 | await redis.hmset_dict('hash', | |
8 | key1='value1', | |
9 | key2='value2', | |
10 | key3=123) | |
11 | ||
12 | result = await redis.hgetall('hash', encoding='utf-8') | |
13 | assert result == { | |
14 | 'key1': 'value1', | |
15 | 'key2': 'value2', | |
16 | 'key3': '123', # note that Redis returns int as string | |
17 | } | |
18 | ||
19 | redis.close() | |
20 | await redis.wait_closed() | |
21 | ||
22 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | redis = await aioredis.create_redis_pool('redis://localhost') | |
6 | ||
7 | tr = redis.multi_exec() | |
8 | tr.set('key1', 'value1') | |
9 | tr.set('key2', 'value2') | |
10 | ok1, ok2 = await tr.execute() | |
11 | assert ok1 | |
12 | assert ok2 | |
13 | ||
14 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | redis = await aioredis.create_redis_pool('redis://localhost') | |
6 | ||
7 | ch1, ch2 = await redis.subscribe('channel:1', 'channel:2') | |
8 | assert isinstance(ch1, aioredis.Channel) | |
9 | assert isinstance(ch2, aioredis.Channel) | |
10 | ||
11 | async def reader(channel): | |
12 | async for message in channel.iter(): | |
13 | print("Got message:", message) | |
14 | asyncio.get_running_loop().create_task(reader(ch1)) | |
15 | asyncio.get_running_loop().create_task(reader(ch2)) | |
16 | ||
17 | await redis.publish('channel:1', 'Hello') | |
18 | await redis.publish('channel:2', 'World') | |
19 | ||
20 | redis.close() | |
21 | await redis.wait_closed() | |
22 | ||
23 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | redis = await aioredis.create_redis_pool('redis://localhost') | |
6 | ||
7 | ch, = await redis.psubscribe('channel:*') | |
8 | assert isinstance(ch, aioredis.Channel) | |
9 | ||
10 | async def reader(channel): | |
11 | async for ch, message in channel.iter(): | |
12 | print("Got message in channel:", ch, ":", message) | |
13 | asyncio.get_running_loop().create_task(reader(ch)) | |
14 | ||
15 | await redis.publish('channel:1', 'Hello') | |
16 | await redis.publish('channel:2', 'World') | |
17 | ||
18 | redis.close() | |
19 | await redis.wait_closed() | |
20 | ||
21 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | sentinel = await aioredis.create_sentinel( | |
6 | ['redis://localhost:26379', 'redis://sentinel2:26379']) | |
7 | redis = sentinel.master_for('mymaster') | |
8 | ||
9 | ok = await redis.set('key', 'value') | |
10 | assert ok | |
11 | val = await redis.get('key', encoding='utf-8') | |
12 | assert val == 'value' | |
13 | ||
14 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | ||
6 | redis = await aioredis.create_redis( | |
7 | 'redis://localhost') | |
8 | ||
9 | await redis.delete('something:hash', | |
10 | 'something:set', | |
11 | 'something:zset') | |
12 | await redis.mset('something', 'value', | |
13 | 'something:else', 'else') | |
14 | await redis.hmset('something:hash', | |
15 | 'something:1', 'value:1', | |
16 | 'something:2', 'value:2') | |
17 | await redis.sadd('something:set', 'something:1', | |
18 | 'something:2', 'something:else') | |
19 | await redis.zadd('something:zset', 1, 'something:1', | |
20 | 2, 'something:2', 3, 'something:else') | |
21 | ||
22 | await go(redis) | |
23 | redis.close() | |
24 | await redis.wait_closed() | |
25 | ||
26 | ||
27 | async def go(redis): | |
28 | async for key in redis.iscan(match='something*'): | |
29 | print('Matched:', key) | |
30 | ||
31 | key = 'something:hash' | |
32 | ||
33 | async for name, val in redis.ihscan(key, match='something*'): | |
34 | print('Matched:', name, '->', val) | |
35 | ||
36 | key = 'something:set' | |
37 | ||
38 | async for val in redis.isscan(key, match='something*'): | |
39 | print('Matched:', val) | |
40 | ||
41 | key = 'something:zset' | |
42 | ||
43 | async for val, score in redis.izscan(key, match='something*'): | |
44 | print('Matched:', val, ':', score) | |
45 | ||
46 | ||
47 | if __name__ == '__main__': | |
48 | import os | |
49 | if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): | |
50 | loop = asyncio.get_event_loop() | |
51 | loop.run_until_complete(main()) |
41 | 41 | |
42 | 42 | |
43 | 43 | if __name__ == '__main__': |
44 | loop = asyncio.get_event_loop() | |
45 | loop.run_until_complete(main()) | |
44 | asyncio.run(main()) |
14 | 14 | |
15 | 15 | |
16 | 16 | if __name__ == '__main__': |
17 | asyncio.get_event_loop().run_until_complete(main()) | |
17 | asyncio.run(main()) |
0 | import asyncio | |
1 | import aioredis | |
2 | ||
3 | ||
4 | async def main(): | |
5 | ||
6 | pool = await aioredis.create_pool( | |
7 | 'redis://localhost') | |
8 | ||
9 | # async with pool.get() as conn: | |
10 | await pool.execute('set', 'my-key', 'value') | |
11 | ||
12 | await async_with(pool) | |
13 | await with_await(pool) | |
14 | pool.close() | |
15 | await pool.wait_closed() | |
16 | ||
17 | ||
18 | async def async_with(pool): | |
19 | async with pool.get() as conn: | |
20 | value = await conn.execute('get', 'my-key') | |
21 | print('raw value:', value) | |
22 | ||
23 | ||
24 | async def with_await(pool): | |
25 | # This is exactly the same as: | |
26 | # with (yield from pool) as conn: | |
27 | with (await pool) as conn: | |
28 | value = await conn.execute('get', 'my-key') | |
29 | print('raw value:', value) | |
30 | ||
31 | ||
32 | if __name__ == '__main__': | |
33 | loop = asyncio.get_event_loop() | |
34 | loop.run_until_complete(main()) |
27 | 27 | |
28 | 28 | |
29 | 29 | if __name__ == '__main__': |
30 | asyncio.get_event_loop().run_until_complete(main()) | |
30 | asyncio.run(main()) |
40 | 40 | for msg in ("Hello", ",", "world!"): |
41 | 41 | for ch in ('channel:1', 'channel:2'): |
42 | 42 | await pub.publish(ch, msg) |
43 | asyncio.get_event_loop().call_soon(pub.close) | |
44 | asyncio.get_event_loop().call_soon(sub.close) | |
45 | await asyncio.sleep(0) | |
43 | await asyncio.sleep(0.1) | |
44 | pub.close() | |
45 | sub.close() | |
46 | 46 | await pub.wait_closed() |
47 | 47 | await sub.wait_closed() |
48 | 48 | await asyncio.gather(tsk1, tsk2) |
51 | 51 | if __name__ == '__main__': |
52 | 52 | import os |
53 | 53 | if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): |
54 | loop = asyncio.get_event_loop() | |
55 | loop.run_until_complete(pubsub()) | |
54 | asyncio.run(pubsub()) |
19 | 19 | if __name__ == '__main__': |
20 | 20 | import os |
21 | 21 | if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): |
22 | asyncio.get_event_loop().run_until_complete(main()) | |
22 | asyncio.run(main()) |
15 | 15 | |
16 | 16 | |
17 | 17 | if __name__ == '__main__': |
18 | asyncio.get_event_loop().run_until_complete(main()) | |
18 | asyncio.run(main()) |
18 | 18 | |
19 | 19 | |
20 | 20 | if __name__ == '__main__': |
21 | asyncio.get_event_loop().run_until_complete(main()) | |
21 | asyncio.run(main()) |
19 | 19 | |
20 | 20 | |
21 | 21 | if __name__ == '__main__': |
22 | loop = asyncio.get_event_loop() | |
23 | loop.run_until_complete(main()) | |
22 | asyncio.run(main()) |
0 | 0 | [tool:pytest] |
1 | 1 | minversion = 2.9.1 |
2 | addopts = --cov-report=term --cov-report=html | |
2 | addopts = -r a --cov-report=term --cov-report=html | |
3 | 3 | testpaths = tests
4 | 4 | markers = |
5 | run_loop: Mark coroutine to be run with asyncio loop. | |
5 | timeout: Set coroutine execution timeout (default is 15 seconds). | |
6 | 6 | redis_version(*version, reason): Mark test expecting minimum Redis version |
7 | 7 | skip(reason): Skip test |
8 | python_files = | |
9 | test_*.py | |
10 | *_test.py | |
11 | _testutils.py | |
8 | 12 | |
9 | 13 | [coverage:run] |
10 | 14 | branch = true |
28 | 28 | match = regexp.match(line) |
29 | 29 | if match is not None: |
30 | 30 | return match.group(1) |
31 | else: | |
32 | raise RuntimeError('Cannot find version in aioredis/__init__.py') | |
31 | raise RuntimeError('Cannot find version in {}'.format(init_py)) | |
33 | 32 | |
34 | 33 | |
35 | 34 | classifiers = [ |
0 | import pytest | |
1 | ||
2 | __all__ = [ | |
3 | 'redis_version', | |
4 | ] | |
5 | ||
6 | ||
7 | def redis_version(*version, reason): | |
8 | assert 1 < len(version) <= 3, version | |
9 | assert all(isinstance(v, int) for v in version), version | |
10 | return pytest.mark.redis_version(version=version, reason=reason) |
6 | 6 | import os |
7 | 7 | import ssl |
8 | 8 | import time |
9 | import logging | |
10 | 9 | import tempfile |
11 | 10 | import atexit |
11 | import inspect | |
12 | 12 | |
13 | 13 | from collections import namedtuple |
14 | 14 | from urllib.parse import urlencode, urlunparse |
33 | 33 | def loop(): |
34 | 34 | """Creates new event loop.""" |
35 | 35 | loop = asyncio.new_event_loop() |
36 | asyncio.set_event_loop(None) | |
36 | if sys.version_info < (3, 8): | |
37 | asyncio.set_event_loop(loop) | |
37 | 38 | |
38 | 39 | try: |
39 | 40 | yield loop |
59 | 60 | |
60 | 61 | |
61 | 62 | @pytest.fixture |
62 | def create_connection(_closable, loop): | |
63 | def create_connection(_closable): | |
63 | 64 | """Wrapper around aioredis.create_connection.""" |
64 | 65 | |
65 | 66 | async def f(*args, **kw): |
66 | kw.setdefault('loop', loop) | |
67 | 67 | conn = await aioredis.create_connection(*args, **kw) |
68 | 68 | _closable(conn) |
69 | 69 | return conn |
74 | 74 | aioredis.create_redis, |
75 | 75 | aioredis.create_redis_pool], |
76 | 76 | ids=['single', 'pool']) |
77 | def create_redis(_closable, loop, request): | |
77 | def create_redis(_closable, request): | |
78 | 78 | """Wrapper around aioredis.create_redis.""" |
79 | 79 | factory = request.param |
80 | 80 | |
81 | 81 | async def f(*args, **kw): |
82 | kw.setdefault('loop', loop) | |
83 | 82 | redis = await factory(*args, **kw) |
84 | 83 | _closable(redis) |
85 | 84 | return redis |
87 | 86 | |
88 | 87 | |
89 | 88 | @pytest.fixture |
90 | def create_pool(_closable, loop): | |
89 | def create_pool(_closable): | |
91 | 90 | """Wrapper around aioredis.create_pool.""" |
92 | 91 | |
93 | 92 | async def f(*args, **kw): |
94 | kw.setdefault('loop', loop) | |
95 | 93 | redis = await aioredis.create_pool(*args, **kw) |
96 | 94 | _closable(redis) |
97 | 95 | return redis |
99 | 97 | |
100 | 98 | |
101 | 99 | @pytest.fixture |
102 | def create_sentinel(_closable, loop): | |
100 | def create_sentinel(_closable): | |
103 | 101 | """Helper instantiating RedisSentinel client.""" |
104 | 102 | |
105 | 103 | async def f(*args, **kw): |
106 | kw.setdefault('loop', loop) | |
107 | 104 | # make it fail fast on slow CIs (if timeout argument is omitted)
108 | 105 | kw.setdefault('timeout', .001) |
109 | 106 | client = await aioredis.sentinel.create_sentinel(*args, **kw) |
115 | 112 | @pytest.fixture |
116 | 113 | def pool(create_pool, server, loop): |
117 | 114 | """Returns RedisPool instance.""" |
118 | pool = loop.run_until_complete( | |
119 | create_pool(server.tcp_address, loop=loop)) | |
120 | return pool | |
115 | return loop.run_until_complete(create_pool(server.tcp_address)) | |
121 | 116 | |
122 | 117 | |
123 | 118 | @pytest.fixture |
124 | 119 | def redis(create_redis, server, loop): |
125 | 120 | """Returns Redis client instance.""" |
126 | 121 | redis = loop.run_until_complete( |
127 | create_redis(server.tcp_address, loop=loop)) | |
128 | loop.run_until_complete(redis.flushall()) | |
122 | create_redis(server.tcp_address)) | |
123 | ||
124 | async def clear(): | |
125 | await redis.flushall() | |
126 | loop.run_until_complete(clear()) | |
129 | 127 | return redis |
130 | 128 | |
131 | 129 | |
133 | 131 | def redis_sentinel(create_sentinel, sentinel, loop): |
134 | 132 | """Returns Redis Sentinel client instance.""" |
135 | 133 | redis_sentinel = loop.run_until_complete( |
136 | create_sentinel([sentinel.tcp_address], timeout=2, loop=loop)) | |
137 | assert loop.run_until_complete(redis_sentinel.ping()) == b'PONG' | |
134 | create_sentinel([sentinel.tcp_address], timeout=2)) | |
135 | ||
136 | async def ping(): | |
137 | return await redis_sentinel.ping() | |
138 | assert loop.run_until_complete(ping()) == b'PONG' | |
138 | 139 | return redis_sentinel |
139 | 140 | |
140 | 141 | |
142 | 143 | def _closable(loop): |
143 | 144 | conns = [] |
144 | 145 | |
145 | try: | |
146 | yield conns.append | |
147 | finally: | |
146 | async def close(): | |
148 | 147 | waiters = [] |
149 | 148 | while conns: |
150 | 149 | conn = conns.pop(0) |
151 | 150 | conn.close() |
152 | 151 | waiters.append(conn.wait_closed()) |
153 | 152 | if waiters: |
154 | loop.run_until_complete(asyncio.gather(*waiters, loop=loop)) | |
153 | await asyncio.gather(*waiters) | |
154 | try: | |
155 | yield conns.append | |
156 | finally: | |
157 | loop.run_until_complete(close()) | |
155 | 158 | |
156 | 159 | |
157 | 160 | @pytest.fixture(scope='session') |
380 | 383 | yield True |
381 | 384 | raise RuntimeError("Redis startup timeout expired") |
382 | 385 | |
383 | def maker(name, *masters, quorum=1, noslaves=False): | |
386 | def maker(name, *masters, quorum=1, noslaves=False, | |
387 | down_after_milliseconds=3000, | |
388 | failover_timeout=1000): | |
384 | 389 | key = (name,) + masters |
385 | 390 | if key in sentinels: |
386 | 391 | return sentinels[key] |
409 | 414 | for master in masters: |
410 | 415 | write('sentinel monitor', master.name, |
411 | 416 | '127.0.0.1', master.tcp_address.port, quorum) |
412 | write('sentinel down-after-milliseconds', master.name, '3000') | |
413 | write('sentinel failover-timeout', master.name, '3000') | |
417 | write('sentinel down-after-milliseconds', master.name, | |
418 | down_after_milliseconds) | |
419 | write('sentinel failover-timeout', master.name, | |
420 | failover_timeout) | |
414 | 421 | write('sentinel auth-pass', master.name, master.password) |
415 | 422 | |
416 | 423 | f = open(stdout_file, 'w') |
517 | 524 | |
518 | 525 | |
519 | 526 | @pytest.mark.tryfirst |
520 | def pytest_pycollect_makeitem(collector, name, obj): | |
521 | if collector.funcnamefilter(name): | |
522 | if not callable(obj): | |
523 | return | |
524 | item = pytest.Function(name, parent=collector) | |
525 | if item.get_closest_marker('run_loop') is not None: | |
526 | # TODO: re-wrap with asyncio.coroutine if not native coroutine | |
527 | return list(collector._genfunctions(name, obj)) | |
528 | ||
529 | ||
530 | @pytest.mark.tryfirst | |
531 | 527 | def pytest_pyfunc_call(pyfuncitem): |
532 | 528 | """ |
533 | 529 | Run asyncio marked test functions in an event loop instead of a normal |
534 | 530 | function call. |
535 | 531 | """ |
536 | marker = pyfuncitem.get_closest_marker('run_loop') | |
537 | if marker is not None: | |
532 | if inspect.iscoroutinefunction(pyfuncitem.obj): | |
533 | marker = pyfuncitem.get_closest_marker('timeout') | |
534 | if marker is not None and marker.args: | |
535 | timeout = marker.args[0] | |
536 | else: | |
537 | timeout = 15 | |
538 | ||
538 | 539 | funcargs = pyfuncitem.funcargs |
539 | 540 | loop = funcargs['loop'] |
540 | 541 | testargs = {arg: funcargs[arg] |
541 | 542 | for arg in pyfuncitem._fixtureinfo.argnames} |
542 | 543 | |
543 | 544 | loop.run_until_complete( |
544 | _wait_coro(pyfuncitem.obj, testargs, | |
545 | timeout=marker.kwargs.get('timeout', 15), | |
546 | loop=loop)) | |
545 | _wait_coro(pyfuncitem.obj, testargs, timeout=timeout)) | |
547 | 546 | return True |
548 | 547 | |
549 | 548 | |
550 | async def _wait_coro(corofunc, kwargs, timeout, loop): | |
551 | with async_timeout(timeout, loop=loop): | |
549 | async def _wait_coro(corofunc, kwargs, timeout): | |
550 | with async_timeout(timeout): | |
552 | 551 | return (await corofunc(**kwargs)) |
553 | 552 | |
554 | 553 | |
555 | 554 | def pytest_runtest_setup(item): |
556 | run_loop = item.get_closest_marker('run_loop') | |
557 | if run_loop and 'loop' not in item.fixturenames: | |
555 | is_coro = inspect.iscoroutinefunction(item.obj) | |
556 | if is_coro and 'loop' not in item.fixturenames: | |
558 | 557 | # inject an event loop fixture for all async tests |
559 | 558 | item.fixturenames.append('loop') |
560 | 559 | |
584 | 583 | |
585 | 584 | def pytest_configure(config): |
586 | 585 | bins = config.getoption('--redis-server')[:] |
587 | REDIS_SERVERS[:] = bins or ['/usr/bin/redis-server'] | |
586 | cmd = 'which redis-server' | |
587 | if not bins: | |
588 | with os.popen(cmd) as pipe: | |
589 | path = pipe.read().rstrip() | |
590 | assert path, ( | |
591 | "There is no redis-server on your computer." | |
592 | " Please install it first") | |
593 | REDIS_SERVERS[:] = [path] | |
594 | else: | |
595 | REDIS_SERVERS[:] = bins | |
596 | ||
588 | 597 | VERSIONS.update({srv: _read_server_version(srv) |
589 | 598 | for srv in REDIS_SERVERS}) |
590 | 599 | assert VERSIONS, ("Expected to detect redis versions", REDIS_SERVERS) |
607 | 616 | raise RuntimeError( |
608 | 617 | "Can not import uvloop, make sure it is installed") |
609 | 618 | asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) |
610 | ||
611 | ||
612 | def logs(logger, level=None): | |
613 | """Catches logs for given logger and level. | |
614 | ||
615 | See unittest.TestCase.assertLogs for details. | |
616 | """ | |
617 | return _AssertLogsContext(logger, level) | |
618 | ||
619 | ||
620 | _LoggingWatcher = namedtuple("_LoggingWatcher", ["records", "output"]) | |
621 | ||
622 | ||
623 | class _CapturingHandler(logging.Handler): | |
624 | """ | |
625 | A logging handler capturing all (raw and formatted) logging output. | |
626 | """ | |
627 | ||
628 | def __init__(self): | |
629 | logging.Handler.__init__(self) | |
630 | self.watcher = _LoggingWatcher([], []) | |
631 | ||
632 | def flush(self): | |
633 | pass | |
634 | ||
635 | def emit(self, record): | |
636 | self.watcher.records.append(record) | |
637 | msg = self.format(record) | |
638 | self.watcher.output.append(msg) | |
639 | ||
640 | ||
641 | class _AssertLogsContext: | |
642 | """Standard unittest's _AssertLogsContext context manager | |
643 | adopted to raise pytest failure. | |
644 | """ | |
645 | LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s" | |
646 | ||
647 | def __init__(self, logger_name, level): | |
648 | self.logger_name = logger_name | |
649 | if level: | |
650 | self.level = level | |
651 | else: | |
652 | self.level = logging.INFO | |
653 | self.msg = None | |
654 | ||
655 | def __enter__(self): | |
656 | if isinstance(self.logger_name, logging.Logger): | |
657 | logger = self.logger = self.logger_name | |
658 | else: | |
659 | logger = self.logger = logging.getLogger(self.logger_name) | |
660 | formatter = logging.Formatter(self.LOGGING_FORMAT) | |
661 | handler = _CapturingHandler() | |
662 | handler.setFormatter(formatter) | |
663 | self.watcher = handler.watcher | |
664 | self.old_handlers = logger.handlers[:] | |
665 | self.old_level = logger.level | |
666 | self.old_propagate = logger.propagate | |
667 | logger.handlers = [handler] | |
668 | logger.setLevel(self.level) | |
669 | logger.propagate = False | |
670 | return handler.watcher | |
671 | ||
672 | def __exit__(self, exc_type, exc_value, tb): | |
673 | self.logger.handlers = self.old_handlers | |
674 | self.logger.propagate = self.old_propagate | |
675 | self.logger.setLevel(self.old_level) | |
676 | if exc_type is not None: | |
677 | # let unexpected exceptions pass through | |
678 | return False | |
679 | if len(self.watcher.records) == 0: | |
680 | pytest.fail( | |
681 | "no logs of level {} or higher triggered on {}" | |
682 | .format(logging.getLevelName(self.level), self.logger.name)) | |
683 | ||
684 | ||
685 | def redis_version(*version, reason): | |
686 | assert 1 < len(version) <= 3, version | |
687 | assert all(isinstance(v, int) for v in version), version | |
688 | return pytest.mark.redis_version(version=version, reason=reason) | |
689 | ||
690 | ||
691 | def assert_almost_equal(first, second, places=None, msg=None, delta=None): | |
692 | assert not (places is None and delta is None), \ | |
693 | "Both places and delta are not set, please set one" | |
694 | if delta is not None: | |
695 | assert abs(first - second) <= delta | |
696 | else: | |
697 | assert round(abs(first - second), places) == 0 | |
698 | ||
699 | ||
700 | def pytest_namespace(): | |
701 | return { | |
702 | 'assert_almost_equal': assert_almost_equal, | |
703 | 'redis_version': redis_version, | |
704 | 'logs': logs, | |
705 | } |
3 | 3 | from aioredis import ConnectionClosedError, ReplyError |
4 | 4 | from aioredis.pool import ConnectionsPool |
5 | 5 | from aioredis import Redis |
6 | from _testutils import redis_version | |
6 | 7 | |
7 | 8 | |
8 | @pytest.mark.run_loop | |
9 | async def test_repr(create_redis, loop, server): | |
10 | redis = await create_redis( | |
11 | server.tcp_address, db=1, loop=loop) | |
9 | async def test_repr(create_redis, server): | |
10 | redis = await create_redis(server.tcp_address, db=1) | |
12 | 11 | assert repr(redis) in { |
13 | 12 | '<Redis <RedisConnection [db:1]>>', |
14 | 13 | '<Redis <ConnectionsPool [db:1, size:[1:10], free:1]>>', |
15 | 14 | } |
16 | 15 | |
17 | redis = await create_redis( | |
18 | server.tcp_address, db=0, loop=loop) | |
16 | redis = await create_redis(server.tcp_address, db=0) | |
19 | 17 | assert repr(redis) in { |
20 | 18 | '<Redis <RedisConnection [db:0]>>', |
21 | 19 | '<Redis <ConnectionsPool [db:0, size:[1:10], free:1]>>', |
22 | 20 | } |
23 | 21 | |
24 | 22 | |
25 | @pytest.mark.run_loop | |
26 | 23 | async def test_auth(redis): |
27 | 24 | expected_message = "ERR Client sent AUTH, but no password is set" |
28 | 25 | with pytest.raises(ReplyError, match=expected_message): |
29 | 26 | await redis.auth('') |
30 | 27 | |
31 | 28 | |
32 | @pytest.mark.run_loop | |
33 | 29 | async def test_echo(redis): |
34 | 30 | resp = await redis.echo('ECHO') |
35 | 31 | assert resp == b'ECHO' |
38 | 34 | await redis.echo(None) |
39 | 35 | |
40 | 36 | |
41 | @pytest.mark.run_loop | |
42 | 37 | async def test_ping(redis): |
43 | 38 | assert await redis.ping() == b'PONG' |
44 | 39 | |
45 | 40 | |
46 | @pytest.mark.run_loop | |
47 | async def test_quit(redis, loop): | |
41 | async def test_quit(redis): | |
48 | 42 | expected = (ConnectionClosedError, ConnectionError) |
49 | 43 | try: |
50 | 44 | assert b'OK' == await redis.quit() |
61 | 55 | assert False, "Cancelled error must not be raised" |
62 | 56 | |
63 | 57 | # wait one loop iteration until it get surely closed |
64 | await asyncio.sleep(0, loop=loop) | |
58 | await asyncio.sleep(0) | |
65 | 59 | assert redis.connection.closed |
66 | 60 | |
67 | 61 | with pytest.raises(ConnectionClosedError): |
68 | 62 | await redis.ping() |
69 | 63 | |
70 | 64 | |
71 | @pytest.mark.run_loop | |
72 | 65 | async def test_select(redis): |
73 | 66 | assert redis.db == 0 |
74 | 67 | |
78 | 71 | assert redis.connection.db == 1 |
79 | 72 | |
80 | 73 | |
81 | @pytest.mark.run_loop | |
82 | async def test_encoding(create_redis, loop, server): | |
83 | redis = await create_redis( | |
84 | server.tcp_address, | |
85 | db=1, encoding='utf-8', | |
86 | loop=loop) | |
74 | async def test_encoding(create_redis, server): | |
75 | redis = await create_redis(server.tcp_address, db=1, encoding='utf-8') | |
87 | 76 | assert redis.encoding == 'utf-8' |
88 | 77 | |
89 | 78 | |
90 | @pytest.mark.run_loop | |
91 | async def test_yield_from_backwards_compatability(create_redis, server, loop): | |
92 | redis = await create_redis(server.tcp_address, loop=loop) | |
79 | async def test_yield_from_backwards_compatibility(create_redis, server): | |
80 | redis = await create_redis(server.tcp_address) | |
93 | 81 | |
94 | 82 | assert isinstance(redis, Redis) |
95 | 83 | # TODO: there should not be warning |
100 | 88 | assert await client.ping() |
101 | 89 | |
102 | 90 | |
103 | @pytest.redis_version(4, 0, 0, reason="SWAPDB is available since redis>=4.0.0") | |
104 | @pytest.mark.run_loop | |
105 | async def test_swapdb(create_redis, start_server, loop): | |
91 | @redis_version(4, 0, 0, reason="SWAPDB is available since redis>=4.0.0") | |
92 | async def test_swapdb(create_redis, start_server): | |
106 | 93 | server = start_server('swapdb_1') |
107 | cli1 = await create_redis(server.tcp_address, db=0, loop=loop) | |
108 | cli2 = await create_redis(server.tcp_address, db=1, loop=loop) | |
94 | cli1 = await create_redis(server.tcp_address, db=0) | |
95 | cli2 = await create_redis(server.tcp_address, db=1) | |
109 | 96 | |
110 | 97 | await cli1.flushall() |
111 | 98 | assert await cli1.set('key', 'val') is True |
13 | 13 | Channel, |
14 | 14 | MaxClientsError, |
15 | 15 | ) |
16 | ||
17 | ||
18 | @pytest.mark.run_loop | |
19 | async def test_connect_tcp(request, create_connection, loop, server): | |
20 | conn = await create_connection( | |
21 | server.tcp_address, loop=loop) | |
16 | from _testutils import redis_version | |
17 | ||
18 | ||
19 | async def test_connect_tcp(request, create_connection, server): | |
20 | conn = await create_connection(server.tcp_address) | |
22 | 21 | assert conn.db == 0 |
23 | 22 | assert isinstance(conn.address, tuple) |
24 | 23 | assert conn.address[0] in ('127.0.0.1', '::1') |
25 | 24 | assert conn.address[1] == server.tcp_address.port |
26 | 25 | assert str(conn) == '<RedisConnection [db:0]>' |
27 | 26 | |
28 | conn = await create_connection( | |
29 | ['localhost', server.tcp_address.port], loop=loop) | |
27 | conn = await create_connection(['localhost', server.tcp_address.port]) | |
30 | 28 | assert conn.db == 0 |
31 | 29 | assert isinstance(conn.address, tuple) |
32 | 30 | assert conn.address[0] in ('127.0.0.1', '::1') |
34 | 32 | assert str(conn) == '<RedisConnection [db:0]>' |
35 | 33 | |
36 | 34 | |
37 | @pytest.mark.run_loop | |
38 | 35 | async def test_connect_inject_connection_cls( |
39 | 36 | request, |
40 | 37 | create_connection, |
41 | loop, | |
42 | 38 | server): |
43 | 39 | |
44 | 40 | class MyConnection(RedisConnection): |
45 | 41 | pass |
46 | 42 | |
47 | 43 | conn = await create_connection( |
48 | server.tcp_address, loop=loop, connection_cls=MyConnection) | |
44 | server.tcp_address, connection_cls=MyConnection) | |
49 | 45 | |
50 | 46 | assert isinstance(conn, MyConnection) |
51 | 47 | |
52 | 48 | |
53 | @pytest.mark.run_loop | |
54 | 49 | async def test_connect_inject_connection_cls_invalid( |
55 | 50 | request, |
56 | 51 | create_connection, |
57 | loop, | |
58 | 52 | server): |
59 | 53 | |
60 | 54 | with pytest.raises(AssertionError): |
61 | 55 | await create_connection( |
62 | server.tcp_address, loop=loop, connection_cls=type) | |
63 | ||
64 | ||
65 | @pytest.mark.run_loop | |
66 | async def test_connect_tcp_timeout(request, create_connection, loop, server): | |
67 | with patch.object(loop, 'create_connection') as\ | |
68 | open_conn_mock: | |
69 | open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, | |
70 | loop=loop) | |
56 | server.tcp_address, connection_cls=type) | |
57 | ||
58 | ||
59 | async def test_connect_tcp_timeout(request, create_connection, server): | |
60 | with patch('aioredis.connection.open_connection') as open_conn_mock: | |
61 | open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2) | |
71 | 62 | with pytest.raises(asyncio.TimeoutError): |
72 | await create_connection( | |
73 | server.tcp_address, loop=loop, timeout=0.1) | |
74 | ||
75 | ||
76 | @pytest.mark.run_loop | |
63 | await create_connection(server.tcp_address, timeout=0.1) | |
64 | ||
65 | ||
77 | 66 | async def test_connect_tcp_invalid_timeout( |
78 | request, create_connection, loop, server): | |
67 | request, create_connection, server): | |
79 | 68 | with pytest.raises(ValueError): |
80 | 69 | await create_connection( |
81 | server.tcp_address, loop=loop, timeout=0) | |
82 | ||
83 | ||
84 | @pytest.mark.run_loop | |
70 | server.tcp_address, timeout=0) | |
71 | ||
72 | ||
85 | 73 | @pytest.mark.skipif(sys.platform == 'win32', |
86 | 74 | reason="No unixsocket on Windows") |
87 | async def test_connect_unixsocket(create_connection, loop, server): | |
88 | conn = await create_connection( | |
89 | server.unixsocket, db=0, loop=loop) | |
75 | async def test_connect_unixsocket(create_connection, server): | |
76 | conn = await create_connection(server.unixsocket, db=0) | |
90 | 77 | assert conn.db == 0 |
91 | 78 | assert conn.address == server.unixsocket |
92 | 79 | assert str(conn) == '<RedisConnection [db:0]>' |
93 | 80 | |
94 | 81 | |
95 | @pytest.mark.run_loop | |
96 | 82 | @pytest.mark.skipif(sys.platform == 'win32', |
97 | 83 | reason="No unixsocket on Windows") |
98 | async def test_connect_unixsocket_timeout(create_connection, loop, server): | |
99 | with patch.object(loop, 'create_unix_connection') as open_conn_mock: | |
100 | open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, | |
101 | loop=loop) | |
84 | async def test_connect_unixsocket_timeout(create_connection, server): | |
85 | with patch('aioredis.connection.open_unix_connection') as open_conn_mock: | |
86 | open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2) | |
102 | 87 | with pytest.raises(asyncio.TimeoutError): |
103 | await create_connection( | |
104 | server.unixsocket, db=0, loop=loop, timeout=0.1) | |
105 | ||
106 | ||
107 | @pytest.mark.run_loop | |
108 | @pytest.redis_version(2, 8, 0, reason="maxclients config setting") | |
109 | async def test_connect_maxclients(create_connection, loop, start_server): | |
88 | await create_connection(server.unixsocket, db=0, timeout=0.1) | |
89 | ||
90 | ||
91 | @redis_version(2, 8, 0, reason="maxclients config setting") | |
92 | async def test_connect_maxclients(create_connection, start_server): | |
110 | 93 | server = start_server('server-maxclients') |
111 | conn = await create_connection( | |
112 | server.tcp_address, loop=loop) | |
94 | conn = await create_connection(server.tcp_address) | |
113 | 95 | await conn.execute(b'CONFIG', b'SET', 'maxclients', 1) |
114 | 96 | |
115 | 97 | errors = (MaxClientsError, ConnectionClosedError, ConnectionError) |
116 | 98 | with pytest.raises(errors): |
117 | conn2 = await create_connection( | |
118 | server.tcp_address, loop=loop) | |
99 | conn2 = await create_connection(server.tcp_address) | |
119 | 100 | await conn2.execute('ping') |
120 | 101 | |
121 | 102 | |
122 | def test_global_loop(create_connection, loop, server): | |
123 | asyncio.set_event_loop(loop) | |
124 | ||
125 | conn = loop.run_until_complete(create_connection( | |
126 | server.tcp_address, db=0)) | |
103 | async def test_select_db(create_connection, server): | |
104 | address = server.tcp_address | |
105 | conn = await create_connection(address) | |
127 | 106 | assert conn.db == 0 |
128 | assert conn._loop is loop | |
129 | ||
130 | ||
131 | @pytest.mark.run_loop | |
132 | async def test_select_db(create_connection, loop, server): | |
133 | address = server.tcp_address | |
134 | conn = await create_connection(address, loop=loop) | |
135 | assert conn.db == 0 | |
136 | 107 | |
137 | 108 | with pytest.raises(ValueError): |
138 | await create_connection(address, db=-1, loop=loop) | |
139 | with pytest.raises(TypeError): | |
140 | await create_connection(address, db=1.0, loop=loop) | |
141 | with pytest.raises(TypeError): | |
142 | await create_connection( | |
143 | address, db='bad value', loop=loop) | |
144 | with pytest.raises(TypeError): | |
145 | conn = await create_connection( | |
146 | address, db=None, loop=loop) | |
109 | await create_connection(address, db=-1) | |
110 | with pytest.raises(TypeError): | |
111 | await create_connection(address, db=1.0) | |
112 | with pytest.raises(TypeError): | |
113 | await create_connection(address, db='bad value') | |
114 | with pytest.raises(TypeError): | |
115 | conn = await create_connection(address, db=None) | |
147 | 116 | await conn.select(None) |
148 | 117 | with pytest.raises(ReplyError): |
149 | await create_connection( | |
150 | address, db=100000, loop=loop) | |
118 | await create_connection(address, db=100000) | |
151 | 119 | |
152 | 120 | await conn.select(1) |
153 | 121 | assert conn.db == 1 |
159 | 127 | assert conn.db == 1 |
160 | 128 | |
161 | 129 | |
162 | @pytest.mark.run_loop | |
163 | async def test_protocol_error(create_connection, loop, server): | |
164 | conn = await create_connection( | |
165 | server.tcp_address, loop=loop) | |
130 | async def test_protocol_error(create_connection, server): | |
131 | conn = await create_connection(server.tcp_address) | |
166 | 132 | |
167 | 133 | reader = conn._reader |
168 | 134 | |
174 | 140 | |
175 | 141 | |
176 | 142 | def test_close_connection__tcp(create_connection, loop, server): |
177 | conn = loop.run_until_complete(create_connection( | |
178 | server.tcp_address, loop=loop)) | |
143 | conn = loop.run_until_complete(create_connection(server.tcp_address)) | |
179 | 144 | conn.close() |
180 | 145 | with pytest.raises(ConnectionClosedError): |
181 | 146 | loop.run_until_complete(conn.select(1)) |
182 | 147 | |
183 | conn = loop.run_until_complete(create_connection( | |
184 | server.tcp_address, loop=loop)) | |
148 | conn = loop.run_until_complete(create_connection(server.tcp_address)) | |
185 | 149 | conn.close() |
186 | 150 | fut = None |
187 | 151 | with pytest.raises(ConnectionClosedError): |
188 | 152 | fut = conn.select(1) |
189 | 153 | assert fut is None |
190 | 154 | |
191 | conn = loop.run_until_complete(create_connection( | |
192 | server.tcp_address, loop=loop)) | |
155 | conn = loop.run_until_complete(create_connection(server.tcp_address)) | |
193 | 156 | conn.close() |
194 | 157 | with pytest.raises(ConnectionClosedError): |
195 | 158 | conn.execute_pubsub('subscribe', 'channel:1') |
196 | 159 | |
197 | 160 | |
198 | @pytest.mark.run_loop | |
199 | 161 | @pytest.mark.skipif(sys.platform == 'win32', |
200 | 162 | reason="No unixsocket on Windows") |
201 | async def test_close_connection__socket(create_connection, loop, server): | |
202 | conn = await create_connection( | |
203 | server.unixsocket, loop=loop) | |
163 | async def test_close_connection__socket(create_connection, server): | |
164 | conn = await create_connection(server.unixsocket) | |
204 | 165 | conn.close() |
205 | 166 | with pytest.raises(ConnectionClosedError): |
206 | 167 | await conn.select(1) |
207 | 168 | |
208 | conn = await create_connection( | |
209 | server.unixsocket, loop=loop) | |
169 | conn = await create_connection(server.unixsocket) | |
210 | 170 | conn.close() |
211 | 171 | with pytest.raises(ConnectionClosedError): |
212 | 172 | await conn.execute_pubsub('subscribe', 'channel:1') |
213 | 173 | |
214 | 174 | |
215 | @pytest.mark.run_loop | |
216 | 175 | async def test_closed_connection_with_none_reader( |
217 | create_connection, loop, server): | |
176 | create_connection, server): | |
218 | 177 | address = server.tcp_address |
219 | conn = await create_connection(address, loop=loop) | |
178 | conn = await create_connection(address) | |
220 | 179 | stored_reader = conn._reader |
221 | 180 | conn._reader = None |
222 | 181 | with pytest.raises(ConnectionClosedError): |
224 | 183 | conn._reader = stored_reader |
225 | 184 | conn.close() |
226 | 185 | |
227 | conn = await create_connection(address, loop=loop) | |
186 | conn = await create_connection(address) | |
228 | 187 | stored_reader = conn._reader |
229 | 188 | conn._reader = None |
230 | 189 | with pytest.raises(ConnectionClosedError): |
233 | 192 | conn.close() |
234 | 193 | |
235 | 194 | |
236 | @pytest.mark.run_loop | |
237 | async def test_wait_closed(create_connection, loop, server): | |
195 | async def test_wait_closed(create_connection, server): | |
238 | 196 | address = server.tcp_address |
239 | conn = await create_connection(address, loop=loop) | |
197 | conn = await create_connection(address) | |
240 | 198 | reader_task = conn._reader_task |
241 | 199 | conn.close() |
242 | 200 | assert not reader_task.done() |
244 | 202 | assert reader_task.done() |
245 | 203 | |
246 | 204 | |
247 | @pytest.mark.run_loop | |
248 | 205 | async def test_cancel_wait_closed(create_connection, loop, server): |
249 | 206 | # Regression test: Don't throw error if wait_closed() is cancelled. |
250 | 207 | address = server.tcp_address |
251 | conn = await create_connection(address, loop=loop) | |
208 | conn = await create_connection(address) | |
252 | 209 | reader_task = conn._reader_task |
253 | 210 | conn.close() |
254 | task = asyncio.ensure_future(conn.wait_closed(), loop=loop) | |
211 | task = asyncio.ensure_future(conn.wait_closed()) | |
255 | 212 | |
256 | 213 | # Make sure the task is cancelled |
257 | 214 | # after it has been started by the loop. |
261 | 218 | assert reader_task.done() |
262 | 219 | |
263 | 220 | |
264 | @pytest.mark.run_loop | |
265 | async def test_auth(create_connection, loop, server): | |
266 | conn = await create_connection( | |
267 | server.tcp_address, loop=loop) | |
221 | async def test_auth(create_connection, server): | |
222 | conn = await create_connection(server.tcp_address) | |
268 | 223 | |
269 | 224 | res = await conn.execute('CONFIG', 'SET', 'requirepass', 'pass') |
270 | 225 | assert res == b'OK' |
271 | 226 | |
272 | conn2 = await create_connection( | |
273 | server.tcp_address, loop=loop) | |
227 | conn2 = await create_connection(server.tcp_address) | |
274 | 228 | |
275 | 229 | with pytest.raises(ReplyError): |
276 | 230 | await conn2.select(1) |
280 | 234 | res = await conn2.select(1) |
281 | 235 | assert res is True |
282 | 236 | |
283 | conn3 = await create_connection( | |
284 | server.tcp_address, password='pass', loop=loop) | |
237 | conn3 = await create_connection(server.tcp_address, password='pass') | |
285 | 238 | |
286 | 239 | res = await conn3.select(1) |
287 | 240 | assert res is True |
290 | 243 | assert res == b'OK' |
291 | 244 | |
292 | 245 | |
293 | @pytest.mark.run_loop | |
294 | async def test_decoding(create_connection, loop, server): | |
295 | conn = await create_connection( | |
296 | server.tcp_address, encoding='utf-8', loop=loop) | |
246 | async def test_decoding(create_connection, server): | |
247 | conn = await create_connection(server.tcp_address, encoding='utf-8') | |
297 | 248 | assert conn.encoding == 'utf-8' |
298 | 249 | res = await conn.execute('set', '{prefix}:key1', 'value') |
299 | 250 | assert res == 'OK' |
314 | 265 | await conn.execute('set', '{prefix}:key1', 'значение') |
315 | 266 | await conn.execute('get', '{prefix}:key1', encoding='ascii') |
316 | 267 | |
317 | conn2 = await create_connection( | |
318 | server.tcp_address, loop=loop) | |
268 | conn2 = await create_connection(server.tcp_address) | |
319 | 269 | res = await conn2.execute('get', '{prefix}:key1', encoding='utf-8') |
320 | 270 | assert res == 'значение' |
321 | 271 | |
322 | 272 | |
323 | @pytest.mark.run_loop | |
324 | async def test_execute_exceptions(create_connection, loop, server): | |
325 | conn = await create_connection( | |
326 | server.tcp_address, loop=loop) | |
273 | async def test_execute_exceptions(create_connection, server): | |
274 | conn = await create_connection(server.tcp_address) | |
327 | 275 | with pytest.raises(TypeError): |
328 | 276 | await conn.execute(None) |
329 | 277 | with pytest.raises(TypeError): |
333 | 281 | assert len(conn._waiters) == 0 |
334 | 282 | |
335 | 283 | |
336 | @pytest.mark.run_loop | |
337 | async def test_subscribe_unsubscribe(create_connection, loop, server): | |
338 | conn = await create_connection( | |
339 | server.tcp_address, loop=loop) | |
284 | async def test_subscribe_unsubscribe(create_connection, server): | |
285 | conn = await create_connection(server.tcp_address) | |
340 | 286 | |
341 | 287 | assert conn.in_pubsub == 0 |
342 | 288 | |
364 | 310 | assert conn.in_pubsub == 1 |
365 | 311 | |
366 | 312 | |
367 | @pytest.mark.run_loop | |
368 | async def test_psubscribe_punsubscribe(create_connection, loop, server): | |
369 | conn = await create_connection( | |
370 | server.tcp_address, loop=loop) | |
313 | async def test_psubscribe_punsubscribe(create_connection, server): | |
314 | conn = await create_connection(server.tcp_address) | |
371 | 315 | res = await conn.execute('psubscribe', 'chan:*') |
372 | 316 | assert res == [[b'psubscribe', b'chan:*', 1]] |
373 | 317 | assert conn.in_pubsub == 1 |
374 | 318 | |
375 | 319 | |
376 | @pytest.mark.run_loop | |
377 | async def test_bad_command_in_pubsub(create_connection, loop, server): | |
378 | conn = await create_connection( | |
379 | server.tcp_address, loop=loop) | |
320 | async def test_bad_command_in_pubsub(create_connection, server): | |
321 | conn = await create_connection(server.tcp_address) | |
380 | 322 | |
381 | 323 | res = await conn.execute('subscribe', 'chan:1') |
382 | 324 | assert res == [[b'subscribe', b'chan:1', 1]] |
388 | 330 | conn.execute('get') |
389 | 331 | |
390 | 332 | |
391 | @pytest.mark.run_loop | |
392 | async def test_pubsub_messages(create_connection, loop, server): | |
393 | sub = await create_connection( | |
394 | server.tcp_address, loop=loop) | |
395 | pub = await create_connection( | |
396 | server.tcp_address, loop=loop) | |
333 | async def test_pubsub_messages(create_connection, server): | |
334 | sub = await create_connection(server.tcp_address) | |
335 | pub = await create_connection(server.tcp_address) | |
397 | 336 | res = await sub.execute('subscribe', 'chan:1') |
398 | 337 | assert res == [[b'subscribe', b'chan:1', 1]] |
399 | 338 | |
425 | 364 | assert msg == b'Hello!' |
426 | 365 | |
427 | 366 | |
428 | @pytest.mark.run_loop | |
429 | async def test_multiple_subscribe_unsubscribe(create_connection, loop, server): | |
430 | sub = await create_connection(server.tcp_address, loop=loop) | |
367 | async def test_multiple_subscribe_unsubscribe(create_connection, server): | |
368 | sub = await create_connection(server.tcp_address) | |
431 | 369 | |
432 | 370 | res = await sub.execute_pubsub('subscribe', 'chan:1') |
433 | 371 | ch = sub.pubsub_channels['chan:1'] |
455 | 393 | assert res == [[b'punsubscribe', b'chan:*', 0]] |
456 | 394 | |
457 | 395 | |
458 | @pytest.mark.run_loop | |
459 | async def test_execute_pubsub_errors(create_connection, loop, server): | |
460 | sub = await create_connection( | |
461 | server.tcp_address, loop=loop) | |
396 | async def test_execute_pubsub_errors(create_connection, server): | |
397 | sub = await create_connection(server.tcp_address) | |
462 | 398 | |
463 | 399 | with pytest.raises(TypeError): |
464 | 400 | sub.execute_pubsub('subscribe', "chan:1", None) |
467 | 403 | with pytest.raises(ValueError): |
468 | 404 | sub.execute_pubsub( |
469 | 405 | 'subscribe', |
470 | Channel('chan:1', is_pattern=True, loop=loop)) | |
406 | Channel('chan:1', is_pattern=True)) | |
471 | 407 | with pytest.raises(ValueError): |
472 | 408 | sub.execute_pubsub( |
473 | 409 | 'unsubscribe', |
474 | Channel('chan:1', is_pattern=True, loop=loop)) | |
410 | Channel('chan:1', is_pattern=True)) | |
475 | 411 | with pytest.raises(ValueError): |
476 | 412 | sub.execute_pubsub( |
477 | 413 | 'psubscribe', |
478 | Channel('chan:1', is_pattern=False, loop=loop)) | |
414 | Channel('chan:1', is_pattern=False)) | |
479 | 415 | with pytest.raises(ValueError): |
480 | 416 | sub.execute_pubsub( |
481 | 417 | 'punsubscribe', |
482 | Channel('chan:1', is_pattern=False, loop=loop)) | |
483 | ||
484 | ||
485 | @pytest.mark.run_loop | |
486 | async def test_multi_exec(create_connection, loop, server): | |
487 | conn = await create_connection(server.tcp_address, loop=loop) | |
418 | Channel('chan:1', is_pattern=False)) | |
419 | ||
420 | ||
421 | async def test_multi_exec(create_connection, server): | |
422 | conn = await create_connection(server.tcp_address) | |
488 | 423 | |
489 | 424 | ok = await conn.execute('set', 'foo', 'bar') |
490 | 425 | assert ok == b'OK' |
504 | 439 | assert res == b'OK' |
505 | 440 | |
506 | 441 | |
507 | @pytest.mark.run_loop | |
508 | async def test_multi_exec__enc(create_connection, loop, server): | |
509 | conn = await create_connection( | |
510 | server.tcp_address, loop=loop, encoding='utf-8') | |
442 | async def test_multi_exec__enc(create_connection, server): | |
443 | conn = await create_connection(server.tcp_address, encoding='utf-8') | |
511 | 444 | |
512 | 445 | ok = await conn.execute('set', 'foo', 'bar') |
513 | 446 | assert ok == 'OK' |
527 | 460 | assert res == 'OK' |
528 | 461 | |
529 | 462 | |
530 | @pytest.mark.run_loop | |
531 | async def test_connection_parser_argument(create_connection, server, loop): | |
463 | async def test_connection_parser_argument(create_connection, server): | |
532 | 464 | klass = mock.MagicMock() |
533 | 465 | klass.return_value = reader = mock.Mock() |
534 | conn = await create_connection(server.tcp_address, | |
535 | parser=klass, loop=loop) | |
466 | conn = await create_connection(server.tcp_address, parser=klass) | |
536 | 467 | |
537 | 468 | assert klass.mock_calls == [ |
538 | 469 | mock.call(protocolError=ProtocolError, replyError=ReplyError), |
548 | 479 | assert b'+PONG\r\n' == await conn.execute('ping') |
549 | 480 | |
550 | 481 | |
551 | @pytest.mark.run_loop | |
552 | async def test_connection_idle_close(create_connection, start_server, loop): | |
482 | async def test_connection_idle_close(create_connection, start_server): | |
553 | 483 | server = start_server('idle') |
554 | conn = await create_connection(server.tcp_address, loop=loop) | |
484 | conn = await create_connection(server.tcp_address) | |
555 | 485 | ok = await conn.execute("config", "set", "timeout", 1) |
556 | 486 | assert ok == b'OK' |
557 | 487 | |
558 | await asyncio.sleep(3, loop=loop) | |
488 | await asyncio.sleep(3) | |
559 | 489 | |
560 | 490 | with pytest.raises(ConnectionClosedError): |
561 | 491 | assert await conn.execute('ping') is None |
566 | 496 | {'db': 1}, |
567 | 497 | {'encoding': 'utf-8'}, |
568 | 498 | ], ids=repr) |
569 | @pytest.mark.run_loop | |
570 | 499 | async def test_create_connection__tcp_url( |
571 | create_connection, server_tcp_url, loop, kwargs): | |
500 | create_connection, server_tcp_url, kwargs): | |
572 | 501 | url = server_tcp_url(**kwargs) |
573 | 502 | db = kwargs.get('db', 0) |
574 | 503 | enc = kwargs.get('encoding', None) |
575 | conn = await create_connection(url, loop=loop) | |
504 | conn = await create_connection(url) | |
576 | 505 | pong = b'PONG' if not enc else b'PONG'.decode(enc) |
577 | 506 | assert await conn.execute('ping') == pong |
578 | 507 | assert conn.db == db |
586 | 515 | {'db': 1}, |
587 | 516 | {'encoding': 'utf-8'}, |
588 | 517 | ], ids=repr) |
589 | @pytest.mark.run_loop | |
590 | 518 | async def test_create_connection__unix_url( |
591 | create_connection, server_unix_url, loop, kwargs): | |
519 | create_connection, server_unix_url, kwargs): | |
592 | 520 | url = server_unix_url(**kwargs) |
593 | 521 | db = kwargs.get('db', 0) |
594 | 522 | enc = kwargs.get('encoding', None) |
595 | conn = await create_connection(url, loop=loop) | |
523 | conn = await create_connection(url) | |
596 | 524 | pong = b'PONG' if not enc else b'PONG'.decode(enc) |
597 | 525 | assert await conn.execute('ping') == pong |
598 | 526 | assert conn.db == db |
6 | 6 | from unittest import mock |
7 | 7 | |
8 | 8 | from aioredis import ReplyError |
9 | from _testutils import redis_version | |
9 | 10 | |
10 | 11 | |
11 | 12 | async def add(redis, key, value): |
13 | 14 | assert ok == b'OK' |
14 | 15 | |
15 | 16 | |
16 | @pytest.mark.run_loop | |
17 | 17 | async def test_delete(redis): |
18 | 18 | await add(redis, 'my-key', 123) |
19 | 19 | await add(redis, 'other-key', 123) |
31 | 31 | await redis.delete('my-key', 'my-key', None) |
32 | 32 | |
33 | 33 | |
34 | @pytest.mark.run_loop | |
35 | 34 | async def test_dump(redis): |
36 | 35 | await add(redis, 'my-key', 123) |
37 | 36 | |
47 | 46 | await redis.dump(None) |
48 | 47 | |
49 | 48 | |
50 | @pytest.mark.run_loop | |
51 | 49 | async def test_exists(redis, server): |
52 | 50 | await add(redis, 'my-key', 123) |
53 | 51 | |
66 | 64 | await redis.exists('key-1', 'key-2') |
67 | 65 | |
68 | 66 | |
69 | @pytest.redis_version( | |
67 | @redis_version( | |
70 | 68 | 3, 0, 3, reason='Multi-key EXISTS available since redis>=2.8.0') |
71 | @pytest.mark.run_loop | |
72 | 69 | async def test_exists_multiple(redis): |
73 | 70 | await add(redis, 'my-key', 123) |
74 | 71 | |
85 | 82 | assert res == 0 |
86 | 83 | |
87 | 84 | |
88 | @pytest.mark.run_loop | |
89 | 85 | async def test_expire(redis): |
90 | 86 | await add(redis, 'my-key', 132) |
91 | 87 | |
114 | 110 | await redis.expire('my-key', 'timeout') |
115 | 111 | |
116 | 112 | |
117 | @pytest.mark.run_loop | |
118 | 113 | async def test_expireat(redis): |
119 | 114 | await add(redis, 'my-key', 123) |
120 | 115 | now = math.ceil(time.time()) |
151 | 146 | await redis.expireat('my-key', 'timestamp') |
152 | 147 | |
153 | 148 | |
154 | @pytest.mark.run_loop | |
155 | 149 | async def test_keys(redis): |
156 | 150 | res = await redis.keys('*pattern*') |
157 | 151 | assert res == [] |
176 | 170 | await redis.keys(None) |
177 | 171 | |
178 | 172 | |
179 | @pytest.mark.run_loop | |
180 | async def test_migrate(create_redis, loop, server, serverB): | |
173 | async def test_migrate(create_redis, server, serverB): | |
181 | 174 | redisA = await create_redis(server.tcp_address) |
182 | 175 | redisB = await create_redis(serverB.tcp_address, db=2) |
183 | 176 | |
209 | 202 | await redisA.migrate('host', 6379, 'key', 1, -1000) |
210 | 203 | |
211 | 204 | |
212 | @pytest.redis_version( | |
205 | @redis_version( | |
213 | 206 | 3, 0, 0, reason="Copy/Replace flags available since Redis 3.0") |
214 | @pytest.mark.run_loop | |
215 | async def test_migrate_copy_replace(create_redis, loop, server, serverB): | |
207 | async def test_migrate_copy_replace(create_redis, server, serverB): | |
216 | 208 | redisA = await create_redis(server.tcp_address) |
217 | 209 | redisB = await create_redis(serverB.tcp_address, db=0) |
218 | 210 | |
232 | 224 | assert (await redisB.get('my-key')) |
233 | 225 | |
234 | 226 | |
235 | @pytest.redis_version( | |
227 | @redis_version( | |
236 | 228 | 3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6") |
237 | 229 | @pytest.mark.skipif( |
238 | 230 | sys.platform == 'win32', reason="Seems to be unavailable in win32 build") |
239 | @pytest.mark.run_loop | |
240 | async def test_migrate_keys(create_redis, loop, server, serverB): | |
231 | async def test_migrate_keys(create_redis, server, serverB): | |
241 | 232 | redisA = await create_redis(server.tcp_address) |
242 | 233 | redisB = await create_redis(serverB.tcp_address, db=0) |
243 | 234 | |
292 | 283 | assert (await redisA.get('key3')) is None |
293 | 284 | |
294 | 285 | |
295 | @pytest.mark.run_loop | |
296 | async def test_migrate__exceptions(redis, loop, server, unused_port): | |
286 | async def test_migrate__exceptions(redis, server, unused_port): | |
297 | 287 | await add(redis, 'my-key', 123) |
298 | 288 | |
299 | 289 | assert (await redis.exists('my-key')) |
304 | 294 | 'my-key', dest_db=30, timeout=10)) |
305 | 295 | |
306 | 296 | |
307 | @pytest.redis_version( | |
297 | @redis_version( | |
308 | 298 | 3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6") |
309 | 299 | @pytest.mark.skipif( |
310 | 300 | sys.platform == 'win32', reason="Seems to be unavailable in win32 build") |
311 | @pytest.mark.run_loop | |
312 | 301 | async def test_migrate_keys__errors(redis): |
313 | 302 | with pytest.raises(TypeError, match="host .* str"): |
314 | 303 | await redis.migrate_keys(None, 1234, 'key', 1, 23) |
328 | 317 | await redis.migrate_keys('host', '1234', (), 2, 123) |
329 | 318 | |
330 | 319 | |
331 | @pytest.mark.run_loop | |
332 | 320 | async def test_move(redis): |
333 | 321 | await add(redis, 'my-key', 123) |
334 | 322 | |
346 | 334 | await redis.move('my-key', 'not db') |
347 | 335 | |
348 | 336 | |
349 | @pytest.mark.run_loop | |
350 | 337 | async def test_object_refcount(redis): |
351 | 338 | await add(redis, 'foo', 'bar') |
352 | 339 | |
359 | 346 | await redis.object_refcount(None) |
360 | 347 | |
361 | 348 | |
362 | @pytest.mark.run_loop | |
363 | 349 | async def test_object_encoding(redis, server): |
364 | 350 | await add(redis, 'foo', 'bar') |
365 | 351 | |
366 | 352 | res = await redis.object_encoding('foo') |
367 | 353 | |
368 | 354 | if server.version < (3, 0, 0): |
369 | assert res == b'raw' | |
355 | assert res == 'raw' | |
370 | 356 | else: |
371 | assert res == b'embstr' | |
357 | assert res == 'embstr' | |
372 | 358 | |
373 | 359 | res = await redis.incr('key') |
374 | 360 | assert res == 1 |
375 | 361 | res = await redis.object_encoding('key') |
376 | assert res == b'int' | |
362 | assert res == 'int' | |
377 | 363 | res = await redis.object_encoding('non-existent-key') |
378 | 364 | assert res is None |
379 | 365 | |
381 | 367 | await redis.object_encoding(None) |
382 | 368 | |
383 | 369 | |
384 | @pytest.mark.run_loop(timeout=20) | |
385 | async def test_object_idletime(redis, loop, server): | |
370 | @redis_version( | |
371 | 3, 0, 0, reason="Older Redis version has lower idle time resolution") | |
372 | @pytest.mark.timeout(20) | |
373 | async def test_object_idletime(redis, server): | |
386 | 374 | await add(redis, 'foo', 'bar') |
387 | 375 | |
388 | 376 | res = await redis.object_idletime('foo') |
392 | 380 | res = 0 |
393 | 381 | while not res: |
394 | 382 | res = await redis.object_idletime('foo') |
395 | await asyncio.sleep(.5, loop=loop) | |
383 | await asyncio.sleep(.5) | |
396 | 384 | assert res >= 1 |
397 | 385 | |
398 | 386 | res = await redis.object_idletime('non-existent-key') |
402 | 390 | await redis.object_idletime(None) |
403 | 391 | |
404 | 392 | |
405 | @pytest.mark.run_loop | |
406 | 393 | async def test_persist(redis): |
407 | 394 | await add(redis, 'my-key', 123) |
408 | 395 | res = await redis.expire('my-key', 10) |
418 | 405 | await redis.persist(None) |
419 | 406 | |
420 | 407 | |
421 | @pytest.mark.run_loop | |
422 | async def test_pexpire(redis, loop): | |
408 | async def test_pexpire(redis): | |
423 | 409 | await add(redis, 'my-key', 123) |
424 | 410 | res = await redis.pexpire('my-key', 100) |
425 | 411 | assert res is True |
434 | 420 | assert res is True |
435 | 421 | |
436 | 422 | # XXX: tests now looks strange to me. |
437 | await asyncio.sleep(.2, loop=loop) | |
423 | await asyncio.sleep(.2) | |
438 | 424 | |
439 | 425 | res = await redis.exists('my-key') |
440 | 426 | assert not res |
445 | 431 | await redis.pexpire('my-key', 1.0) |
446 | 432 | |
447 | 433 | |
448 | @pytest.mark.run_loop | |
449 | 434 | async def test_pexpireat(redis): |
450 | 435 | await add(redis, 'my-key', 123) |
451 | now = math.ceil((await redis.time()) * 1000) | |
436 | now = int((await redis.time()) * 1000) | |
452 | 437 | fut1 = redis.pexpireat('my-key', now + 2000) |
453 | 438 | fut2 = redis.ttl('my-key') |
454 | 439 | fut3 = redis.pttl('my-key') |
455 | assert (await fut1) is True | |
456 | assert (await fut2) == 2 | |
457 | pytest.assert_almost_equal((await fut3), 2000, -3) | |
440 | assert await fut1 is True | |
441 | assert await fut2 == 2 | |
442 | assert 1000 < await fut3 <= 2000 | |
458 | 443 | |
459 | 444 | with pytest.raises(TypeError): |
460 | 445 | await redis.pexpireat(None, 1234) |
464 | 449 | await redis.pexpireat('key', 1000.0) |
465 | 450 | |
466 | 451 | |
467 | @pytest.mark.run_loop | |
468 | 452 | async def test_pttl(redis, server): |
469 | 453 | await add(redis, 'key', 'val') |
470 | 454 | res = await redis.pttl('key') |
477 | 461 | |
478 | 462 | await redis.pexpire('key', 500) |
479 | 463 | res = await redis.pttl('key') |
480 | pytest.assert_almost_equal(res, 500, -2) | |
464 | assert 400 < res <= 500 | |
481 | 465 | |
482 | 466 | with pytest.raises(TypeError): |
483 | 467 | await redis.pttl(None) |
484 | 468 | |
485 | 469 | |
486 | @pytest.mark.run_loop | |
487 | 470 | async def test_randomkey(redis): |
488 | 471 | await add(redis, 'key:1', 123) |
489 | 472 | await add(redis, 'key:2', 123) |
501 | 484 | assert res is None |
502 | 485 | |
503 | 486 | |
504 | @pytest.mark.run_loop | |
505 | 487 | async def test_rename(redis, server): |
506 | 488 | await add(redis, 'foo', 'bar') |
507 | 489 | await redis.delete('bar') |
523 | 505 | await redis.rename('bar', b'bar') |
524 | 506 | |
525 | 507 | |
526 | @pytest.mark.run_loop | |
527 | 508 | async def test_renamenx(redis, server): |
528 | 509 | await redis.delete('foo', 'bar') |
529 | 510 | await add(redis, 'foo', 123) |
549 | 530 | await redis.renamenx('foo', b'foo') |
550 | 531 | |
551 | 532 | |
552 | @pytest.mark.run_loop | |
553 | 533 | async def test_restore(redis): |
554 | 534 | ok = await redis.set('key', 'value') |
555 | 535 | assert ok |
561 | 541 | assert (await redis.get('key')) == b'value' |
562 | 542 | |
563 | 543 | |
564 | @pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') | |
565 | @pytest.mark.run_loop | |
544 | @redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') | |
566 | 545 | async def test_scan(redis): |
567 | 546 | for i in range(1, 11): |
568 | 547 | foo_or_bar = 'bar' if i % 3 else 'foo' |
602 | 581 | assert len(test_values) == 10 |
603 | 582 | |
604 | 583 | |
605 | @pytest.mark.run_loop | |
606 | 584 | async def test_sort(redis): |
607 | 585 | async def _make_list(key, items): |
608 | 586 | await redis.delete(key) |
659 | 637 | assert res == [b'10', b'30', b'20'] |
660 | 638 | |
661 | 639 | |
662 | @pytest.redis_version(3, 2, 1, reason="TOUCH is available since redis>=3.2.1") | |
663 | @pytest.mark.run_loop(timeout=20) | |
664 | async def test_touch(redis, loop): | |
640 | @redis_version(3, 2, 1, reason="TOUCH is available since redis>=3.2.1") | |
641 | @pytest.mark.timeout(20) | |
642 | async def test_touch(redis): | |
665 | 643 | await add(redis, 'key', 'val') |
666 | 644 | res = 0 |
667 | 645 | while not res: |
668 | 646 | res = await redis.object_idletime('key') |
669 | await asyncio.sleep(.5, loop=loop) | |
647 | await asyncio.sleep(.5) | |
670 | 648 | assert res > 0 |
671 | 649 | assert await redis.touch('key', 'key', 'key') == 3 |
672 | 650 | res2 = await redis.object_idletime('key') |
673 | 651 | assert 0 <= res2 < res |
674 | 652 | |
675 | 653 | |
676 | @pytest.mark.run_loop | |
677 | 654 | async def test_ttl(redis, server): |
678 | 655 | await add(redis, 'key', 'val') |
679 | 656 | res = await redis.ttl('key') |
692 | 669 | await redis.ttl(None) |
693 | 670 | |
694 | 671 | |
695 | @pytest.mark.run_loop | |
696 | 672 | async def test_type(redis): |
697 | 673 | await add(redis, 'key', 'val') |
698 | 674 | res = await redis.type('key') |
715 | 691 | await redis.type(None) |
716 | 692 | |
717 | 693 | |
718 | @pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') | |
719 | @pytest.mark.run_loop | |
694 | @redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') | |
720 | 695 | async def test_iscan(redis): |
721 | 696 | full = set() |
722 | 697 | foo = set() |
760 | 735 | assert set(ret) == full |
761 | 736 | |
762 | 737 | |
763 | @pytest.redis_version(4, 0, 0, reason="UNLINK is available since redis>=4.0.0") | |
764 | @pytest.mark.run_loop | |
738 | @redis_version(4, 0, 0, reason="UNLINK is available since redis>=4.0.0") | |
765 | 739 | async def test_unlink(redis): |
766 | 740 | await add(redis, 'my-key', 123) |
767 | 741 | await add(redis, 'other-key', 123) |
779 | 753 | await redis.unlink('my-key', 'my-key', None) |
780 | 754 | |
781 | 755 | |
782 | @pytest.redis_version(3, 0, 0, reason="WAIT is available since redis>=3.0.0") | |
783 | @pytest.mark.run_loop | |
784 | async def test_wait(redis, loop): | |
756 | @redis_version(3, 0, 0, reason="WAIT is available since redis>=3.0.0") | |
757 | async def test_wait(redis): | |
785 | 758 | await add(redis, 'key', 'val1') |
786 | 759 | start = await redis.time() |
787 | 760 | res = await redis.wait(1, 400) |
0 | 0 | import pytest |
1 | 1 | |
2 | 2 | from aioredis import GeoPoint, GeoMember |
3 | ||
4 | ||
5 | @pytest.mark.run_loop | |
6 | @pytest.redis_version( | |
3 | from _testutils import redis_version | |
4 | ||
5 | ||
6 | @redis_version( | |
7 | 7 | 3, 2, 0, reason='GEOADD is available since redis >= 3.2.0') |
8 | 8 | async def test_geoadd(redis): |
9 | 9 | res = await redis.geoadd('geodata', 13.361389, 38.115556, 'Palermo') |
17 | 17 | assert res == 2 |
18 | 18 | |
19 | 19 | |
20 | @pytest.mark.run_loop | |
21 | @pytest.redis_version( | |
20 | @redis_version( | |
22 | 21 | 3, 2, 0, reason='GEODIST is available since redis >= 3.2.0') |
23 | 22 | async def test_geodist(redis): |
24 | 23 | res = await redis.geoadd( |
35 | 34 | assert res == 166.2742 |
36 | 35 | |
37 | 36 | |
38 | @pytest.mark.run_loop | |
39 | @pytest.redis_version( | |
37 | @redis_version( | |
40 | 38 | 3, 2, 0, reason='GEOHASH is available since redis >= 3.2.0') |
41 | 39 | async def test_geohash(redis): |
42 | 40 | res = await redis.geoadd( |
57 | 55 | assert res == ['sqc8b49rny0', 'sqdtr74hyu0'] |
58 | 56 | |
59 | 57 | |
60 | @pytest.mark.run_loop | |
61 | @pytest.redis_version( | |
58 | @redis_version( | |
62 | 59 | 3, 2, 0, reason='GEOPOS is available since redis >= 3.2.0') |
63 | 60 | async def test_geopos(redis): |
64 | 61 | res = await redis.geoadd( |
80 | 77 | ] |
81 | 78 | |
82 | 79 | |
83 | @pytest.mark.run_loop | |
84 | @pytest.redis_version( | |
80 | @redis_version( | |
85 | 81 | 3, 2, 0, reason='GEO* is available since redis >= 3.2.0') |
86 | 82 | async def test_geo_not_exist_members(redis): |
87 | 83 | res = await redis.geoadd('geodata', 13.361389, 38.115556, 'Palermo') |
115 | 111 | ] |
116 | 112 | |
117 | 113 | |
118 | @pytest.mark.run_loop | |
119 | @pytest.redis_version( | |
114 | @redis_version( | |
120 | 115 | 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') |
121 | 116 | async def test_georadius_validation(redis): |
122 | 117 | res = await redis.geoadd( |
143 | 138 | ) |
144 | 139 | |
145 | 140 | |
146 | @pytest.mark.run_loop | |
147 | @pytest.redis_version( | |
141 | @redis_version( | |
148 | 142 | 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') |
149 | 143 | async def test_georadius(redis): |
150 | 144 | res = await redis.geoadd( |
262 | 256 | ] |
263 | 257 | |
264 | 258 | |
265 | @pytest.mark.run_loop | |
266 | @pytest.redis_version( | |
259 | @redis_version( | |
267 | 260 | 3, 2, 0, reason='GEORADIUSBYMEMBER is available since redis >= 3.2.0') |
268 | 261 | async def test_georadiusbymember(redis): |
269 | 262 | res = await redis.geoadd( |
316 | 309 | ] |
317 | 310 | |
318 | 311 | |
319 | @pytest.mark.run_loop | |
320 | @pytest.redis_version( | |
312 | @redis_version( | |
321 | 313 | 3, 2, 0, reason='GEOHASH is available since redis >= 3.2.0') |
322 | 314 | async def test_geohash_binary(redis): |
323 | 315 | res = await redis.geoadd( |
338 | 330 | assert res == [b'sqc8b49rny0', b'sqdtr74hyu0'] |
339 | 331 | |
340 | 332 | |
341 | @pytest.mark.run_loop | |
342 | @pytest.redis_version( | |
333 | @redis_version( | |
343 | 334 | 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') |
344 | 335 | async def test_georadius_binary(redis): |
345 | 336 | res = await redis.geoadd( |
457 | 448 | ] |
458 | 449 | |
459 | 450 | |
460 | @pytest.mark.run_loop | |
461 | @pytest.redis_version( | |
451 | @redis_version( | |
462 | 452 | 3, 2, 0, reason='GEORADIUSBYMEMBER is available since redis >= 3.2.0') |
463 | 453 | async def test_georadiusbymember_binary(redis): |
464 | 454 | res = await redis.geoadd( |
0 | 0 | import pytest |
1 | 1 | |
2 | 2 | from aioredis import ReplyError |
3 | from _testutils import redis_version | |
3 | 4 | |
4 | 5 | |
5 | 6 | async def add(redis, key, field, value): |
8 | 9 | assert ok == 1 |
9 | 10 | |
10 | 11 | |
11 | @pytest.mark.run_loop | |
12 | 12 | async def test_hdel(redis): |
13 | 13 | key, field, value = b'key:hdel', b'bar', b'zap' |
14 | 14 | await add(redis, key, field, value) |
23 | 23 | await redis.hdel(None, field) |
24 | 24 | |
25 | 25 | |
26 | @pytest.mark.run_loop | |
27 | 26 | async def test_hexists(redis): |
28 | 27 | key, field, value = b'key:hexists', b'bar', b'zap' |
29 | 28 | await add(redis, key, field, value) |
41 | 40 | await redis.hexists(None, field) |
42 | 41 | |
43 | 42 | |
44 | @pytest.mark.run_loop | |
45 | 43 | async def test_hget(redis): |
46 | 44 | |
47 | 45 | key, field, value = b'key:hget', b'bar', b'zap' |
64 | 62 | await redis.hget(None, field) |
65 | 63 | |
66 | 64 | |
67 | @pytest.mark.run_loop | |
68 | 65 | async def test_hgetall(redis): |
69 | 66 | await add(redis, 'key:hgetall', 'foo', 'baz') |
70 | 67 | await add(redis, 'key:hgetall', 'bar', 'zap') |
85 | 82 | await redis.hgetall(None) |
86 | 83 | |
87 | 84 | |
88 | @pytest.mark.run_loop | |
89 | 85 | async def test_hincrby(redis): |
90 | 86 | key, field, value = b'key:hincrby', b'bar', 1 |
91 | 87 | await add(redis, key, field, value) |
120 | 116 | await redis.hincrby(None, field, 2) |
121 | 117 | |
122 | 118 | |
123 | @pytest.mark.run_loop | |
124 | 119 | async def test_hincrbyfloat(redis): |
125 | 120 | key, field, value = b'key:hincrbyfloat', b'bar', 2.71 |
126 | 121 | await add(redis, key, field, value) |
145 | 140 | await redis.hincrbyfloat(None, field, 2) |
146 | 141 | |
147 | 142 | |
148 | @pytest.mark.run_loop | |
149 | 143 | async def test_hkeys(redis): |
150 | 144 | key = b'key:hkeys' |
151 | 145 | field1, field2 = b'foo', b'bar' |
166 | 160 | await redis.hkeys(None) |
167 | 161 | |
168 | 162 | |
169 | @pytest.mark.run_loop | |
170 | 163 | async def test_hlen(redis): |
171 | 164 | key = b'key:hlen' |
172 | 165 | field1, field2 = b'foo', b'bar' |
184 | 177 | await redis.hlen(None) |
185 | 178 | |
186 | 179 | |
187 | @pytest.mark.run_loop | |
188 | 180 | async def test_hmget(redis): |
189 | 181 | key = b'key:hmget' |
190 | 182 | field1, field2 = b'foo', b'bar' |
209 | 201 | await redis.hmget(None, field1, field2) |
210 | 202 | |
211 | 203 | |
212 | @pytest.mark.run_loop | |
213 | 204 | async def test_hmset(redis): |
214 | 205 | key, field, value = b'key:hmset', b'bar', b'zap' |
215 | 206 | await add(redis, key, field, value) |
247 | 238 | await redis.hmset(key) |
248 | 239 | |
249 | 240 | |
250 | @pytest.mark.run_loop | |
251 | 241 | async def test_hmset_dict(redis): |
252 | 242 | key = 'key:hmset' |
253 | 243 | |
299 | 289 | await redis.hmset_dict(key, {'a': 1}, {'b': 2}, 'c', 3, d=4) |
300 | 290 | |
301 | 291 | |
302 | @pytest.mark.run_loop | |
303 | 292 | async def test_hset(redis): |
304 | 293 | key, field, value = b'key:hset', b'bar', b'zap' |
305 | 294 | test_value = await redis.hset(key, field, value) |
318 | 307 | await redis.hset(None, field, value) |
319 | 308 | |
320 | 309 | |
321 | @pytest.mark.run_loop | |
322 | 310 | async def test_hsetnx(redis): |
323 | 311 | key, field, value = b'key:hsetnx', b'bar', b'zap' |
324 | 312 | # field does not exists, operation should be successful |
338 | 326 | await redis.hsetnx(None, field, value) |
339 | 327 | |
340 | 328 | |
341 | @pytest.mark.run_loop | |
342 | 329 | async def test_hvals(redis): |
343 | 330 | key = b'key:hvals' |
344 | 331 | field1, field2 = b'foo', b'bar' |
358 | 345 | await redis.hvals(None) |
359 | 346 | |
360 | 347 | |
361 | @pytest.redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') | |
362 | @pytest.mark.run_loop | |
348 | @redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') | |
363 | 349 | async def test_hscan(redis): |
364 | 350 | key = b'key:hscan' |
365 | 351 | # setup initial values 3 "field:foo:*" items and 7 "field:bar:*" items |
403 | 389 | await redis.hscan(None) |
404 | 390 | |
405 | 391 | |
406 | @pytest.mark.run_loop | |
407 | async def test_hgetall_enc(create_redis, loop, server): | |
408 | redis = await create_redis( | |
409 | server.tcp_address, loop=loop, encoding='utf-8') | |
392 | async def test_hgetall_enc(create_redis, server): | |
393 | redis = await create_redis(server.tcp_address, encoding='utf-8') | |
410 | 394 | TEST_KEY = 'my-key-nx' |
411 | 395 | await redis.hmset(TEST_KEY, 'foo', 'bar', 'baz', 'bad') |
412 | 396 | |
416 | 400 | assert res == [{'foo': 'bar', 'baz': 'bad'}] |
417 | 401 | |
418 | 402 | |
419 | @pytest.mark.run_loop | |
420 | @pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") | |
403 | @redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") | |
421 | 404 | async def test_hstrlen(redis): |
422 | 405 | ok = await redis.hset('myhash', 'str_field', 'some value') |
423 | 406 | assert ok == 1 |
441 | 424 | assert l == 0 |
442 | 425 | |
443 | 426 | |
444 | @pytest.redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') | |
445 | @pytest.mark.run_loop | |
427 | @redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') | |
446 | 428 | async def test_ihscan(redis): |
447 | 429 | key = b'key:hscan' |
448 | 430 | # setup initial values 3 "field:foo:*" items and 7 "field:bar:*" items |
0 | 0 | import pytest |
1 | 1 | |
2 | from _testutils import redis_version | |
2 | 3 | |
3 | pytestmark = pytest.redis_version( | |
4 | pytestmark = redis_version( | |
4 | 5 | 2, 8, 9, reason='HyperLogLog works only with redis>=2.8.9') |
5 | 6 | |
6 | 7 | |
7 | @pytest.mark.run_loop | |
8 | 8 | async def test_pfcount(redis): |
9 | 9 | key = 'hll_pfcount' |
10 | 10 | other_key = 'some-other-hll' |
41 | 41 | await redis.pfcount(key, key, None) |
42 | 42 | |
43 | 43 | |
44 | @pytest.mark.run_loop | |
45 | 44 | async def test_pfadd(redis): |
46 | 45 | key = 'hll_pfadd' |
47 | 46 | values = ['a', 's', 'y', 'n', 'c', 'i', 'o'] |
53 | 52 | assert is_changed == 0 |
54 | 53 | |
55 | 54 | |
56 | @pytest.mark.run_loop | |
57 | 55 | async def test_pfadd_wrong_input(redis): |
58 | 56 | with pytest.raises(TypeError): |
59 | 57 | await redis.pfadd(None, 'value') |
60 | 58 | |
61 | 59 | |
62 | @pytest.mark.run_loop | |
63 | 60 | async def test_pfmerge(redis): |
64 | 61 | key = 'hll_asyncio' |
65 | 62 | key_other = 'hll_aioredis' |
95 | 92 | await redis.pfmerge(key_dest, key, None) |
96 | 93 | |
97 | 94 | |
98 | @pytest.mark.run_loop | |
99 | 95 | async def test_pfmerge_wrong_input(redis): |
100 | 96 | with pytest.raises(TypeError): |
101 | 97 | await redis.pfmerge(None, 'value') |
4 | 4 | |
5 | 5 | |
6 | 6 | @pytest.fixture |
7 | def pool_or_redis(_closable, server, loop): | |
7 | def pool_or_redis(_closable, server): | |
8 | 8 | version = tuple(map(int, aioredis.__version__.split('.')[:2])) |
9 | 9 | if version >= (1, 0): |
10 | 10 | factory = aioredis.create_redis_pool |
12 | 12 | factory = aioredis.create_pool |
13 | 13 | |
14 | 14 | async def redis_factory(maxsize): |
15 | redis = await factory(server.tcp_address, loop=loop, | |
15 | redis = await factory(server.tcp_address, | |
16 | 16 | minsize=1, maxsize=maxsize) |
17 | 17 | _closable(redis) |
18 | 18 | return redis |
19 | 19 | return redis_factory |
20 | 20 | |
21 | 21 | |
22 | async def simple_get_set(pool, idx, loop): | |
22 | async def simple_get_set(pool, idx): | |
23 | 23 | """A simple test to make sure Redis(pool) can be used as old Pool(Redis). |
24 | 24 | """ |
25 | 25 | val = 'val:{}'.format(idx) |
28 | 28 | await redis.get('key', encoding='utf-8') |
29 | 29 | |
30 | 30 | |
31 | async def pipeline(pool, val, loop): | |
31 | async def pipeline(pool, val): | |
32 | 32 | val = 'val:{}'.format(val) |
33 | 33 | with await pool as redis: |
34 | 34 | f1 = redis.set('key', val) |
35 | 35 | f2 = redis.get('key', encoding='utf-8') |
36 | ok, res = await asyncio.gather(f1, f2, loop=loop) | |
36 | ok, res = await asyncio.gather(f1, f2) | |
37 | 37 | |
38 | 38 | |
39 | async def transaction(pool, val, loop): | |
39 | async def transaction(pool, val): | |
40 | 40 | val = 'val:{}'.format(val) |
41 | 41 | with await pool as redis: |
42 | 42 | tr = redis.multi_exec() |
47 | 47 | assert res == val |
48 | 48 | |
49 | 49 | |
50 | async def blocking_pop(pool, val, loop): | |
50 | async def blocking_pop(pool, val): | |
51 | 51 | |
52 | 52 | async def lpush(): |
53 | 53 | with await pool as redis: |
54 | 54 | # here v0.3 has bound connection, v1.0 does not; |
55 | await asyncio.sleep(.1, loop=loop) | |
55 | await asyncio.sleep(.1) | |
56 | 56 | await redis.lpush('list-key', 'val') |
57 | 57 | |
58 | 58 | async def blpop(): |
61 | 61 | res = await redis.blpop( |
62 | 62 | 'list-key', timeout=2, encoding='utf-8') |
63 | 63 | assert res == ['list-key', 'val'], res |
64 | await asyncio.gather(blpop(), lpush(), loop=loop) | |
64 | await asyncio.gather(blpop(), lpush()) | |
65 | 65 | |
66 | 66 | |
67 | @pytest.mark.run_loop | |
68 | 67 | @pytest.mark.parametrize('test_case,pool_size', [ |
69 | 68 | (simple_get_set, 1), |
70 | 69 | (pipeline, 1), |
79 | 78 | (transaction, 10), |
80 | 79 | (blocking_pop, 10), |
81 | 80 | ], ids=lambda o: getattr(o, '__name__', repr(o))) |
82 | async def test_operations(pool_or_redis, test_case, pool_size, loop): | |
81 | async def test_operations(pool_or_redis, test_case, pool_size): | |
83 | 82 | repeat = 100 |
84 | 83 | redis = await pool_or_redis(pool_size) |
85 | 84 | done, pending = await asyncio.wait( |
86 | [asyncio.ensure_future(test_case(redis, i, loop), loop=loop) | |
87 | for i in range(repeat)], loop=loop) | |
85 | [asyncio.ensure_future(test_case(redis, i)) | |
86 | for i in range(repeat)]) | |
88 | 87 | |
89 | 88 | assert not pending |
90 | 89 | success = 0 |
3 | 3 | from aioredis import ReplyError |
4 | 4 | |
5 | 5 | |
6 | async def push_data_with_sleep(redis, loop, key, *values): | |
7 | await asyncio.sleep(0.2, loop=loop) | |
6 | async def push_data_with_sleep(redis, key, *values): | |
7 | await asyncio.sleep(0.2) | |
8 | 8 | result = await redis.lpush(key, *values) |
9 | 9 | return result |
10 | 10 | |
11 | 11 | |
12 | @pytest.mark.run_loop | |
13 | 12 | async def test_blpop(redis): |
14 | 13 | key1, value1 = b'key:blpop:1', b'blpop:value:1' |
15 | 14 | key2, value2 = b'key:blpop:2', b'blpop:value:2' |
39 | 38 | assert test_value == ['key:blpop:2', 'blpop:value:1'] |
40 | 39 | |
41 | 40 | |
42 | @pytest.mark.run_loop | |
43 | async def test_blpop_blocking_features(redis, create_redis, loop, server): | |
41 | async def test_blpop_blocking_features(redis, create_redis, server): | |
44 | 42 | key1, key2 = b'key:blpop:1', b'key:blpop:2' |
45 | 43 | value = b'blpop:value:2' |
46 | 44 | |
47 | other_redis = await create_redis( | |
48 | server.tcp_address, loop=loop) | |
45 | other_redis = await create_redis(server.tcp_address) | |
49 | 46 | |
50 | 47 | # create blocking task in separate connection |
51 | 48 | consumer = other_redis.blpop(key1, key2) |
52 | 49 | |
53 | producer_task = asyncio.Task( | |
54 | push_data_with_sleep(redis, loop, key2, value), loop=loop) | |
55 | results = await asyncio.gather( | |
56 | consumer, producer_task, loop=loop) | |
50 | producer_task = asyncio.ensure_future( | |
51 | push_data_with_sleep(redis, key2, value)) | |
52 | results = await asyncio.gather(consumer, producer_task) | |
57 | 53 | |
58 | 54 | assert results[0] == [key2, value] |
59 | 55 | assert results[1] == 1 |
66 | 62 | other_redis.close() |
67 | 63 | |
68 | 64 | |
69 | @pytest.mark.run_loop | |
70 | 65 | async def test_brpop(redis): |
71 | 66 | key1, value1 = b'key:brpop:1', b'brpop:value:1' |
72 | 67 | key2, value2 = b'key:brpop:2', b'brpop:value:2' |
96 | 91 | assert test_value == ['key:brpop:2', 'brpop:value:1'] |
97 | 92 | |
98 | 93 | |
99 | @pytest.mark.run_loop | |
100 | async def test_brpop_blocking_features(redis, create_redis, server, loop): | |
94 | async def test_brpop_blocking_features(redis, create_redis, server): | |
101 | 95 | key1, key2 = b'key:brpop:1', b'key:brpop:2' |
102 | 96 | value = b'brpop:value:2' |
103 | 97 | |
104 | 98 | other_redis = await create_redis( |
105 | server.tcp_address, loop=loop) | |
99 | server.tcp_address) | |
106 | 100 | # create blocking task in separate connection |
107 | 101 | consumer_task = other_redis.brpop(key1, key2) |
108 | 102 | |
109 | producer_task = asyncio.Task( | |
110 | push_data_with_sleep(redis, loop, key2, value), loop=loop) | |
111 | ||
112 | results = await asyncio.gather( | |
113 | consumer_task, producer_task, loop=loop) | |
103 | producer_task = asyncio.ensure_future( | |
104 | push_data_with_sleep(redis, key2, value)) | |
105 | ||
106 | results = await asyncio.gather(consumer_task, producer_task) | |
114 | 107 | |
115 | 108 | assert results[0] == [key2, value] |
116 | 109 | assert results[1] == 1 |
122 | 115 | assert test_value is None |
123 | 116 | |
124 | 117 | |
125 | @pytest.mark.run_loop | |
126 | 118 | async def test_brpoplpush(redis): |
127 | 119 | key = b'key:brpoplpush:1' |
128 | 120 | value1, value2 = b'brpoplpush:value:1', b'brpoplpush:value:2' |
161 | 153 | assert result == 'brpoplpush:value:2' |
162 | 154 | |
163 | 155 | |
164 | @pytest.mark.run_loop | |
165 | async def test_brpoplpush_blocking_features(redis, create_redis, server, loop): | |
156 | async def test_brpoplpush_blocking_features(redis, create_redis, server): | |
166 | 157 | source = b'key:brpoplpush:12' |
167 | 158 | value = b'brpoplpush:value:2' |
168 | 159 | destkey = b'destkey:brpoplpush:2' |
169 | 160 | other_redis = await create_redis( |
170 | server.tcp_address, loop=loop) | |
161 | server.tcp_address) | |
171 | 162 | # create blocking task |
172 | 163 | consumer_task = other_redis.brpoplpush(source, destkey) |
173 | producer_task = asyncio.Task( | |
174 | push_data_with_sleep(redis, loop, source, value), loop=loop) | |
175 | results = await asyncio.gather( | |
176 | consumer_task, producer_task, loop=loop) | |
164 | producer_task = asyncio.ensure_future( | |
165 | push_data_with_sleep(redis, source, value)) | |
166 | results = await asyncio.gather(consumer_task, producer_task) | |
177 | 167 | assert results[0] == value |
178 | 168 | assert results[1] == 1 |
179 | 169 | |
189 | 179 | other_redis.close() |
190 | 180 | |
191 | 181 | |
192 | @pytest.mark.run_loop | |
193 | 182 | async def test_lindex(redis): |
194 | 183 | key, value = b'key:lindex:1', 'value:{}' |
195 | 184 | # setup list |
222 | 211 | await redis.lindex(key, b'one') |
223 | 212 | |
224 | 213 | |
225 | @pytest.mark.run_loop | |
226 | 214 | async def test_linsert(redis): |
227 | 215 | key = b'key:linsert:1' |
228 | 216 | value1, value2, value3, value4 = b'Hello', b'World', b'foo', b'bar' |
251 | 239 | await redis.linsert(None, value1, value3) |
252 | 240 | |
253 | 241 | |
254 | @pytest.mark.run_loop | |
255 | 242 | async def test_llen(redis): |
256 | 243 | key = b'key:llen:1' |
257 | 244 | value1, value2 = b'Hello', b'World' |
267 | 254 | await redis.llen(None) |
268 | 255 | |
269 | 256 | |
270 | @pytest.mark.run_loop | |
271 | 257 | async def test_lpop(redis): |
272 | 258 | key = b'key:lpop:1' |
273 | 259 | value1, value2 = b'lpop:value:1', b'lpop:value:2' |
294 | 280 | await redis.lpop(None) |
295 | 281 | |
296 | 282 | |
297 | @pytest.mark.run_loop | |
298 | 283 | async def test_lpush(redis): |
299 | 284 | key = b'key:lpush' |
300 | 285 | value1, value2 = b'value:1', b'value:2' |
315 | 300 | await redis.lpush(None, value1) |
316 | 301 | |
317 | 302 | |
318 | @pytest.mark.run_loop | |
319 | 303 | async def test_lpushx(redis): |
320 | 304 | key = b'key:lpushx' |
321 | 305 | value1, value2 = b'value:1', b'value:2' |
339 | 323 | await redis.lpushx(None, value1) |
340 | 324 | |
341 | 325 | |
342 | @pytest.mark.run_loop | |
343 | 326 | async def test_lrange(redis): |
344 | 327 | key, value = b'key:lrange:1', 'value:{}' |
345 | 328 | values = [value.format(i).encode('utf-8') for i in range(0, 10)] |
368 | 351 | await redis.lrange(key, 0, b'one') |
369 | 352 | |
370 | 353 | |
371 | @pytest.mark.run_loop | |
372 | 354 | async def test_lrem(redis): |
373 | 355 | key, value = b'key:lrem:1', 'value:{}' |
374 | 356 | values = [value.format(i % 2).encode('utf-8') for i in range(0, 10)] |
403 | 385 | await redis.lrem(key, b'ten', b'value:0') |
404 | 386 | |
405 | 387 | |
406 | @pytest.mark.run_loop | |
407 | 388 | async def test_lset(redis): |
408 | 389 | key, value = b'key:lset', 'value:{}' |
409 | 390 | values = [value.format(i).encode('utf-8') for i in range(0, 3)] |
426 | 407 | await redis.lset(key, b'one', b'value:0') |
427 | 408 | |
428 | 409 | |
429 | @pytest.mark.run_loop | |
430 | 410 | async def test_ltrim(redis): |
431 | 411 | key, value = b'key:ltrim', 'value:{}' |
432 | 412 | values = [value.format(i).encode('utf-8') for i in range(0, 10)] |
457 | 437 | await redis.ltrim(key, 0, b'one') |
458 | 438 | |
459 | 439 | |
460 | @pytest.mark.run_loop | |
461 | 440 | async def test_rpop(redis): |
462 | 441 | key = b'key:rpop:1' |
463 | 442 | value1, value2 = b'rpop:value:1', b'rpop:value:2' |
484 | 463 | await redis.rpop(None) |
485 | 464 | |
486 | 465 | |
487 | @pytest.mark.run_loop | |
488 | 466 | async def test_rpoplpush(redis): |
489 | 467 | key = b'key:rpoplpush:1' |
490 | 468 | value1, value2 = b'rpoplpush:value:1', b'rpoplpush:value:2' |
516 | 494 | await redis.rpoplpush(key, None) |
517 | 495 | |
518 | 496 | |
519 | @pytest.mark.run_loop | |
520 | 497 | async def test_rpush(redis): |
521 | 498 | key = b'key:rpush' |
522 | 499 | value1, value2 = b'value:1', b'value:2' |
533 | 510 | await redis.rpush(None, value1) |
534 | 511 | |
535 | 512 | |
536 | @pytest.mark.run_loop | |
537 | 513 | async def test_rpushx(redis): |
538 | 514 | key = b'key:rpushx' |
539 | 515 | value1, value2 = b'value:1', b'value:2' |
0 | 0 | import asyncio |
1 | import pytest | |
2 | 1 | |
3 | 2 | from aioredis.locks import Lock |
4 | 3 | |
5 | 4 | |
6 | @pytest.mark.run_loop | |
7 | async def test_finished_waiter_cancelled(loop): | |
8 | lock = Lock(loop=loop) | |
5 | async def test_finished_waiter_cancelled(): | |
6 | lock = Lock() | |
9 | 7 | |
10 | ta = asyncio.ensure_future(lock.acquire(), loop=loop) | |
11 | await asyncio.sleep(0, loop=loop) | |
8 | ta = asyncio.ensure_future(lock.acquire()) | |
9 | await asyncio.sleep(0) | |
12 | 10 | assert lock.locked() |
13 | 11 | |
14 | tb = asyncio.ensure_future(lock.acquire(), loop=loop) | |
15 | await asyncio.sleep(0, loop=loop) | |
12 | tb = asyncio.ensure_future(lock.acquire()) | |
13 | await asyncio.sleep(0) | |
16 | 14 | assert len(lock._waiters) == 1 |
17 | 15 | |
18 | 16 | # Create a second waiter, wake up the first, and cancel it. |
19 | 17 | # Without the fix, the second was not woken up and the lock |
20 | 18 | # will never be locked |
21 | asyncio.ensure_future(lock.acquire(), loop=loop) | |
22 | await asyncio.sleep(0, loop=loop) | |
19 | asyncio.ensure_future(lock.acquire()) | |
20 | await asyncio.sleep(0) | |
23 | 21 | lock.release() |
24 | 22 | tb.cancel() |
25 | 23 | |
26 | await asyncio.sleep(0, loop=loop) | |
24 | await asyncio.sleep(0) | |
27 | 25 | assert ta.done() |
28 | 26 | assert tb.cancelled() |
29 | 27 | |
30 | await asyncio.sleep(0, loop=loop) | |
28 | await asyncio.sleep(0) | |
31 | 29 | assert lock.locked() |
22 | 22 | asyncio.set_event_loop(loop) |
23 | 23 | |
24 | 24 | tr = MultiExec(conn, commands_factory=Redis) |
25 | assert tr._loop is loop | |
25 | # assert tr._loop is loop | |
26 | 26 | |
27 | 27 | def make_fut(cmd, *args, **kw): |
28 | 28 | fut = asyncio.get_event_loop().create_future() |
0 | 0 | import asyncio |
1 | 1 | import pytest |
2 | 2 | import async_timeout |
3 | import logging | |
4 | import sys | |
3 | 5 | |
4 | 6 | from unittest.mock import patch |
5 | 7 | |
10 | 12 | ConnectionsPool, |
11 | 13 | MaxClientsError, |
12 | 14 | ) |
15 | from _testutils import redis_version | |
16 | ||
17 | BPO_34638 = sys.version_info >= (3, 8) | |
13 | 18 | |
14 | 19 | |
15 | 20 | def _assert_defaults(pool): |
18 | 23 | assert pool.maxsize == 10 |
19 | 24 | assert pool.size == 1 |
20 | 25 | assert pool.freesize == 1 |
21 | assert pool._close_waiter is None | |
26 | assert not pool._close_state.is_set() | |
22 | 27 | |
23 | 28 | |
24 | 29 | def test_connect(pool): |
25 | 30 | _assert_defaults(pool) |
26 | 31 | |
27 | 32 | |
28 | def test_global_loop(create_pool, loop, server): | |
29 | asyncio.set_event_loop(loop) | |
30 | ||
31 | pool = loop.run_until_complete(create_pool( | |
32 | server.tcp_address)) | |
33 | _assert_defaults(pool) | |
34 | ||
35 | ||
36 | @pytest.mark.run_loop | |
37 | 33 | async def test_clear(pool): |
38 | 34 | _assert_defaults(pool) |
39 | 35 | |
41 | 37 | assert pool.freesize == 0 |
42 | 38 | |
43 | 39 | |
44 | @pytest.mark.run_loop | |
45 | 40 | @pytest.mark.parametrize('minsize', [None, -100, 0.0, 100]) |
46 | async def test_minsize(minsize, create_pool, loop, server): | |
41 | async def test_minsize(minsize, create_pool, server): | |
47 | 42 | |
48 | 43 | with pytest.raises(AssertionError): |
49 | 44 | await create_pool( |
50 | 45 | server.tcp_address, |
51 | minsize=minsize, maxsize=10, loop=loop) | |
52 | ||
53 | ||
54 | @pytest.mark.run_loop | |
46 | minsize=minsize, maxsize=10) | |
47 | ||
48 | ||
55 | 49 | @pytest.mark.parametrize('maxsize', [None, -100, 0.0, 1]) |
56 | async def test_maxsize(maxsize, create_pool, loop, server): | |
50 | async def test_maxsize(maxsize, create_pool, server): | |
57 | 51 | |
58 | 52 | with pytest.raises(AssertionError): |
59 | 53 | await create_pool( |
60 | 54 | server.tcp_address, |
61 | minsize=2, maxsize=maxsize, loop=loop) | |
62 | ||
63 | ||
64 | @pytest.mark.run_loop | |
65 | async def test_create_connection_timeout(create_pool, loop, server): | |
66 | with patch.object(loop, 'create_connection') as\ | |
55 | minsize=2, maxsize=maxsize) | |
56 | ||
57 | ||
58 | async def test_create_connection_timeout(create_pool, server): | |
59 | with patch('aioredis.connection.open_connection') as\ | |
67 | 60 | open_conn_mock: |
68 | open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, | |
69 | loop=loop) | |
61 | open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2) | |
70 | 62 | with pytest.raises(asyncio.TimeoutError): |
71 | 63 | await create_pool( |
72 | server.tcp_address, loop=loop, | |
64 | server.tcp_address, | |
73 | 65 | create_connection_timeout=0.1) |
74 | 66 | |
75 | 67 | |
79 | 71 | pass # pragma: no cover |
80 | 72 | |
81 | 73 | |
82 | @pytest.mark.run_loop | |
83 | async def test_simple_command(create_pool, loop, server): | |
84 | pool = await create_pool( | |
85 | server.tcp_address, | |
86 | minsize=10, loop=loop) | |
74 | async def test_simple_command(create_pool, server): | |
75 | pool = await create_pool( | |
76 | server.tcp_address, | |
77 | minsize=10) | |
87 | 78 | |
88 | 79 | with (await pool) as conn: |
89 | 80 | msg = await conn.execute('echo', 'hello') |
94 | 85 | assert pool.freesize == 10 |
95 | 86 | |
96 | 87 | |
97 | @pytest.mark.run_loop | |
98 | async def test_create_new(create_pool, loop, server): | |
99 | pool = await create_pool( | |
100 | server.tcp_address, | |
101 | minsize=1, loop=loop) | |
88 | async def test_create_new(create_pool, server): | |
89 | pool = await create_pool( | |
90 | server.tcp_address, | |
91 | minsize=1) | |
102 | 92 | assert pool.size == 1 |
103 | 93 | assert pool.freesize == 1 |
104 | 94 | |
114 | 104 | assert pool.freesize == 2 |
115 | 105 | |
116 | 106 | |
117 | @pytest.mark.run_loop | |
118 | async def test_create_constraints(create_pool, loop, server): | |
119 | pool = await create_pool( | |
120 | server.tcp_address, | |
121 | minsize=1, maxsize=1, loop=loop) | |
107 | async def test_create_constraints(create_pool, server): | |
108 | pool = await create_pool( | |
109 | server.tcp_address, | |
110 | minsize=1, maxsize=1) | |
122 | 111 | assert pool.size == 1 |
123 | 112 | assert pool.freesize == 1 |
124 | 113 | |
128 | 117 | |
129 | 118 | with pytest.raises(asyncio.TimeoutError): |
130 | 119 | await asyncio.wait_for(pool.acquire(), |
131 | timeout=0.2, | |
132 | loop=loop) | |
133 | ||
134 | ||
135 | @pytest.mark.run_loop | |
136 | async def test_create_no_minsize(create_pool, loop, server): | |
137 | pool = await create_pool( | |
138 | server.tcp_address, | |
139 | minsize=0, maxsize=1, loop=loop) | |
120 | timeout=0.2) | |
121 | ||
122 | ||
123 | async def test_create_no_minsize(create_pool, server): | |
124 | pool = await create_pool( | |
125 | server.tcp_address, | |
126 | minsize=0, maxsize=1) | |
140 | 127 | assert pool.size == 0 |
141 | 128 | assert pool.freesize == 0 |
142 | 129 | |
146 | 133 | |
147 | 134 | with pytest.raises(asyncio.TimeoutError): |
148 | 135 | await asyncio.wait_for(pool.acquire(), |
149 | timeout=0.2, | |
150 | loop=loop) | |
151 | assert pool.size == 1 | |
152 | assert pool.freesize == 1 | |
153 | ||
154 | ||
155 | @pytest.mark.run_loop | |
156 | async def test_create_pool_cls(create_pool, loop, server): | |
136 | timeout=0.2) | |
137 | assert pool.size == 1 | |
138 | assert pool.freesize == 1 | |
139 | ||
140 | ||
141 | async def test_create_pool_cls(create_pool, server): | |
157 | 142 | |
158 | 143 | class MyPool(ConnectionsPool): |
159 | 144 | pass |
160 | 145 | |
161 | 146 | pool = await create_pool( |
162 | 147 | server.tcp_address, |
163 | loop=loop, | |
164 | 148 | pool_cls=MyPool) |
165 | 149 | |
166 | 150 | assert isinstance(pool, MyPool) |
167 | 151 | |
168 | 152 | |
169 | @pytest.mark.run_loop | |
170 | async def test_create_pool_cls_invalid(create_pool, loop, server): | |
153 | async def test_create_pool_cls_invalid(create_pool, server): | |
171 | 154 | with pytest.raises(AssertionError): |
172 | 155 | await create_pool( |
173 | 156 | server.tcp_address, |
174 | loop=loop, | |
175 | 157 | pool_cls=type) |
176 | 158 | |
177 | 159 | |
178 | @pytest.mark.run_loop | |
179 | async def test_release_closed(create_pool, loop, server): | |
180 | pool = await create_pool( | |
181 | server.tcp_address, | |
182 | minsize=1, loop=loop) | |
160 | async def test_release_closed(create_pool, server): | |
161 | pool = await create_pool( | |
162 | server.tcp_address, | |
163 | minsize=1) | |
183 | 164 | assert pool.size == 1 |
184 | 165 | assert pool.freesize == 1 |
185 | 166 | |
190 | 171 | assert pool.freesize == 0 |
191 | 172 | |
192 | 173 | |
193 | @pytest.mark.run_loop | |
194 | async def test_release_pending(create_pool, loop, server): | |
195 | pool = await create_pool( | |
196 | server.tcp_address, | |
197 | minsize=1, loop=loop) | |
198 | assert pool.size == 1 | |
199 | assert pool.freesize == 1 | |
200 | ||
201 | with pytest.logs('aioredis', 'WARNING') as cm: | |
174 | async def test_release_pending(create_pool, server, caplog): | |
175 | pool = await create_pool( | |
176 | server.tcp_address, | |
177 | minsize=1) | |
178 | assert pool.size == 1 | |
179 | assert pool.freesize == 1 | |
180 | ||
181 | caplog.clear() | |
182 | with caplog.at_level('WARNING', 'aioredis'): | |
202 | 183 | with (await pool) as conn: |
203 | 184 | try: |
204 | 185 | await asyncio.wait_for( |
206 | 187 | b'blpop', |
207 | 188 | b'somekey:not:exists', |
208 | 189 | b'0'), |
209 | 0.1, | |
210 | loop=loop) | |
190 | 0.05, | |
191 | ) | |
211 | 192 | except asyncio.TimeoutError: |
212 | 193 | pass |
213 | 194 | assert pool.size == 0 |
214 | 195 | assert pool.freesize == 0 |
215 | assert cm.output == [ | |
216 | 'WARNING:aioredis:Connection <RedisConnection [db:0]>' | |
217 | ' has pending commands, closing it.' | |
196 | assert caplog.record_tuples == [ | |
197 | ('aioredis', logging.WARNING, 'Connection <RedisConnection [db:0]>' | |
198 | ' has pending commands, closing it.'), | |
218 | 199 | ] |
219 | 200 | |
220 | 201 | |
221 | @pytest.mark.run_loop | |
222 | async def test_release_bad_connection(create_pool, create_redis, loop, server): | |
223 | pool = await create_pool( | |
224 | server.tcp_address, | |
225 | loop=loop) | |
202 | async def test_release_bad_connection(create_pool, create_redis, server): | |
203 | pool = await create_pool(server.tcp_address) | |
226 | 204 | conn = await pool.acquire() |
227 | 205 | assert conn.address[0] in ('127.0.0.1', '::1') |
228 | 206 | assert conn.address[1] == server.tcp_address.port |
229 | other_conn = await create_redis( | |
230 | server.tcp_address, | |
231 | loop=loop) | |
207 | other_conn = await create_redis(server.tcp_address) | |
232 | 208 | with pytest.raises(AssertionError): |
233 | 209 | pool.release(other_conn) |
234 | 210 | |
237 | 213 | await other_conn.wait_closed() |
238 | 214 | |
239 | 215 | |
240 | @pytest.mark.run_loop | |
241 | async def test_select_db(create_pool, loop, server): | |
242 | pool = await create_pool( | |
243 | server.tcp_address, | |
244 | loop=loop) | |
216 | async def test_select_db(create_pool, server): | |
217 | pool = await create_pool(server.tcp_address) | |
245 | 218 | |
246 | 219 | await pool.select(1) |
247 | 220 | with (await pool) as conn: |
248 | 221 | assert conn.db == 1 |
249 | 222 | |
250 | 223 | |
251 | @pytest.mark.run_loop | |
252 | async def test_change_db(create_pool, loop, server): | |
253 | pool = await create_pool( | |
254 | server.tcp_address, | |
255 | minsize=1, db=0, | |
256 | loop=loop) | |
224 | async def test_change_db(create_pool, server): | |
225 | pool = await create_pool(server.tcp_address, minsize=1, db=0) | |
257 | 226 | assert pool.size == 1 |
258 | 227 | assert pool.freesize == 1 |
259 | 228 | |
275 | 244 | assert pool.db == 1 |
276 | 245 | |
277 | 246 | |
278 | @pytest.mark.run_loop | |
279 | async def test_change_db_errors(create_pool, loop, server): | |
280 | pool = await create_pool( | |
281 | server.tcp_address, | |
282 | minsize=1, db=0, | |
283 | loop=loop) | |
247 | async def test_change_db_errors(create_pool, server): | |
248 | pool = await create_pool(server.tcp_address, minsize=1, db=0) | |
284 | 249 | |
285 | 250 | with pytest.raises(TypeError): |
286 | 251 | await pool.select(None) |
303 | 268 | |
304 | 269 | |
305 | 270 | @pytest.mark.xfail(reason="Need to refactor this test") |
306 | @pytest.mark.run_loop | |
307 | async def test_select_and_create(create_pool, loop, server): | |
271 | async def test_select_and_create(create_pool, server): | |
308 | 272 | # trying to model situation when select and acquire |
309 | 273 | # called simultaneously |
310 | 274 | # but acquire freezes on _wait_select and |
311 | # then continues with propper db | |
275 | # then continues with proper db | |
312 | 276 | |
313 | 277 | # TODO: refactor this test as there's no _wait_select any more. |
314 | with async_timeout.timeout(10, loop=loop): | |
278 | with async_timeout.timeout(10): | |
315 | 279 | pool = await create_pool( |
316 | 280 | server.tcp_address, |
317 | 281 | minsize=1, db=0, |
318 | loop=loop) | |
282 | ) | |
319 | 283 | db = 0 |
320 | 284 | while True: |
321 | 285 | db = (db + 1) & 1 |
322 | 286 | _, conn = await asyncio.gather(pool.select(db), |
323 | pool.acquire(), | |
324 | loop=loop) | |
287 | pool.acquire()) | |
325 | 288 | assert pool.db == db |
326 | 289 | pool.release(conn) |
327 | 290 | if conn.db == db: |
329 | 292 | # await asyncio.wait_for(test(), 3, loop=loop) |
330 | 293 | |
331 | 294 | |
332 | @pytest.mark.run_loop | |
333 | async def test_response_decoding(create_pool, loop, server): | |
334 | pool = await create_pool( | |
335 | server.tcp_address, | |
336 | encoding='utf-8', loop=loop) | |
295 | async def test_response_decoding(create_pool, server): | |
296 | pool = await create_pool(server.tcp_address, encoding='utf-8') | |
337 | 297 | |
338 | 298 | assert pool.encoding == 'utf-8' |
339 | 299 | with (await pool) as conn: |
343 | 303 | assert res == 'value' |
344 | 304 | |
345 | 305 | |
346 | @pytest.mark.run_loop | |
347 | async def test_hgetall_response_decoding(create_pool, loop, server): | |
348 | pool = await create_pool( | |
349 | server.tcp_address, | |
350 | encoding='utf-8', loop=loop) | |
306 | async def test_hgetall_response_decoding(create_pool, server): | |
307 | pool = await create_pool(server.tcp_address, encoding='utf-8') | |
351 | 308 | |
352 | 309 | assert pool.encoding == 'utf-8' |
353 | 310 | with (await pool) as conn: |
359 | 316 | assert res == ['foo', 'bar', 'baz', 'zap'] |
360 | 317 | |
361 | 318 | |
362 | @pytest.mark.run_loop | |
363 | async def test_crappy_multiexec(create_pool, loop, server): | |
364 | pool = await create_pool( | |
365 | server.tcp_address, | |
366 | encoding='utf-8', loop=loop, | |
319 | async def test_crappy_multiexec(create_pool, server): | |
320 | pool = await create_pool( | |
321 | server.tcp_address, | |
322 | encoding='utf-8', | |
367 | 323 | minsize=1, maxsize=1) |
368 | 324 | |
369 | 325 | with (await pool) as conn: |
376 | 332 | assert value == 'def' |
377 | 333 | |
378 | 334 | |
379 | @pytest.mark.run_loop | |
380 | async def test_pool_size_growth(create_pool, server, loop): | |
381 | pool = await create_pool( | |
382 | server.tcp_address, | |
383 | loop=loop, | |
335 | async def test_pool_size_growth(create_pool, server): | |
336 | pool = await create_pool( | |
337 | server.tcp_address, | |
384 | 338 | minsize=1, maxsize=1) |
385 | 339 | |
386 | 340 | done = set() |
390 | 344 | with (await pool): |
391 | 345 | assert pool.size <= pool.maxsize |
392 | 346 | assert pool.freesize == 0 |
393 | await asyncio.sleep(0.2, loop=loop) | |
347 | await asyncio.sleep(0.2) | |
394 | 348 | done.add(i) |
395 | 349 | |
396 | 350 | async def task2(): |
400 | 354 | assert done == {0, 1} |
401 | 355 | |
402 | 356 | for _ in range(2): |
403 | tasks.append(asyncio.ensure_future(task1(_), loop=loop)) | |
404 | tasks.append(asyncio.ensure_future(task2(), loop=loop)) | |
405 | await asyncio.gather(*tasks, loop=loop) | |
406 | ||
407 | ||
408 | @pytest.mark.run_loop | |
409 | async def test_pool_with_closed_connections(create_pool, server, loop): | |
410 | pool = await create_pool( | |
411 | server.tcp_address, | |
412 | loop=loop, | |
357 | tasks.append(asyncio.ensure_future(task1(_))) | |
358 | tasks.append(asyncio.ensure_future(task2())) | |
359 | await asyncio.gather(*tasks) | |
360 | ||
361 | ||
362 | async def test_pool_with_closed_connections(create_pool, server): | |
363 | pool = await create_pool( | |
364 | server.tcp_address, | |
413 | 365 | minsize=1, maxsize=2) |
414 | 366 | assert 1 == pool.freesize |
415 | 367 | conn1 = pool._pool[0] |
421 | 373 | assert conn1 is not conn2 |
422 | 374 | |
423 | 375 | |
424 | @pytest.mark.run_loop | |
425 | async def test_pool_close(create_pool, server, loop): | |
426 | pool = await create_pool( | |
427 | server.tcp_address, loop=loop) | |
376 | async def test_pool_close(create_pool, server): | |
377 | pool = await create_pool(server.tcp_address) | |
428 | 378 | |
429 | 379 | assert pool.closed is False |
430 | 380 | |
440 | 390 | assert (await conn.execute('ping')) == b'PONG' |
441 | 391 | |
442 | 392 | |
443 | @pytest.mark.run_loop | |
444 | async def test_pool_close__used(create_pool, server, loop): | |
445 | pool = await create_pool( | |
446 | server.tcp_address, loop=loop) | |
393 | async def test_pool_close__used(create_pool, server): | |
394 | pool = await create_pool(server.tcp_address) | |
447 | 395 | |
448 | 396 | assert pool.closed is False |
449 | 397 | |
456 | 404 | await conn.execute('ping') |
457 | 405 | |
458 | 406 | |
459 | @pytest.mark.run_loop | |
460 | @pytest.redis_version(2, 8, 0, reason="maxclients config setting") | |
407 | @redis_version(2, 8, 0, reason="maxclients config setting") | |
461 | 408 | async def test_pool_check_closed_when_exception( |
462 | create_pool, create_redis, start_server, loop): | |
409 | create_pool, create_redis, start_server, caplog): | |
463 | 410 | server = start_server('server-small') |
464 | redis = await create_redis(server.tcp_address, loop=loop) | |
411 | redis = await create_redis(server.tcp_address) | |
465 | 412 | await redis.config_set('maxclients', 2) |
466 | 413 | |
467 | 414 | errors = (MaxClientsError, ConnectionClosedError, ConnectionError) |
468 | with pytest.logs('aioredis', 'DEBUG') as cm: | |
415 | caplog.clear() | |
416 | with caplog.at_level('DEBUG', 'aioredis'): | |
469 | 417 | with pytest.raises(errors): |
470 | 418 | await create_pool(address=tuple(server.tcp_address), |
471 | minsize=3, loop=loop) | |
472 | ||
473 | assert len(cm.output) >= 3 | |
474 | connect_msg = ( | |
475 | "DEBUG:aioredis:Creating tcp connection" | |
476 | " to ('localhost', {})".format(server.tcp_address.port)) | |
477 | assert cm.output[:2] == [connect_msg, connect_msg] | |
478 | assert cm.output[-1] == "DEBUG:aioredis:Closed 1 connection(s)" | |
479 | ||
480 | ||
481 | @pytest.mark.run_loop | |
482 | async def test_pool_get_connection(create_pool, server, loop): | |
483 | pool = await create_pool(server.tcp_address, minsize=1, maxsize=2, | |
484 | loop=loop) | |
419 | minsize=3) | |
420 | ||
421 | assert len(caplog.record_tuples) >= 3 | |
422 | connect_msg = "Creating tcp connection to ('localhost', {})".format( | |
423 | server.tcp_address.port) | |
424 | assert caplog.record_tuples[:2] == [ | |
425 | ('aioredis', logging.DEBUG, connect_msg), | |
426 | ('aioredis', logging.DEBUG, connect_msg), | |
427 | ] | |
428 | assert caplog.record_tuples[-1] == ( | |
429 | 'aioredis', logging.DEBUG, 'Closed 1 connection(s)' | |
430 | ) | |
431 | ||
432 | ||
433 | async def test_pool_get_connection(create_pool, server): | |
434 | pool = await create_pool(server.tcp_address, minsize=1, maxsize=2) | |
485 | 435 | res = await pool.execute("set", "key", "val") |
486 | 436 | assert res == b'OK' |
487 | 437 | |
498 | 448 | assert res == b'value' |
499 | 449 | |
500 | 450 | |
501 | @pytest.mark.run_loop | |
502 | async def test_pool_get_connection_with_pipelining(create_pool, server, loop): | |
503 | pool = await create_pool(server.tcp_address, minsize=1, maxsize=2, | |
504 | loop=loop) | |
451 | async def test_pool_get_connection_with_pipelining(create_pool, server): | |
452 | pool = await create_pool(server.tcp_address, minsize=1, maxsize=2) | |
505 | 453 | fut1 = pool.execute('set', 'key', 'val') |
506 | 454 | fut2 = pool.execute_pubsub("subscribe", "channel:1") |
507 | 455 | fut3 = pool.execute('getset', 'key', 'next') |
519 | 467 | assert res == b'next' |
520 | 468 | |
521 | 469 | |
522 | @pytest.mark.run_loop | |
523 | async def test_pool_idle_close(create_pool, start_server, loop): | |
470 | @pytest.mark.skipif(sys.platform == "win32", reason="flaky on windows") | |
471 | async def test_pool_idle_close(create_pool, start_server, caplog): | |
524 | 472 | server = start_server('idle') |
525 | conn = await create_pool(server.tcp_address, minsize=2, loop=loop) | |
473 | conn = await create_pool(server.tcp_address, minsize=2) | |
526 | 474 | ok = await conn.execute("config", "set", "timeout", 1) |
527 | 475 | assert ok == b'OK' |
528 | 476 | |
529 | await asyncio.sleep(2, loop=loop) | |
530 | ||
477 | caplog.clear() | |
478 | with caplog.at_level('DEBUG', 'aioredis'): | |
479 | # wait for either disconnection logged or test timeout reached. | |
480 | while len(caplog.record_tuples) < 2: | |
481 | await asyncio.sleep(.5) | |
482 | expected = [ | |
483 | ('aioredis', logging.DEBUG, | |
484 | 'Connection has been closed by server, response: None'), | |
485 | ('aioredis', logging.DEBUG, | |
486 | 'Connection has been closed by server, response: None'), | |
487 | ] | |
488 | if BPO_34638: | |
489 | expected += [ | |
490 | ('asyncio', logging.ERROR, | |
491 | 'An open stream object is being garbage collected; ' | |
492 | 'call "stream.close()" explicitly.'), | |
493 | ('asyncio', logging.ERROR, | |
494 | 'An open stream object is being garbage collected; ' | |
495 | 'call "stream.close()" explicitly.')] | |
496 | # The order in which logs are collected differs each time. | |
497 | assert sorted(caplog.record_tuples) == sorted(expected) | |
498 | ||
499 | # On CI this test fails from time to time. | |
500 | # It is possible to pick 'unclosed' connection and send command, | |
501 | # however on the same loop iteration it gets closed and exception is raised | |
531 | 502 | assert (await conn.execute('ping')) == b'PONG' |
532 | 503 | |
533 | 504 | |
534 | @pytest.mark.run_loop | |
535 | async def test_await(create_pool, server, loop): | |
536 | pool = await create_pool( | |
537 | server.tcp_address, | |
538 | minsize=10, loop=loop) | |
505 | async def test_await(create_pool, server): | |
506 | pool = await create_pool(server.tcp_address, minsize=10) | |
539 | 507 | |
540 | 508 | with (await pool) as conn: |
541 | 509 | msg = await conn.execute('echo', 'hello') |
542 | 510 | assert msg == b'hello' |
543 | 511 | |
544 | 512 | |
545 | @pytest.mark.run_loop | |
546 | async def test_async_with(create_pool, server, loop): | |
547 | pool = await create_pool( | |
548 | server.tcp_address, | |
549 | minsize=10, loop=loop) | |
513 | async def test_async_with(create_pool, server): | |
514 | pool = await create_pool(server.tcp_address, minsize=10) | |
550 | 515 | |
551 | 516 | async with pool.get() as conn: |
552 | 517 | msg = await conn.execute('echo', 'hello') |
553 | 518 | assert msg == b'hello' |
554 | 519 | |
555 | 520 | |
556 | @pytest.mark.run_loop | |
557 | async def test_pool__drop_closed(create_pool, server, loop): | |
558 | pool = await create_pool(server.tcp_address, | |
559 | minsize=3, | |
560 | maxsize=3, | |
561 | loop=loop) | |
521 | async def test_pool__drop_closed(create_pool, server): | |
522 | pool = await create_pool(server.tcp_address, minsize=3, maxsize=3) | |
562 | 523 | assert pool.size == 3 |
563 | 524 | assert pool.freesize == 3 |
564 | 525 | assert not pool._pool[0].closed |
0 | 0 | import asyncio |
1 | 1 | import pytest |
2 | 2 | import aioredis |
3 | ||
4 | from _testutils import redis_version | |
3 | 5 | |
4 | 6 | |
5 | 7 | async def _reader(channel, output, waiter, conn): |
11 | 13 | await output.put(msg) |
12 | 14 | |
13 | 15 | |
14 | @pytest.mark.run_loop | |
15 | 16 | async def test_publish(create_connection, redis, server, loop): |
16 | out = asyncio.Queue(loop=loop) | |
17 | out = asyncio.Queue() | |
17 | 18 | fut = loop.create_future() |
18 | conn = await create_connection( | |
19 | server.tcp_address, loop=loop) | |
20 | sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop) | |
19 | conn = await create_connection(server.tcp_address) | |
20 | sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn)) | |
21 | 21 | |
22 | 22 | await fut |
23 | 23 | await redis.publish('chan:1', 'Hello') |
27 | 27 | sub.cancel() |
28 | 28 | |
29 | 29 | |
30 | @pytest.mark.run_loop | |
31 | 30 | async def test_publish_json(create_connection, redis, server, loop): |
32 | out = asyncio.Queue(loop=loop) | |
31 | out = asyncio.Queue() | |
33 | 32 | fut = loop.create_future() |
34 | conn = await create_connection( | |
35 | server.tcp_address, loop=loop) | |
36 | sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop) | |
33 | conn = await create_connection(server.tcp_address) | |
34 | sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn)) | |
37 | 35 | |
38 | 36 | await fut |
39 | 37 | |
40 | 38 | res = await redis.publish_json('chan:1', {"Hello": "world"}) |
41 | assert res == 1 # recievers | |
39 | assert res == 1 # receivers | |
42 | 40 | |
43 | 41 | msg = await out.get() |
44 | 42 | assert msg == b'{"Hello": "world"}' |
45 | 43 | sub.cancel() |
46 | 44 | |
47 | 45 | |
48 | @pytest.mark.run_loop | |
49 | 46 | async def test_subscribe(redis): |
50 | 47 | res = await redis.subscribe('chan:1', 'chan:2') |
51 | 48 | assert redis.in_pubsub == 2 |
65 | 62 | @pytest.mark.parametrize('create_redis', [ |
66 | 63 | pytest.param(aioredis.create_redis_pool, id='pool'), |
67 | 64 | ]) |
68 | @pytest.mark.run_loop | |
69 | async def test_subscribe_empty_pool(create_redis, server, loop, _closable): | |
70 | redis = await create_redis(server.tcp_address, loop=loop) | |
65 | async def test_subscribe_empty_pool(create_redis, server, _closable): | |
66 | redis = await create_redis(server.tcp_address) | |
71 | 67 | _closable(redis) |
72 | 68 | await redis.connection.clear() |
73 | 69 | |
86 | 82 | [b'unsubscribe', b'chan:2', 0]] |
87 | 83 | |
88 | 84 | |
89 | @pytest.mark.run_loop | |
90 | async def test_psubscribe(redis, create_redis, server, loop): | |
85 | async def test_psubscribe(redis, create_redis, server): | |
91 | 86 | sub = redis |
92 | 87 | res = await sub.psubscribe('patt:*', 'chan:*') |
93 | 88 | assert sub.in_pubsub == 2 |
96 | 91 | pat2 = sub.patterns['chan:*'] |
97 | 92 | assert res == [pat1, pat2] |
98 | 93 | |
99 | pub = await create_redis( | |
100 | server.tcp_address, loop=loop) | |
94 | pub = await create_redis(server.tcp_address) | |
101 | 95 | await pub.publish_json('chan:123', {"Hello": "World"}) |
102 | 96 | res = await pat2.get_json() |
103 | 97 | assert res == (b'chan:123', {"Hello": "World"}) |
112 | 106 | @pytest.mark.parametrize('create_redis', [ |
113 | 107 | pytest.param(aioredis.create_redis_pool, id='pool'), |
114 | 108 | ]) |
115 | @pytest.mark.run_loop | |
116 | async def test_psubscribe_empty_pool(create_redis, server, loop, _closable): | |
117 | sub = await create_redis(server.tcp_address, loop=loop) | |
118 | pub = await create_redis(server.tcp_address, loop=loop) | |
109 | async def test_psubscribe_empty_pool(create_redis, server, _closable): | |
110 | sub = await create_redis(server.tcp_address) | |
111 | pub = await create_redis(server.tcp_address) | |
119 | 112 | _closable(sub) |
120 | 113 | _closable(pub) |
121 | 114 | await sub.connection.clear() |
137 | 130 | ] |
138 | 131 | |
139 | 132 | |
140 | @pytest.redis_version( | |
133 | @redis_version( | |
141 | 134 | 2, 8, 0, reason='PUBSUB CHANNELS is available since redis>=2.8.0') |
142 | @pytest.mark.run_loop | |
143 | async def test_pubsub_channels(create_redis, server, loop): | |
144 | redis = await create_redis( | |
145 | server.tcp_address, loop=loop) | |
135 | async def test_pubsub_channels(create_redis, server): | |
136 | redis = await create_redis(server.tcp_address) | |
146 | 137 | res = await redis.pubsub_channels() |
147 | 138 | assert res == [] |
148 | 139 | |
149 | 140 | res = await redis.pubsub_channels('chan:*') |
150 | 141 | assert res == [] |
151 | 142 | |
152 | sub = await create_redis( | |
153 | server.tcp_address, loop=loop) | |
143 | sub = await create_redis(server.tcp_address) | |
154 | 144 | await sub.subscribe('chan:1') |
155 | 145 | |
156 | 146 | res = await redis.pubsub_channels() |
166 | 156 | assert res == [] |
167 | 157 | |
168 | 158 | |
169 | @pytest.redis_version( | |
159 | @redis_version( | |
170 | 160 | 2, 8, 0, reason='PUBSUB NUMSUB is available since redis>=2.8.0') |
171 | @pytest.mark.run_loop | |
172 | async def test_pubsub_numsub(create_redis, server, loop): | |
173 | redis = await create_redis( | |
174 | server.tcp_address, loop=loop) | |
161 | async def test_pubsub_numsub(create_redis, server): | |
162 | redis = await create_redis(server.tcp_address) | |
175 | 163 | res = await redis.pubsub_numsub() |
176 | 164 | assert res == {} |
177 | 165 | |
178 | 166 | res = await redis.pubsub_numsub('chan:1') |
179 | 167 | assert res == {b'chan:1': 0} |
180 | 168 | |
181 | sub = await create_redis( | |
182 | server.tcp_address, loop=loop) | |
169 | sub = await create_redis(server.tcp_address) | |
183 | 170 | await sub.subscribe('chan:1') |
184 | 171 | |
185 | 172 | res = await redis.pubsub_numsub() |
201 | 188 | assert res == {} |
202 | 189 | |
203 | 190 | |
204 | @pytest.redis_version( | |
191 | @redis_version( | |
205 | 192 | 2, 8, 0, reason='PUBSUB NUMPAT is available since redis>=2.8.0') |
206 | @pytest.mark.run_loop | |
207 | async def test_pubsub_numpat(create_redis, server, loop, redis): | |
208 | sub = await create_redis( | |
209 | server.tcp_address, loop=loop) | |
193 | async def test_pubsub_numpat(create_redis, server, redis): | |
194 | sub = await create_redis(server.tcp_address) | |
210 | 195 | |
211 | 196 | res = await redis.pubsub_numpat() |
212 | 197 | assert res == 0 |
220 | 205 | assert res == 1 |
221 | 206 | |
222 | 207 | |
223 | @pytest.mark.run_loop | |
224 | async def test_close_pubsub_channels(redis, loop): | |
208 | async def test_close_pubsub_channels(redis): | |
225 | 209 | ch, = await redis.subscribe('chan:1') |
226 | 210 | |
227 | 211 | async def waiter(ch): |
228 | 212 | assert not await ch.wait_message() |
229 | 213 | |
230 | tsk = asyncio.ensure_future(waiter(ch), loop=loop) | |
214 | tsk = asyncio.ensure_future(waiter(ch)) | |
231 | 215 | redis.close() |
232 | 216 | await redis.wait_closed() |
233 | 217 | await tsk |
234 | 218 | |
235 | 219 | |
236 | @pytest.mark.run_loop | |
237 | async def test_close_pubsub_patterns(redis, loop): | |
220 | async def test_close_pubsub_patterns(redis): | |
238 | 221 | ch, = await redis.psubscribe('chan:*') |
239 | 222 | |
240 | 223 | async def waiter(ch): |
241 | 224 | assert not await ch.wait_message() |
242 | 225 | |
243 | tsk = asyncio.ensure_future(waiter(ch), loop=loop) | |
226 | tsk = asyncio.ensure_future(waiter(ch)) | |
244 | 227 | redis.close() |
245 | 228 | await redis.wait_closed() |
246 | 229 | await tsk |
247 | 230 | |
248 | 231 | |
249 | @pytest.mark.run_loop | |
250 | async def test_close_cancelled_pubsub_channel(redis, loop): | |
232 | async def test_close_cancelled_pubsub_channel(redis): | |
251 | 233 | ch, = await redis.subscribe('chan:1') |
252 | 234 | |
253 | 235 | async def waiter(ch): |
254 | 236 | with pytest.raises(asyncio.CancelledError): |
255 | 237 | await ch.wait_message() |
256 | 238 | |
257 | tsk = asyncio.ensure_future(waiter(ch), loop=loop) | |
258 | await asyncio.sleep(0, loop=loop) | |
239 | tsk = asyncio.ensure_future(waiter(ch)) | |
240 | await asyncio.sleep(0) | |
259 | 241 | tsk.cancel() |
260 | 242 | |
261 | 243 | |
262 | @pytest.mark.run_loop | |
263 | 244 | async def test_channel_get_after_close(create_redis, loop, server): |
264 | sub = await create_redis( | |
265 | server.tcp_address, loop=loop) | |
266 | pub = await create_redis( | |
267 | server.tcp_address, loop=loop) | |
245 | sub = await create_redis(server.tcp_address) | |
246 | pub = await create_redis(server.tcp_address) | |
268 | 247 | ch, = await sub.subscribe('chan:1') |
269 | 248 | |
270 | 249 | await pub.publish('chan:1', 'message') |
275 | 254 | assert await ch.get() |
276 | 255 | |
277 | 256 | |
278 | @pytest.mark.run_loop | |
279 | async def test_subscribe_concurrency(create_redis, server, loop): | |
280 | sub = await create_redis( | |
281 | server.tcp_address, loop=loop) | |
282 | pub = await create_redis( | |
283 | server.tcp_address, loop=loop) | |
257 | async def test_subscribe_concurrency(create_redis, server): | |
258 | sub = await create_redis(server.tcp_address) | |
259 | pub = await create_redis(server.tcp_address) | |
284 | 260 | |
285 | 261 | async def subscribe(*args): |
286 | 262 | return await sub.subscribe(*args) |
287 | 263 | |
288 | 264 | async def publish(*args): |
289 | await asyncio.sleep(0, loop=loop) | |
265 | await asyncio.sleep(0) | |
290 | 266 | return await pub.publish(*args) |
291 | 267 | |
292 | 268 | res = await asyncio.gather( |
293 | 269 | subscribe('channel:0'), |
294 | 270 | publish('channel:0', 'Hello'), |
295 | 271 | subscribe('channel:1'), |
296 | loop=loop) | |
272 | ) | |
297 | 273 | (ch1,), subs, (ch2,) = res |
298 | 274 | |
299 | 275 | assert ch1.name == b'channel:0' |
301 | 277 | assert ch2.name == b'channel:1' |
302 | 278 | |
303 | 279 | |
304 | @pytest.redis_version( | |
280 | @redis_version( | |
305 | 281 | 3, 2, 0, reason='PUBSUB PING is available since redis>=3.2.0') |
306 | @pytest.mark.run_loop | |
307 | 282 | async def test_pubsub_ping(redis): |
308 | 283 | await redis.subscribe('chan:1', 'chan:2') |
309 | 284 | |
317 | 292 | await redis.unsubscribe('chan:1', 'chan:2') |
318 | 293 | |
319 | 294 | |
320 | @pytest.mark.run_loop | |
321 | async def test_pubsub_channel_iter(create_redis, server, loop): | |
322 | sub = await create_redis(server.tcp_address, loop=loop) | |
323 | pub = await create_redis(server.tcp_address, loop=loop) | |
295 | async def test_pubsub_channel_iter(create_redis, server): | |
296 | sub = await create_redis(server.tcp_address) | |
297 | pub = await create_redis(server.tcp_address) | |
324 | 298 | |
325 | 299 | ch, = await sub.subscribe('chan:1') |
326 | 300 | |
330 | 304 | lst.append(msg) |
331 | 305 | return lst |
332 | 306 | |
333 | tsk = asyncio.ensure_future(coro(ch), loop=loop) | |
307 | tsk = asyncio.ensure_future(coro(ch)) | |
334 | 308 | await pub.publish_json('chan:1', {'Hello': 'World'}) |
335 | 309 | await pub.publish_json('chan:1', ['message']) |
336 | await asyncio.sleep(0, loop=loop) | |
310 | await asyncio.sleep(0.1) | |
337 | 311 | ch.close() |
338 | 312 | assert await tsk == [b'{"Hello": "World"}', b'["message"]'] |
313 | ||
314 | ||
315 | @redis_version( | |
316 | 2, 8, 12, reason="extended `client kill` format required") | |
317 | async def test_pubsub_disconnection_notification(create_redis, server): | |
318 | sub = await create_redis(server.tcp_address) | |
319 | pub = await create_redis(server.tcp_address) | |
320 | ||
321 | async def coro(ch): | |
322 | lst = [] | |
323 | async for msg in ch.iter(): | |
324 | assert ch.is_active | |
325 | lst.append(msg) | |
326 | return lst | |
327 | ||
328 | ch, = await sub.subscribe('chan:1') | |
329 | tsk = asyncio.ensure_future(coro(ch)) | |
330 | assert ch.is_active | |
331 | await pub.publish_json('chan:1', {'Hello': 'World'}) | |
332 | assert ch.is_active | |
333 | assert await pub.execute('client', 'kill', 'type', 'pubsub') >= 1 | |
334 | assert await pub.publish_json('chan:1', ['message']) == 0 | |
335 | assert await tsk == [b'{"Hello": "World"}'] | |
336 | assert not ch.is_active |
1 | 1 | import asyncio |
2 | 2 | import json |
3 | 3 | import sys |
4 | import logging | |
4 | 5 | |
5 | 6 | from unittest import mock |
6 | 7 | |
9 | 10 | from aioredis.pubsub import Receiver, _Sender |
10 | 11 | |
11 | 12 | |
12 | def test_listener_channel(loop): | |
13 | mpsc = Receiver(loop=loop) | |
13 | def test_listener_channel(): | |
14 | mpsc = Receiver() | |
14 | 15 | assert not mpsc.is_active |
15 | 16 | |
16 | 17 | ch_a = mpsc.channel("channel:1") |
35 | 36 | assert dict(mpsc.patterns) == {} |
36 | 37 | |
37 | 38 | |
38 | def test_listener_pattern(loop): | |
39 | mpsc = Receiver(loop=loop) | |
39 | def test_listener_pattern(): | |
40 | mpsc = Receiver() | |
40 | 41 | assert not mpsc.is_active |
41 | 42 | |
42 | 43 | ch_a = mpsc.pattern("*") |
61 | 62 | assert dict(mpsc.patterns) == {b'*': ch} |
62 | 63 | |
63 | 64 | |
64 | @pytest.mark.run_loop | |
65 | async def test_sender(loop): | |
65 | async def test_sender(): | |
66 | 66 | receiver = mock.Mock() |
67 | 67 | |
68 | 68 | sender = _Sender(receiver, 'name', is_pattern=False) |
94 | 94 | assert receiver.mock_calls == [] |
95 | 95 | |
96 | 96 | |
97 | @pytest.mark.run_loop | |
98 | async def test_subscriptions(create_connection, server, loop): | |
99 | sub = await create_connection(server.tcp_address, loop=loop) | |
100 | pub = await create_connection(server.tcp_address, loop=loop) | |
101 | ||
102 | mpsc = Receiver(loop=loop) | |
97 | async def test_subscriptions(create_connection, server): | |
98 | sub = await create_connection(server.tcp_address) | |
99 | pub = await create_connection(server.tcp_address) | |
100 | ||
101 | mpsc = Receiver() | |
103 | 102 | await sub.execute_pubsub('subscribe', |
104 | 103 | mpsc.channel('channel:1'), |
105 | 104 | mpsc.channel('channel:3')) |
120 | 119 | assert msg == b"Hello world" |
121 | 120 | |
122 | 121 | |
123 | @pytest.mark.run_loop | |
124 | async def test_unsubscribe(create_connection, server, loop): | |
125 | sub = await create_connection(server.tcp_address, loop=loop) | |
126 | pub = await create_connection(server.tcp_address, loop=loop) | |
127 | ||
128 | mpsc = Receiver(loop=loop) | |
122 | async def test_unsubscribe(create_connection, server): | |
123 | sub = await create_connection(server.tcp_address) | |
124 | pub = await create_connection(server.tcp_address) | |
125 | ||
126 | mpsc = Receiver() | |
129 | 127 | await sub.execute_pubsub('subscribe', |
130 | 128 | mpsc.channel('channel:1'), |
131 | 129 | mpsc.channel('channel:3')) |
158 | 156 | assert not ch.is_pattern |
159 | 157 | assert msg == b"message" |
160 | 158 | |
161 | waiter = asyncio.ensure_future(mpsc.get(), loop=loop) | |
159 | waiter = asyncio.ensure_future(mpsc.get()) | |
162 | 160 | await sub.execute_pubsub('unsubscribe', 'channel:3') |
163 | 161 | assert not mpsc.is_active |
164 | 162 | assert await waiter is None |
165 | 163 | |
166 | 164 | |
167 | @pytest.mark.run_loop | |
168 | async def test_stopped(create_connection, server, loop): | |
169 | sub = await create_connection(server.tcp_address, loop=loop) | |
170 | pub = await create_connection(server.tcp_address, loop=loop) | |
171 | ||
172 | mpsc = Receiver(loop=loop) | |
165 | async def test_stopped(create_connection, server, caplog): | |
166 | sub = await create_connection(server.tcp_address) | |
167 | pub = await create_connection(server.tcp_address) | |
168 | ||
169 | mpsc = Receiver() | |
173 | 170 | await sub.execute_pubsub('subscribe', mpsc.channel('channel:1')) |
174 | 171 | assert mpsc.is_active |
175 | 172 | mpsc.stop() |
176 | 173 | |
177 | with pytest.logs('aioredis', 'DEBUG') as cm: | |
174 | caplog.clear() | |
175 | with caplog.at_level('DEBUG', 'aioredis'): | |
178 | 176 | await pub.execute('publish', 'channel:1', b'Hello') |
179 | await asyncio.sleep(0, loop=loop) | |
180 | ||
181 | assert len(cm.output) == 1 | |
177 | await asyncio.sleep(0) | |
178 | ||
179 | assert len(caplog.record_tuples) == 1 | |
182 | 180 | # Receiver must have 1 EndOfStream message |
183 | warn_messaege = ( | |
184 | "WARNING:aioredis:Pub/Sub listener message after stop: " | |
181 | message = ( | |
182 | "Pub/Sub listener message after stop: " | |
185 | 183 | "sender: <_Sender name:b'channel:1', is_pattern:False, receiver:" |
186 | 184 | "<Receiver is_active:False, senders:1, qsize:0>>, data: b'Hello'" |
187 | 185 | ) |
188 | assert cm.output == [warn_messaege] | |
186 | assert caplog.record_tuples == [ | |
187 | ('aioredis', logging.WARNING, message), | |
188 | ] | |
189 | 189 | |
190 | 190 | # assert (await mpsc.get()) is None |
191 | 191 | with pytest.raises(ChannelClosedError): |
194 | 194 | assert res is False |
195 | 195 | |
196 | 196 | |
197 | @pytest.mark.run_loop | |
198 | async def test_wait_message(create_connection, server, loop): | |
199 | sub = await create_connection(server.tcp_address, loop=loop) | |
200 | pub = await create_connection(server.tcp_address, loop=loop) | |
201 | ||
202 | mpsc = Receiver(loop=loop) | |
197 | async def test_wait_message(create_connection, server): | |
198 | sub = await create_connection(server.tcp_address) | |
199 | pub = await create_connection(server.tcp_address) | |
200 | ||
201 | mpsc = Receiver() | |
203 | 202 | await sub.execute_pubsub('subscribe', mpsc.channel('channel:1')) |
204 | fut = asyncio.ensure_future(mpsc.wait_message(), loop=loop) | |
203 | fut = asyncio.ensure_future(mpsc.wait_message()) | |
205 | 204 | assert not fut.done() |
206 | await asyncio.sleep(0, loop=loop) | |
205 | await asyncio.sleep(0) | |
207 | 206 | assert not fut.done() |
208 | 207 | |
209 | 208 | await pub.execute('publish', 'channel:1', 'hello') |
210 | await asyncio.sleep(0, loop=loop) # read in connection | |
211 | await asyncio.sleep(0, loop=loop) # call Future.set_result | |
209 | await asyncio.sleep(0) # read in connection | |
210 | await asyncio.sleep(0) # call Future.set_result | |
212 | 211 | assert fut.done() |
213 | 212 | res = await fut |
214 | 213 | assert res is True |
215 | 214 | |
216 | 215 | |
217 | @pytest.mark.run_loop | |
218 | async def test_decode_message(loop): | |
219 | mpsc = Receiver(loop) | |
216 | async def test_decode_message(): | |
217 | mpsc = Receiver() | |
220 | 218 | ch = mpsc.channel('channel:1') |
221 | 219 | ch.put_nowait(b'Some data') |
222 | 220 | |
237 | 235 | |
238 | 236 | @pytest.mark.skipif(sys.version_info >= (3, 6), |
239 | 237 | reason="json.loads accept bytes since Python 3.6") |
240 | @pytest.mark.run_loop | |
241 | async def test_decode_message_error(loop): | |
242 | mpsc = Receiver(loop) | |
238 | async def test_decode_message_error(): | |
239 | mpsc = Receiver() | |
243 | 240 | ch = mpsc.channel('channel:1') |
244 | 241 | |
245 | 242 | ch.put_nowait(b'{"hello": "world"}') |
254 | 251 | assert (await mpsc.get(decoder=json.loads)) == unexpected |
255 | 252 | |
256 | 253 | |
257 | @pytest.mark.run_loop | |
258 | async def test_decode_message_for_pattern(loop): | |
259 | mpsc = Receiver(loop) | |
254 | async def test_decode_message_for_pattern(): | |
255 | mpsc = Receiver() | |
260 | 256 | ch = mpsc.pattern('*') |
261 | 257 | ch.put_nowait((b'channel', b'Some data')) |
262 | 258 | |
275 | 271 | assert res[1] == (b'channel', {'hello': 'world'}) |
276 | 272 | |
277 | 273 | |
278 | @pytest.mark.run_loop | |
279 | 274 | async def test_pubsub_receiver_iter(create_redis, server, loop): |
280 | sub = await create_redis(server.tcp_address, loop=loop) | |
281 | pub = await create_redis(server.tcp_address, loop=loop) | |
282 | ||
283 | mpsc = Receiver(loop=loop) | |
275 | sub = await create_redis(server.tcp_address) | |
276 | pub = await create_redis(server.tcp_address) | |
277 | ||
278 | mpsc = Receiver() | |
284 | 279 | |
285 | 280 | async def coro(mpsc): |
286 | 281 | lst = [] |
288 | 283 | lst.append(msg) |
289 | 284 | return lst |
290 | 285 | |
291 | tsk = asyncio.ensure_future(coro(mpsc), loop=loop) | |
286 | tsk = asyncio.ensure_future(coro(mpsc)) | |
292 | 287 | snd1, = await sub.subscribe(mpsc.channel('chan:1')) |
293 | 288 | snd2, = await sub.subscribe(mpsc.channel('chan:2')) |
294 | 289 | snd3, = await sub.psubscribe(mpsc.pattern('chan:*')) |
298 | 293 | subscribers = await pub.publish_json('chan:2', ['message']) |
299 | 294 | assert subscribers > 1 |
300 | 295 | loop.call_later(0, mpsc.stop) |
301 | # await asyncio.sleep(0, loop=loop) | |
296 | await asyncio.sleep(0.01) | |
302 | 297 | assert await tsk == [ |
303 | 298 | (snd1, b'{"Hello": "World"}'), |
304 | 299 | (snd3, (b'chan:1', b'{"Hello": "World"}')), |
308 | 303 | assert not mpsc.is_active |
309 | 304 | |
310 | 305 | |
311 | @pytest.mark.run_loop(timeout=5) | |
306 | @pytest.mark.timeout(5) | |
312 | 307 | async def test_pubsub_receiver_call_stop_with_empty_queue( |
313 | 308 | create_redis, server, loop): |
314 | sub = await create_redis(server.tcp_address, loop=loop) | |
315 | ||
316 | mpsc = Receiver(loop=loop) | |
309 | sub = await create_redis(server.tcp_address) | |
310 | ||
311 | mpsc = Receiver() | |
317 | 312 | |
318 | 313 | # FIXME: currently at least one subscriber is needed |
319 | 314 | snd1, = await sub.subscribe(mpsc.channel('chan:1')) |
327 | 322 | assert not mpsc.is_active |
328 | 323 | |
329 | 324 | |
330 | @pytest.mark.run_loop | |
331 | async def test_pubsub_receiver_stop_on_disconnect(create_redis, server, loop): | |
332 | pub = await create_redis(server.tcp_address, loop=loop) | |
333 | sub = await create_redis(server.tcp_address, loop=loop) | |
325 | async def test_pubsub_receiver_stop_on_disconnect(create_redis, server): | |
326 | pub = await create_redis(server.tcp_address) | |
327 | sub = await create_redis(server.tcp_address) | |
334 | 328 | sub_name = 'sub-{:X}'.format(id(sub)) |
335 | 329 | await sub.client_setname(sub_name) |
336 | 330 | for sub_info in await pub.client_list(): |
338 | 332 | break |
339 | 333 | assert sub_info.name == sub_name |
340 | 334 | |
341 | mpsc = Receiver(loop=loop) | |
335 | mpsc = Receiver() | |
342 | 336 | await sub.subscribe(mpsc.channel('channel:1')) |
343 | 337 | await sub.subscribe(mpsc.channel('channel:2')) |
344 | 338 | await sub.psubscribe(mpsc.pattern('channel:*')) |
345 | 339 | |
346 | q = asyncio.Queue(loop=loop) | |
340 | q = asyncio.Queue() | |
347 | 341 | EOF = object() |
348 | 342 | |
349 | 343 | async def reader(): |
351 | 345 | await q.put((ch.name, msg)) |
352 | 346 | await q.put(EOF) |
353 | 347 | |
354 | tsk = asyncio.ensure_future(reader(), loop=loop) | |
348 | tsk = asyncio.ensure_future(reader()) | |
355 | 349 | await pub.publish_json('channel:1', ['hello']) |
356 | 350 | await pub.publish_json('channel:2', ['hello']) |
357 | 351 | # receive all messages |
362 | 356 | |
363 | 357 | # XXX: need to implement `client kill` |
364 | 358 | assert await pub.execute('client', 'kill', sub_info.addr) in (b'OK', 1) |
365 | await asyncio.wait_for(tsk, timeout=1, loop=loop) | |
359 | await asyncio.wait_for(tsk, timeout=1) | |
366 | 360 | assert await q.get() is EOF |
3 | 3 | from aioredis import ReplyError |
4 | 4 | |
5 | 5 | |
6 | @pytest.mark.run_loop | |
7 | 6 | async def test_eval(redis): |
8 | 7 | await redis.delete('key:eval', 'value:eval') |
9 | 8 | |
37 | 36 | await redis.eval(None) |
38 | 37 | |
39 | 38 | |
40 | @pytest.mark.run_loop | |
41 | 39 | async def test_evalsha(redis): |
42 | 40 | script = b"return 42" |
43 | 41 | sha_hash = await redis.script_load(script) |
61 | 59 | await redis.evalsha(None) |
62 | 60 | |
63 | 61 | |
64 | @pytest.mark.run_loop | |
65 | 62 | async def test_script_exists(redis): |
66 | 63 | sha_hash1 = await redis.script_load(b'return 1') |
67 | 64 | sha_hash2 = await redis.script_load(b'return 2') |
81 | 78 | await redis.script_exists('123', None) |
82 | 79 | |
83 | 80 | |
84 | @pytest.mark.run_loop | |
85 | 81 | async def test_script_flush(redis): |
86 | 82 | sha_hash1 = await redis.script_load(b'return 1') |
87 | 83 | assert len(sha_hash1) == 40 |
93 | 89 | assert res == [0] |
94 | 90 | |
95 | 91 | |
96 | @pytest.mark.run_loop | |
97 | 92 | async def test_script_load(redis): |
98 | 93 | sha_hash1 = await redis.script_load(b'return 1') |
99 | 94 | sha_hash2 = await redis.script_load(b'return 2') |
103 | 98 | assert res == [1, 1] |
104 | 99 | |
105 | 100 | |
106 | @pytest.mark.run_loop | |
107 | async def test_script_kill(create_redis, loop, server, redis): | |
101 | async def test_script_kill(create_redis, server, redis): | |
108 | 102 | script = "while (1) do redis.call('TIME') end" |
109 | 103 | |
110 | other_redis = await create_redis( | |
111 | server.tcp_address, loop=loop) | |
104 | other_redis = await create_redis(server.tcp_address) | |
112 | 105 | |
113 | 106 | ok = await redis.set('key1', 'value') |
114 | 107 | assert ok is True |
115 | 108 | |
116 | 109 | fut = other_redis.eval(script, keys=['non-existent-key'], args=[10]) |
117 | await asyncio.sleep(0.1, loop=loop) | |
110 | await asyncio.sleep(0.1) | |
118 | 111 | resp = await redis.script_kill() |
119 | 112 | assert resp is True |
120 | 113 |
0 | 0 | import asyncio |
1 | 1 | import pytest |
2 | 2 | import sys |
3 | import logging | |
3 | 4 | |
4 | 5 | from aioredis import RedisError, ReplyError, PoolClosedError |
5 | 6 | from aioredis.errors import MasterReplyError |
6 | 7 | from aioredis.sentinel.commands import RedisSentinel |
7 | 8 | from aioredis.abc import AbcPool |
8 | ||
9 | pytestmark = pytest.redis_version(2, 8, 12, reason="Sentinel v2 required") | |
9 | from _testutils import redis_version | |
10 | ||
11 | pytestmark = redis_version(2, 8, 12, reason="Sentinel v2 required") | |
10 | 12 | if sys.platform == 'win32': |
11 | 13 | pytestmark = pytest.mark.skip(reason="unstable on windows") |
12 | 14 | |
13 | 15 | BPO_30399 = sys.version_info >= (3, 7, 0, 'alpha', 3) |
14 | 16 | |
15 | 17 | |
16 | @pytest.mark.run_loop | |
17 | 18 | async def test_client_close(redis_sentinel): |
18 | 19 | assert isinstance(redis_sentinel, RedisSentinel) |
19 | 20 | assert not redis_sentinel.closed |
26 | 27 | await redis_sentinel.wait_closed() |
27 | 28 | |
28 | 29 | |
29 | @pytest.mark.run_loop | |
30 | async def test_global_loop(sentinel, create_sentinel, loop): | |
31 | asyncio.set_event_loop(loop) | |
32 | ||
33 | # force global loop | |
34 | client = await create_sentinel([sentinel.tcp_address], | |
35 | timeout=1, loop=None) | |
36 | assert client._pool._loop is loop | |
37 | ||
38 | asyncio.set_event_loop(None) | |
39 | ||
40 | ||
41 | @pytest.mark.run_loop | |
42 | 30 | async def test_ping(redis_sentinel): |
43 | 31 | assert b'PONG' == (await redis_sentinel.ping()) |
44 | 32 | |
45 | 33 | |
46 | @pytest.mark.run_loop | |
47 | 34 | async def test_master_info(redis_sentinel, sentinel): |
48 | 35 | info = await redis_sentinel.master('master-no-fail') |
49 | 36 | assert isinstance(info, dict) |
81 | 68 | assert 'link-refcount' in info |
82 | 69 | |
83 | 70 | |
84 | @pytest.mark.run_loop | |
85 | async def test_master__auth(create_sentinel, start_sentinel, | |
86 | start_server, loop): | |
71 | async def test_master__auth(create_sentinel, start_sentinel, start_server): | |
87 | 72 | master = start_server('master_1', password='123') |
88 | 73 | start_server('slave_1', slaveof=master, password='123') |
89 | 74 | |
90 | 75 | sentinel = start_sentinel('auth_sentinel_1', master) |
91 | 76 | client1 = await create_sentinel( |
92 | [sentinel.tcp_address], password='123', timeout=1, loop=loop) | |
77 | [sentinel.tcp_address], password='123', timeout=1) | |
93 | 78 | |
94 | 79 | client2 = await create_sentinel( |
95 | [sentinel.tcp_address], password='111', timeout=1, loop=loop) | |
96 | ||
97 | client3 = await create_sentinel( | |
98 | [sentinel.tcp_address], timeout=1, loop=loop) | |
80 | [sentinel.tcp_address], password='111', timeout=1) | |
81 | ||
82 | client3 = await create_sentinel([sentinel.tcp_address], timeout=1) | |
99 | 83 | |
100 | 84 | m1 = client1.master_for(master.name) |
101 | 85 | await m1.set('mykey', 'myval') |
116 | 100 | await m3.set('mykey', 'myval') |
117 | 101 | |
118 | 102 | |
119 | @pytest.mark.run_loop | |
120 | async def test_master__no_auth(create_sentinel, sentinel, loop): | |
103 | async def test_master__no_auth(create_sentinel, sentinel): | |
121 | 104 | client = await create_sentinel( |
122 | [sentinel.tcp_address], password='123', timeout=1, loop=loop) | |
105 | [sentinel.tcp_address], password='123', timeout=1) | |
123 | 106 | |
124 | 107 | master = client.master_for('masterA') |
125 | 108 | with pytest.raises(MasterReplyError): |
126 | 109 | await master.set('mykey', 'myval') |
127 | 110 | |
128 | 111 | |
129 | @pytest.mark.run_loop | |
130 | 112 | async def test_master__unknown(redis_sentinel): |
131 | 113 | with pytest.raises(ReplyError): |
132 | 114 | await redis_sentinel.master('unknown-master') |
133 | 115 | |
134 | 116 | |
135 | @pytest.mark.run_loop | |
136 | 117 | async def test_master_address(redis_sentinel, sentinel): |
137 | 118 | _, port = await redis_sentinel.master_address('master-no-fail') |
138 | 119 | assert port == sentinel.masters['master-no-fail'].tcp_address.port |
139 | 120 | |
140 | 121 | |
141 | @pytest.mark.run_loop | |
142 | 122 | async def test_master_address__unknown(redis_sentinel): |
143 | 123 | res = await redis_sentinel.master_address('unknown-master') |
144 | 124 | assert res is None |
145 | 125 | |
146 | 126 | |
147 | @pytest.mark.run_loop | |
148 | 127 | async def test_masters(redis_sentinel): |
149 | 128 | masters = await redis_sentinel.masters() |
150 | 129 | assert isinstance(masters, dict) |
153 | 132 | assert isinstance(masters['master-no-fail'], dict) |
154 | 133 | |
155 | 134 | |
156 | @pytest.mark.run_loop | |
157 | 135 | async def test_slave_info(sentinel, redis_sentinel): |
158 | 136 | info = await redis_sentinel.slaves('master-no-fail') |
159 | 137 | assert len(info) == 1 |
195 | 173 | assert not missing |
196 | 174 | |
197 | 175 | |
198 | @pytest.mark.run_loop | |
199 | 176 | async def test_slave__unknown(redis_sentinel): |
200 | 177 | with pytest.raises(ReplyError): |
201 | 178 | await redis_sentinel.slaves('unknown-master') |
202 | 179 | |
203 | 180 | |
204 | @pytest.mark.run_loop | |
205 | 181 | async def test_sentinels_empty(redis_sentinel): |
206 | 182 | res = await redis_sentinel.sentinels('master-no-fail') |
207 | 183 | assert res == [] |
210 | 186 | await redis_sentinel.sentinels('unknown-master') |
211 | 187 | |
212 | 188 | |
213 | @pytest.mark.run_loop(timeout=30) | |
189 | @pytest.mark.timeout(30) | |
214 | 190 | async def test_sentinels__exist(create_sentinel, start_sentinel, |
215 | start_server, loop): | |
191 | start_server): | |
216 | 192 | m1 = start_server('master-two-sentinels') |
217 | 193 | s1 = start_sentinel('peer-sentinel-1', m1, quorum=2, noslaves=True) |
218 | 194 | s2 = start_sentinel('peer-sentinel-2', m1, quorum=2, noslaves=True) |
225 | 201 | info = await redis_sentinel.master('master-two-sentinels') |
226 | 202 | if info['num-other-sentinels'] > 0: |
227 | 203 | break |
228 | await asyncio.sleep(.2, loop=loop) | |
204 | await asyncio.sleep(.2) | |
229 | 205 | info = await redis_sentinel.sentinels('master-two-sentinels') |
230 | 206 | assert len(info) == 1 |
231 | 207 | assert 'sentinel' in info[0]['flags'] |
232 | 208 | assert info[0]['port'] in (s1.tcp_address.port, s2.tcp_address.port) |
233 | 209 | |
234 | 210 | |
235 | @pytest.mark.run_loop | |
236 | 211 | async def test_ckquorum(redis_sentinel): |
237 | 212 | assert (await redis_sentinel.check_quorum('master-no-fail')) |
238 | 213 | |
247 | 222 | assert (await redis_sentinel.check_quorum('master-no-fail')) |
248 | 223 | |
249 | 224 | |
250 | @pytest.mark.run_loop | |
251 | 225 | async def test_set_option(redis_sentinel): |
252 | 226 | assert (await redis_sentinel.set('master-no-fail', 'quorum', 10)) |
253 | 227 | master = await redis_sentinel.master('master-no-fail') |
261 | 235 | await redis_sentinel.set('masterA', 'foo', 'bar') |
262 | 236 | |
263 | 237 | |
264 | @pytest.mark.run_loop | |
265 | async def test_sentinel_role(sentinel, create_redis, loop): | |
266 | redis = await create_redis(sentinel.tcp_address, loop=loop) | |
238 | async def test_sentinel_role(sentinel, create_redis): | |
239 | redis = await create_redis(sentinel.tcp_address) | |
267 | 240 | info = await redis.role() |
268 | 241 | assert info.role == 'sentinel' |
269 | 242 | assert isinstance(info.masters, list) |
270 | 243 | assert 'master-no-fail' in info.masters |
271 | 244 | |
272 | 245 | |
273 | @pytest.mark.run_loop(timeout=30) | |
274 | async def test_remove(redis_sentinel, start_server, loop): | |
246 | @pytest.mark.timeout(30) | |
247 | async def test_remove(redis_sentinel, start_server): | |
275 | 248 | m1 = start_server('master-to-remove') |
276 | 249 | ok = await redis_sentinel.monitor( |
277 | 250 | m1.name, '127.0.0.1', m1.tcp_address.port, 1) |
284 | 257 | await redis_sentinel.remove('unknown-master') |
285 | 258 | |
286 | 259 | |
287 | @pytest.mark.run_loop(timeout=30) | |
288 | async def test_monitor(redis_sentinel, start_server, loop, unused_port): | |
260 | @pytest.mark.timeout(30) | |
261 | async def test_monitor(redis_sentinel, start_server, unused_port): | |
289 | 262 | m1 = start_server('master-to-monitor') |
290 | 263 | ok = await redis_sentinel.monitor( |
291 | 264 | m1.name, '127.0.0.1', m1.tcp_address.port, 1) |
295 | 268 | assert port == m1.tcp_address.port |
296 | 269 | |
297 | 270 | |
298 | @pytest.mark.run_loop(timeout=5) | |
299 | async def test_sentinel_master_pool_size(sentinel, create_sentinel): | |
271 | @pytest.mark.timeout(5) | |
272 | async def test_sentinel_master_pool_size(sentinel, create_sentinel, caplog): | |
300 | 273 | redis_s = await create_sentinel([sentinel.tcp_address], timeout=1, |
301 | 274 | minsize=10, maxsize=10) |
302 | 275 | master = redis_s.master_for('master-no-fail') |
303 | 276 | assert isinstance(master.connection, AbcPool) |
304 | 277 | assert master.connection.size == 0 |
305 | 278 | |
306 | with pytest.logs('aioredis.sentinel', 'DEBUG') as cm: | |
279 | caplog.clear() | |
280 | with caplog.at_level('DEBUG', 'aioredis.sentinel'): | |
307 | 281 | assert await master.ping() |
308 | assert len(cm.output) == 1 | |
309 | assert cm.output == [ | |
310 | "DEBUG:aioredis.sentinel:Discoverred new address {}" | |
311 | " for master-no-fail".format(master.address), | |
282 | assert len(caplog.record_tuples) == 1 | |
283 | assert caplog.record_tuples == [ | |
284 | ('aioredis.sentinel', logging.DEBUG, | |
285 | "Discoverred new address {} for master-no-fail".format( | |
286 | master.address) | |
287 | ), | |
312 | 288 | ] |
313 | 289 | assert master.connection.size == 10 |
314 | 290 | assert master.connection.freesize == 10 |
5 | 5 | SlaveNotFoundError, |
6 | 6 | ReadOnlyError, |
7 | 7 | ) |
8 | ||
9 | ||
10 | pytestmark = pytest.redis_version(2, 8, 12, reason="Sentinel v2 required") | |
8 | from _testutils import redis_version | |
9 | ||
10 | ||
11 | pytestmark = redis_version(2, 8, 12, reason="Sentinel v2 required") | |
11 | 12 | if sys.platform == 'win32': |
12 | 13 | pytestmark = pytest.mark.skip(reason="unstable on windows") |
13 | 14 | |
14 | 15 | |
15 | @pytest.mark.xfail | |
16 | @pytest.mark.run_loop(timeout=40) | |
16 | @pytest.mark.timeout(40) | |
17 | 17 | async def test_auto_failover(start_sentinel, start_server, |
18 | create_sentinel, create_connection, loop): | |
18 | create_sentinel, create_connection): | |
19 | 19 | server1 = start_server('master-failover', ['slave-read-only yes']) |
20 | 20 | start_server('slave-failover1', ['slave-read-only yes'], slaveof=server1) |
21 | 21 | start_server('slave-failover2', ['slave-read-only yes'], slaveof=server1) |
22 | 22 | |
23 | sentinel1 = start_sentinel('sentinel-failover1', server1, quorum=2) | |
24 | sentinel2 = start_sentinel('sentinel-failover2', server1, quorum=2) | |
23 | sentinel1 = start_sentinel('sentinel-failover1', server1, quorum=2, | |
24 | down_after_milliseconds=300, | |
25 | failover_timeout=1000) | |
26 | sentinel2 = start_sentinel('sentinel-failover2', server1, quorum=2, | |
27 | down_after_milliseconds=300, | |
28 | failover_timeout=1000) | |
29 | # Wait a bit for sentinels to sync | |
30 | await asyncio.sleep(3) | |
25 | 31 | |
26 | 32 | sp = await create_sentinel([sentinel1.tcp_address, |
27 | 33 | sentinel2.tcp_address], |
38 | 44 | |
39 | 45 | # wait failover |
40 | 46 | conn = await create_connection(server1.tcp_address) |
41 | await conn.execute("debug", "sleep", 6) | |
42 | await asyncio.sleep(3, loop=loop) | |
47 | await conn.execute("debug", "sleep", 2) | |
43 | 48 | |
44 | 49 | # _, new_port = await sp.master_address(server1.name) |
45 | 50 | # assert new_port != old_port |
49 | 54 | assert master.address[1] != old_port |
50 | 55 | |
51 | 56 | |
52 | @pytest.mark.run_loop | |
53 | 57 | async def test_sentinel_normal(sentinel, create_sentinel): |
54 | 58 | redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) |
55 | 59 | redis = redis_sentinel.master_for('masterA') |
70 | 74 | |
71 | 75 | |
72 | 76 | @pytest.mark.xfail(reason="same sentinel; single master;") |
73 | @pytest.mark.run_loop | |
74 | 77 | async def test_sentinel_slave(sentinel, create_sentinel): |
75 | 78 | redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) |
76 | 79 | redis = redis_sentinel.slave_for('masterA') |
90 | 93 | |
91 | 94 | |
92 | 95 | @pytest.mark.xfail(reason="Need proper sentinel configuration") |
93 | @pytest.mark.run_loop # (timeout=600) | |
94 | async def test_sentinel_slave_fail(sentinel, create_sentinel, loop): | |
96 | async def test_sentinel_slave_fail(sentinel, create_sentinel): | |
95 | 97 | redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) |
96 | 98 | |
97 | 99 | key, field, value = b'key:hset', b'bar', b'zap' |
107 | 109 | |
108 | 110 | ret = await redis_sentinel.failover('masterA') |
109 | 111 | assert ret is True |
110 | await asyncio.sleep(2, loop=loop) | |
112 | await asyncio.sleep(2) | |
111 | 113 | |
112 | 114 | with pytest.raises(ReadOnlyError): |
113 | 115 | await redis.hset(key, field, value) |
114 | 116 | |
115 | 117 | ret = await redis_sentinel.failover('masterA') |
116 | 118 | assert ret is True |
117 | await asyncio.sleep(2, loop=loop) | |
119 | await asyncio.sleep(2) | |
118 | 120 | while True: |
119 | 121 | try: |
120 | await asyncio.sleep(1, loop=loop) | |
122 | await asyncio.sleep(1) | |
121 | 123 | await redis.hset(key, field, value) |
122 | 124 | except SlaveNotFoundError: |
123 | 125 | continue |
126 | 128 | |
127 | 129 | |
128 | 130 | @pytest.mark.xfail(reason="Need proper sentinel configuration") |
129 | @pytest.mark.run_loop | |
130 | async def test_sentinel_normal_fail(sentinel, create_sentinel, loop): | |
131 | async def test_sentinel_normal_fail(sentinel, create_sentinel): | |
131 | 132 | redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) |
132 | 133 | |
133 | 134 | key, field, value = b'key:hset', b'bar', b'zap' |
141 | 142 | assert ret == 1 |
142 | 143 | ret = await redis_sentinel.failover('masterA') |
143 | 144 | assert ret is True |
144 | await asyncio.sleep(2, loop=loop) | |
145 | await asyncio.sleep(2) | |
145 | 146 | ret = await redis.hset(key, field, value) |
146 | 147 | assert ret == 0 |
147 | 148 | ret = await redis_sentinel.failover('masterA') |
148 | 149 | assert ret is True |
149 | await asyncio.sleep(2, loop=loop) | |
150 | await asyncio.sleep(2) | |
150 | 151 | redis = redis_sentinel.slave_for('masterA') |
151 | 152 | while True: |
152 | 153 | try: |
153 | 154 | await redis.hset(key, field, value) |
154 | await asyncio.sleep(1, loop=loop) | |
155 | await asyncio.sleep(1) | |
155 | 156 | # redis = await get_slave_connection() |
156 | 157 | except ReadOnlyError: |
157 | 158 | break |
158 | 159 | |
159 | 160 | |
160 | @pytest.mark.xfail(reason="same sentinel; single master;") | |
161 | @pytest.mark.run_loop | |
162 | async def test_failover_command(sentinel, create_sentinel, loop): | |
163 | master_name = 'masterA' | |
164 | redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) | |
165 | ||
166 | orig_master = await redis_sentinel.master_address(master_name) | |
167 | ret = await redis_sentinel.failover(master_name) | |
168 | assert ret is True | |
169 | await asyncio.sleep(2, loop=loop) | |
170 | ||
171 | new_master = await redis_sentinel.master_address(master_name) | |
161 | @pytest.mark.timeout(30) | |
162 | async def test_failover_command(start_server, start_sentinel, | |
163 | create_sentinel): | |
164 | server = start_server('master-failover-cmd', ['slave-read-only yes']) | |
165 | start_server('slave-failover-cmd', ['slave-read-only yes'], slaveof=server) | |
166 | ||
167 | sentinel = start_sentinel('sentinel-failover-cmd', server, quorum=1, | |
168 | down_after_milliseconds=300, | |
169 | failover_timeout=1000) | |
170 | ||
171 | name = 'master-failover-cmd' | |
172 | redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) | |
173 | # Wait a bit for sentinels to sync | |
174 | await asyncio.sleep(3) | |
175 | ||
176 | orig_master = await redis_sentinel.master_address(name) | |
177 | assert await redis_sentinel.failover(name) is True | |
178 | await asyncio.sleep(2) | |
179 | ||
180 | new_master = await redis_sentinel.master_address(name) | |
172 | 181 | assert orig_master != new_master |
173 | 182 | |
174 | ret = await redis_sentinel.failover(master_name) | |
175 | assert ret is True | |
176 | await asyncio.sleep(2, loop=loop) | |
177 | ||
178 | new_master = await redis_sentinel.master_address(master_name) | |
183 | ret = await redis_sentinel.failover(name) | |
184 | assert ret is True | |
185 | await asyncio.sleep(2) | |
186 | ||
187 | new_master = await redis_sentinel.master_address(name) | |
179 | 188 | assert orig_master == new_master |
180 | 189 | |
181 | redis = redis_sentinel.slave_for(master_name) | |
182 | key, field, value = b'key:hset', b'bar', b'zap' | |
183 | while True: | |
184 | try: | |
185 | await asyncio.sleep(1, loop=loop) | |
186 | await redis.hset(key, field, value) | |
187 | except SlaveNotFoundError: | |
188 | pass | |
189 | except ReadOnlyError: | |
190 | break | |
190 | # This part takes almost 10 seconds (waiting for '+convert-to-slave'). | |
191 | # Disabled for time being. | |
192 | ||
193 | # redis = redis_sentinel.slave_for(name) | |
194 | # while True: | |
195 | # try: | |
196 | # await asyncio.sleep(.2) | |
197 | # await redis.set('foo', 'bar') | |
198 | # except SlaveNotFoundError: | |
199 | # pass | |
200 | # except ReadOnlyError: | |
201 | # break |
4 | 4 | from unittest import mock |
5 | 5 | |
6 | 6 | from aioredis import ReplyError |
7 | ||
8 | ||
9 | @pytest.mark.run_loop | |
7 | from _testutils import redis_version | |
8 | ||
9 | ||
10 | 10 | async def test_client_list(redis, server, request): |
11 | 11 | name = request.node.callspec.id |
12 | 12 | assert (await redis.client_setname(name)) |
39 | 39 | assert expected in res |
40 | 40 | |
41 | 41 | |
42 | @pytest.mark.run_loop | |
43 | 42 | @pytest.mark.skipif(sys.platform == 'win32', |
44 | 43 | reason="No unixsocket on Windows") |
45 | async def test_client_list__unixsocket(create_redis, loop, server, request): | |
46 | redis = await create_redis(server.unixsocket, loop=loop) | |
44 | async def test_client_list__unixsocket(create_redis, server, request): | |
45 | redis = await create_redis(server.unixsocket) | |
47 | 46 | name = request.node.callspec.id |
48 | 47 | assert (await redis.client_setname(name)) |
49 | 48 | res = await redis.client_list() |
74 | 73 | assert expected in info |
75 | 74 | |
76 | 75 | |
77 | @pytest.mark.run_loop | |
78 | @pytest.redis_version( | |
76 | @redis_version( | |
79 | 77 | 2, 9, 50, reason='CLIENT PAUSE is available since redis >= 2.9.50') |
80 | 78 | async def test_client_pause(redis): |
81 | ts = time.time() | |
82 | res = await redis.client_pause(2000) | |
83 | assert res is True | |
84 | await redis.ping() | |
85 | assert int(time.time() - ts) >= 2 | |
79 | tr = redis.pipeline() | |
80 | tr.time() | |
81 | tr.client_pause(100) | |
82 | tr.time() | |
83 | t1, ok, t2 = await tr.execute() | |
84 | assert ok | |
85 | assert t2 - t1 >= .1 | |
86 | 86 | |
87 | 87 | with pytest.raises(TypeError): |
88 | 88 | await redis.client_pause(2.0) |
90 | 90 | await redis.client_pause(-1) |
91 | 91 | |
92 | 92 | |
93 | @pytest.mark.run_loop | |
94 | 93 | async def test_client_getname(redis): |
95 | 94 | res = await redis.client_getname() |
96 | 95 | assert res is None |
103 | 102 | assert res == 'TestClient' |
104 | 103 | |
105 | 104 | |
106 | @pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") | |
107 | @pytest.mark.run_loop | |
105 | @redis_version(2, 8, 13, reason="available since Redis 2.8.13") | |
108 | 106 | async def test_command(redis): |
109 | 107 | res = await redis.command() |
110 | 108 | assert isinstance(res, list) |
111 | 109 | assert len(res) > 0 |
112 | 110 | |
113 | 111 | |
114 | @pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") | |
115 | @pytest.mark.run_loop | |
112 | @redis_version(2, 8, 13, reason="available since Redis 2.8.13") | |
116 | 113 | async def test_command_count(redis): |
117 | 114 | res = await redis.command_count() |
118 | 115 | assert res > 0 |
119 | 116 | |
120 | 117 | |
121 | @pytest.redis_version(3, 0, 0, reason="available since Redis 3.0.0") | |
122 | @pytest.mark.run_loop | |
118 | @redis_version(3, 0, 0, reason="available since Redis 3.0.0") | |
123 | 119 | async def test_command_getkeys(redis): |
124 | 120 | res = await redis.command_getkeys('get', 'key') |
125 | 121 | assert res == ['key'] |
136 | 132 | assert not (await redis.command_getkeys(None)) |
137 | 133 | |
138 | 134 | |
139 | @pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") | |
140 | @pytest.mark.run_loop | |
135 | @redis_version(2, 8, 13, reason="available since Redis 2.8.13") | |
141 | 136 | async def test_command_info(redis): |
142 | 137 | res = await redis.command_info('get') |
143 | 138 | assert res == [ |
150 | 145 | assert res == [None, None] |
151 | 146 | |
152 | 147 | |
153 | @pytest.mark.run_loop | |
154 | 148 | async def test_config_get(redis, server): |
155 | 149 | res = await redis.config_get('port') |
156 | 150 | assert res == {'port': str(server.tcp_address.port)} |
165 | 159 | await redis.config_get(b'port') |
166 | 160 | |
167 | 161 | |
168 | @pytest.mark.run_loop | |
169 | 162 | async def test_config_rewrite(redis): |
170 | 163 | with pytest.raises(ReplyError): |
171 | 164 | await redis.config_rewrite() |
172 | 165 | |
173 | 166 | |
174 | @pytest.mark.run_loop | |
175 | 167 | async def test_config_set(redis): |
176 | 168 | cur_value = await redis.config_get('slave-read-only') |
177 | 169 | res = await redis.config_set('slave-read-only', 'no') |
186 | 178 | await redis.config_set(100, 'databases') |
187 | 179 | |
188 | 180 | |
189 | # @pytest.mark.run_loop | |
190 | 181 | # @pytest.mark.skip("Not implemented") |
191 | 182 | # def test_config_resetstat(): |
192 | 183 | # pass |
193 | 184 | |
194 | @pytest.mark.run_loop | |
195 | 185 | async def test_debug_object(redis): |
196 | 186 | with pytest.raises(ReplyError): |
197 | 187 | assert (await redis.debug_object('key')) is None |
202 | 192 | assert res is not None |
203 | 193 | |
204 | 194 | |
205 | @pytest.mark.run_loop | |
206 | 195 | async def test_debug_sleep(redis): |
207 | 196 | t1 = await redis.time() |
208 | ok = await redis.debug_sleep(2) | |
197 | ok = await redis.debug_sleep(.2) | |
209 | 198 | assert ok |
210 | 199 | t2 = await redis.time() |
211 | assert t2 - t1 >= 2 | |
212 | ||
213 | ||
214 | @pytest.mark.run_loop | |
200 | assert t2 - t1 >= .2 | |
201 | ||
202 | ||
215 | 203 | async def test_dbsize(redis): |
216 | 204 | res = await redis.dbsize() |
217 | 205 | assert res == 0 |
229 | 217 | assert res == 1 |
230 | 218 | |
231 | 219 | |
232 | @pytest.mark.run_loop | |
233 | 220 | async def test_info(redis): |
234 | 221 | res = await redis.info() |
235 | 222 | assert isinstance(res, dict) |
241 | 228 | await redis.info('') |
242 | 229 | |
243 | 230 | |
244 | @pytest.mark.run_loop | |
245 | 231 | async def test_lastsave(redis): |
246 | 232 | res = await redis.lastsave() |
247 | 233 | assert res > 0 |
248 | 234 | |
249 | 235 | |
250 | @pytest.mark.run_loop | |
251 | @pytest.redis_version(2, 8, 12, reason='ROLE is available since redis>=2.8.12') | |
236 | @redis_version(2, 8, 12, reason='ROLE is available since redis>=2.8.12') | |
252 | 237 | async def test_role(redis): |
253 | 238 | res = await redis.role() |
254 | 239 | assert dict(res._asdict()) == { |
258 | 243 | } |
259 | 244 | |
260 | 245 | |
261 | @pytest.mark.run_loop | |
262 | 246 | async def test_save(redis): |
263 | 247 | res = await redis.dbsize() |
264 | 248 | assert res == 0 |
269 | 253 | assert t2 >= t1 |
270 | 254 | |
271 | 255 | |
272 | @pytest.mark.run_loop | |
273 | async def test_time(redis): | |
256 | @pytest.mark.parametrize('encoding', [ | |
257 | pytest.param(None, id='no decoding'), | |
258 | pytest.param('utf-8', id='with decoding'), | |
259 | ]) | |
260 | async def test_time(create_redis, server, encoding): | |
261 | redis = await create_redis(server.tcp_address, encoding='utf-8') | |
262 | now = time.time() | |
274 | 263 | res = await redis.time() |
275 | 264 | assert isinstance(res, float) |
276 | pytest.assert_almost_equal(int(res), int(time.time()), delta=10) | |
277 | ||
278 | ||
279 | @pytest.mark.run_loop | |
280 | async def test_time_with_encoding(create_redis, server, loop): | |
281 | redis = await create_redis(server.tcp_address, loop=loop, | |
282 | encoding='utf-8') | |
283 | res = await redis.time() | |
284 | assert isinstance(res, float) | |
285 | pytest.assert_almost_equal(int(res), int(time.time()), delta=10) | |
286 | ||
287 | ||
288 | @pytest.mark.run_loop | |
265 | assert res == pytest.approx(now, abs=10) | |
266 | ||
267 | ||
289 | 268 | async def test_slowlog_len(redis): |
290 | 269 | res = await redis.slowlog_len() |
291 | 270 | assert res >= 0 |
292 | 271 | |
293 | 272 | |
294 | @pytest.mark.run_loop | |
295 | 273 | async def test_slowlog_get(redis): |
296 | 274 | res = await redis.slowlog_get() |
297 | 275 | assert isinstance(res, list) |
307 | 285 | assert not (await redis.slowlog_get('1')) |
308 | 286 | |
309 | 287 | |
310 | @pytest.mark.run_loop | |
311 | 288 | async def test_slowlog_reset(redis): |
312 | 289 | ok = await redis.slowlog_reset() |
313 | 290 | assert ok is True |
0 | 0 | import pytest |
1 | ||
2 | from aioredis import ReplyError | |
3 | from _testutils import redis_version | |
1 | 4 | |
2 | 5 | |
3 | 6 | async def add(redis, key, members): |
5 | 8 | assert ok == 1 |
6 | 9 | |
7 | 10 | |
8 | @pytest.mark.run_loop | |
9 | 11 | async def test_sadd(redis): |
10 | 12 | key, member = b'key:sadd', b'hello' |
11 | 13 | # add member to the set, expected result: 1 |
24 | 26 | await redis.sadd(None, 10) |
25 | 27 | |
26 | 28 | |
27 | @pytest.mark.run_loop | |
28 | 29 | async def test_scard(redis): |
29 | 30 | key, member = b'key:scard', b'hello' |
30 | 31 | |
43 | 44 | await redis.scard(None) |
44 | 45 | |
45 | 46 | |
46 | @pytest.mark.run_loop | |
47 | 47 | async def test_sdiff(redis): |
48 | 48 | key1 = b'key:sdiff:1' |
49 | 49 | key2 = b'key:sdiff:2' |
71 | 71 | await redis.sdiff(key1, None) |
72 | 72 | |
73 | 73 | |
74 | @pytest.mark.run_loop | |
75 | 74 | async def test_sdiffstore(redis): |
76 | 75 | key1 = b'key:sdiffstore:1' |
77 | 76 | key2 = b'key:sdiffstore:2' |
103 | 102 | await redis.sdiffstore(destkey, key1, None) |
104 | 103 | |
105 | 104 | |
106 | @pytest.mark.run_loop | |
107 | 105 | async def test_sinter(redis): |
108 | 106 | key1 = b'key:sinter:1' |
109 | 107 | key2 = b'key:sinter:2' |
131 | 129 | await redis.sinter(key1, None) |
132 | 130 | |
133 | 131 | |
134 | @pytest.mark.run_loop | |
135 | 132 | async def test_sinterstore(redis): |
136 | 133 | key1 = b'key:sinterstore:1' |
137 | 134 | key2 = b'key:sinterstore:2' |
163 | 160 | await redis.sinterstore(destkey, key1, None) |
164 | 161 | |
165 | 162 | |
166 | @pytest.mark.run_loop | |
167 | 163 | async def test_sismember(redis): |
168 | 164 | key, member = b'key:sismember', b'hello' |
169 | 165 | # add member to the set, expected result: 1 |
181 | 177 | await redis.sismember(None, b'world') |
182 | 178 | |
183 | 179 | |
184 | @pytest.mark.run_loop | |
185 | 180 | async def test_smembers(redis): |
186 | 181 | key = b'key:smembers' |
187 | 182 | member1 = b'hello' |
206 | 201 | await redis.smembers(None) |
207 | 202 | |
208 | 203 | |
209 | @pytest.mark.run_loop | |
210 | 204 | async def test_smove(redis): |
211 | 205 | key1 = b'key:smove:1' |
212 | 206 | key2 = b'key:smove:2' |
246 | 240 | await redis.smove(key1, None, member1) |
247 | 241 | |
248 | 242 | |
249 | @pytest.mark.run_loop | |
250 | 243 | async def test_spop(redis): |
251 | 244 | key = b'key:spop:1' |
252 | 245 | members = b'one', b'two', b'three' |
276 | 269 | await redis.spop(None) |
277 | 270 | |
278 | 271 | |
279 | @pytest.mark.run_loop | |
272 | @redis_version( | |
273 | 3, 2, 0, | |
274 | reason="The count argument in SPOP is available since redis>=3.2.0" | |
275 | ) | |
276 | async def test_spop_count(redis): | |
277 | key = b'key:spop:1' | |
278 | members1 = b'one', b'two', b'three' | |
279 | await redis.sadd(key, *members1) | |
280 | ||
281 | # fetch 3 random members | |
282 | test_result1 = await redis.spop(key, 3) | |
283 | assert len(test_result1) == 3 | |
284 | assert set(test_result1).issubset(members1) is True | |
285 | ||
286 | members2 = 'four', 'five', 'six' | |
287 | await redis.sadd(key, *members2) | |
288 | ||
289 | # test with encoding, fetch 3 random members | |
290 | test_result2 = await redis.spop(key, 3, encoding='utf-8') | |
291 | assert len(test_result2) == 3 | |
292 | assert set(test_result2).issubset(members2) is True | |
293 | ||
294 | # try to pop data from empty set | |
295 | test_result = await redis.spop(b'not:' + key, 2) | |
296 | assert len(test_result) == 0 | |
297 | ||
298 | # test with negative counter | |
299 | with pytest.raises(ReplyError): | |
300 | await redis.spop(key, -2) | |
301 | ||
302 | # test with counter is zero | |
303 | test_result3 = await redis.spop(key, 0) | |
304 | assert len(test_result3) == 0 | |
305 | ||
306 | ||
280 | 307 | async def test_srandmember(redis): |
281 | 308 | key = b'key:srandmember:1' |
282 | 309 | members = b'one', b'two', b'three', b'four', b'five', b'six', b'seven' |
314 | 341 | await redis.srandmember(None) |
315 | 342 | |
316 | 343 | |
317 | @pytest.mark.run_loop | |
318 | 344 | async def test_srem(redis): |
319 | 345 | key = b'key:srem:1' |
320 | 346 | members = b'one', b'two', b'three', b'four', b'five', b'six', b'seven' |
339 | 365 | await redis.srem(None, members) |
340 | 366 | |
341 | 367 | |
342 | @pytest.mark.run_loop | |
343 | 368 | async def test_sunion(redis): |
344 | 369 | key1 = b'key:sunion:1' |
345 | 370 | key2 = b'key:sunion:2' |
367 | 392 | await redis.sunion(key1, None) |
368 | 393 | |
369 | 394 | |
370 | @pytest.mark.run_loop | |
371 | 395 | async def test_sunionstore(redis): |
372 | 396 | key1 = b'key:sunionstore:1' |
373 | 397 | key2 = b'key:sunionstore:2' |
399 | 423 | await redis.sunionstore(destkey, key1, None) |
400 | 424 | |
401 | 425 | |
402 | @pytest.redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') | |
403 | @pytest.mark.run_loop | |
426 | @redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') | |
404 | 427 | async def test_sscan(redis): |
405 | 428 | key = b'key:sscan' |
406 | 429 | for i in range(1, 11): |
430 | 453 | await redis.sscan(None) |
431 | 454 | |
432 | 455 | |
433 | @pytest.redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') | |
434 | @pytest.mark.run_loop | |
456 | @redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') | |
435 | 457 | async def test_isscan(redis): |
436 | 458 | key = b'key:sscan' |
437 | 459 | for i in range(1, 11): |
0 | 0 | import itertools |
1 | ||
1 | 2 | import pytest |
2 | 3 | |
3 | ||
4 | @pytest.mark.run_loop | |
4 | from _testutils import redis_version | |
5 | ||
6 | ||
7 | @redis_version(5, 0, 0, reason='BZPOPMAX is available since redis>=5.0.0') | |
8 | async def test_bzpopmax(redis): | |
9 | key1 = b'key:zpopmax:1' | |
10 | key2 = b'key:zpopmax:2' | |
11 | ||
12 | pairs = [ | |
13 | (0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') | |
14 | ] | |
15 | await redis.zadd(key1, *pairs[0]) | |
16 | await redis.zadd(key2, *itertools.chain.from_iterable(pairs)) | |
17 | ||
18 | res = await redis.bzpopmax(key1, timeout=0) | |
19 | assert res == [key1, b'a', b'0'] | |
20 | res = await redis.bzpopmax(key1, key2, timeout=0) | |
21 | assert res == [key2, b'f', b'9'] | |
22 | ||
23 | with pytest.raises(TypeError): | |
24 | await redis.bzpopmax(key1, timeout=b'one') | |
25 | with pytest.raises(ValueError): | |
26 | await redis.bzpopmax(key2, timeout=-10) | |
27 | ||
28 | ||
29 | @redis_version(5, 0, 0, reason='BZPOPMIN is available since redis>=5.0.0') | |
30 | async def test_bzpopmin(redis): | |
31 | key1 = b'key:zpopmin:1' | |
32 | key2 = b'key:zpopmin:2' | |
33 | ||
34 | pairs = [ | |
35 | (0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') | |
36 | ] | |
37 | await redis.zadd(key1, *pairs[0]) | |
38 | await redis.zadd(key2, *itertools.chain.from_iterable(pairs)) | |
39 | ||
40 | res = await redis.bzpopmin(key1, timeout=0) | |
41 | assert res == [key1, b'a', b'0'] | |
42 | res = await redis.bzpopmin(key1, key2, timeout=0) | |
43 | assert res == [key2, b'a', b'0'] | |
44 | ||
45 | with pytest.raises(TypeError): | |
46 | await redis.bzpopmin(key1, timeout=b'one') | |
47 | with pytest.raises(ValueError): | |
48 | await redis.bzpopmin(key2, timeout=-10) | |
49 | ||
50 | ||
5 | 51 | async def test_zadd(redis): |
6 | 52 | key = b'key:zadd' |
7 | 53 | res = await redis.zadd(key, 1, b'one') |
28 | 74 | await redis.zadd(key, 3, b'three', 'four', 4) |
29 | 75 | |
30 | 76 | |
31 | @pytest.redis_version( | |
77 | @redis_version( | |
32 | 78 | 3, 0, 2, reason='ZADD options is available since redis>=3.0.2', |
33 | 79 | ) |
34 | @pytest.mark.run_loop | |
35 | 80 | async def test_zadd_options(redis): |
36 | 81 | key = b'key:zaddopt' |
37 | 82 | |
65 | 110 | res = await redis.zrange(key, 0, -1, withscores=False) |
66 | 111 | assert res == [b'one', b'two'] |
67 | 112 | |
68 | ||
69 | @pytest.mark.run_loop | |
113 | res = await redis.zadd(key, 1, b'two', changed=True) | |
114 | assert res == 1 | |
115 | ||
116 | res = await redis.zadd(key, 1, b'two', incr=True) | |
117 | assert int(res) == 2 | |
118 | ||
119 | with pytest.raises(ValueError): | |
120 | await redis.zadd(key, 1, b'one', 2, b'two', incr=True) | |
121 | ||
122 | ||
70 | 123 | async def test_zcard(redis): |
71 | 124 | key = b'key:zcard' |
72 | 125 | pairs = [1, b'one', 2, b'two', 3, b'three'] |
83 | 136 | await redis.zcard(None) |
84 | 137 | |
85 | 138 | |
86 | @pytest.mark.run_loop | |
87 | 139 | async def test_zcount(redis): |
88 | 140 | key = b'key:zcount' |
89 | 141 | pairs = [1, b'one', 1, b'uno', 2.5, b'two', 3, b'three', 7, b'seven'] |
127 | 179 | await redis.zcount(key, 10, 1) |
128 | 180 | |
129 | 181 | |
130 | @pytest.mark.run_loop | |
131 | 182 | async def test_zincrby(redis): |
132 | 183 | key = b'key:zincrby' |
133 | 184 | pairs = [1, b'one', 1, b'uno', 2.5, b'two', 3, b'three'] |
147 | 198 | await redis.zincrby(key, 'one', 5) |
148 | 199 | |
149 | 200 | |
150 | @pytest.mark.run_loop | |
151 | 201 | async def test_zinterstore(redis): |
152 | 202 | zset1 = [2, 'one', 2, 'two'] |
153 | 203 | zset2 = [3, 'one', 3, 'three'] |
195 | 245 | assert res == [(b'one', 10)] |
196 | 246 | |
197 | 247 | |
198 | @pytest.redis_version( | |
248 | @redis_version( | |
199 | 249 | 2, 8, 9, reason='ZLEXCOUNT is available since redis>=2.8.9') |
200 | @pytest.mark.run_loop | |
201 | 250 | async def test_zlexcount(redis): |
202 | 251 | key = b'key:zlexcount' |
203 | 252 | pairs = [0, b'a', 0, b'b', 0, b'c', 0, b'd', 0, b'e'] |
221 | 270 | |
222 | 271 | |
223 | 272 | @pytest.mark.parametrize('encoding', [None, 'utf-8']) |
224 | @pytest.mark.run_loop | |
225 | 273 | async def test_zrange(redis, encoding): |
226 | 274 | key = b'key:zrange' |
227 | 275 | scores = [1, 1, 2.5, 3, 7] |
252 | 300 | await redis.zrange(key, 0, 'last') |
253 | 301 | |
254 | 302 | |
255 | @pytest.redis_version( | |
303 | @redis_version( | |
256 | 304 | 2, 8, 9, reason='ZRANGEBYLEX is available since redis>=2.8.9') |
257 | @pytest.mark.run_loop | |
258 | 305 | async def test_zrangebylex(redis): |
259 | 306 | key = b'key:zrangebylex' |
260 | 307 | scores = [0] * 5 |
298 | 345 | offset=1, count='one') |
299 | 346 | |
300 | 347 | |
301 | @pytest.mark.run_loop | |
302 | 348 | async def test_zrank(redis): |
303 | 349 | key = b'key:zrank' |
304 | 350 | scores = [1, 1, 2.5, 3, 7] |
320 | 366 | |
321 | 367 | |
322 | 368 | @pytest.mark.parametrize('encoding', [None, 'utf-8']) |
323 | @pytest.mark.run_loop | |
324 | 369 | async def test_zrangebyscore(redis, encoding): |
325 | 370 | key = b'key:zrangebyscore' |
326 | 371 | scores = [1, 1, 2.5, 3, 7] |
364 | 409 | await redis.zrangebyscore(key, 1, 7, offset=1, count='one') |
365 | 410 | |
366 | 411 | |
367 | @pytest.mark.run_loop | |
368 | 412 | async def test_zrem(redis): |
369 | 413 | key = b'key:zrem' |
370 | 414 | scores = [1, 1, 2.5, 3, 7] |
390 | 434 | await redis.zrem(None, b'one') |
391 | 435 | |
392 | 436 | |
393 | @pytest.redis_version( | |
437 | @redis_version( | |
394 | 438 | 2, 8, 9, reason='ZREMRANGEBYLEX is available since redis>=2.8.9') |
395 | @pytest.mark.run_loop | |
396 | 439 | async def test_zremrangebylex(redis): |
397 | 440 | key = b'key:zremrangebylex' |
398 | 441 | members = [b'aaaa', b'b', b'c', b'd', b'e', b'foo', b'zap', b'zip', |
431 | 474 | await redis.zremrangebylex(key, b'a', 20) |
432 | 475 | |
433 | 476 | |
434 | @pytest.mark.run_loop | |
435 | 477 | async def test_zremrangebyrank(redis): |
436 | 478 | key = b'key:zremrangebyrank' |
437 | 479 | scores = [0, 1, 2, 3, 4, 5] |
458 | 500 | await redis.zremrangebyrank(key, 0, 'last') |
459 | 501 | |
460 | 502 | |
461 | @pytest.mark.run_loop | |
462 | 503 | async def test_zremrangebyscore(redis): |
463 | 504 | key = b'key:zremrangebyscore' |
464 | 505 | scores = [1, 1, 2.5, 3, 7] |
493 | 534 | |
494 | 535 | |
495 | 536 | @pytest.mark.parametrize('encoding', [None, 'utf-8']) |
496 | @pytest.mark.run_loop | |
497 | 537 | async def test_zrevrange(redis, encoding): |
498 | 538 | key = b'key:zrevrange' |
499 | 539 | scores = [1, 1, 2.5, 3, 7] |
528 | 568 | await redis.zrevrange(key, 0, 'last') |
529 | 569 | |
530 | 570 | |
531 | @pytest.mark.run_loop | |
532 | 571 | async def test_zrevrank(redis): |
533 | 572 | key = b'key:zrevrank' |
534 | 573 | scores = [1, 1, 2.5, 3, 7] |
549 | 588 | await redis.zrevrank(None, b'one') |
550 | 589 | |
551 | 590 | |
552 | @pytest.mark.run_loop | |
553 | 591 | async def test_zscore(redis): |
554 | 592 | key = b'key:zscore' |
555 | 593 | scores = [1, 1, 2.5, 3, 7] |
569 | 607 | assert res is None |
570 | 608 | |
571 | 609 | |
572 | @pytest.mark.run_loop | |
573 | 610 | async def test_zunionstore(redis): |
574 | 611 | zset1 = [2, 'one', 2, 'two'] |
575 | 612 | zset2 = [3, 'one', 3, 'three'] |
618 | 655 | |
619 | 656 | |
620 | 657 | @pytest.mark.parametrize('encoding', [None, 'utf-8']) |
621 | @pytest.mark.run_loop | |
622 | 658 | async def test_zrevrangebyscore(redis, encoding): |
623 | 659 | key = b'key:zrevrangebyscore' |
624 | 660 | scores = [1, 1, 2.5, 3, 7] |
663 | 699 | await redis.zrevrangebyscore(key, 1, 7, offset=1, count='one') |
664 | 700 | |
665 | 701 | |
666 | @pytest.redis_version( | |
702 | @redis_version( | |
667 | 703 | 2, 8, 9, reason='ZREVRANGEBYLEX is available since redis>=2.8.9') |
668 | @pytest.mark.run_loop | |
669 | 704 | async def test_zrevrangebylex(redis): |
670 | 705 | key = b'key:zrevrangebylex' |
671 | 706 | scores = [0] * 5 |
711 | 746 | offset=1, count='one') |
712 | 747 | |
713 | 748 | |
714 | @pytest.redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') | |
715 | @pytest.mark.run_loop | |
749 | @redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') | |
716 | 750 | async def test_zscan(redis): |
717 | 751 | key = b'key:zscan' |
718 | 752 | scores, members = [], [] |
745 | 779 | await redis.zscan(None) |
746 | 780 | |
747 | 781 | |
748 | @pytest.redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') | |
749 | @pytest.mark.run_loop | |
782 | @redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') | |
750 | 783 | async def test_izscan(redis): |
751 | 784 | key = b'key:zscan' |
752 | 785 | scores, members = [], [] |
783 | 816 | |
784 | 817 | with pytest.raises(TypeError): |
785 | 818 | await redis.izscan(None) |
819 | ||
820 | ||
821 | @redis_version(5, 0, 0, reason='ZPOPMAX is available since redis>=5.0.0') | |
822 | async def test_zpopmax(redis): | |
823 | key = b'key:zpopmax' | |
824 | ||
825 | pairs = [ | |
826 | (0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') | |
827 | ] | |
828 | await redis.zadd(key, *itertools.chain.from_iterable(pairs)) | |
829 | ||
830 | assert await redis.zpopmax(key) == [b'f', b'9'] | |
831 | assert await redis.zpopmax(key, 3) == [b'e', b'8', b'c', b'5', b'g', b'3'] | |
832 | ||
833 | with pytest.raises(TypeError): | |
834 | await redis.zpopmax(key, b'b') | |
835 | ||
836 | ||
837 | @redis_version(5, 0, 0, reason='ZPOPMIN is available since redis>=5.0.0') | |
838 | async def test_zpopmin(redis): | |
839 | key = b'key:zpopmin' | |
840 | ||
841 | pairs = [ | |
842 | (0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') | |
843 | ] | |
844 | await redis.zadd(key, *itertools.chain.from_iterable(pairs)) | |
845 | ||
846 | assert await redis.zpopmin(key) == [b'a', b'0'] | |
847 | assert await redis.zpopmin(key, 3) == [b'd', b'2', b'g', b'3', b'c', b'5'] | |
848 | ||
849 | with pytest.raises(TypeError): | |
850 | await redis.zpopmin(key, b'b') |
0 | import pytest | |
1 | 0 | |
2 | ||
3 | @pytest.mark.run_loop | |
4 | async def test_ssl_connection(create_connection, loop, server, ssl_proxy): | |
1 | async def test_ssl_connection(create_connection, server, ssl_proxy): | |
5 | 2 | ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) |
6 | 3 | |
7 | 4 | conn = await create_connection( |
8 | ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) | |
5 | ('localhost', ssl_port), ssl=ssl_ctx) | |
9 | 6 | res = await conn.execute('ping') |
10 | 7 | assert res == b'PONG' |
11 | 8 | |
12 | 9 | |
13 | @pytest.mark.run_loop | |
14 | async def test_ssl_redis(create_redis, loop, server, ssl_proxy): | |
10 | async def test_ssl_redis(create_redis, server, ssl_proxy): | |
15 | 11 | ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) |
16 | 12 | |
17 | 13 | redis = await create_redis( |
18 | ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) | |
14 | ('localhost', ssl_port), ssl=ssl_ctx) | |
19 | 15 | res = await redis.ping() |
20 | 16 | assert res == b'PONG' |
21 | 17 | |
22 | 18 | |
23 | @pytest.mark.run_loop | |
24 | async def test_ssl_pool(create_pool, server, loop, ssl_proxy): | |
19 | async def test_ssl_pool(create_pool, server, ssl_proxy): | |
25 | 20 | ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) |
26 | 21 | |
27 | 22 | pool = await create_pool( |
28 | ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) | |
23 | ('localhost', ssl_port), ssl=ssl_ctx) | |
29 | 24 | with (await pool) as conn: |
30 | 25 | res = await conn.execute('PING') |
31 | 26 | assert res == b'PONG' |
3 | 3 | from collections import OrderedDict |
4 | 4 | from unittest import mock |
5 | 5 | |
6 | from aioredis import ReplyError | |
7 | ||
8 | ||
9 | @asyncio.coroutine | |
10 | async def add_message_with_sleep(redis, loop, stream, fields): | |
11 | await asyncio.sleep(0.2, loop=loop) | |
6 | from aioredis.commands.streams import parse_messages | |
7 | from aioredis.errors import BusyGroupError | |
8 | from _testutils import redis_version | |
9 | ||
10 | pytestmark = redis_version( | |
11 | 5, 0, 0, reason="Streams only available since Redis 5.0.0") | |
12 | ||
13 | ||
14 | async def add_message_with_sleep(redis, stream, fields): | |
15 | await asyncio.sleep(0.2) | |
12 | 16 | result = await redis.xadd(stream, fields) |
13 | 17 | return result |
14 | 18 | |
15 | 19 | |
16 | @pytest.mark.run_loop | |
17 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
18 | "unstable branch") | |
19 | 20 | async def test_xadd(redis, server_bin): |
20 | 21 | fields = OrderedDict(( |
21 | 22 | (b'field1', b'value1'), |
40 | 41 | ) |
41 | 42 | |
42 | 43 | |
43 | @pytest.mark.run_loop | |
44 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
45 | "unstable branch") | |
46 | 44 | async def test_xadd_maxlen_exact(redis, server_bin): |
47 | 45 | message_id1 = await redis.xadd('test_stream', {'f1': 'v1'}) # noqa |
48 | 46 | |
69 | 67 | assert message3[1] == OrderedDict([(b'f3', b'v3')]) |
70 | 68 | |
71 | 69 | |
72 | @pytest.mark.run_loop | |
73 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
74 | "unstable branch") | |
75 | 70 | async def test_xadd_manual_message_ids(redis, server_bin): |
76 | 71 | await redis.xadd('test_stream', {'f1': 'v1'}, message_id='1515958771000-0') |
77 | 72 | await redis.xadd('test_stream', {'f1': 'v1'}, message_id='1515958771000-1') |
86 | 81 | ] |
87 | 82 | |
88 | 83 | |
89 | @pytest.mark.run_loop | |
90 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
91 | "unstable branch") | |
92 | 84 | async def test_xadd_maxlen_inexact(redis, server_bin): |
93 | 85 | await redis.xadd('test_stream', {'f1': 'v1'}) |
94 | 86 | # Ensure the millisecond-based message ID increments |
110 | 102 | assert len(messages) < 1000 |
111 | 103 | |
112 | 104 | |
113 | @pytest.mark.run_loop | |
114 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
115 | "unstable branch") | |
116 | 105 | async def test_xrange(redis, server_bin): |
117 | 106 | stream = 'test_stream' |
118 | 107 | fields = OrderedDict(( |
166 | 155 | assert len(messages) == 2 |
167 | 156 | |
168 | 157 | |
169 | @pytest.mark.run_loop | |
170 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
171 | "unstable branch") | |
172 | 158 | async def test_xrevrange(redis, server_bin): |
173 | 159 | stream = 'test_stream' |
174 | 160 | fields = OrderedDict(( |
222 | 208 | assert len(messages) == 2 |
223 | 209 | |
224 | 210 | |
225 | @pytest.mark.run_loop | |
226 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
227 | "unstable branch") | |
228 | 211 | async def test_xread_selection(redis, server_bin): |
229 | 212 | """Test use of counts and starting IDs""" |
230 | 213 | stream = 'test_stream' |
257 | 240 | assert len(messages) == 2 |
258 | 241 | |
259 | 242 | |
260 | @pytest.mark.run_loop | |
261 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
262 | "unstable branch") | |
263 | async def test_xread_blocking(redis, create_redis, loop, server, server_bin): | |
243 | async def test_xread_blocking(redis, create_redis, server, server_bin): | |
264 | 244 | """Test the blocking read features""" |
265 | 245 | fields = OrderedDict(( |
266 | 246 | (b'field1', b'value1'), |
267 | 247 | (b'field2', b'value2'), |
268 | 248 | )) |
269 | 249 | other_redis = await create_redis( |
270 | server.tcp_address, loop=loop) | |
250 | server.tcp_address) | |
271 | 251 | |
272 | 252 | # create blocking task in separate connection |
273 | 253 | consumer = other_redis.xread(['test_stream'], timeout=1000) |
274 | 254 | |
275 | 255 | producer_task = asyncio.Task( |
276 | add_message_with_sleep(redis, loop, 'test_stream', fields), loop=loop) | |
277 | results = await asyncio.gather( | |
278 | consumer, producer_task, loop=loop) | |
256 | add_message_with_sleep(redis, 'test_stream', fields)) | |
257 | results = await asyncio.gather(consumer, producer_task) | |
279 | 258 | |
280 | 259 | received_messages, sent_message_id = results |
281 | 260 | assert len(received_messages) == 1 |
295 | 274 | other_redis.close() |
296 | 275 | |
297 | 276 | |
298 | @pytest.mark.run_loop | |
299 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
300 | "unstable branch") | |
301 | 277 | async def test_xgroup_create(redis, server_bin): |
302 | 278 | # Also tests xinfo_groups() |
303 | # TODO: Remove xadd() if resolved: | |
304 | # https://github.com/antirez/redis/issues/4824 | |
305 | 279 | await redis.xadd('test_stream', {'a': 1}) |
306 | 280 | await redis.xgroup_create('test_stream', 'test_group') |
307 | 281 | info = await redis.xinfo_groups('test_stream') |
313 | 287 | }] |
314 | 288 | |
315 | 289 | |
316 | @pytest.mark.run_loop | |
317 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
318 | "unstable branch") | |
290 | async def test_xgroup_create_mkstream(redis, server_bin): | |
291 | await redis.xgroup_create('test_stream', 'test_group', mkstream=True) | |
292 | info = await redis.xinfo_groups('test_stream') | |
293 | assert info == [{ | |
294 | b'name': b'test_group', | |
295 | b'last-delivered-id': mock.ANY, | |
296 | b'pending': 0, | |
297 | b'consumers': 0 | |
298 | }] | |
299 | ||
300 | ||
319 | 301 | async def test_xgroup_create_already_exists(redis, server_bin): |
320 | 302 | await redis.xadd('test_stream', {'a': 1}) |
321 | 303 | await redis.xgroup_create('test_stream', 'test_group') |
322 | with pytest.raises(ReplyError): | |
304 | with pytest.raises(BusyGroupError): | |
323 | 305 | await redis.xgroup_create('test_stream', 'test_group') |
324 | 306 | |
325 | 307 | |
326 | @pytest.mark.run_loop | |
327 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
328 | "unstable branch") | |
329 | 308 | async def test_xgroup_setid(redis, server_bin): |
330 | 309 | await redis.xadd('test_stream', {'a': 1}) |
331 | 310 | await redis.xgroup_create('test_stream', 'test_group') |
332 | 311 | await redis.xgroup_setid('test_stream', 'test_group', '$') |
333 | 312 | |
334 | 313 | |
335 | @pytest.mark.run_loop | |
336 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
337 | "unstable branch") | |
338 | 314 | async def test_xgroup_destroy(redis, server_bin): |
339 | 315 | await redis.xadd('test_stream', {'a': 1}) |
340 | 316 | await redis.xgroup_create('test_stream', 'test_group') |
343 | 319 | assert not info |
344 | 320 | |
345 | 321 | |
346 | @pytest.mark.run_loop | |
347 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
348 | "unstable branch") | |
349 | 322 | async def test_xread_group(redis): |
350 | 323 | await redis.xadd('test_stream', {'a': 1}) |
351 | 324 | await redis.xgroup_create('test_stream', 'test_group', latest_id='0') |
352 | 325 | |
326 | # read all pending messages | |
353 | 327 | messages = await redis.xread_group( |
354 | 328 | 'test_group', 'test_consumer', ['test_stream'], |
355 | timeout=1000, latest_ids=[0] | |
329 | timeout=1000, latest_ids=['>'] | |
356 | 330 | ) |
357 | 331 | assert len(messages) == 1 |
358 | 332 | stream, message_id, fields = messages[0] |
361 | 335 | assert fields == {b'a': b'1'} |
362 | 336 | |
363 | 337 | |
364 | @pytest.mark.run_loop | |
365 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
366 | "unstable branch") | |
338 | async def test_xread_group_with_no_ack(redis): | |
339 | await redis.xadd('test_stream', {'a': 1}) | |
340 | await redis.xgroup_create('test_stream', 'test_group', latest_id='0') | |
341 | ||
342 | # read all pending messages | |
343 | messages = await redis.xread_group( | |
344 | 'test_group', 'test_consumer', ['test_stream'], | |
345 | timeout=1000, latest_ids=['>'], no_ack=True | |
346 | ) | |
347 | assert len(messages) == 1 | |
348 | stream, message_id, fields = messages[0] | |
349 | assert stream == b'test_stream' | |
350 | assert message_id | |
351 | assert fields == {b'a': b'1'} | |
352 | ||
353 | ||
367 | 354 | async def test_xack_and_xpending(redis): |
368 | 355 | # Test a full xread -> xack cycle, using xpending to check the status |
369 | 356 | message_id = await redis.xadd('test_stream', {'a': 1}) |
377 | 364 | # Read the message |
378 | 365 | await redis.xread_group( |
379 | 366 | 'test_group', 'test_consumer', ['test_stream'], |
380 | timeout=1000, latest_ids=[0] | |
367 | timeout=1000, latest_ids=['>'] | |
381 | 368 | ) |
382 | 369 | |
383 | 370 | # It is now pending |
397 | 384 | assert pending_count == 0 |
398 | 385 | |
399 | 386 | |
400 | @pytest.mark.run_loop | |
401 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
402 | "unstable branch") | |
403 | 387 | async def test_xpending_get_messages(redis): |
404 | 388 | # Like test_xack_and_xpending(), but using the start/end xpending() |
405 | 389 | # params to get the messages |
407 | 391 | await redis.xgroup_create('test_stream', 'test_group', latest_id='0') |
408 | 392 | await redis.xread_group( |
409 | 393 | 'test_group', 'test_consumer', ['test_stream'], |
410 | timeout=1000, latest_ids=[0] | |
394 | timeout=1000, latest_ids=['>'] | |
411 | 395 | ) |
412 | 396 | await asyncio.sleep(0.05) |
413 | 397 | |
425 | 409 | assert num_deliveries == 1 |
426 | 410 | |
427 | 411 | |
428 | @pytest.mark.run_loop | |
429 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
430 | "unstable branch") | |
431 | 412 | async def test_xpending_start_of_zero(redis): |
432 | 413 | await redis.xadd('test_stream', {'a': 1}) |
433 | 414 | await redis.xgroup_create('test_stream', 'test_group', latest_id='0') |
435 | 416 | await redis.xpending('test_stream', 'test_group', 0, '+', 10) |
436 | 417 | |
437 | 418 | |
438 | @pytest.mark.run_loop | |
439 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
440 | "unstable branch") | |
441 | 419 | async def test_xclaim_simple(redis): |
442 | 420 | # Put a message in a pending state then reclaim it is XCLAIM |
443 | 421 | message_id = await redis.xadd('test_stream', {'a': 1}) |
444 | 422 | await redis.xgroup_create('test_stream', 'test_group', latest_id='0') |
445 | 423 | await redis.xread_group( |
446 | 424 | 'test_group', 'test_consumer', ['test_stream'], |
447 | timeout=1000, latest_ids=[0] | |
425 | timeout=1000, latest_ids=['>'] | |
448 | 426 | ) |
449 | 427 | |
450 | 428 | # Message is now pending |
468 | 446 | assert pel == [[b'new_consumer', b'1']] |
469 | 447 | |
470 | 448 | |
471 | @pytest.mark.run_loop | |
472 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
473 | "unstable branch") | |
474 | 449 | async def test_xclaim_min_idle_time_includes_messages(redis): |
475 | 450 | message_id = await redis.xadd('test_stream', {'a': 1}) |
476 | 451 | await redis.xgroup_create('test_stream', 'test_group', latest_id='0') |
477 | 452 | await redis.xread_group( |
478 | 453 | 'test_group', 'test_consumer', ['test_stream'], |
479 | timeout=1000, latest_ids=[0] | |
454 | timeout=1000, latest_ids=['>'] | |
480 | 455 | ) |
481 | 456 | |
482 | 457 | # Message is now pending. Wait 100ms |
488 | 463 | assert result |
489 | 464 | |
490 | 465 | |
491 | @pytest.mark.run_loop | |
492 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
493 | "unstable branch") | |
494 | 466 | async def test_xclaim_min_idle_time_excludes_messages(redis): |
495 | 467 | message_id = await redis.xadd('test_stream', {'a': 1}) |
496 | 468 | await redis.xgroup_create('test_stream', 'test_group', latest_id='0') |
497 | 469 | await redis.xread_group( |
498 | 470 | 'test_group', 'test_consumer', ['test_stream'], |
499 | timeout=1000, latest_ids=[0] | |
471 | timeout=1000, latest_ids=['>'] | |
500 | 472 | ) |
501 | 473 | # Message is now pending. Wait no time at all |
502 | 474 | |
507 | 479 | assert not result |
508 | 480 | |
509 | 481 | |
510 | @pytest.mark.run_loop | |
511 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
512 | "unstable branch") | |
513 | 482 | async def test_xgroup_delconsumer(redis, create_redis, server): |
514 | 483 | await redis.xadd('test_stream', {'a': 1}) |
515 | 484 | await redis.xgroup_create('test_stream', 'test_group') |
530 | 499 | assert not info |
531 | 500 | |
532 | 501 | |
533 | @pytest.mark.run_loop | |
534 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
535 | "unstable branch") | |
502 | async def test_xdel_stream(redis): | |
503 | message_id = await redis.xadd('test_stream', {'a': 1}) | |
504 | response = await redis.xdel('test_stream', id=message_id) | |
505 | assert response >= 0 | |
506 | ||
507 | ||
508 | async def test_xtrim_stream(redis): | |
509 | await redis.xadd('test_stream', {'a': 1}) | |
510 | await redis.xadd('test_stream', {'b': 1}) | |
511 | await redis.xadd('test_stream', {'c': 1}) | |
512 | response = await redis.xtrim('test_stream', max_len=1, exact_len=False) | |
513 | assert response >= 0 | |
514 | ||
515 | ||
516 | async def test_xlen_stream(redis): | |
517 | await redis.xadd('test_stream', {'a': 1}) | |
518 | response = await redis.xlen('test_stream') | |
519 | assert response >= 0 | |
520 | ||
521 | ||
536 | 522 | async def test_xinfo_consumers(redis): |
537 | 523 | await redis.xadd('test_stream', {'a': 1}) |
538 | 524 | await redis.xgroup_create('test_stream', 'test_group') |
550 | 536 | assert isinstance(info[0], dict) |
551 | 537 | |
552 | 538 | |
553 | @pytest.mark.run_loop | |
554 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
555 | "unstable branch") | |
556 | 539 | async def test_xinfo_stream(redis): |
557 | 540 | await redis.xadd('test_stream', {'a': 1}) |
558 | 541 | await redis.xgroup_create('test_stream', 'test_group') |
574 | 557 | assert isinstance(info, dict) |
575 | 558 | |
576 | 559 | |
577 | @pytest.mark.run_loop | |
578 | @pytest.redis_version(999, 999, 999, reason="Streams only available on redis " | |
579 | "unstable branch") | |
580 | 560 | async def test_xinfo_help(redis): |
581 | 561 | info = await redis.xinfo_help() |
582 | 562 | assert info |
563 | ||
564 | ||
565 | @pytest.mark.parametrize('param', [0.1, '1']) | |
566 | async def test_xread_param_types(redis, param): | |
567 | with pytest.raises(TypeError): | |
568 | await redis.xread( | |
569 | ["system_event_stream"], | |
570 | timeout=param, latest_ids=[0] | |
571 | ) | |
572 | ||
573 | ||
574 | def test_parse_messages_ok(): | |
575 | message = [(b'123', [b'f1', b'v1', b'f2', b'v2'])] | |
576 | assert parse_messages(message) == [(b'123', {b'f1': b'v1', b'f2': b'v2'})] | |
577 | ||
578 | ||
579 | def test_parse_messages_null_fields(): | |
580 | # Redis can sometimes respond with a fields value of 'null', | |
581 | # so ensure we handle that sensibly | |
582 | message = [(b'123', None)] | |
583 | assert parse_messages(message) == [] | |
584 | ||
585 | ||
586 | def test_parse_messages_null_message(): | |
587 | # Redis can sometimes respond with a fields value of 'null', | |
588 | # so ensure we handle that sensibly | |
589 | message = [None] | |
590 | assert parse_messages(message) == [] |
16 | 16 | return reader |
17 | 17 | |
18 | 18 | |
19 | @pytest.mark.run_loop | |
20 | 19 | async def test_feed_and_parse(reader): |
21 | 20 | reader.feed_data(b'+PONG\r\n') |
22 | 21 | assert (await reader.readobj()) == b'PONG' |
23 | 22 | |
24 | 23 | |
25 | @pytest.mark.run_loop | |
26 | 24 | async def test_buffer_available_after_RST(reader): |
27 | 25 | reader.feed_data(b'+PONG\r\n') |
28 | 26 | reader.set_exception(Exception()) |
45 | 43 | 'read_method', |
46 | 44 | ['read', 'readline', 'readuntil', 'readexactly'] |
47 | 45 | ) |
48 | @pytest.mark.run_loop | |
49 | 46 | async def test_read_flavors_not_supported(reader, read_method): |
50 | 47 | with pytest.raises(RuntimeError): |
51 | 48 | await getattr(reader, read_method)() |
1 | 1 | import pytest |
2 | 2 | |
3 | 3 | from aioredis import ReplyError |
4 | from _testutils import redis_version | |
4 | 5 | |
5 | 6 | |
6 | 7 | async def add(redis, key, value): |
8 | 9 | assert ok is True |
9 | 10 | |
10 | 11 | |
11 | @pytest.mark.run_loop | |
12 | 12 | async def test_append(redis): |
13 | 13 | len_ = await redis.append('my-key', 'Hello') |
14 | 14 | assert len_ == 5 |
24 | 24 | await redis.append('none-key', None) |
25 | 25 | |
26 | 26 | |
27 | @pytest.mark.run_loop | |
28 | 27 | async def test_bitcount(redis): |
29 | 28 | await add(redis, 'my-key', b'\x00\x10\x01') |
30 | 29 | |
55 | 54 | await redis.bitcount('my-key', 2, None) |
56 | 55 | |
57 | 56 | |
58 | @pytest.mark.run_loop | |
59 | 57 | async def test_bitop_and(redis): |
60 | 58 | key1, value1 = b'key:bitop:and:1', 5 |
61 | 59 | key2, value2 = b'key:bitop:and:2', 7 |
77 | 75 | await redis.bitop_and(destkey, key1, None) |
78 | 76 | |
79 | 77 | |
80 | @pytest.mark.run_loop | |
81 | 78 | async def test_bitop_or(redis): |
82 | 79 | key1, value1 = b'key:bitop:or:1', 5 |
83 | 80 | key2, value2 = b'key:bitop:or:2', 7 |
99 | 96 | await redis.bitop_or(destkey, key1, None) |
100 | 97 | |
101 | 98 | |
102 | @pytest.mark.run_loop | |
103 | 99 | async def test_bitop_xor(redis): |
104 | 100 | key1, value1 = b'key:bitop:xor:1', 5 |
105 | 101 | key2, value2 = b'key:bitop:xor:2', 7 |
121 | 117 | await redis.bitop_xor(destkey, key1, None) |
122 | 118 | |
123 | 119 | |
124 | @pytest.mark.run_loop | |
125 | 120 | async def test_bitop_not(redis): |
126 | 121 | key1, value1 = b'key:bitop:not:1', 5 |
127 | 122 | await add(redis, key1, value1) |
138 | 133 | await redis.bitop_not(destkey, None) |
139 | 134 | |
140 | 135 | |
141 | @pytest.redis_version(2, 8, 0, reason='BITPOS is available since redis>=2.8.0') | |
142 | @pytest.mark.run_loop | |
136 | @redis_version(2, 8, 0, reason='BITPOS is available since redis>=2.8.0') | |
143 | 137 | async def test_bitpos(redis): |
144 | 138 | key, value = b'key:bitop', b'\xff\xf0\x00' |
145 | 139 | await add(redis, key, value) |
172 | 166 | test_value = await redis.bitpos(key, 7) |
173 | 167 | |
174 | 168 | |
175 | @pytest.mark.run_loop | |
176 | 169 | async def test_decr(redis): |
177 | 170 | await redis.delete('key') |
178 | 171 | |
191 | 184 | await redis.decr(None) |
192 | 185 | |
193 | 186 | |
194 | @pytest.mark.run_loop | |
195 | 187 | async def test_decrby(redis): |
196 | 188 | await redis.delete('key') |
197 | 189 | |
214 | 206 | await redis.decrby('key', None) |
215 | 207 | |
216 | 208 | |
217 | @pytest.mark.run_loop | |
218 | 209 | async def test_get(redis): |
219 | 210 | await add(redis, 'my-key', 'value') |
220 | 211 | ret = await redis.get('my-key') |
231 | 222 | await redis.get(None) |
232 | 223 | |
233 | 224 | |
234 | @pytest.mark.run_loop | |
235 | 225 | async def test_getbit(redis): |
236 | 226 | key, value = b'key:getbit', 10 |
237 | 227 | await add(redis, key, value) |
259 | 249 | await redis.getbit(key, -7) |
260 | 250 | |
261 | 251 | |
262 | @pytest.mark.run_loop | |
263 | 252 | async def test_getrange(redis): |
264 | 253 | key, value = b'key:getrange', b'This is a string' |
265 | 254 | await add(redis, key, value) |
293 | 282 | await redis.getrange(key, 0, b'seven') |
294 | 283 | |
295 | 284 | |
296 | @pytest.mark.run_loop | |
297 | 285 | async def test_getset(redis): |
298 | 286 | key, value = b'key:getset', b'hello' |
299 | 287 | await add(redis, key, value) |
318 | 306 | await redis.getset(None, b'asyncio') |
319 | 307 | |
320 | 308 | |
321 | @pytest.mark.run_loop | |
322 | 309 | async def test_incr(redis): |
323 | 310 | await redis.delete('key') |
324 | 311 | |
337 | 324 | await redis.incr(None) |
338 | 325 | |
339 | 326 | |
340 | @pytest.mark.run_loop | |
341 | 327 | async def test_incrby(redis): |
342 | 328 | await redis.delete('key') |
343 | 329 | |
360 | 346 | await redis.incrby('key', None) |
361 | 347 | |
362 | 348 | |
363 | @pytest.mark.run_loop | |
364 | 349 | async def test_incrbyfloat(redis): |
365 | 350 | await redis.delete('key') |
366 | 351 | |
387 | 372 | await redis.incrbyfloat('key', '1.0') |
388 | 373 | |
389 | 374 | |
390 | @pytest.mark.run_loop | |
391 | 375 | async def test_mget(redis): |
392 | 376 | key1, value1 = b'foo', b'bar' |
393 | 377 | key2, value2 = b'baz', b'bzz' |
412 | 396 | await redis.mget(key1, None) |
413 | 397 | |
414 | 398 | |
415 | @pytest.mark.run_loop | |
416 | 399 | async def test_mset(redis): |
417 | 400 | key1, value1 = b'key:mset:1', b'hello' |
418 | 401 | key2, value2 = b'key:mset:2', b'world' |
432 | 415 | await redis.mset(key1, value1, key1) |
433 | 416 | |
434 | 417 | |
435 | @pytest.mark.run_loop | |
418 | async def test_mset_with_dict(redis): | |
419 | array = [str(n) for n in range(10)] | |
420 | _dict = dict.fromkeys(array, 'default value', ) | |
421 | ||
422 | await redis.mset(_dict) | |
423 | ||
424 | test_values = await redis.mget(*_dict.keys()) | |
425 | assert test_values == [str.encode(val) for val in _dict.values()] | |
426 | ||
427 | with pytest.raises(TypeError): | |
428 | await redis.mset('param', ) | |
429 | ||
430 | ||
436 | 431 | async def test_msetnx(redis): |
437 | 432 | key1, value1 = b'key:msetnx:1', b'Hello' |
438 | 433 | key2, value2 = b'key:msetnx:2', b'there' |
453 | 448 | await redis.msetnx(key1, value1, key2) |
454 | 449 | |
455 | 450 | |
456 | @pytest.mark.run_loop | |
457 | async def test_psetex(redis, loop): | |
451 | async def test_psetex(redis): | |
458 | 452 | key, value = b'key:psetex:1', b'Hello' |
459 | 453 | # test expiration in milliseconds |
460 | 454 | tr = redis.multi_exec() |
465 | 459 | test_value = await fut2 |
466 | 460 | assert test_value == value |
467 | 461 | |
468 | await asyncio.sleep(0.050, loop=loop) | |
462 | await asyncio.sleep(0.050) | |
469 | 463 | test_value = await redis.get(key) |
470 | 464 | assert test_value is None |
471 | 465 | |
475 | 469 | await redis.psetex(key, 7.5, value) |
476 | 470 | |
477 | 471 | |
478 | @pytest.mark.run_loop | |
479 | 472 | async def test_set(redis): |
480 | 473 | ok = await redis.set('my-key', 'value') |
481 | 474 | assert ok is True |
490 | 483 | await redis.set(None, 'value') |
491 | 484 | |
492 | 485 | |
493 | @pytest.mark.run_loop | |
494 | async def test_set_expire(redis, loop): | |
486 | async def test_set_expire(redis): | |
495 | 487 | key, value = b'key:set:expire', b'foo' |
496 | 488 | # test expiration in milliseconds |
497 | 489 | tr = redis.multi_exec() |
501 | 493 | await fut1 |
502 | 494 | result_1 = await fut2 |
503 | 495 | assert result_1 == value |
504 | await asyncio.sleep(0.050, loop=loop) | |
496 | await asyncio.sleep(0.050) | |
505 | 497 | result_2 = await redis.get(key) |
506 | 498 | assert result_2 is None |
507 | 499 | |
513 | 505 | await fut1 |
514 | 506 | result_3 = await fut2 |
515 | 507 | assert result_3 == value |
516 | await asyncio.sleep(1.050, loop=loop) | |
508 | await asyncio.sleep(1.050) | |
517 | 509 | result_4 = await redis.get(key) |
518 | 510 | assert result_4 is None |
519 | 511 | |
520 | 512 | |
521 | @pytest.mark.run_loop | |
522 | 513 | async def test_set_only_if_not_exists(redis): |
523 | 514 | key, value = b'key:set:only_if_not_exists', b'foo' |
524 | 515 | await redis.set( |
534 | 525 | assert result_2 == value |
535 | 526 | |
536 | 527 | |
537 | @pytest.mark.run_loop | |
538 | 528 | async def test_set_only_if_exists(redis): |
539 | 529 | key, value = b'key:set:only_if_exists', b'only_if_exists:foo' |
540 | 530 | # ensure that such key does not exits, and value not sets |
550 | 540 | assert result_2 == b'foo' |
551 | 541 | |
552 | 542 | |
553 | @pytest.mark.run_loop | |
554 | 543 | async def test_set_wrong_input(redis): |
555 | 544 | key, value = b'key:set:', b'foo' |
556 | 545 | |
562 | 551 | await redis.set(key, value, pexpire=7.8) |
563 | 552 | |
564 | 553 | |
565 | @pytest.mark.run_loop | |
566 | 554 | async def test_setbit(redis): |
567 | 555 | key = b'key:setbit' |
568 | 556 | result = await redis.setbit(key, 7, 1) |
580 | 568 | await redis.setbit(key, 1, 7) |
581 | 569 | |
582 | 570 | |
583 | @pytest.mark.run_loop | |
584 | async def test_setex(redis, loop): | |
571 | async def test_setex(redis): | |
585 | 572 | key, value = b'key:setex:1', b'Hello' |
586 | 573 | tr = redis.multi_exec() |
587 | 574 | fut1 = tr.setex(key, 1, value) |
590 | 577 | await fut1 |
591 | 578 | test_value = await fut2 |
592 | 579 | assert test_value == value |
593 | await asyncio.sleep(1.050, loop=loop) | |
580 | await asyncio.sleep(1.050) | |
594 | 581 | test_value = await redis.get(key) |
595 | 582 | assert test_value is None |
596 | 583 | |
601 | 588 | await fut1 |
602 | 589 | test_value = await fut2 |
603 | 590 | assert test_value == value |
604 | await asyncio.sleep(0.50, loop=loop) | |
591 | await asyncio.sleep(0.50) | |
605 | 592 | test_value = await redis.get(key) |
606 | 593 | assert test_value is None |
607 | 594 | |
611 | 598 | await redis.setex(key, b'one', value) |
612 | 599 | |
613 | 600 | |
614 | @pytest.mark.run_loop | |
615 | 601 | async def test_setnx(redis): |
616 | 602 | key, value = b'key:setnx:1', b'Hello' |
617 | 603 | # set fresh new value |
633 | 619 | await redis.setnx(None, value) |
634 | 620 | |
635 | 621 | |
636 | @pytest.mark.run_loop | |
637 | 622 | async def test_setrange(redis): |
638 | 623 | key, value = b'key:setrange', b'Hello World' |
639 | 624 | await add(redis, key, value) |
655 | 640 | await redis.setrange(key, -1, b'Redis') |
656 | 641 | |
657 | 642 | |
658 | @pytest.mark.run_loop | |
659 | 643 | async def test_strlen(redis): |
660 | 644 | key, value = b'key:strlen', b'asyncio' |
661 | 645 | await add(redis, key, value) |
669 | 653 | await redis.strlen(None) |
670 | 654 | |
671 | 655 | |
672 | @pytest.mark.run_loop | |
673 | 656 | async def test_cancel_hang(redis): |
674 | 657 | exists_coro = redis.execute("EXISTS", b"key:test1") |
675 | 658 | exists_coro.cancel() |
677 | 660 | assert not exists_check |
678 | 661 | |
679 | 662 | |
680 | @pytest.mark.run_loop | |
681 | async def test_set_enc(create_redis, loop, server): | |
682 | redis = await create_redis( | |
683 | server.tcp_address, loop=loop, encoding='utf-8') | |
663 | async def test_set_enc(create_redis, server): | |
664 | redis = await create_redis(server.tcp_address, encoding='utf-8') | |
684 | 665 | TEST_KEY = 'my-key' |
685 | 666 | ok = await redis.set(TEST_KEY, 'value') |
686 | 667 | assert ok is True |
2 | 2 | import asyncio |
3 | 3 | |
4 | 4 | |
5 | @pytest.mark.run_loop | |
6 | 5 | async def test_future_cancellation(create_connection, loop, server): |
7 | conn = await create_connection( | |
8 | server.tcp_address, loop=loop) | |
6 | conn = await create_connection(server.tcp_address) | |
9 | 7 | |
10 | 8 | ts = loop.time() |
11 | 9 | fut = conn.execute('BLPOP', 'some-list', 5) |
12 | 10 | with pytest.raises(asyncio.TimeoutError): |
13 | await asyncio.wait_for(fut, 1, loop=loop) | |
11 | await asyncio.wait_for(fut, 1) | |
14 | 12 | assert fut.cancelled() |
15 | 13 | |
16 | 14 | # NOTE: Connection becomes available only after timeout expires |
4 | 4 | from aioredis import ConnectionClosedError |
5 | 5 | |
6 | 6 | |
7 | @pytest.mark.run_loop | |
8 | async def test_multi_exec(redis, loop): | |
7 | async def test_multi_exec(redis): | |
9 | 8 | await redis.delete('foo', 'bar') |
10 | 9 | |
11 | 10 | tr = redis.multi_exec() |
13 | 12 | f2 = tr.incr('bar') |
14 | 13 | res = await tr.execute() |
15 | 14 | assert res == [1, 1] |
16 | res2 = await asyncio.gather(f1, f2, loop=loop) | |
15 | res2 = await asyncio.gather(f1, f2) | |
17 | 16 | assert res == res2 |
18 | 17 | |
19 | 18 | tr = redis.multi_exec() |
28 | 27 | f2 = tr.incrbyfloat('foo', 1.2) |
29 | 28 | res = await tr.execute() |
30 | 29 | assert res == [True, 2.2] |
31 | res2 = await asyncio.gather(f1, f2, loop=loop) | |
30 | res2 = await asyncio.gather(f1, f2) | |
32 | 31 | assert res == res2 |
33 | 32 | |
34 | 33 | tr = redis.multi_exec() |
39 | 38 | await f1 |
40 | 39 | |
41 | 40 | |
42 | @pytest.mark.run_loop | |
43 | 41 | async def test_empty(redis): |
44 | 42 | tr = redis.multi_exec() |
45 | 43 | res = await tr.execute() |
46 | 44 | assert res == [] |
47 | 45 | |
48 | 46 | |
49 | @pytest.mark.run_loop | |
50 | 47 | async def test_double_execute(redis): |
51 | 48 | tr = redis.multi_exec() |
52 | 49 | await tr.execute() |
56 | 53 | await tr.incr('foo') |
57 | 54 | |
58 | 55 | |
59 | @pytest.mark.run_loop | |
60 | 56 | async def test_connection_closed(redis): |
61 | 57 | tr = redis.multi_exec() |
62 | 58 | fut1 = tr.quit() |
88 | 84 | (ConnectionClosedError, ConnectionError)) |
89 | 85 | |
90 | 86 | |
91 | @pytest.mark.run_loop | |
92 | 87 | async def test_discard(redis): |
93 | 88 | await redis.delete('foo') |
94 | 89 | tr = redis.multi_exec() |
107 | 102 | assert res == 1 |
108 | 103 | |
109 | 104 | |
110 | @pytest.mark.run_loop | |
111 | 105 | async def test_exec_error(redis): |
112 | 106 | tr = redis.multi_exec() |
113 | 107 | fut = tr.connection.execute('INCRBY', 'key', '1.0') |
125 | 119 | await fut |
126 | 120 | |
127 | 121 | |
128 | @pytest.mark.run_loop | |
129 | 122 | async def test_command_errors(redis): |
130 | 123 | tr = redis.multi_exec() |
131 | 124 | fut = tr.incrby('key', 1.0) |
135 | 128 | await fut |
136 | 129 | |
137 | 130 | |
138 | @pytest.mark.run_loop | |
139 | 131 | async def test_several_command_errors(redis): |
140 | 132 | tr = redis.multi_exec() |
141 | 133 | fut1 = tr.incrby('key', 1.0) |
148 | 140 | await fut2 |
149 | 141 | |
150 | 142 | |
151 | @pytest.mark.run_loop | |
152 | 143 | async def test_error_in_connection(redis): |
153 | 144 | await redis.set('foo', 1) |
154 | 145 | tr = redis.multi_exec() |
161 | 152 | await fut2 |
162 | 153 | |
163 | 154 | |
164 | @pytest.mark.run_loop | |
165 | 155 | async def test_watch_unwatch(redis): |
166 | 156 | res = await redis.watch('key') |
167 | 157 | assert res is True |
179 | 169 | assert res is True |
180 | 170 | |
181 | 171 | |
182 | @pytest.mark.run_loop | |
183 | 172 | async def test_encoding(redis): |
184 | 173 | res = await redis.set('key', 'value') |
185 | 174 | assert res is True |
200 | 189 | assert res == {'foo': 'val1', 'bar': 'val2'} |
201 | 190 | |
202 | 191 | |
203 | @pytest.mark.run_loop | |
204 | async def test_global_encoding(redis, create_redis, server, loop): | |
205 | redis = await create_redis( | |
206 | server.tcp_address, | |
207 | loop=loop, encoding='utf-8') | |
192 | async def test_global_encoding(redis, create_redis, server): | |
193 | redis = await create_redis(server.tcp_address, encoding='utf-8') | |
208 | 194 | res = await redis.set('key', 'value') |
209 | 195 | assert res is True |
210 | 196 | res = await redis.hmset( |
214 | 200 | tr = redis.multi_exec() |
215 | 201 | fut1 = tr.get('key') |
216 | 202 | fut2 = tr.get('key', encoding='utf-8') |
217 | fut3 = tr.hgetall('hash-key', encoding='utf-8') | |
203 | fut3 = tr.get('key', encoding=None) | |
204 | fut4 = tr.hgetall('hash-key', encoding='utf-8') | |
218 | 205 | await tr.execute() |
219 | 206 | res = await fut1 |
220 | 207 | assert res == 'value' |
221 | 208 | res = await fut2 |
222 | 209 | assert res == 'value' |
223 | 210 | res = await fut3 |
211 | assert res == b'value' | |
212 | res = await fut4 | |
224 | 213 | assert res == {'foo': 'val1', 'bar': 'val2'} |
225 | 214 | |
226 | 215 | |
227 | @pytest.mark.run_loop | |
228 | async def test_transaction__watch_error(redis, create_redis, server, loop): | |
229 | other = await create_redis( | |
230 | server.tcp_address, loop=loop) | |
216 | async def test_transaction__watch_error(redis, create_redis, server): | |
217 | other = await create_redis(server.tcp_address) | |
231 | 218 | |
232 | 219 | ok = await redis.set('foo', 'bar') |
233 | 220 | assert ok is True |
249 | 236 | await fut2 |
250 | 237 | |
251 | 238 | |
252 | @pytest.mark.run_loop | |
253 | 239 | async def test_multi_exec_and_pool_release(redis): |
254 | 240 | # Test the case when pool connection is released before |
255 | 241 | # `exec` result is received. |
270 | 256 | assert (await fut1) is None |
271 | 257 | |
272 | 258 | |
273 | @pytest.mark.run_loop | |
274 | 259 | async def test_multi_exec_db_select(redis): |
275 | 260 | await redis.set('foo', 'bar') |
276 | 261 |