diff --git a/CHANGES.txt b/CHANGES.txt index 61f130e..cf031ac 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,5 +1,85 @@ Changes ------- + +.. towncrier release notes start + +1.3.1 (2019-12-02) +^^^^^^^^^^^^^^^^^^ +Bugfixes +~~~~~~~~ + +- Fix transaction data decoding + (see `#657 `_); +- Fix duplicate calls to ``pool.wait_closed()`` upon ``create_pool()`` exception. + (see `#671 `_); + +Deprecations and Removals +~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Drop explicit loop requirement in API. + Deprecate ``loop`` argument. + Throw warning in Python 3.8+ if explicit ``loop`` is passed to methods. + (see `#666 `_); + +Misc +~~~~ + +- `#643 `_, + `#646 `_, + `#648 `_; + + +1.3.0 (2019-09-24) +^^^^^^^^^^^^^^^^^^ +Features +~~~~~~~~ + +- Added ``xdel`` and ``xtrim`` method which missed in ``commands/streams.py`` & also added unit test code for them + (see `#438 `_); +- Add ``count`` argument to ``spop`` command + (see `#485 `_); +- Add support for ``zpopmax`` and ``zpopmin`` redis commands + (see `#550 `_); +- Add ``towncrier``: change notes are now stored in ``CHANGES.txt`` + (see `#576 `_); +- Type hints for the library + (see `#584 `_); +- A few additions to the sorted set commands: + + - the blocking pop commands: ``BZPOPMAX`` and ``BZPOPMIN`` + + - the ``CH`` and ``INCR`` options of the ``ZADD`` command + + (see `#618 `_); +- Added ``no_ack`` parameter to ``xread_group`` streams method in ``commands/streams.py`` + (see `#625 `_); + +Bugfixes +~~~~~~~~ + +- Fix for sensitive logging + (see `#459 `_); +- Fix slow memory leak in ``wait_closed`` implementation + (see `#498 `_); +- Fix handling of instances were Redis returns null fields for a stream message + (see `#605 `_); + +Improved Documentation +~~~~~~~~~~~~~~~~~~~~~~ + +- Rewrite "Getting started" documentation. 
+ (see `#641 `_); + +Misc +~~~~ + +- `#585 `_, + `#611 `_, + `#612 `_, + `#619 `_, + `#620 `_, + `#642 `_; + 1.2.0 (2018-10-24) ^^^^^^^^^^^^^^^^^^ @@ -349,7 +429,7 @@ * Fixed cancellation of wait_closed (see `#118 `_); -* Fixed ``time()`` convertion to float +* Fixed ``time()`` conversion to float (see `#126 `_); * Fixed ``hmset()`` method to return bool instead of ``b'OK'`` diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt index d3a89d0..ea3e2e3 100644 --- a/CONTRIBUTORS.txt +++ b/CONTRIBUTORS.txt @@ -8,7 +8,9 @@ Alexander Shorin Aliaksei Urbanski Andrew Svetlov +Anton Salii Anton Verinov +Artem Mazur David Francos Dima Kruk @@ -16,6 +18,7 @@ Hugo Ihor Gorobets Ihor Liubymov +Ilya Samartsev James Hilliard Jan Špaček Jeff Moser @@ -25,15 +28,23 @@ Marek Szapiel Marijn Giesen Martin +Maxim Dodonchuk Michael Käufl Nickolai Novik +Oleg Butuzov +Oleksandr Tykhonruk Pau Freixes Paul Colomiets Samuel Colvin Samuel Dion-Girardeau +Sergey Miletskiy SeungHyun Hwang Taku Fukada Taras Voinarovskyi Thanos Lefteris Thomas Steinacher Volodymyr Hotsyk +Youngmin Koo +Dima Kit + +Dmitry Vasilishin diff --git a/PKG-INFO b/PKG-INFO index 252c39b..212d30c 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: aioredis -Version: 1.2.0 +Version: 1.3.1 Summary: asyncio (PEP 3156) Redis support Home-page: https://github.com/aio-libs/aioredis Author: Alexey Popravka @@ -35,105 +35,39 @@ Sentinel support Yes Redis Cluster support WIP Trollius (python 2.7) No - Tested CPython versions `3.5, 3.6 3.7 `_ [2]_ - Tested PyPy3 versions `5.9.0 `_ - Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ + Tested CPython versions `3.5.3, 3.6, 3.7 `_ [1]_ + Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 `_ + Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 `_ Support for dev Redis server through low-level API ================================ ============================== - - .. [2] For Python 3.3, 3.4 support use aioredis v0.3. + .. 
[1] For Python 3.3, 3.4 support use aioredis v0.3. Documentation ------------- http://aioredis.readthedocs.io/ - Usage examples - -------------- - - Simple low-level interface: + Usage example + ------------- + + Simple high-level interface with connections pool: .. code:: python import asyncio import aioredis - loop = asyncio.get_event_loop() - async def go(): - conn = await aioredis.create_connection( - 'redis://localhost', loop=loop) - await conn.execute('set', 'my-key', 'value') - val = await conn.execute('get', 'my-key') - print(val) - conn.close() - await conn.wait_closed() - loop.run_until_complete(go()) - # will print 'value' - - Simple high-level interface: - - .. code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - redis = await aioredis.create_redis( - 'redis://localhost', loop=loop) + redis = await aioredis.create_redis_pool( + 'redis://localhost') await redis.set('my-key', 'value') - val = await redis.get('my-key') + val = await redis.get('my-key', encoding='utf-8') print(val) redis.close() await redis.wait_closed() - loop.run_until_complete(go()) - # will print 'value' - - Connections pool: - - .. code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - pool = await aioredis.create_pool( - 'redis://localhost', - minsize=5, maxsize=10, - loop=loop) - await pool.execute('set', 'my-key', 'value') - print(await pool.execute('get', 'my-key')) - # graceful shutdown - pool.close() - await pool.wait_closed() - - loop.run_until_complete(go()) - - Simple high-level interface with connections pool: - - .. 
code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - redis = await aioredis.create_redis_pool( - 'redis://localhost', - minsize=5, maxsize=10, - loop=loop) - await redis.set('my-key', 'value') - val = await redis.get('my-key') - print(val) - redis.close() - await redis.wait_closed() - loop.run_until_complete(go()) + + asyncio.run(go()) # will print 'value' Requirements @@ -171,6 +105,86 @@ Changes ------- + + .. towncrier release notes start + + 1.3.1 (2019-12-02) + ^^^^^^^^^^^^^^^^^^ + Bugfixes + ~~~~~~~~ + + - Fix transaction data decoding + (see `#657 `_); + - Fix duplicate calls to ``pool.wait_closed()`` upon ``create_pool()`` exception. + (see `#671 `_); + + Deprecations and Removals + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + - Drop explicit loop requirement in API. + Deprecate ``loop`` argument. + Throw warning in Python 3.8+ if explicit ``loop`` is passed to methods. + (see `#666 `_); + + Misc + ~~~~ + + - `#643 `_, + `#646 `_, + `#648 `_; + + + 1.3.0 (2019-09-24) + ^^^^^^^^^^^^^^^^^^ + Features + ~~~~~~~~ + + - Added ``xdel`` and ``xtrim`` method which missed in ``commands/streams.py`` & also added unit test code for them + (see `#438 `_); + - Add ``count`` argument to ``spop`` command + (see `#485 `_); + - Add support for ``zpopmax`` and ``zpopmin`` redis commands + (see `#550 `_); + - Add ``towncrier``: change notes are now stored in ``CHANGES.txt`` + (see `#576 `_); + - Type hints for the library + (see `#584 `_); + - A few additions to the sorted set commands: + + - the blocking pop commands: ``BZPOPMAX`` and ``BZPOPMIN`` + + - the ``CH`` and ``INCR`` options of the ``ZADD`` command + + (see `#618 `_); + - Added ``no_ack`` parameter to ``xread_group`` streams method in ``commands/streams.py`` + (see `#625 `_); + + Bugfixes + ~~~~~~~~ + + - Fix for sensitive logging + (see `#459 `_); + - Fix slow memory leak in ``wait_closed`` implementation + (see `#498 `_); + - Fix handling of instances were Redis returns 
null fields for a stream message + (see `#605 `_); + + Improved Documentation + ~~~~~~~~~~~~~~~~~~~~~~ + + - Rewrite "Getting started" documentation. + (see `#641 `_); + + Misc + ~~~~ + + - `#585 `_, + `#611 `_, + `#612 `_, + `#619 `_, + `#620 `_, + `#642 `_; + 1.2.0 (2018-10-24) ^^^^^^^^^^^^^^^^^^ @@ -520,7 +534,7 @@ * Fixed cancellation of wait_closed (see `#118 `_); - * Fixed ``time()`` convertion to float + * Fixed ``time()`` conversion to float (see `#126 `_); * Fixed ``hmset()`` method to return bool instead of ``b'OK'`` diff --git a/README.rst b/README.rst index 1cd286f..992cd2b 100644 --- a/README.rst +++ b/README.rst @@ -27,84 +27,21 @@ Sentinel support Yes Redis Cluster support WIP Trollius (python 2.7) No -Tested CPython versions `3.5, 3.6 3.7 `_ [2]_ -Tested PyPy3 versions `5.9.0 `_ -Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ +Tested CPython versions `3.5.3, 3.6, 3.7 `_ [1]_ +Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 `_ +Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 `_ Support for dev Redis server through low-level API ================================ ============================== - -.. [2] For Python 3.3, 3.4 support use aioredis v0.3. +.. [1] For Python 3.3, 3.4 support use aioredis v0.3. Documentation ------------- http://aioredis.readthedocs.io/ -Usage examples --------------- - -Simple low-level interface: - -.. code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - conn = await aioredis.create_connection( - 'redis://localhost', loop=loop) - await conn.execute('set', 'my-key', 'value') - val = await conn.execute('get', 'my-key') - print(val) - conn.close() - await conn.wait_closed() - loop.run_until_complete(go()) - # will print 'value' - -Simple high-level interface: - -.. 
code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - redis = await aioredis.create_redis( - 'redis://localhost', loop=loop) - await redis.set('my-key', 'value') - val = await redis.get('my-key') - print(val) - redis.close() - await redis.wait_closed() - loop.run_until_complete(go()) - # will print 'value' - -Connections pool: - -.. code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - pool = await aioredis.create_pool( - 'redis://localhost', - minsize=5, maxsize=10, - loop=loop) - await pool.execute('set', 'my-key', 'value') - print(await pool.execute('get', 'my-key')) - # graceful shutdown - pool.close() - await pool.wait_closed() - - loop.run_until_complete(go()) +Usage example +------------- Simple high-level interface with connections pool: @@ -113,19 +50,16 @@ import asyncio import aioredis - loop = asyncio.get_event_loop() - async def go(): redis = await aioredis.create_redis_pool( - 'redis://localhost', - minsize=5, maxsize=10, - loop=loop) + 'redis://localhost') await redis.set('my-key', 'value') - val = await redis.get('my-key') + val = await redis.get('my-key', encoding='utf-8') print(val) redis.close() await redis.wait_closed() - loop.run_until_complete(go()) + + asyncio.run(go()) # will print 'value' Requirements diff --git a/aioredis/__init__.py b/aioredis/__init__.py index 521d1b2..4293b35 100644 --- a/aioredis/__init__.py +++ b/aioredis/__init__.py @@ -28,7 +28,7 @@ ) -__version__ = '1.2.0' +__version__ = '1.3.1' __all__ = [ # Factories diff --git a/aioredis/abc.py b/aioredis/abc.py index 0af13c6..5c1bed8 100644 --- a/aioredis/abc.py +++ b/aioredis/abc.py @@ -3,9 +3,6 @@ These are intended to be used for implementing custom connection managers. 
""" import abc -import asyncio - -from abc import ABC __all__ = [ @@ -15,7 +12,7 @@ ] -class AbcConnection(ABC): +class AbcConnection(abc.ABC): """Abstract connection interface.""" @abc.abstractmethod @@ -30,9 +27,8 @@ def close(self): """Perform connection(s) close and resources cleanup.""" - @asyncio.coroutine @abc.abstractmethod - def wait_closed(self): + async def wait_closed(self): """ Coroutine waiting until all resources are closed/released/cleaned up. """ @@ -84,20 +80,19 @@ """ @abc.abstractmethod - def get_connection(self): # TODO: arguments + def get_connection(self, command, args=()): """ Gets free connection from pool in a sync way. If no connection available — returns None. """ - @asyncio.coroutine @abc.abstractmethod - def acquire(self): # TODO: arguments + async def acquire(self, command=None, args=()): """Acquires connection from pool.""" @abc.abstractmethod - def release(self, conn): # TODO: arguments + def release(self, conn): """Releases connection to pool. :param AbcConnection conn: Owned connection to be released. @@ -109,7 +104,7 @@ """Connection address or None.""" -class AbcChannel(ABC): +class AbcChannel(abc.ABC): """Abstract Pub/Sub Channel interface.""" @property @@ -128,9 +123,8 @@ """Flag indicating that channel has unreceived messages and not marked as closed.""" - @asyncio.coroutine @abc.abstractmethod - def get(self): + async def get(self): """Wait and return new message. Will raise ``ChannelClosedError`` if channel is not active. diff --git a/aioredis/commands/__init__.py b/aioredis/commands/__init__.py index 57cb748..14cf4a1 100644 --- a/aioredis/commands/__init__.py +++ b/aioredis/commands/__init__.py @@ -119,10 +119,7 @@ return self.execute('QUIT') def select(self, db): - """Change the selected database for the current connection. 
- - This method wraps call to :meth:`aioredis.RedisConnection.select()` - """ + """Change the selected database.""" return self._pool_or_conn.select(db) def swapdb(self, from_index, to_index): diff --git a/aioredis/commands/generic.py b/aioredis/commands/generic.py index b0378c5..7e1406a 100644 --- a/aioredis/commands/generic.py +++ b/aioredis/commands/generic.py @@ -143,8 +143,7 @@ """Returns the kind of internal representation used in order to store the value associated with a key (OBJECT ENCODING). """ - # TODO: set default encoding to 'utf-8' - return self.execute(b'OBJECT', b'ENCODING', key) + return self.execute(b'OBJECT', b'ENCODING', key, encoding='utf-8') def object_idletime(self, key): """Returns the number of seconds since the object is not requested diff --git a/aioredis/commands/server.py b/aioredis/commands/server.py index 3655d12..24869f3 100644 --- a/aioredis/commands/server.py +++ b/aioredis/commands/server.py @@ -1,7 +1,6 @@ from collections import namedtuple from aioredis.util import wait_ok, wait_convert, wait_make_dict, _NOTSET -from aioredis.log import logger class ServerCommandsMixin: @@ -206,7 +205,7 @@ else: return self.execute(b'SHUTDOWN') - def slaveof(self, host=_NOTSET, port=None): + def slaveof(self, host, port=None): """Make the server a slave of another instance, or promote it as master. @@ -216,11 +215,6 @@ ``slaveof()`` form deprecated in favour of explicit ``slaveof(None)``. """ - if host is _NOTSET: - logger.warning("slaveof() form is deprecated!" 
- " Use slaveof(None) to turn redis into a MASTER.") - host = None - # TODO: drop in 0.3.0 if host is None and port is None: return self.execute(b'SLAVEOF', b'NO', b'ONE') return self.execute(b'SLAVEOF', host, port) diff --git a/aioredis/commands/set.py b/aioredis/commands/set.py index e29ebe5..6c20b97 100644 --- a/aioredis/commands/set.py +++ b/aioredis/commands/set.py @@ -43,9 +43,12 @@ """Move a member from one set to another.""" return self.execute(b'SMOVE', sourcekey, destkey, member) - def spop(self, key, *, encoding=_NOTSET): - """Remove and return a random member from a set.""" - return self.execute(b'SPOP', key, encoding=encoding) + def spop(self, key, count=None, *, encoding=_NOTSET): + """Remove and return one or multiple random members from a set.""" + args = [key] + if count is not None: + args.append(count) + return self.execute(b'SPOP', *args, encoding=encoding) def srandmember(self, key, count=None, *, encoding=_NOTSET): """Get one or multiple random members from a set.""" diff --git a/aioredis/commands/sorted_set.py b/aioredis/commands/sorted_set.py index 5ddf78d..1df2ab9 100644 --- a/aioredis/commands/sorted_set.py +++ b/aioredis/commands/sorted_set.py @@ -18,7 +18,36 @@ ZSET_IF_NOT_EXIST = 'ZSET_IF_NOT_EXIST' # NX ZSET_IF_EXIST = 'ZSET_IF_EXIST' # XX - def zadd(self, key, score, member, *pairs, exist=None): + def bzpopmax(self, key, *keys, timeout=0, encoding=_NOTSET): + """Remove and get an element with the highest score in the sorted set, + or block until one is available. 
+ + :raises TypeError: if timeout is not int + :raises ValueError: if timeout is less than 0 + """ + if not isinstance(timeout, int): + raise TypeError("timeout argument must be int") + if timeout < 0: + raise ValueError("timeout must be greater equal 0") + args = keys + (timeout,) + return self.execute(b'BZPOPMAX', key, *args, encoding=encoding) + + def bzpopmin(self, key, *keys, timeout=0, encoding=_NOTSET): + """Remove and get an element with the lowest score in the sorted set, + or block until one is available. + + :raises TypeError: if timeout is not int + :raises ValueError: if timeout is less than 0 + """ + if not isinstance(timeout, int): + raise TypeError("timeout argument must be int") + if timeout < 0: + raise ValueError("timeout must be greater equal 0") + args = keys + (timeout,) + return self.execute(b'BZPOPMIN', key, *args, encoding=encoding) + + def zadd(self, key, score, member, *pairs, exist=None, changed=False, + incr=False): """Add one or more members to a sorted set or update its score. :raises TypeError: score not int or float @@ -38,6 +67,15 @@ args.append(b'XX') elif exist is self.ZSET_IF_NOT_EXIST: args.append(b'NX') + + if changed: + args.append(b'CH') + + if incr: + if pairs: + raise ValueError('only one score-element pair ' + 'can be specified in this mode') + args.append(b'INCR') args.extend([score, member]) if pairs: @@ -424,6 +462,38 @@ match=match, count=count)) + def zpopmin(self, key, count=None, *, encoding=_NOTSET): + """Removes and returns up to count members with the lowest scores + in the sorted set stored at key. 
+ + :raises TypeError: if count is not int + """ + if count is not None and not isinstance(count, int): + raise TypeError("count argument must be int") + + args = [] + if count is not None: + args.extend([count]) + + fut = self.execute(b'ZPOPMIN', key, *args, encoding=encoding) + return fut + + def zpopmax(self, key, count=None, *, encoding=_NOTSET): + """Removes and returns up to count members with the highest scores + in the sorted set stored at key. + + :raises TypeError: if count is not int + """ + if count is not None and not isinstance(count, int): + raise TypeError("count argument must be int") + + args = [] + if count is not None: + args.extend([count]) + + fut = self.execute(b'ZPOPMAX', key, *args, encoding=encoding) + return fut + def _encode_min_max(flag, min, max): if flag is SortedSetCommandsMixin.ZSET_EXCLUDE_MIN: diff --git a/aioredis/commands/streams.py b/aioredis/commands/streams.py index efc2882..e3b7ebc 100644 --- a/aioredis/commands/streams.py +++ b/aioredis/commands/streams.py @@ -33,7 +33,13 @@ """ if messages is None: return [] - return [(mid, fields_to_dict(values)) for mid, values in messages] + + messages = (message for message in messages if message is not None) + return [ + (mid, fields_to_dict(values)) + for mid, values + in messages if values is not None + ] def parse_messages_by_stream(messages_by_stream): @@ -79,8 +85,7 @@ class StreamCommandsMixin: """Stream commands mixin - Streams are under development in Redis and - not currently released. 
+ Streams are available in Redis since v5.0 """ def xadd(self, stream, fields, message_id=b'*', max_len=None, @@ -128,21 +133,26 @@ return wait_convert(fut, parse_messages_by_stream) def xread_group(self, group_name, consumer_name, streams, timeout=0, - count=None, latest_ids=None): + count=None, latest_ids=None, no_ack=False): """Perform a blocking read on the given stream as part of a consumer group :raises ValueError: if the length of streams and latest_ids do not match """ - args = self._xread(streams, timeout, count, latest_ids) + args = self._xread( + streams, timeout, count, latest_ids, no_ack + ) fut = self.execute( b'XREADGROUP', b'GROUP', group_name, consumer_name, *args ) return wait_convert(fut, parse_messages_by_stream) - def xgroup_create(self, stream, group_name, latest_id='$'): + def xgroup_create(self, stream, group_name, latest_id='$', mkstream=False): """Create a consumer group""" - fut = self.execute(b'XGROUP', b'CREATE', stream, group_name, latest_id) + args = [b'CREATE', stream, group_name, latest_id] + if mkstream: + args.append(b'MKSTREAM') + fut = self.execute(b'XGROUP', *args) return wait_ok(fut) def xgroup_setid(self, stream, group_name, latest_id='$'): @@ -201,6 +211,23 @@ """Acknowledge a message for a given consumer group""" return self.execute(b'XACK', stream, group_name, id, *ids) + def xdel(self, stream, id): + """Removes the specified entries(IDs) from a stream""" + return self.execute(b'XDEL', stream, id) + + def xtrim(self, stream, max_len, exact_len=False): + """trims the stream to a given number of items, evicting older items""" + args = [] + if exact_len: + args.extend((b'MAXLEN', max_len)) + else: + args.extend((b'MAXLEN', b'~', max_len)) + return self.execute(b'XTRIM', stream, *args) + + def xlen(self, stream): + """Returns the number of entries inside a stream""" + return self.execute(b'XLEN', stream) + def xinfo(self, stream): """Retrieve information about the given stream. 
@@ -229,7 +256,8 @@ fut = self.execute(b'XINFO', b'HELP') return wait_convert(fut, lambda l: b'\n'.join(l)) - def _xread(self, streams, timeout=0, count=None, latest_ids=None): + def _xread(self, streams, timeout=0, count=None, latest_ids=None, + no_ack=False): """Wraps up common functionality between ``xread()`` and ``xread_group()`` @@ -246,6 +274,13 @@ count_args = [b'COUNT', count] if count else [] if timeout is None: block_args = [] + elif not isinstance(timeout, int): + raise TypeError( + "timeout argument must be int, not {!r}".format(timeout)) else: block_args = [b'BLOCK', timeout] - return block_args + count_args + [b'STREAMS'] + streams + latest_ids + + noack_args = [b'NOACK'] if no_ack else [] + + return count_args + block_args + noack_args + [b'STREAMS'] + streams \ + + latest_ids diff --git a/aioredis/commands/string.py b/aioredis/commands/string.py index c117ecb..fe2dbc8 100644 --- a/aioredis/commands/string.py +++ b/aioredis/commands/string.py @@ -1,3 +1,5 @@ +from itertools import chain + from aioredis.util import wait_convert, wait_ok, _NOTSET @@ -136,14 +138,20 @@ """Get the values of all the given keys.""" return self.execute(b'MGET', key, *keys, encoding=encoding) - def mset(self, key, value, *pairs): - """Set multiple keys to multiple values. - - :raises TypeError: if len of pairs is not event number - """ - if len(pairs) % 2 != 0: + def mset(self, *args): + """Set multiple keys to multiple values or unpack dict to keys & values. 
+ + :raises TypeError: if len of args is not event number + :raises TypeError: if len of args equals 1 and it is not a dict + """ + data = args + if len(args) == 1: + if not isinstance(args[0], dict): + raise TypeError("if one arg it should be a dict") + data = chain.from_iterable(args[0].items()) + elif len(args) % 2 != 0: raise TypeError("length of pairs must be even number") - fut = self.execute(b'MSET', key, value, *pairs) + fut = self.execute(b'MSET', *data) return wait_ok(fut) def msetnx(self, key, value, *pairs): diff --git a/aioredis/commands/transaction.py b/aioredis/commands/transaction.py index 26eb819..22f1717 100644 --- a/aioredis/commands/transaction.py +++ b/aioredis/commands/transaction.py @@ -11,6 +11,7 @@ from ..util import ( wait_ok, _set_exception, + get_event_loop, ) @@ -63,8 +64,7 @@ >>> await asyncio.gather(fut1, fut2) [1, 1] """ - return MultiExec(self._pool_or_conn, self.__class__, - loop=self._pool_or_conn._loop) + return MultiExec(self._pool_or_conn, self.__class__) def pipeline(self): """Returns :class:`Pipeline` object to execute bulk of commands. 
@@ -90,20 +90,19 @@ >>> await asyncio.gather(fut1, fut2) [2, 2] """ - return Pipeline(self._pool_or_conn, self.__class__, - loop=self._pool_or_conn._loop) + return Pipeline(self._pool_or_conn, self.__class__) class _RedisBuffer: def __init__(self, pipeline, *, loop=None): - if loop is None: - loop = asyncio.get_event_loop() + # TODO: deprecation note + # if loop is None: + # loop = asyncio.get_event_loop() self._pipeline = pipeline - self._loop = loop def execute(self, cmd, *args, **kw): - fut = self._loop.create_future() + fut = get_event_loop().create_future() self._pipeline.append((fut, cmd, args, kw)) return fut @@ -129,13 +128,13 @@ def __init__(self, pool_or_connection, commands_factory=lambda conn: conn, *, loop=None): - if loop is None: - loop = asyncio.get_event_loop() + # TODO: deprecation note + # if loop is None: + # loop = asyncio.get_event_loop() self._pool_or_conn = pool_or_connection - self._loop = loop self._pipeline = [] self._results = [] - self._buffer = _RedisBuffer(self._pipeline, loop=loop) + self._buffer = _RedisBuffer(self._pipeline) self._redis = commands_factory(self._buffer) self._done = False @@ -147,10 +146,9 @@ @functools.wraps(attr) def wrapper(*args, **kw): try: - task = asyncio.ensure_future(attr(*args, **kw), - loop=self._loop) + task = asyncio.ensure_future(attr(*args, **kw)) except Exception as exc: - task = self._loop.create_future() + task = get_event_loop().create_future() task.set_exception(exc) self._results.append(task) return task @@ -183,7 +181,6 @@ async def _do_execute(self, conn, *, return_exceptions=False): await asyncio.gather(*self._send_pipeline(conn), - loop=self._loop, return_exceptions=True) return await self._gather_result(return_exceptions) @@ -265,11 +262,11 @@ multi = conn.execute('MULTI') coros = list(self._send_pipeline(conn)) exec_ = conn.execute('EXEC') - gather = asyncio.gather(multi, *coros, loop=self._loop, + gather = asyncio.gather(multi, *coros, return_exceptions=True) last_error = None try: - 
await asyncio.shield(gather, loop=self._loop) + await asyncio.shield(gather) except asyncio.CancelledError: await gather except Exception as err: diff --git a/aioredis/connection.py b/aioredis/connection.py index a4ad452..b46e360 100644 --- a/aioredis/connection.py +++ b/aioredis/connection.py @@ -1,6 +1,9 @@ import types import asyncio import socket +import warnings +import sys + from functools import partial from collections import deque from contextlib import contextmanager @@ -14,6 +17,7 @@ coerced_keys_dict, decode, parse_url, + get_event_loop, ) from .parser import Reader from .stream import open_connection, open_unix_connection @@ -76,8 +80,8 @@ """ assert isinstance(address, (tuple, list, str)), "tuple or str expected" if isinstance(address, str): - logger.debug("Parsing Redis URI %r", address) address, options = parse_url(address) + logger.debug("Parsed Redis URI %r", address) db = options.setdefault('db', db) password = options.setdefault('password', password) encoding = options.setdefault('encoding', encoding) @@ -97,15 +101,16 @@ else: cls = RedisConnection - if loop is None: - loop = asyncio.get_event_loop() + if loop is not None and sys.version_info >= (3, 8, 0): + warnings.warn("The loop argument is deprecated", + DeprecationWarning) if isinstance(address, (list, tuple)): host, port = address logger.debug("Creating tcp connection to %r", address) reader, writer = await asyncio.wait_for(open_connection( - host, port, limit=MAX_CHUNK_SIZE, ssl=ssl, loop=loop), - timeout, loop=loop) + host, port, limit=MAX_CHUNK_SIZE, ssl=ssl), + timeout) sock = writer.transport.get_extra_info('socket') if sock is not None: sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) @@ -114,15 +119,14 @@ else: logger.debug("Creating unix connection to %r", address) reader, writer = await asyncio.wait_for(open_unix_connection( - address, ssl=ssl, limit=MAX_CHUNK_SIZE, loop=loop), - timeout, loop=loop) + address, ssl=ssl, limit=MAX_CHUNK_SIZE), + timeout) sock = 
writer.transport.get_extra_info('socket') if sock is not None: address = sock.getpeername() conn = cls(reader, writer, encoding=encoding, - address=address, parser=parser, - loop=loop) + address=address, parser=parser) try: if password is not None: @@ -141,8 +145,9 @@ def __init__(self, reader, writer, *, address, encoding=None, parser=None, loop=None): - if loop is None: - loop = asyncio.get_event_loop() + if loop is not None and sys.version_info >= (3, 8): + warnings.warn("The loop argument is deprecated", + DeprecationWarning) if parser is None: parser = Reader assert callable(parser), ( @@ -150,19 +155,17 @@ self._reader = reader self._writer = writer self._address = address - self._loop = loop self._waiters = deque() self._reader.set_parser( parser(protocolError=ProtocolError, replyError=ReplyError) ) - self._reader_task = asyncio.ensure_future(self._read_data(), - loop=self._loop) + self._reader_task = asyncio.ensure_future(self._read_data()) self._close_msg = None self._db = 0 self._closing = False self._closed = False - self._close_waiter = loop.create_future() - self._reader_task.add_done_callback(self._close_waiter.set_result) + self._close_state = asyncio.Event() + self._reader_task.add_done_callback(lambda x: self._close_state.set()) self._in_transaction = None self._transaction_error = None # XXX: never used? 
self._in_pubsub = 0 @@ -212,7 +215,7 @@ else: self._process_data(obj) self._closing = True - self._loop.call_soon(self._do_close, last_error) + get_event_loop().call_soon(self._do_close, last_error) def _process_data(self, obj): """Processes command results.""" @@ -336,13 +339,14 @@ cb = self._start_transaction elif command in ('EXEC', b'EXEC'): cb = partial(self._end_transaction, discard=False) + encoding = None elif command in ('DISCARD', b'DISCARD'): cb = partial(self._end_transaction, discard=True) else: cb = None if encoding is _NOTSET: encoding = self._encoding - fut = self._loop.create_future() + fut = get_event_loop().create_future() if self._pipeline_buffer is None: self._writer.write(encode_command(command, *args)) else: @@ -366,7 +370,7 @@ if not len(channels): raise TypeError("No channels/patterns supplied") is_pattern = len(command) in (10, 12) - mkchannel = partial(Channel, is_pattern=is_pattern, loop=self._loop) + mkchannel = partial(Channel, is_pattern=is_pattern) channels = [ch if isinstance(ch, AbcChannel) else mkchannel(ch) for ch in channels] if not all(ch.is_pattern == is_pattern for ch in channels): @@ -375,7 +379,7 @@ cmd = encode_command(command, *(ch.name for ch in channels)) res = [] for ch in channels: - fut = self._loop.create_future() + fut = get_event_loop().create_future() res.append(fut) cb = partial(self._update_pubsub, ch=ch) self._waiters.append((fut, None, cb)) @@ -383,7 +387,7 @@ self._writer.write(cmd) else: self._pipeline_buffer.extend(cmd) - return asyncio.gather(*res, loop=self._loop) + return asyncio.gather(*res) def close(self): """Close connection.""" @@ -426,12 +430,12 @@ closed = self._closing or self._closed if not closed and self._reader and self._reader.at_eof(): self._closing = closed = True - self._loop.call_soon(self._do_close, None) + get_event_loop().call_soon(self._do_close, None) return closed async def wait_closed(self): """Coroutine waiting until connection is closed.""" - await 
asyncio.shield(self._close_waiter, loop=self._loop) + await self._close_state.wait() @property def db(self): diff --git a/aioredis/errors.py b/aioredis/errors.py index b73e2e4..30cac33 100644 --- a/aioredis/errors.py +++ b/aioredis/errors.py @@ -1,3 +1,5 @@ +from typing import Optional, Sequence # noqa + __all__ = [ 'RedisError', 'ProtocolError', @@ -28,7 +30,7 @@ class ReplyError(RedisError): """Raised for redis error replies (-ERR).""" - MATCH_REPLY = None + MATCH_REPLY = None # type: Optional[Sequence[str]] def __new__(cls, msg, *args): for klass in cls.__subclasses__(): @@ -47,7 +49,17 @@ class AuthError(ReplyError): """Raised when authentication errors occurs.""" - MATCH_REPLY = ("NOAUTH ", "ERR invalid password") + MATCH_REPLY = ( + "NOAUTH ", + "ERR invalid password", + "ERR Client sent AUTH, but no password is set", + ) + + +class BusyGroupError(ReplyError): + """Raised if Consumer Group name already exists.""" + + MATCH_REPLY = "BUSYGROUP Consumer Group name already exists" class PipelineError(RedisError): diff --git a/aioredis/locks.py b/aioredis/locks.py index e057dd9..1715530 100644 --- a/aioredis/locks.py +++ b/aioredis/locks.py @@ -1,6 +1,7 @@ +import asyncio +import sys + from asyncio.locks import Lock as _Lock -from asyncio import coroutine -from asyncio import futures # Fixes an issue with all Python versions that leaves pending waiters # without being awakened when the first waiter is canceled. @@ -11,33 +12,33 @@ class Lock(_Lock): - @coroutine - def acquire(self): - """Acquire a lock. - This method blocks until the lock is unlocked, then sets it to - locked and returns True. - """ - if not self._locked and all(w.cancelled() for w in self._waiters): - self._locked = True - return True + if sys.version_info < (3, 7, 0): + async def acquire(self): + """Acquire a lock. + This method blocks until the lock is unlocked, then sets it to + locked and returns True. 
+ """ + if not self._locked and all(w.cancelled() for w in self._waiters): + self._locked = True + return True - fut = self._loop.create_future() + fut = self._loop.create_future() - self._waiters.append(fut) - try: - yield from fut - self._locked = True - return True - except futures.CancelledError: - if not self._locked: # pragma: no cover - self._wake_up_first() - raise - finally: - self._waiters.remove(fut) + self._waiters.append(fut) + try: + await fut + self._locked = True + return True + except asyncio.CancelledError: + if not self._locked: # pragma: no cover + self._wake_up_first() + raise + finally: + self._waiters.remove(fut) - def _wake_up_first(self): - """Wake up the first waiter who isn't cancelled.""" - for fut in self._waiters: - if not fut.done(): - fut.set_result(True) - break + def _wake_up_first(self): + """Wake up the first waiter who isn't cancelled.""" + for fut in self._waiters: + if not fut.done(): + fut.set_result(True) + break diff --git a/aioredis/parser.py b/aioredis/parser.py index 85e75e6..8a68ca5 100644 --- a/aioredis/parser.py +++ b/aioredis/parser.py @@ -1,4 +1,5 @@ from .errors import ProtocolError, ReplyError +from typing import Optional, Generator, Callable, Iterator # noqa __all__ = [ 'Reader', 'PyReader', @@ -9,15 +10,16 @@ """Pure-Python Redis protocol parser that follows hiredis.Reader interface (except setmaxbuf/getmaxbuf). 
""" - def __init__(self, protocolError=ProtocolError, replyError=ReplyError, - encoding=None): + def __init__(self, protocolError: Callable = ProtocolError, + replyError: Callable = ReplyError, + encoding: Optional[str] = None): if not callable(protocolError): raise TypeError("Expected a callable") if not callable(replyError): raise TypeError("Expected a callable") self._parser = Parser(protocolError, replyError, encoding) - def feed(self, data, o=0, l=-1): + def feed(self, data, o: int = 0, l: int = -1): """Feed data to parser.""" if l == -1: l = len(data) - o @@ -35,41 +37,43 @@ """ return self._parser.parse_one() - def setmaxbuf(self, size): + def setmaxbuf(self, size: Optional[int]) -> None: """No-op.""" pass - def getmaxbuf(self): + def getmaxbuf(self) -> int: """No-op.""" return 0 class Parser: - def __init__(self, protocolError, replyError, encoding): - self.buf = bytearray() - self.pos = 0 - self.protocolError = protocolError - self.replyError = replyError - self.encoding = encoding + def __init__(self, protocolError: Callable, + replyError: Callable, encoding: Optional[str]): + + self.buf = bytearray() # type: bytearray + self.pos = 0 # type: int + self.protocolError = protocolError # type: Callable + self.replyError = replyError # type: Callable + self.encoding = encoding # type: Optional[str] self._err = None - self._gen = None + self._gen = None # type: Optional[Generator] - def waitsome(self, size): + def waitsome(self, size: int) -> Iterator[bool]: # keep yielding false until at least `size` bytes added to buf. 
while len(self.buf) < self.pos+size: yield False - def waitany(self): + def waitany(self) -> Iterator[bool]: yield from self.waitsome(len(self.buf) + 1) def readone(self): - if not self.buf[self.pos:1]: + if not self.buf[self.pos:self.pos + 1]: yield from self.waitany() - val = self.buf[self.pos:1] + val = self.buf[self.pos:self.pos + 1] self.pos += 1 return val - def readline(self, size=None): + def readline(self, size: Optional[int] = None): if size is not None: if len(self.buf) < size + 2 + self.pos: yield from self.waitsome(size + 2) @@ -96,7 +100,7 @@ self._err = self.protocolError(msg) return self._err - def parse(self, is_bulk=False): + def parse(self, is_bulk: bool = False): if self._err is not None: raise self._err ctl = yield from self.readone() diff --git a/aioredis/pool.py b/aioredis/pool.py index 4121ffd..2a14f41 100644 --- a/aioredis/pool.py +++ b/aioredis/pool.py @@ -1,10 +1,12 @@ import asyncio import collections import types +import warnings +import sys from .connection import create_connection, _PUBSUB_COMMANDS from .log import logger -from .util import parse_url +from .util import parse_url, CloseEvent from .errors import PoolClosedError from .abc import AbcPool from .locks import Lock @@ -54,7 +56,7 @@ loop=loop) try: await pool._fill_free(override_min=False) - except Exception as ex: + except Exception: pool.close() await pool.wait_closed() raise @@ -76,8 +78,9 @@ "maxsize must be int > 0", maxsize, type(maxsize)) assert minsize <= maxsize, ( "Invalid pool min/max sizes", minsize, maxsize) - if loop is None: - loop = asyncio.get_event_loop() + if loop is not None and sys.version_info >= (3, 8): + warnings.warn("The loop argument is deprecated", + DeprecationWarning) self._address = address self._db = db self._password = password @@ -86,13 +89,11 @@ self._parser_class = parser self._minsize = minsize self._create_connection_timeout = create_connection_timeout - self._loop = loop self._pool = collections.deque(maxlen=maxsize) self._used = set() 
self._acquiring = 0 - self._cond = asyncio.Condition(lock=Lock(loop=loop), loop=loop) - self._close_state = asyncio.Event(loop=loop) - self._close_waiter = None + self._cond = asyncio.Condition(lock=Lock()) + self._close_state = CloseEvent(self._do_close) self._pubsub_conn = None self._connection_cls = connection_cls @@ -139,10 +140,9 @@ conn = self._pool.popleft() conn.close() waiters.append(conn.wait_closed()) - await asyncio.gather(*waiters, loop=self._loop) + await asyncio.gather(*waiters) async def _do_close(self): - await self._close_state.wait() async with self._cond: assert not self._acquiring, self._acquiring waiters = [] @@ -153,7 +153,7 @@ for conn in self._used: conn.close() waiters.append(conn.wait_closed()) - await asyncio.gather(*waiters, loop=self._loop) + await asyncio.gather(*waiters) # TODO: close _pubsub_conn connection logger.debug("Closed %d connection(s)", len(waiters)) @@ -161,8 +161,6 @@ """Close all free and in-progress connections and mark pool as closed. """ if not self._close_state.is_set(): - self._close_waiter = asyncio.ensure_future(self._do_close(), - loop=self._loop) self._close_state.set() @property @@ -173,8 +171,6 @@ async def wait_closed(self): """Wait until pool gets closed.""" await self._close_state.wait() - assert self._close_waiter is not None - await asyncio.shield(self._close_waiter, loop=self._loop) @property def db(self): @@ -287,8 +283,7 @@ async with self._cond: for i in range(self.freesize): res = res and (await self._pool[i].select(db)) - else: - self._db = db + self._db = db return res async def auth(self, password): @@ -368,7 +363,7 @@ else: conn.close() # FIXME: check event loop is not closed - asyncio.ensure_future(self._wakeup(), loop=self._loop) + asyncio.ensure_future(self._wakeup()) def _drop_closed(self): for i in range(self.freesize): @@ -416,7 +411,7 @@ parser=self._parser_class, timeout=self._create_connection_timeout, connection_cls=self._connection_cls, - loop=self._loop) + ) async def _wakeup(self, 
closing_conn=None): async with self._cond: diff --git a/aioredis/pubsub.py b/aioredis/pubsub.py index a33997e..77f09e1 100644 --- a/aioredis/pubsub.py +++ b/aioredis/pubsub.py @@ -2,6 +2,8 @@ import json import types import collections +import warnings +import sys from .abc import AbcChannel from .util import _converters # , _set_result @@ -23,7 +25,10 @@ """Wrapper around asyncio.Queue.""" def __init__(self, name, is_pattern, loop=None): - self._queue = ClosableQueue(loop=loop) + if loop is not None and sys.version_info >= (3, 8): + warnings.warn("The loop argument is deprecated", + DeprecationWarning) + self._queue = ClosableQueue() self._name = _converters[type(name)](name) self._is_pattern = is_pattern @@ -165,7 +170,7 @@ >>> from aioredis.pubsub import Receiver >>> from aioredis.abc import AbcChannel - >>> mpsc = Receiver(loop=loop) + >>> mpsc = Receiver() >>> async def reader(mpsc): ... async for channel, msg in mpsc.iter(): ... assert isinstance(channel, AbcChannel) @@ -188,11 +193,12 @@ def __init__(self, loop=None, on_close=None): assert on_close is None or callable(on_close), ( "on_close must be None or callable", on_close) - if loop is None: - loop = asyncio.get_event_loop() + if loop is not None: + warnings.warn("The loop argument is deprecated", + DeprecationWarning) if on_close is None: on_close = self.check_stop - self._queue = ClosableQueue(loop=loop) + self._queue = ClosableQueue() self._refs = {} self._on_close = on_close @@ -396,9 +402,9 @@ class ClosableQueue: - def __init__(self, *, loop=None): + def __init__(self): self._queue = collections.deque() - self._event = asyncio.Event(loop=loop) + self._event = asyncio.Event() self._closed = False async def wait(self): diff --git a/aioredis/sentinel/pool.py b/aioredis/sentinel/pool.py index 227c304..08c4c9d 100644 --- a/aioredis/sentinel/pool.py +++ b/aioredis/sentinel/pool.py @@ -15,6 +15,7 @@ MasterReplyError, SlaveReplyError, ) +from ..util import CloseEvent # Address marker for discovery @@ -29,8 
+30,9 @@ """Create SentinelPool.""" # FIXME: revise default timeout value assert isinstance(sentinels, (list, tuple)), sentinels - if loop is None: - loop = asyncio.get_event_loop() + # TODO: deprecation note + # if loop is None: + # loop = asyncio.get_event_loop() pool = SentinelPool(sentinels, db=db, password=password, @@ -55,15 +57,15 @@ def __init__(self, sentinels, *, db=None, password=None, ssl=None, encoding=None, parser=None, minsize, maxsize, timeout, loop=None): - if loop is None: - loop = asyncio.get_event_loop() + # TODO: deprecation note + # if loop is None: + # loop = asyncio.get_event_loop() # TODO: add connection/discover timeouts; # and what to do if no master is found: # (raise error or try forever or try until timeout) # XXX: _sentinels is unordered self._sentinels = set(sentinels) - self._loop = loop self._timeout = timeout self._pools = [] # list of sentinel pools self._masters = {} @@ -75,14 +77,14 @@ self._redis_encoding = encoding self._redis_minsize = minsize self._redis_maxsize = maxsize - self._close_state = asyncio.Event(loop=loop) + self._close_state = CloseEvent(self._do_close) self._close_waiter = None - self._monitor = monitor = Receiver(loop=loop) + self._monitor = monitor = Receiver() async def echo_events(): try: while await monitor.wait_message(): - ch, (ev, data) = await monitor.get(encoding='utf-8') + _, (ev, data) = await monitor.get(encoding='utf-8') ev = ev.decode('utf-8') _logger.debug("%s: %s", ev, data) if ev in ('+odown',): @@ -102,7 +104,7 @@ # etc... 
except asyncio.CancelledError: pass - self._monitor_task = asyncio.ensure_future(echo_events(), loop=loop) + self._monitor_task = asyncio.ensure_future(echo_events()) @property def discover_timeout(self): @@ -124,7 +126,7 @@ maxsize=self._redis_maxsize, ssl=self._redis_ssl, parser=self._parser_class, - loop=self._loop) + ) return self._masters[service] def slave_for(self, service): @@ -140,7 +142,7 @@ maxsize=self._redis_maxsize, ssl=self._redis_ssl, parser=self._parser_class, - loop=self._loop) + ) return self._slaves[service] def execute(self, command, *args, **kwargs): @@ -162,12 +164,9 @@ def close(self): """Close all controlled connections (both sentinel and redis).""" if not self._close_state.is_set(): - self._close_waiter = asyncio.ensure_future(self._do_close(), - loop=self._loop) self._close_state.set() async def _do_close(self): - await self._close_state.wait() # TODO: lock tasks = [] task, self._monitor_task = self._monitor_task, None @@ -185,13 +184,11 @@ _, pool = self._slaves.popitem() pool.close() tasks.append(pool.wait_closed()) - await asyncio.gather(*tasks, loop=self._loop) + await asyncio.gather(*tasks) async def wait_closed(self): """Wait until pool gets closed.""" await self._close_state.wait() - assert self._close_waiter is not None - await asyncio.shield(self._close_waiter, loop=self._loop) async def discover(self, timeout=None): # TODO: better name? """Discover sentinels and all monitored services within given timeout. @@ -210,7 +207,7 @@ pools = [] for addr in self._sentinels: # iterate over unordered set tasks.append(self._connect_sentinel(addr, timeout, pools)) - done, pending = await asyncio.wait(tasks, loop=self._loop, + done, pending = await asyncio.wait(tasks, return_when=ALL_COMPLETED) assert not pending, ("Expected all tasks to complete", done, pending) @@ -236,11 +233,11 @@ connections pool or exception. 
""" try: - with async_timeout(timeout, loop=self._loop): + with async_timeout(timeout): pool = await create_pool( address, minsize=1, maxsize=2, parser=self._parser_class, - loop=self._loop) + ) pools.append(pool) return pool except asyncio.TimeoutError as err: @@ -268,12 +265,12 @@ pools = self._pools[:] for sentinel in pools: try: - with async_timeout(timeout, loop=self._loop): + with async_timeout(timeout): address = await self._get_masters_address( sentinel, service) pool = self._masters[service] - with async_timeout(timeout, loop=self._loop), \ + with async_timeout(timeout), \ contextlib.ExitStack() as stack: conn = await pool._create_new_connection(address) stack.callback(conn.close) @@ -291,16 +288,16 @@ except DiscoverError as err: sentinel_logger.debug("DiscoverError(%r, %s): %r", sentinel, service, err) - await asyncio.sleep(idle_timeout, loop=self._loop) + await asyncio.sleep(idle_timeout) continue except RedisError as err: raise MasterReplyError("Service {} error".format(service), err) except Exception: # TODO: clear (drop) connections to schedule reconnect - await asyncio.sleep(idle_timeout, loop=self._loop) - continue - else: - raise MasterNotFoundError("No master found for {}".format(service)) + await asyncio.sleep(idle_timeout) + continue + # Otherwise + raise MasterNotFoundError("No master found for {}".format(service)) async def discover_slave(self, service, timeout, **kwargs): """Perform Slave discovery for specified service.""" @@ -310,11 +307,11 @@ pools = self._pools[:] for sentinel in pools: try: - with async_timeout(timeout, loop=self._loop): + with async_timeout(timeout): address = await self._get_slave_address( sentinel, service) # add **kwargs pool = self._slaves[service] - with async_timeout(timeout, loop=self._loop), \ + with async_timeout(timeout), \ contextlib.ExitStack() as stack: conn = await pool._create_new_connection(address) stack.callback(conn.close) @@ -326,12 +323,12 @@ except asyncio.TimeoutError: continue except 
DiscoverError: - await asyncio.sleep(idle_timeout, loop=self._loop) + await asyncio.sleep(idle_timeout) continue except RedisError as err: raise SlaveReplyError("Service {} error".format(service), err) except Exception: - await asyncio.sleep(idle_timeout, loop=self._loop) + await asyncio.sleep(idle_timeout) continue raise SlaveNotFoundError("No slave found for {}".format(service)) @@ -362,8 +359,7 @@ if {'s_down', 'o_down', 'disconnected'} & flags: continue return address - else: - raise BadState(state) # XXX: only last state + raise BadState() # XXX: only last state async def _verify_service_role(self, conn, role): res = await conn.execute(b'role', encoding='utf-8') diff --git a/aioredis/stream.py b/aioredis/stream.py index 5d62e43..743e9e5 100644 --- a/aioredis/stream.py +++ b/aioredis/stream.py @@ -1,4 +1,8 @@ import asyncio +import warnings +import sys + +from .util import get_event_loop __all__ = [ 'open_connection', @@ -11,13 +15,15 @@ limit, loop=None, parser=None, **kwds): # XXX: parser is not used (yet) - if loop is None: - loop = asyncio.get_event_loop() - reader = StreamReader(limit=limit, loop=loop) - protocol = asyncio.StreamReaderProtocol(reader, loop=loop) - transport, _ = await loop.create_connection( + if loop is not None and sys.version_info >= (3, 8): + warnings.warn("The loop argument is deprecated", + DeprecationWarning) + reader = StreamReader(limit=limit) + protocol = asyncio.StreamReaderProtocol(reader) + transport, _ = await get_event_loop().create_connection( lambda: protocol, host, port, **kwds) - writer = asyncio.StreamWriter(transport, protocol, reader, loop) + writer = asyncio.StreamWriter(transport, protocol, reader, + loop=get_event_loop()) return reader, writer @@ -25,13 +31,15 @@ limit, loop=None, parser=None, **kwds): # XXX: parser is not used (yet) - if loop is None: - loop = asyncio.get_event_loop() - reader = StreamReader(limit=limit, loop=loop) - protocol = asyncio.StreamReaderProtocol(reader, loop=loop) - transport, _ = await 
loop.create_unix_connection( + if loop is not None and sys.version_info >= (3, 8): + warnings.warn("The loop argument is deprecated", + DeprecationWarning) + reader = StreamReader(limit=limit) + protocol = asyncio.StreamReaderProtocol(reader) + transport, _ = await get_event_loop().create_unix_connection( lambda: protocol, address, **kwds) - writer = asyncio.StreamWriter(transport, protocol, reader, loop) + writer = asyncio.StreamWriter(transport, protocol, reader, + loop=get_event_loop()) return reader, writer diff --git a/aioredis/util.py b/aioredis/util.py index 980de70..73f7e98 100644 --- a/aioredis/util.py +++ b/aioredis/util.py @@ -1,9 +1,13 @@ +import asyncio +import sys + from urllib.parse import urlparse, parse_qsl from .log import logger _NOTSET = object() +IS_PY38 = sys.version_info >= (3, 8) # NOTE: never put here anything else; # just this basic types @@ -207,3 +211,32 @@ if 'timeout' in params: options['timeout'] = float(params['timeout']) return options + + +class CloseEvent: + def __init__(self, on_close): + self._close_init = asyncio.Event() + self._close_done = asyncio.Event() + self._on_close = on_close + + async def wait(self): + await self._close_init.wait() + await self._close_done.wait() + + def is_set(self): + return self._close_done.is_set() or self._close_init.is_set() + + def set(self): + if self._close_init.is_set(): + return + + task = asyncio.ensure_future(self._on_close()) + task.add_done_callback(self._cleanup) + self._close_init.set() + + def _cleanup(self, task): + self._on_close = None + self._close_done.set() + + +get_event_loop = getattr(asyncio, 'get_running_loop', asyncio.get_event_loop) diff --git a/aioredis.egg-info/PKG-INFO b/aioredis.egg-info/PKG-INFO index 252c39b..212d30c 100644 --- a/aioredis.egg-info/PKG-INFO +++ b/aioredis.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: aioredis -Version: 1.2.0 +Version: 1.3.1 Summary: asyncio (PEP 3156) Redis support Home-page: https://github.com/aio-libs/aioredis 
Author: Alexey Popravka @@ -35,105 +35,39 @@ Sentinel support Yes Redis Cluster support WIP Trollius (python 2.7) No - Tested CPython versions `3.5, 3.6 3.7 `_ [2]_ - Tested PyPy3 versions `5.9.0 `_ - Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ + Tested CPython versions `3.5.3, 3.6, 3.7 `_ [1]_ + Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 `_ + Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 `_ Support for dev Redis server through low-level API ================================ ============================== - - .. [2] For Python 3.3, 3.4 support use aioredis v0.3. + .. [1] For Python 3.3, 3.4 support use aioredis v0.3. Documentation ------------- http://aioredis.readthedocs.io/ - Usage examples - -------------- - - Simple low-level interface: + Usage example + ------------- + + Simple high-level interface with connections pool: .. code:: python import asyncio import aioredis - loop = asyncio.get_event_loop() - async def go(): - conn = await aioredis.create_connection( - 'redis://localhost', loop=loop) - await conn.execute('set', 'my-key', 'value') - val = await conn.execute('get', 'my-key') - print(val) - conn.close() - await conn.wait_closed() - loop.run_until_complete(go()) - # will print 'value' - - Simple high-level interface: - - .. code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - redis = await aioredis.create_redis( - 'redis://localhost', loop=loop) + redis = await aioredis.create_redis_pool( + 'redis://localhost') await redis.set('my-key', 'value') - val = await redis.get('my-key') + val = await redis.get('my-key', encoding='utf-8') print(val) redis.close() await redis.wait_closed() - loop.run_until_complete(go()) - # will print 'value' - - Connections pool: - - .. 
code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - pool = await aioredis.create_pool( - 'redis://localhost', - minsize=5, maxsize=10, - loop=loop) - await pool.execute('set', 'my-key', 'value') - print(await pool.execute('get', 'my-key')) - # graceful shutdown - pool.close() - await pool.wait_closed() - - loop.run_until_complete(go()) - - Simple high-level interface with connections pool: - - .. code:: python - - import asyncio - import aioredis - - loop = asyncio.get_event_loop() - - async def go(): - redis = await aioredis.create_redis_pool( - 'redis://localhost', - minsize=5, maxsize=10, - loop=loop) - await redis.set('my-key', 'value') - val = await redis.get('my-key') - print(val) - redis.close() - await redis.wait_closed() - loop.run_until_complete(go()) + + asyncio.run(go()) # will print 'value' Requirements @@ -171,6 +105,86 @@ Changes ------- + + .. towncrier release notes start + + 1.3.1 (2019-12-02) + ^^^^^^^^^^^^^^^^^^ + Bugfixes + ~~~~~~~~ + + - Fix transaction data decoding + (see `#657 `_); + - Fix duplicate calls to ``pool.wait_closed()`` upon ``create_pool()`` exception. + (see `#671 `_); + + Deprecations and Removals + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + - Drop explicit loop requirement in API. + Deprecate ``loop`` argument. + Throw warning in Python 3.8+ if explicit ``loop`` is passed to methods. 
+ (see `#666 `_); + + Misc + ~~~~ + + - `#643 `_, + `#646 `_, + `#648 `_; + + + 1.3.0 (2019-09-24) + ^^^^^^^^^^^^^^^^^^ + Features + ~~~~~~~~ + + - Added ``xdel`` and ``xtrim`` method which missed in ``commands/streams.py`` & also added unit test code for them + (see `#438 `_); + - Add ``count`` argument to ``spop`` command + (see `#485 `_); + - Add support for ``zpopmax`` and ``zpopmin`` redis commands + (see `#550 `_); + - Add ``towncrier``: change notes are now stored in ``CHANGES.txt`` + (see `#576 `_); + - Type hints for the library + (see `#584 `_); + - A few additions to the sorted set commands: + + - the blocking pop commands: ``BZPOPMAX`` and ``BZPOPMIN`` + + - the ``CH`` and ``INCR`` options of the ``ZADD`` command + + (see `#618 `_); + - Added ``no_ack`` parameter to ``xread_group`` streams method in ``commands/streams.py`` + (see `#625 `_); + + Bugfixes + ~~~~~~~~ + + - Fix for sensitive logging + (see `#459 `_); + - Fix slow memory leak in ``wait_closed`` implementation + (see `#498 `_); + - Fix handling of instances where Redis returns null fields for a stream message + (see `#605 `_); + + Improved Documentation + ~~~~~~~~~~~~~~~~~~~~~~ + + - Rewrite "Getting started" documentation. 
+ (see `#641 `_); + + Misc + ~~~~ + + - `#585 `_, + `#611 `_, + `#612 `_, + `#619 `_, + `#620 `_, + `#642 `_; + 1.2.0 (2018-10-24) ^^^^^^^^^^^^^^^^^^ @@ -520,7 +534,7 @@ * Fixed cancellation of wait_closed (see `#118 `_); - * Fixed ``time()`` convertion to float + * Fixed ``time()`` conversion to float (see `#126 `_); * Fixed ``hmset()`` method to return bool instead of ``b'OK'`` diff --git a/aioredis.egg-info/SOURCES.txt b/aioredis.egg-info/SOURCES.txt index 1c5305c..2bdebcf 100644 --- a/aioredis.egg-info/SOURCES.txt +++ b/aioredis.egg-info/SOURCES.txt @@ -54,10 +54,8 @@ docs/_build/man/aioredis.1 examples/commands.py examples/connection.py -examples/iscan.py examples/pipeline.py examples/pool.py -examples/pool2.py examples/pool_pubsub.py examples/pubsub.py examples/pubsub2.py @@ -65,6 +63,14 @@ examples/sentinel.py examples/transaction.py examples/transaction2.py +examples/getting_started/00_connect.py +examples/getting_started/01_decoding.py +examples/getting_started/02_decoding.py +examples/getting_started/03_multiexec.py +examples/getting_started/04_pubsub.py +examples/getting_started/05_pubsub.py +examples/getting_started/06_sentinel.py +tests/_testutils.py tests/coerced_keys_dict_test.py tests/conftest.py tests/connection_commands_test.py diff --git a/docs/_build/man/aioredis.1 b/docs/_build/man/aioredis.1 index 7c1591f..b09b4e3 100644 --- a/docs/_build/man/aioredis.1 +++ b/docs/_build/man/aioredis.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH "AIOREDIS" "1" "Oct 24, 2018" "1.2" "aioredis" +.TH "AIOREDIS" "1" "Dec 02, 2019" "1.3" "aioredis" .SH NAME aioredis \- aioredis Documentation . 
@@ -79,7 +79,7 @@ T{ Sentinel support T} T{ -Yes [1] +Yes T} _ T{ @@ -97,19 +97,19 @@ T{ Tested CPython versions T} T{ -\fI\%3.5, 3.6\fP [2] +\fI\%3.5.3, 3.6, 3.7\fP [1] T} _ T{ Tested PyPy3 versions T} T{ -\fI\%5.9.0\fP +\fI\%pypy3.5\-7.0 pypy3.6\-7.1.1\fP T} _ T{ Tested for Redis server T} T{ -\fI\%2.6, 2.8, 3.0, 3.2, 4.0\fP +\fI\%2.6, 2.8, 3.0, 3.2, 4.0 5.0\fP T} _ T{ @@ -120,9 +120,6 @@ _ .TE .IP [1] 5 -Sentinel support is available in master branch. -This feature is not yet stable and may have some issues. -.IP [2] 5 For Python 3.3, 3.4 support use aioredis v0.3. .SH INSTALLATION .sp @@ -151,6 +148,10 @@ .INDENT 0.0 .IP \(bu 2 Issue Tracker: \fI\%https://github.com/aio\-libs/aioredis/issues\fP +.IP \(bu 2 +Google Group: \fI\%https://groups.google.com/forum/#!forum/aio\-libs\fP +.IP \(bu 2 +Gitter: \fI\%https://gitter.im/aio\-libs/Lobby\fP .IP \(bu 2 Source Code: \fI\%https://github.com/aio\-libs/aioredis\fP .IP \(bu 2 @@ -170,112 +171,294 @@ .ce 0 .sp .SH GETTING STARTED -.SS Commands Pipelining -.sp -Commands pipelining is built\-in. -.sp -Every command is sent to transport at\-once -(ofcourse if no \fBTypeError\fP/\fBValueError\fP was raised) -.sp -When you making a call with \fBawait\fP / \fByield from\fP you will be waiting result, -and then gather results. 
-.sp -Simple example show both cases (\fBget source code\fP): -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -# No pipelining; -async def wait_each_command(): - val = await redis.get(\(aqfoo\(aq) # wait until \(gaval\(ga is available - cnt = await redis.incr(\(aqbar\(aq) # wait until \(gacnt\(ga is available - return val, cnt - -# Sending multiple commands and then gathering results -async def pipelined(): - fut1 = redis.get(\(aqfoo\(aq) # issue command and return future - fut2 = redis.incr(\(aqbar\(aq) # issue command and return future - # block until results are available - val, cnt = await asyncio.gather(fut1, fut2) - return val, cnt - +.SS Installation +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +$ pip install aioredis +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +This will install aioredis along with its dependencies: +.INDENT 0.0 +.IP \(bu 2 +hiredis protocol parser; +.IP \(bu 2 +async\-timeout \-\-\- used in Sentinel client. +.UNINDENT +.SS Without dependencies +.sp +In some cases [1] you might need to install \fBaioredis\fP without \fBhiredis\fP, +it is achievable with the following command: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +$ pip install \-\-no\-deps aioredis async\-timeout +.ft P +.fi +.UNINDENT +.UNINDENT +.SS Installing latest version from Git +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +$ pip install git+https://github.com/aio\-libs/aioredis@master#egg=aioredis +.ft P +.fi +.UNINDENT +.UNINDENT +.SS Connecting +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) + await redis.set(\(aqmy\-key\(aq, \(aqvalue\(aq) + value = await redis.get(\(aqmy\-key\(aq, encoding=\(aqutf\-8\(aq) + print(value) + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) + +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +\fBaioredis.create_redis_pool()\fP creates a Redis client backed by a pool of +connections. 
The only required argument is the address of Redis server. +Redis server address can be either host and port tuple +(ex: \fB(\(aqlocalhost\(aq, 6379)\fP), or a string which will be parsed into +TCP or UNIX socket address (ex: \fB\(aqunix://var/run/redis.sock\(aq\fP, +\fB\(aq//var/run/redis.sock\(aq\fP, \fBredis://redis\-host\-or\-ip:6379/1\fP). +.sp +Closing the client. Calling \fBredis.close()\fP and then \fBredis.wait_closed()\fP +is strongly encouraged as these methods will shutdown all open connections +and cleanup resources. +.sp +See the commands reference for the full list of supported commands. +.SS Connecting to specific DB +.sp +There are several ways you can specify database index to select on connection: +.INDENT 0.0 +.IP 1. 3 +explicitly pass db index as \fBdb\fP argument: +.INDENT 3.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis_pool( + \(aqredis://localhost\(aq, db=1) +.ft P +.fi +.UNINDENT +.UNINDENT +.IP 2. 3 +pass db index in URI as path component: +.INDENT 3.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis_pool( + \(aqredis://localhost/2\(aq) .ft P .fi .UNINDENT .UNINDENT .sp \fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 -For convenience \fBaioredis\fP provides -\fBpipeline()\fP -method allowing to execute bulk of commands as one -(\fBget source code\fP): -.INDENT 0.0 -.INDENT 3.5 -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -# Explicit pipeline -async def explicit_pipeline(): - pipe = redis.pipeline() - fut1 = pipe.get(\(aqfoo\(aq) - fut2 = pipe.incr(\(aqbar\(aq) - result = await pipe.execute() - val, cnt = await asyncio.gather(fut1, fut2) - assert result == [val, cnt] - return val, cnt - -.ft P -.fi -.UNINDENT -.UNINDENT -.UNINDENT -.UNINDENT +.INDENT 3.0 +.INDENT 3.5 +DB index specified in URI will take precedence over +\fBdb\fP keyword argument. +.UNINDENT +.UNINDENT +.IP 3. 
3 +call \fBselect()\fP method: +.INDENT 3.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis_pool( + \(aqredis://localhost/\(aq) +await redis.select(3) +.ft P +.fi +.UNINDENT +.UNINDENT +.UNINDENT +.SS Connecting to password\-protected Redis instance +.sp +The password can be specified either in keyword argument or in address URI: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis_pool( + \(aqredis://localhost\(aq, password=\(aqsEcRet\(aq) + +redis = await aioredis.create_redis_pool( + \(aqredis://:sEcRet@localhost/\(aq) + +redis = await aioredis.create_redis_pool( + \(aqredis://localhost/?password=sEcRet\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +Password specified in URI will take precedence over password keyword. +.sp +Also specifying both password as authentication component and +query parameter in URI is forbidden. +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +# This will cause assertion error +await aioredis.create_redis_pool( + \(aqredis://:sEcRet@localhost/?password=SeCreT\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.SS Result messages decoding +.sp +By default \fBaioredis\fP will return \fI\%bytes\fP for most Redis +commands that return string replies. Redis error replies are known to be +valid UTF\-8 strings so error messages are decoded automatically. 
+.sp +If you know that data in Redis is valid string you can tell \fBaioredis\fP +to decode result by passing keyword\-only argument \fBencoding\fP +in a command call: +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) + await redis.set(\(aqkey\(aq, \(aqstring\-value\(aq) + bin_value = await redis.get(\(aqkey\(aq) + assert bin_value == b\(aqstring\-value\(aq + + str_value = await redis.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) + assert str_value == \(aqstring\-value\(aq + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) + +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +\fBaioredis\fP can decode messages for all Redis data types like +lists, hashes, sorted sets, etc: +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) + + await redis.hmset_dict(\(aqhash\(aq, + key1=\(aqvalue1\(aq, + key2=\(aqvalue2\(aq, + key3=123) + + result = await redis.hgetall(\(aqhash\(aq, encoding=\(aqutf\-8\(aq) + assert result == { + \(aqkey1\(aq: \(aqvalue1\(aq, + \(aqkey2\(aq: \(aqvalue2\(aq, + \(aqkey3\(aq: \(aq123\(aq, # note that Redis returns int as string + } + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) + +.ft P +.fi .UNINDENT .UNINDENT .SS Multi/Exec transactions .sp -\fBaioredis\fP provides several ways for executing transactions: -.INDENT 0.0 -.IP \(bu 2 -when using raw connection you can issue \fBMulti\fP/\fBExec\fP commands -manually; -.IP \(bu 2 -when using \fBaioredis.Redis\fP instance you can use -\fBmulti_exec()\fP transaction pipeline. 
+\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) + + tr = redis.multi_exec() + tr.set(\(aqkey1\(aq, \(aqvalue1\(aq) + tr.set(\(aqkey2\(aq, \(aqvalue2\(aq) + ok1, ok2 = await tr.execute() + assert ok1 + assert ok2 + +asyncio.run(main()) + +.ft P +.fi +.UNINDENT .UNINDENT .sp \fBmulti_exec()\fP method creates and returns new \fBMultiExec\fP object which is used for buffering commands and then executing them inside MULTI/EXEC block. .sp -Here is a simple example -(\fBget source code\fP): -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -async def transaction(): - tr = redis.multi_exec() - future1 = tr.set(\(aqfoo\(aq, \(aq123\(aq) - future2 = tr.set(\(aqbar\(aq, \(aq321\(aq) - result = await tr.execute() - assert result == await asyncio.gather(future1, future2) - return result - -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -As you can notice \fBawait\fP is \fBonly\fP used at line 5 with \fBtr.execute\fP -and \fBnot with\fP \fBtr.set(...)\fP calls. -.sp \fBWARNING:\fP .INDENT 0.0 .INDENT 3.5 @@ -300,186 +483,129 @@ .sp \fBaioredis\fP provides support for Redis Publish/Subscribe messaging. .sp -To switch connection to subscribe mode you must execute \fBsubscribe\fP command -by yield\(aqing from \fBsubscribe()\fP it returns a list of -\fBChannel\fP objects representing subscribed channels. -.sp -As soon as connection is switched to subscribed mode the channel will receive -and store messages +To start listening for messages you must call either +\fBsubscribe()\fP or +\fBpsubscribe()\fP method. +Both methods return list of \fBChannel\fP objects representing +subscribed channels. +.sp +Right after that the channel will receive and store messages (the \fBChannel\fP object is basically a wrapper around \fI\%asyncio.Queue\fP). To read messages from channel you need to use \fBget()\fP or \fBget_json()\fP coroutines. 
.sp -\fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 -In Pub/Sub mode redis connection can only receive messages or issue -(P)SUBSCRIBE / (P)UNSUBSCRIBE commands. -.UNINDENT -.UNINDENT -.sp -Pub/Sub example (\fBget source code\fP): -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -sub = await aioredis.create_redis( - \(aqredis://localhost\(aq) - -ch1, ch2 = await sub.subscribe(\(aqchannel:1\(aq, \(aqchannel:2\(aq) -assert isinstance(ch1, aioredis.Channel) -assert isinstance(ch2, aioredis.Channel) - -async def async_reader(channel): - while await channel.wait_message(): - msg = await channel.get(encoding=\(aqutf\-8\(aq) - # ... process message ... - print("message in {}: {}".format(channel.name, msg)) - -tsk1 = asyncio.ensure_future(async_reader(ch1)) - -# Or alternatively: - -async def async_reader2(channel): - while True: - msg = await channel.get(encoding=\(aqutf\-8\(aq) - if msg is None: - break - # ... process message ... - print("message in {}: {}".format(channel.name, msg)) - -tsk2 = asyncio.ensure_future(async_reader2(ch2)) - -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -Pub/Sub example (\fBget source code\fP): -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -async def reader(channel): - while (await channel.wait_message()): - msg = await channel.get(encoding=\(aqutf\-8\(aq) - # ... process message ... 
- print("message in {}: {}".format(channel.name, msg)) - - if msg == STOPWORD: - return - -with await pool as conn: - await conn.execute_pubsub(\(aqsubscribe\(aq, \(aqchannel:1\(aq) - channel = conn.pubsub_channels[\(aqchannel:1\(aq] - await reader(channel) # wait for reader to complete - await conn.execute_pubsub(\(aqunsubscribe\(aq, \(aqchannel:1\(aq) - -# Explicit connection usage -conn = await pool.acquire() -try: - await conn.execute_pubsub(\(aqsubscribe\(aq, \(aqchannel:1\(aq) - channel = conn.pubsub_channels[\(aqchannel:1\(aq] - await reader(channel) # wait for reader to complete - await conn.execute_pubsub(\(aqunsubscribe\(aq, \(aqchannel:1\(aq) -finally: - pool.release(conn) - -.ft P -.fi -.UNINDENT -.UNINDENT -.SS Python 3.5 \fBasync with\fP / \fBasync for\fP support -.sp -\fBaioredis\fP is compatible with \fI\%PEP 492\fP\&. -.sp -\fBPool\fP can be used with \fI\%async with\fP -(\fBget source code\fP): -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -pool = await aioredis.create_pool( - \(aqredis://localhost\(aq) -async with pool.get() as conn: - value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) - print(\(aqraw value:\(aq, value) - -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -It also can be used with \fBawait\fP: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -pool = await aioredis.create_pool( - \(aqredis://localhost\(aq) -# This is exactly the same as: -# with (yield from pool) as conn: -with (await pool) as conn: - value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) - print(\(aqraw value:\(aq, value) - -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -New \fBscan\fP\-family commands added with support of \fI\%async for\fP -(\fBget source code\fP): -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis( - \(aqredis://localhost\(aq) - -async for key in redis.iscan(match=\(aqsomething*\(aq): - print(\(aqMatched:\(aq, key) - -async for name, val in redis.ihscan(key, match=\(aqsomething*\(aq): - print(\(aqMatched:\(aq, name, \(aq\->\(aq, val) - 
-async for val in redis.isscan(key, match=\(aqsomething*\(aq): - print(\(aqMatched:\(aq, val) - -async for val, score in redis.izscan(key, match=\(aqsomething*\(aq): - print(\(aqMatched:\(aq, val, \(aq:\(aq, score) - -.ft P -.fi -.UNINDENT -.UNINDENT -.SS SSL/TLS support -.sp -Though Redis server \fI\%does not support data encryption\fP -it is still possible to setup Redis server behind SSL proxy. For such cases -\fBaioredis\fP library support secure connections through \fI\%asyncio\fP -SSL support. See \fI\%BaseEventLoop.create_connection\fP for details. -.SH MIGRATING FROM V0.3 TO V1.0 -.SS API changes and backward incompatible changes: -.INDENT 0.0 -.IP \(bu 2 -\fI\%aioredis.create_pool\fP -.IP \(bu 2 -\fI\%aioredis.create_reconnecting_redis\fP -.IP \(bu 2 -\fI\%aioredis.Redis\fP -.IP \(bu 2 -\fI\%Blocking operations and connection sharing\fP -.IP \(bu 2 -\fI\%Sorted set commands return values\fP -.IP \(bu 2 -\fI\%Hash hscan command now returns list of tuples\fP -.UNINDENT +Example subscribing and reading channels: +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool(\(aqredis://localhost\(aq) + + ch1, ch2 = await redis.subscribe(\(aqchannel:1\(aq, \(aqchannel:2\(aq) + assert isinstance(ch1, aioredis.Channel) + assert isinstance(ch2, aioredis.Channel) + + async def reader(channel): + async for message in channel.iter(): + print("Got message:", message) + asyncio.get_running_loop().create_task(reader(ch1)) + asyncio.get_running_loop().create_task(reader(ch2)) + + await redis.publish(\(aqchannel:1\(aq, \(aqHello\(aq) + await redis.publish(\(aqchannel:2\(aq, \(aqWorld\(aq) + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) + +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Subscribing and reading patterns: +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + redis = 
await aioredis.create_redis_pool(\(aqredis://localhost\(aq) + + ch, = await redis.psubscribe(\(aqchannel:*\(aq) + assert isinstance(ch, aioredis.Channel) + + async def reader(channel): + async for ch, message in channel.iter(): + print("Got message in channel:", ch, ":", message) + asyncio.get_running_loop().create_task(reader(ch)) + + await redis.publish(\(aqchannel:1\(aq, \(aqHello\(aq) + await redis.publish(\(aqchannel:2\(aq, \(aqWorld\(aq) + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) + +.ft P +.fi +.UNINDENT +.UNINDENT +.SS Sentinel client +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + sentinel = await aioredis.create_sentinel( + [\(aqredis://localhost:26379\(aq, \(aqredis://sentinel2:26379\(aq]) + redis = sentinel.master_for(\(aqmymaster\(aq) + + ok = await redis.set(\(aqkey\(aq, \(aqvalue\(aq) + assert ok + val = await redis.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) + assert val == \(aqvalue\(aq + +asyncio.run(main()) + +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Sentinel client requires a list of Redis Sentinel addresses to connect to +and start discovering services. +.sp +Calling \fBmaster_for()\fP or +\fBslave_for()\fP methods will return +Redis clients connected to specified services monitored by Sentinel. +.sp +Sentinel client will detect failover and reconnect Redis clients automatically. +.sp +See detailed reference here .sp .ce @@ -487,348 +613,10 @@ .ce 0 .sp -.SS aioredis.create_pool -.sp -\fBcreate_pool()\fP now returns \fBConnectionsPool\fP -instead of \fBRedisPool\fP\&. -.sp -This means that pool now operates with \fBRedisConnection\fP -objects and not \fBRedis\fP\&. -.TS -center; -|l|l|. 
-_ -T{ -v0.3 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) - -with await pool as redis: - # calling methods of Redis class - await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -T{ -v1.0 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) - -with await pool as conn: - # calling conn.lpush will raise AttributeError exception - await conn.execute(\(aqlpush\(aq, \(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -.TE -.SS aioredis.create_reconnecting_redis -.sp -\fBcreate_reconnecting_redis()\fP has been dropped. -.sp -\fBcreate_redis_pool()\fP can be used instead of former function. -.TS -center; -|l|l|. -_ -T{ -v0.3 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_reconnecting_redis( - (\(aqlocalhost\(aq, 6379)) - -await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -T{ -v1.0 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis_pool( - (\(aqlocalhost\(aq, 6379)) - -await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -.TE -.sp -\fBcreate_redis_pool\fP returns \fBRedis\fP initialized with -\fBConnectionsPool\fP which is responsible for reconnecting to server. -.sp -Also \fBcreate_reconnecting_redis\fP was patching the \fBRedisConnection\fP and -breaking \fBclosed\fP property (it was always \fBTrue\fP). -.SS aioredis.Redis -.sp -\fBRedis\fP class now operates with objects implementing -\fBaioredis.abc.AbcConnection\fP interface. -\fBRedisConnection\fP and \fBConnectionsPool\fP are -both implementing \fBAbcConnection\fP so it is become possible to use same API -when working with either single connection or connections pool. -.TS -center; -|l|l|. 
-_ -T{ -v0.3 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) -await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) - -pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) -redis = await pool.acquire() # get Redis object -await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -T{ -v1.0 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) -await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) - -redis = await aioredis.create_redis_pool((\(aqlocalhost\(aq, 6379)) -await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -.TE -.SS Blocking operations and connection sharing -.sp -Current implementation of \fBConnectionsPool\fP by default \fBexecute -every command on random connection\fP\&. The \fIPros\fP of this is that it allowed -implementing \fBAbcConnection\fP interface and hide pool inside \fBRedis\fP class, -and also keep pipelining feature (like RedisConnection.execute). -The \fICons\fP of this is that \fBdifferent tasks may use same connection and block -it\fP with some long\-running command. -.sp -We can call it \fBShared Mode\fP \-\-\- commands are sent to random connections -in pool without need to lock [connection]: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis_pool( - (\(aqlocalhost\(aq, 6379), - minsize=1, - maxsize=1) - -async def task(): - # Shared mode - await redis.set(\(aqkey\(aq, \(aqval\(aq) - -asyncio.ensure_future(task()) -asyncio.ensure_future(task()) -# Both tasks will send commands through same connection -# without acquiring (locking) it first. 
-.ft P -.fi -.UNINDENT -.UNINDENT -.sp -Blocking operations (like \fBblpop\fP, \fBbrpop\fP or long\-running LUA scripts) -in \fBshared mode\fP mode will block connection and thus may lead to whole -program malfunction. -.sp -This \fIblocking\fP issue can be easily solved by using exclusive connection -for such operations: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis_pool( - (\(aqlocalhost\(aq, 6379), - minsize=1, - maxsize=1) - -async def task(): - # Exclusive mode - with await redis as r: - await r.set(\(aqkey\(aq, \(aqval\(aq) -asyncio.ensure_future(task()) -asyncio.ensure_future(task()) -# Both tasks will first acquire connection. -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -We can call this \fBExclusive Mode\fP \-\-\- context manager is used to -acquire (lock) exclusive connection from pool and send all commands through it. -.sp -\fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 -This technique is similar to v0.3 pool usage: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -# in aioredis v0.3 -pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) -with await pool as redis: - # Redis is bound to exclusive connection - redis.set(\(aqkey\(aq, \(aqval\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -.UNINDENT -.UNINDENT -.SS Sorted set commands return values -.sp -Sorted set commands (like \fBzrange\fP, \fBzrevrange\fP and others) that accept -\fBwithscores\fP argument now \fBreturn list of tuples\fP instead of plain list. -.TS -center; -|l|l|. 
-_ -T{ -v0.3 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) -await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) -res = await redis.zrage(\(aqzset\-key\(aq, withscores=True) -assert res == [b\(aqone\(aq, 1, b\(aqtwo\(aq, 2] - -# not an esiest way to make a dict -it = iter(res) -assert dict(zip(it, it)) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -T{ -v1.0 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) -await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) -res = await redis.zrage(\(aqzset\-key\(aq, withscores=True) -assert res == [(b\(aqone\(aq, 1), (b\(aqtwo\(aq, 2)] - -# now its easier to make a dict of it -assert dict(res) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -.TE -.SS Hash \fBhscan\fP command now returns list of tuples -.sp -\fBhscan\fP updated to return a list of tuples instead of plain -mixed key/value list. -.TS -center; -|l|l|. 
-_ -T{ -v0.3 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) -await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) -cur, data = await redis.hscan(\(aqhash\(aq) -assert data == [b\(aqone\(aq, b\(aq1\(aq, b\(aqtwo\(aq, b\(aq2\(aq] - -# not an esiest way to make a dict -it = iter(data) -assert dict(zip(it, it)) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -T{ -v1.0 -T} T{ -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) -await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) -cur, data = await redis.hscan(\(aqhash\(aq) -assert data == [(b\(aqone\(aq, b\(aq1\(aq), (b\(aqtwo\(aq, b\(aq2\(aq)] - -# now its easier to make a dict of it -assert dict(data) == {b\(aqone\(aq: b\(aq1\(aq: b\(aqtwo\(aq: b\(aq2\(aq} -.ft P -.fi -.UNINDENT -.UNINDENT -T} -_ -.TE +.IP [1] 5 +Celery hiredis issues +(\fI\%#197\fP, +\fI\%#317\fP) .SH AIOREDIS --- API REFERENCE .SS Connection .sp @@ -846,36 +634,39 @@ import aioredis async def connect_uri(): - conn = await aioredis\&.create_connection( + conn = await aioredis.create_connection( \(aqredis://localhost/0\(aq) - val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) + val = await conn.execute(\(aqGET\(aq, \(aqmy\-key\(aq) async def connect_tcp(): - conn = await aioredis\&.create_connection( + conn = await aioredis.create_connection( (\(aqlocalhost\(aq, 6379)) - val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) + val = await conn.execute(\(aqGET\(aq, \(aqmy\-key\(aq) async def connect_unixsocket(): - conn = await aioredis\&.create_connection( + conn = await aioredis.create_connection( \(aq/path/to/redis/socket\(aq) # or uri \(aqunix:///path/to/redis/socket?db=1\(aq - val = await conn\&.execute(\(aqGET\(aq, \(aqmy\-key\(aq) - -asyncio\&.get_event_loop()\&.run_until_complete(connect_tcp()) 
-asyncio\&.get_event_loop()\&.run_until_complete(connect_unixsocket()) -.ft P -.fi -.UNINDENT -.UNINDENT -.INDENT 0.0 -.TP -.B coroutine aioredis.create_connection(address, *, db=0, password=None, ssl=None, encoding=None, parser=None, loop=None, timeout=None) + val = await conn.execute(\(aqGET\(aq, \(aqmy\-key\(aq) + +asyncio.get_event_loop().run_until_complete(connect_tcp()) +asyncio.get_event_loop().run_until_complete(connect_unixsocket()) +.ft P +.fi +.UNINDENT +.UNINDENT +.INDENT 0.0 +.TP +.B coroutine aioredis.create_connection(address, *, db=0, password=None, ssl=None, encoding=None, parser=None, timeout=None, connection_cls=None) Creates Redis connection. .sp Changed in version v0.3.1: \fBtimeout\fP argument added. .sp Changed in version v1.0: \fBparser\fP argument added. + +.sp +Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. .INDENT 7.0 .TP @@ -910,12 +701,11 @@ \fBparser\fP (\fIcallable\fP\fI or \fP\fI\%None\fP) \-\- Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. .IP \(bu 2 -\fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance -(uses \fI\%asyncio.get_event_loop()\fP if not specified). -.IP \(bu 2 \fBtimeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) \-\- Max time to open a connection, otherwise raise \fI\%asyncio.TimeoutError\fP exception. \fBNone\fP by default +.IP \(bu 2 +\fBconnection_cls\fP (\fBabc.AbcConnection\fP or None) \-\- Custom connection class. \fBNone\fP by default. 
.UNINDENT .TP .B Returns @@ -1024,7 +814,7 @@ .sp .nf .ft C ->>> ch1 = Channel(\(aqA\(aq, is_pattern=False, loop=loop) +>>> ch1 = Channel(\(aqA\(aq, is_pattern=False) >>> await conn.execute_pubsub(\(aqsubscribe\(aq, ch1) [[b\(aqsubscribe\(aq, b\(aqA\(aq, 1]] .ft P @@ -1133,15 +923,15 @@ import aioredis async def sample_pool(): - pool = await aioredis\&.create_pool(\(aqredis://localhost\(aq) - val = await pool\&.execute(\(aqget\(aq, \(aqmy\-key\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -.INDENT 0.0 -.TP -.B aioredis.create_pool(address, *, db=0, password=None, ssl=None, encoding=None, minsize=1, maxsize=10, parser=None, loop=None, create_connection_timeout=None, pool_cls=None, connection_cls=None) + pool = await aioredis.create_pool(\(aqredis://localhost\(aq) + val = await pool.execute(\(aqget\(aq, \(aqmy\-key\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +.INDENT 0.0 +.TP +.B aioredis.create_pool(address, *, db=0, password=None, ssl=None, encoding=None, minsize=1, maxsize=10, parser=None, create_connection_timeout=None, pool_cls=None, connection_cls=None) A \fI\%coroutine\fP that instantiates a pool of \fI\%RedisConnection\fP\&. .sp @@ -1158,6 +948,9 @@ .sp New in version v1.0: \fBparser\fP, \fBpool_cls\fP and \fBconnection_cls\fP arguments added. + +.sp +Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. .INDENT 7.0 .TP @@ -1197,9 +990,6 @@ .IP \(bu 2 \fBparser\fP (\fIcallable\fP\fI or \fP\fI\%None\fP) \-\- Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. -.IP \(bu 2 -\fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance -(uses \fI\%asyncio.get_event_loop()\fP if not specified). .IP \(bu 2 \fBcreate_connection_timeout\fP (\fIfloat greater than 0\fP\fI or \fP\fI\%None\fP) \-\- Max time to open a connection, otherwise raise an \fI\%asyncio.TimeoutError\fP\&. \fBNone\fP by default. 
@@ -1378,110 +1168,6 @@ Wait until pool gets closed (when all connections are closed). .sp New in version v0.2.8. - -.UNINDENT -.UNINDENT - -.sp -.ce ----- - -.ce 0 -.sp -.SS Pub/Sub Channel object -.sp -\fIChannel\fP object is a wrapper around queue for storing received pub/sub messages. -.INDENT 0.0 -.TP -.B class aioredis.Channel(name, is_pattern, loop=None) -Bases: \fBabc.AbcChannel\fP -.sp -Object representing Pub/Sub messages queue. -It\(aqs basically a wrapper around \fI\%asyncio.Queue\fP\&. -.INDENT 7.0 -.TP -.B name -Holds encoded channel/pattern name. -.UNINDENT -.INDENT 7.0 -.TP -.B is_pattern -Set to True for pattern channels. -.UNINDENT -.INDENT 7.0 -.TP -.B is_active -Set to True if there are messages in queue and connection is still -subscribed to this channel. -.UNINDENT -.INDENT 7.0 -.TP -.B coroutine get(*, encoding=None, decoder=None) -Coroutine that waits for and returns a message. -.sp -Return value is message received or \fBNone\fP signifying that channel has -been unsubscribed and no more messages will be received. -.INDENT 7.0 -.TP -.B Parameters -.INDENT 7.0 -.IP \(bu 2 -\fBencoding\fP (\fI\%str\fP) \-\- If not None used to decode resulting bytes message. -.IP \(bu 2 -\fBdecoder\fP (\fIcallable\fP) \-\- If specified used to decode message, -ex. \fI\%json.loads()\fP -.UNINDENT -.TP -.B Raises -\fBaioredis.ChannelClosedError\fP \-\- If channel is unsubscribed and -has no more messages. -.UNINDENT -.UNINDENT -.INDENT 7.0 -.TP -.B get_json(*, encoding="utf\-8") -Shortcut to \fBget(encoding="utf\-8", decoder=json.loads)\fP -.UNINDENT -.INDENT 7.0 -.TP -.B coroutine wait_message() -Waits for message to become available in channel -or channel is closed (unsubscribed). -.sp -Main idea is to use it in loops: -.sp -.nf -.ft C ->>> ch = redis.channels[\(aqchannel:1\(aq] ->>> while await ch.wait_message(): -\&... 
msg = await ch.get() -.ft P -.fi -.INDENT 7.0 -.TP -.B Return type -\fI\%bool\fP -.UNINDENT -.UNINDENT -.INDENT 7.0 -.TP -.B coroutine async\-for iter(*, encoding=None, decoder=None) -Same as \fI\%get()\fP method but it is a native coroutine. -.sp -Usage example: -.INDENT 7.0 -.INDENT 3.5 -.sp -.nf -.ft C ->>> async for msg in ch.iter(): -\&... print(msg) -.ft P -.fi -.UNINDENT -.UNINDENT -.sp -New in version 0.2.5: Available for Python 3.5 only .UNINDENT .UNINDENT @@ -1725,6 +1411,110 @@ .ce 0 .sp +.SS Pub/Sub Channel object +.sp +\fIChannel\fP object is a wrapper around queue for storing received pub/sub messages. +.INDENT 0.0 +.TP +.B class aioredis.Channel(name, is_pattern) +Bases: \fBabc.AbcChannel\fP +.sp +Object representing Pub/Sub messages queue. +It\(aqs basically a wrapper around \fI\%asyncio.Queue\fP\&. +.INDENT 7.0 +.TP +.B name +Holds encoded channel/pattern name. +.UNINDENT +.INDENT 7.0 +.TP +.B is_pattern +Set to True for pattern channels. +.UNINDENT +.INDENT 7.0 +.TP +.B is_active +Set to True if there are messages in queue and connection is still +subscribed to this channel. +.UNINDENT +.INDENT 7.0 +.TP +.B coroutine get(*, encoding=None, decoder=None) +Coroutine that waits for and returns a message. +.sp +Return value is message received or \fBNone\fP signifying that channel has +been unsubscribed and no more messages will be received. +.INDENT 7.0 +.TP +.B Parameters +.INDENT 7.0 +.IP \(bu 2 +\fBencoding\fP (\fI\%str\fP) \-\- If not None used to decode resulting bytes message. +.IP \(bu 2 +\fBdecoder\fP (\fIcallable\fP) \-\- If specified used to decode message, +ex. \fI\%json.loads()\fP +.UNINDENT +.TP +.B Raises +\fBaioredis.ChannelClosedError\fP \-\- If channel is unsubscribed and +has no more messages. 
+.UNINDENT +.UNINDENT +.INDENT 7.0 +.TP +.B get_json(*, encoding="utf\-8") +Shortcut to \fBget(encoding="utf\-8", decoder=json.loads)\fP +.UNINDENT +.INDENT 7.0 +.TP +.B coroutine wait_message() +Waits for message to become available in channel +or channel is closed (unsubscribed). +.sp +Main idea is to use it in loops: +.sp +.nf +.ft C +>>> ch = redis.channels[\(aqchannel:1\(aq] +>>> while await ch.wait_message(): +\&... msg = await ch.get() +.ft P +.fi +.INDENT 7.0 +.TP +.B Return type +\fI\%bool\fP +.UNINDENT +.UNINDENT +.INDENT 7.0 +.TP +.B coroutine async\-for iter(*, encoding=None, decoder=None) +Same as \fI\%get()\fP method but it is a native coroutine. +.sp +Usage example: +.INDENT 7.0 +.INDENT 3.5 +.sp +.nf +.ft C +>>> async for msg in ch.iter(): +\&... print(msg) +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +New in version 0.2.5: Available for Python 3.5 only + +.UNINDENT +.UNINDENT + +.sp +.ce +---- + +.ce 0 +.sp .SS Commands Interface .sp The library provides high\-level API implementing simple interface @@ -1740,22 +1530,22 @@ # Create Redis client bound to single non\-reconnecting connection. async def single_connection(): - redis = await aioredis\&.create_redis( + redis = await aioredis.create_redis( \(aqredis://localhost\(aq) - val = await redis\&.get(\(aqmy\-key\(aq) + val = await redis.get(\(aqmy\-key\(aq) # Create Redis client bound to connections pool. 
async def pool_of_connections(): - redis = await aioredis\&.create_redis_pool( + redis = await aioredis.create_redis_pool( \(aqredis://localhost\(aq) - val = await redis\&.get(\(aqmy\-key\(aq) + val = await redis.get(\(aqmy\-key\(aq) # we can also use pub/sub as underlying pool # has several free connections: - ch1, ch2 = await redis\&.subscribe(\(aqchan:1\(aq, \(aqchan:2\(aq) + ch1, ch2 = await redis.subscribe(\(aqchan:1\(aq, \(aqchan:2\(aq) # publish using free connection - await redis\&.publish(\(aqchan:1\(aq, \(aqHello\(aq) - await ch1\&.get() + await redis.publish(\(aqchan:1\(aq, \(aqHello\(aq) + await ch1.get() .ft P .fi .UNINDENT @@ -1765,12 +1555,15 @@ see commands mixins reference\&. .INDENT 0.0 .TP -.B coroutine aioredis.create_redis(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, parser=None, timeout=None, connection_cls=None, loop=None) +.B coroutine aioredis.create_redis(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, parser=None, timeout=None, connection_cls=None) This \fI\%coroutine\fP creates high\-level Redis interface instance bound to single Redis connection (without auto\-reconnect). .sp New in version v1.0: \fBparser\fP, \fBtimeout\fP and \fBconnection_cls\fP arguments added. + +.sp +Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. .sp See also \fI\%RedisConnection\fP for parameters description. @@ -1807,9 +1600,6 @@ \fBconnection_cls\fP (\fIaioredis.abc.AbcConnection\fP) \-\- Can be used to instantiate custom connection class. This argument \fBmust be\fP a subclass of \fBAbcConnection\fP\&. -.IP \(bu 2 -\fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance -(uses \fI\%asyncio.get_event_loop()\fP if not specified). 
.UNINDENT .TP .B Returns @@ -1819,7 +1609,7 @@ .UNINDENT .INDENT 0.0 .TP -.B coroutine aioredis.create_redis_pool(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, minsize=1, maxsize=10, parser=None, timeout=None, pool_cls=None, connection_cls=None, loop=None) +.B coroutine aioredis.create_redis_pool(address, *, db=0, password=None, ssl=None, encoding=None, commands_factory=Redis, minsize=1, maxsize=10, parser=None, timeout=None, pool_cls=None, connection_cls=None) This \fI\%coroutine\fP create high\-level Redis client instance bound to connections pool (this allows auto\-reconnect and simple pub/sub use). @@ -1828,6 +1618,9 @@ .sp Changed in version v1.0: \fBparser\fP, \fBtimeout\fP, \fBpool_cls\fP and \fBconnection_cls\fP arguments added. + +.sp +Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. .INDENT 7.0 .TP @@ -1871,9 +1664,6 @@ \fBconnection_cls\fP (\fIaioredis.abc.AbcConnection\fP) \-\- Can be used to make pool instantiate custom connection classes. This argument \fBmust be\fP a subclass of \fBAbcConnection\fP\&. -.IP \(bu 2 -\fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance -(uses \fI\%asyncio.get_event_loop()\fP if not specified). .UNINDENT .TP .B Returns @@ -1902,7 +1692,7 @@ .UNINDENT .INDENT 7.0 .TP -.B address +.B property address Redis connection address (if applicable). .UNINDENT .INDENT 7.0 @@ -1919,18 +1709,18 @@ .UNINDENT .INDENT 7.0 .TP -.B closed +.B property closed True if connection is closed. .UNINDENT .INDENT 7.0 .TP -.B connection +.B property connection Either \fBaioredis.RedisConnection\fP, or \fBaioredis.ConnectionsPool\fP instance. .UNINDENT .INDENT 7.0 .TP -.B db +.B property db Currently selected db index. .UNINDENT .INDENT 7.0 @@ -1940,12 +1730,12 @@ .UNINDENT .INDENT 7.0 .TP -.B encoding +.B property encoding Current set codec or None. 
.UNINDENT
.INDENT 7.0
.TP
-.B in_transaction
+.B property in_transaction
Set to True when MULTI command was issued.
.UNINDENT
.INDENT 7.0
@@ -1963,14 +1753,7 @@
.INDENT 7.0
.TP
.B select(db)
-Change the selected database for the current connection.
-.sp
-This method wraps call to \fBaioredis.RedisConnection.select()\fP
-.UNINDENT
-.INDENT 7.0
-.TP
-.B coroutine wait_closed()
-Coroutine waiting until underlying connections are closed.
+Change the selected database.
.UNINDENT
.UNINDENT
.SS Generic commands
@@ -2533,12 +2316,17 @@
.UNINDENT
.INDENT 7.0
.TP
-.B mset(key, value, *pairs)
-Set multiple keys to multiple values.
+.B mset(*args)
+Set multiple keys to multiple values or unpack dict to keys & values.
.INDENT 7.0
.TP
.B Raises
-\fI\%TypeError\fP \-\- if len of pairs is not event number
+.INDENT 7.0
+.IP \(bu 2
+\fI\%TypeError\fP \-\- if len of args is not an even number
+.IP \(bu 2
+\fI\%TypeError\fP \-\- if len of args equals 1 and it is not a dict
+.UNINDENT
.UNINDENT
.UNINDENT
.INDENT 7.0
@@ -2999,8 +2787,8 @@
.UNINDENT
.INDENT 7.0
.TP
-.B spop(key, *, encoding=)
-Remove and return a random member from a set.
+.B spop(key, count=None, *, encoding=)
+Remove and return one or multiple random members from a set.
.UNINDENT
.INDENT 7.0
.TP
@@ -3037,6 +2825,38 @@
For commands details see: \fI\%http://redis.io/commands/#sorted_set\fP
.INDENT 7.0
.TP
+.B bzpopmax(key, *keys, timeout=0, encoding=)
+Remove and get an element with the highest score in the sorted set,
+or block until one is available.
+.INDENT 7.0
+.TP
+.B Raises
+.INDENT 7.0
+.IP \(bu 2
+\fI\%TypeError\fP \-\- if timeout is not int
+.IP \(bu 2
+\fI\%ValueError\fP \-\- if timeout is less than 0
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.INDENT 7.0
+.TP
+.B bzpopmin(key, *keys, timeout=0, encoding=)
+Remove and get an element with the lowest score in the sorted set,
+or block until one is available. 
+.INDENT 7.0 +.TP +.B Raises +.INDENT 7.0 +.IP \(bu 2 +\fI\%TypeError\fP \-\- if timeout is not int +.IP \(bu 2 +\fI\%ValueError\fP \-\- if timeout is less than 0 +.UNINDENT +.UNINDENT +.UNINDENT +.INDENT 7.0 +.TP .B izscan(key, *, match=None, count=None) Incrementally iterate sorted set items using async for. .sp @@ -3051,7 +2871,7 @@ .UNINDENT .INDENT 7.0 .TP -.B zadd(key, score, member, *pairs, exist=None) +.B zadd(key, score, member, *pairs, exist=None, changed=False, incr=False) Add one or more members to a sorted set or update its score. .INDENT 7.0 .TP @@ -3124,6 +2944,28 @@ .UNINDENT .INDENT 7.0 .TP +.B zpopmax(key, count=None, *, encoding=) +Removes and returns up to count members with the highest scores +in the sorted set stored at key. +.INDENT 7.0 +.TP +.B Raises +\fI\%TypeError\fP \-\- if count is not int +.UNINDENT +.UNINDENT +.INDENT 7.0 +.TP +.B zpopmin(key, count=None, *, encoding=) +Removes and returns up to count members with the lowest scores +in the sorted set stored at key. +.INDENT 7.0 +.TP +.B Raises +\fI\%TypeError\fP \-\- if count is not int +.UNINDENT +.UNINDENT +.INDENT 7.0 +.TP .B zrange(key, start=0, stop=\-1, withscores=False, encoding=) Return a range of members in a sorted set, by index. .INDENT 7.0 @@ -3502,7 +3344,7 @@ .UNINDENT .INDENT 7.0 .TP -.B slaveof(host=, port=None) +.B slaveof(host, port=None) Make the server a slave of another instance, or promote it as master. .sp @@ -3649,13 +3491,16 @@ .UNINDENT .INDENT 0.0 .TP -.B class aioredis.commands.Pipeline(connection, commands_factory=lambda conn: conn, *, loop=None) +.B class aioredis.commands.Pipeline(connection, commands_factory=lambda conn: conn) Commands pipeline. .sp Buffers commands for execution in bulk. .sp This class implements \fI__getattr__\fP method allowing to call methods on instance created with \fBcommands_factory\fP\&. +.sp +Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. 
+ .INDENT 7.0 .TP .B Parameters @@ -3664,9 +3509,6 @@ \fBconnection\fP (\fIaioredis.RedisConnection\fP) \-\- Redis connection .IP \(bu 2 \fBcommands_factory\fP (\fIcallable\fP) \-\- Commands factory to get methods from. -.IP \(bu 2 -\fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance -(uses \fI\%asyncio.get_event_loop()\fP if not specified). .UNINDENT .UNINDENT .INDENT 7.0 @@ -3693,12 +3535,15 @@ .UNINDENT .INDENT 0.0 .TP -.B class aioredis.commands.MultiExec(connection, commands_factory=lambda conn: conn, *, loop=None) +.B class aioredis.commands.MultiExec(connection, commands_factory=lambda conn: conn) Bases: \fI\%Pipeline\fP\&. .sp Multi/Exec pipeline wrapper. .sp See \fI\%Pipeline\fP for parameters description. +.sp +Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. + .INDENT 7.0 .TP .B coroutine execute(*, return_exceptions=False) @@ -3957,7 +3802,7 @@ .UNINDENT .INDENT 7.0 .TP -.B slaveof(host=, port=None) +.B slaveof(host, port=None) Make the server a slave of another instance, or promote it as master. .sp @@ -4004,21 +3849,21 @@ For commands details see: \fI\%http://redis.io/commands/#pubsub\fP .INDENT 7.0 .TP -.B channels +.B property channels Returns read\-only channels dict. .sp See \fBpubsub_channels\fP .UNINDENT .INDENT 7.0 .TP -.B in_pubsub +.B property in_pubsub Indicates that connection is in PUB/SUB mode. .sp Provides the number of subscribed channels. .UNINDENT .INDENT 7.0 .TP -.B patterns +.B property patterns Returns read\-only patterns dict. .sp See \fBpubsub_patterns\fP @@ -4091,7 +3936,7 @@ \fBWARNING:\fP .INDENT 0.0 .INDENT 3.5 -Current release (1.2.0) of the library \fBdoes not support\fP +Current release (1.3.0) of the library \fBdoes not support\fP \fI\%Redis Cluster\fP in a full manner. It provides only several API methods which may be changed in future. 
.UNINDENT @@ -4102,8 +3947,7 @@ .B class aioredis.commands.StreamCommandsMixin Stream commands mixin .sp -Streams are under development in Redis and -not currently released. +Streams are available in Redis since v5.0 .INDENT 7.0 .TP .B xack(stream, group_name, id, *ids) @@ -4121,7 +3965,12 @@ .UNINDENT .INDENT 7.0 .TP -.B xgroup_create(stream, group_name, latest_id=\(aq$\(aq) +.B xdel(stream, id) +Removes the specified entries(IDs) from a stream +.UNINDENT +.INDENT 7.0 +.TP +.B xgroup_create(stream, group_name, latest_id=\(aq$\(aq, mkstream=False) Create a consumer group .UNINDENT .INDENT 7.0 @@ -4165,6 +4014,11 @@ .TP .B xinfo_stream(stream) Retrieve information about the given stream. +.UNINDENT +.INDENT 7.0 +.TP +.B xlen(stream) +Returns the number of entries inside a stream .UNINDENT .INDENT 7.0 .TP @@ -4199,7 +4053,7 @@ .UNINDENT .INDENT 7.0 .TP -.B xread_group(group_name, consumer_name, streams, timeout=0, count=None, latest_ids=None) +.B xread_group(group_name, consumer_name, streams, timeout=0, count=None, latest_ids=None, no_ack=False) Perform a blocking read on the given stream as part of a consumer group .INDENT 7.0 .TP @@ -4213,6 +4067,11 @@ .B xrevrange(stream, start=\(aq+\(aq, stop=\(aq\-\(aq, count=None) Retrieve messages from a stream in reverse order. .UNINDENT +.INDENT 7.0 +.TP +.B xtrim(stream, max_len, exact_len=False) +trims the stream to a given number of items, evicting older items +.UNINDENT .UNINDENT .SH AIOREDIS.ABC --- INTERFACES REFERENCE .sp @@ -4226,60 +4085,55 @@ Abstract connection interface. .INDENT 7.0 .TP -.B address +.B abstract property address Connection address. .UNINDENT .INDENT 7.0 .TP -.B close() +.B abstract close() Perform connection(s) close and resources cleanup. .UNINDENT .INDENT 7.0 .TP -.B closed +.B abstract property closed Flag indicating if connection is closing or already closed. .UNINDENT .INDENT 7.0 .TP -.B db +.B abstract property db Current selected DB index. 
.UNINDENT .INDENT 7.0 .TP -.B encoding +.B abstract property encoding Current set connection codec. .UNINDENT .INDENT 7.0 .TP -.B execute(command, *args, **kwargs) +.B abstract execute(command, *args, **kwargs) Execute redis command. .UNINDENT .INDENT 7.0 .TP -.B execute_pubsub(command, *args, **kwargs) +.B abstract execute_pubsub(command, *args, **kwargs) Execute Redis (p)subscribe/(p)unsubscribe commands. .UNINDENT .INDENT 7.0 .TP -.B in_pubsub +.B abstract property in_pubsub Returns number of subscribed channels. .sp Can be tested as bool indicating Pub/Sub mode state. .UNINDENT .INDENT 7.0 .TP -.B pubsub_channels +.B abstract property pubsub_channels Read\-only channels dict. .UNINDENT .INDENT 7.0 .TP -.B pubsub_patterns +.B abstract property pubsub_patterns Read\-only patterns dict. -.UNINDENT -.INDENT 7.0 -.TP -.B coroutine wait_closed() -Coroutine waiting until all resources are closed/released/cleaned up. .UNINDENT .UNINDENT .INDENT 0.0 @@ -4293,24 +4147,19 @@ for executing Redis commands. .INDENT 7.0 .TP -.B coroutine acquire() -Acquires connection from pool. -.UNINDENT -.INDENT 7.0 -.TP -.B address +.B abstract property address Connection address or None. .UNINDENT .INDENT 7.0 .TP -.B get_connection() +.B abstract get_connection(command, args=()) Gets free connection from pool in a sync way. .sp If no connection available — returns None. .UNINDENT .INDENT 7.0 .TP -.B release(conn) +.B abstract release(conn) Releases connection to pool. .INDENT 7.0 .TP @@ -4327,7 +4176,7 @@ Abstract Pub/Sub Channel interface. .INDENT 7.0 .TP -.B close(exc=None) +.B abstract close(exc=None) Marks Channel as closed, no more messages will be sent to it. .sp Called by RedisConnection when channel is unsubscribed @@ -4335,30 +4184,23 @@ .UNINDENT .INDENT 7.0 .TP -.B coroutine get() -Wait and return new message. -.sp -Will raise \fBChannelClosedError\fP if channel is not active. 
-.UNINDENT -.INDENT 7.0 -.TP -.B is_active +.B abstract property is_active Flag indicating that channel has unreceived messages and not marked as closed. .UNINDENT .INDENT 7.0 .TP -.B is_pattern +.B abstract property is_pattern Boolean flag indicating if channel is pattern channel. .UNINDENT .INDENT 7.0 .TP -.B name +.B abstract property name Encoded channel name or pattern. .UNINDENT .INDENT 7.0 .TP -.B put_nowait(data) +.B abstract put_nowait(data) Send data to channel. .sp Called by RedisConnection when new message received. @@ -4386,7 +4228,7 @@ .ft C >>> from aioredis.pubsub import Receiver >>> from aioredis.abc import AbcChannel ->>> mpsc = Receiver(loop=loop) +>>> mpsc = Receiver() >>> async def reader(mpsc): \&... async for channel, msg in mpsc.iter(): \&... assert isinstance(channel, AbcChannel) @@ -4436,7 +4278,7 @@ .UNINDENT .INDENT 7.0 .TP -.B channels +.B property channels Read\-only channels dict. .UNINDENT .INDENT 7.0 @@ -4446,28 +4288,7 @@ .UNINDENT .INDENT 7.0 .TP -.B coroutine get(*, encoding=None, decoder=None) -Wait for and return pub/sub message from one of channels. -.sp -Return value is either: -.INDENT 7.0 -.IP \(bu 2 -tuple of two elements: channel & message; -.IP \(bu 2 -tuple of three elements: pattern channel, (target channel & message); -.IP \(bu 2 -or None in case Receiver is not active or has just been stopped. -.UNINDENT -.INDENT 7.0 -.TP -.B Raises -\fBaioredis.ChannelClosedError\fP \-\- If listener is stopped -and all messages have been received. -.UNINDENT -.UNINDENT -.INDENT 7.0 -.TP -.B is_active +.B property is_active Returns True if listener has any active subscription. .UNINDENT .INDENT 7.0 @@ -4494,7 +4315,7 @@ .UNINDENT .INDENT 7.0 .TP -.B patterns +.B property patterns Read\-only patterns dict. .UNINDENT .INDENT 7.0 @@ -4505,11 +4326,6 @@ All new messages after this call will be ignored, so you must call unsubscribe before stopping this listener. 
.UNINDENT -.INDENT 7.0 -.TP -.B coroutine wait_message() -Blocks until new message appear. -.UNINDENT .UNINDENT .INDENT 0.0 .TP @@ -4535,12 +4351,12 @@ .ft C import aioredis -sentinel = await aioredis\&.create_sentinel( +sentinel = await aioredis.create_sentinel( [(\(aqsentinel.host1\(aq, 26379), (\(aqsentinel.host2\(aq, 26379)]) -redis = sentinel\&.master_for(\(aqmymaster\(aq) -assert await redis\&.set(\(aqkey\(aq, \(aqvalue\(aq) -assert await redis\&.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) == \(aqvalue\(aq +redis = sentinel.master_for(\(aqmymaster\(aq) +assert await redis.set(\(aqkey\(aq, \(aqvalue\(aq) +assert await redis.get(\(aqkey\(aq, encoding=\(aqutf\-8\(aq) == \(aqvalue\(aq # redis client will reconnect/reconfigure automatically # by sentinel client instance @@ -4551,8 +4367,11 @@ .SS \fBRedisSentinel\fP .INDENT 0.0 .TP -.B coroutine aioredis.sentinel.create_sentinel(sentinels, *, db=None, password=None, encoding=None, minsize=1, maxsize=10, ssl=None, parser=None, loop=None) +.B coroutine aioredis.sentinel.create_sentinel(sentinels, *, db=None, password=None, encoding=None, minsize=1, maxsize=10, ssl=None, parser=None) Creates Redis Sentinel client. +.sp +Deprecated since version v1.3.1: \fBloop\fP argument deprecated for Python 3.8 compatibility. + .INDENT 7.0 .TP .B Parameters @@ -4579,9 +4398,6 @@ .IP \(bu 2 \fBparser\fP (\fIcallable\fP\fI or \fP\fI\%None\fP) \-\- Protocol parser class. Can be used to set custom protocol reader; expected same interface as \fBhiredis.Reader\fP\&. -.IP \(bu 2 -\fBloop\fP (\fI\%EventLoop\fP) \-\- An optional \fIevent loop\fP instance -(uses \fI\%asyncio.get_event_loop()\fP if not specified). .UNINDENT .TP .B Return type @@ -4897,76 +4713,6 @@ (see for more). .sp Every example is a correct python program that can be executed. 
-.SS Low\-level connection usage example -.sp -\fBget source code\fP -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -import asyncio -import aioredis - - -async def main(): - conn = await aioredis.create_connection( - \(aqredis://localhost\(aq, encoding=\(aqutf\-8\(aq) - - ok = await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqsome value\(aq) - assert ok == \(aqOK\(aq, ok - - str_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) - raw_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq, encoding=None) - assert str_value == \(aqsome value\(aq - assert raw_value == b\(aqsome value\(aq - - print(\(aqstr value:\(aq, str_value) - print(\(aqraw value:\(aq, raw_value) - - # optionally close connection - conn.close() - await conn.wait_closed() - - -if __name__ == \(aq__main__\(aq: - asyncio.get_event_loop().run_until_complete(main()) - -.ft P -.fi -.UNINDENT -.UNINDENT -.SS Connections pool example -.sp -\fBget source code\fP -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -import asyncio -import aioredis - - -async def main(): - pool = await aioredis.create_pool( - \(aqredis://localhost\(aq, - minsize=5, maxsize=10) - with await pool as conn: # low\-level redis connection - await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqvalue\(aq) - val = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) - print(\(aqraw value:\(aq, val) - pool.close() - await pool.wait_closed() # closing all open connections - - -if __name__ == \(aq__main__\(aq: - asyncio.get_event_loop().run_until_complete(main()) - -.ft P -.fi -.UNINDENT -.UNINDENT .SS Commands example .sp \fBget source code\fP @@ -5006,8 +4752,8 @@ if __name__ == \(aq__main__\(aq: - asyncio.get_event_loop().run_until_complete(main()) - asyncio.get_event_loop().run_until_complete(redis_pool()) + asyncio.run(main()) + asyncio.run(redis_pool()) .ft P .fi @@ -5042,7 +4788,7 @@ if __name__ == \(aq__main__\(aq: - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) .ft P .fi @@ -5086,7 +4832,7 @@ if __name__ == 
\(aq__main__\(aq: - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) .ft P .fi @@ -5122,7 +4868,7 @@ if __name__ == \(aq__main__\(aq: import os if \(aqredis_version:2.6\(aq not in os.environ.get(\(aqREDIS_VERSION\(aq, \(aq\(aq): - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) .ft P .fi @@ -5154,7 +4900,77 @@ if __name__ == \(aq__main__\(aq: - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) + +.ft P +.fi +.UNINDENT +.UNINDENT +.SS Low\-level connection usage example +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + conn = await aioredis.create_connection( + \(aqredis://localhost\(aq, encoding=\(aqutf\-8\(aq) + + ok = await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqsome value\(aq) + assert ok == \(aqOK\(aq, ok + + str_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) + raw_value = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq, encoding=None) + assert str_value == \(aqsome value\(aq + assert raw_value == b\(aqsome value\(aq + + print(\(aqstr value:\(aq, str_value) + print(\(aqraw value:\(aq, raw_value) + + # optionally close connection + conn.close() + await conn.wait_closed() + + +if __name__ == \(aq__main__\(aq: + asyncio.run(main()) + +.ft P +.fi +.UNINDENT +.UNINDENT +.SS Connections pool example +.sp +\fBget source code\fP +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +import asyncio +import aioredis + + +async def main(): + pool = await aioredis.create_pool( + \(aqredis://localhost\(aq, + minsize=5, maxsize=10) + with await pool as conn: # low\-level redis connection + await conn.execute(\(aqset\(aq, \(aqmy\-key\(aq, \(aqvalue\(aq) + val = await conn.execute(\(aqget\(aq, \(aqmy\-key\(aq) + print(\(aqraw value:\(aq, val) + pool.close() + await pool.wait_closed() # closing all open connections + + +if __name__ == \(aq__main__\(aq: + asyncio.run(main()) .ft P .fi @@ -5199,6 +5015,59 @@ \fBflake8\fP 
for code linting; .IP \(bu 2 and few other packages. +.UNINDENT +.sp +Make sure you have provided a \fBtowncrier\fP note. +Just add short description running following commands: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +$ echo "Short description" > CHANGES/filename.type +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +This will create new file in \fBCHANGES\fP directory. +Filename should consist of the ticket ID or other unique identifier. +Five default types are: +.INDENT 0.0 +.IP \(bu 2 +\&.feature \- signifying new feature +.IP \(bu 2 +\&.bugfix \- signifying a bug fix +.IP \(bu 2 +\&.doc \- documentation improvement +.IP \(bu 2 +\&.removal \- deprecation or removal of public API +.IP \(bu 2 +\&.misc \- a ticket has been closed, but not in interest of users +.UNINDENT +.sp +You can check if everything is correct by typing: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +$ towncrier \-\-draft +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +To produce the news file: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +$ towncrier +.ft P +.fi +.UNINDENT .UNINDENT .SS Code style .sp @@ -5226,12 +5095,15 @@ # will run tests in a verbose mode $ make test # or -$ py.test +$ pytest + +# or with particular Redis server +$ pytest \-\-redis\-server=/usr/local/bin/redis\-server tests/errors_test.py # will run tests with coverage report $ make cov # or -$ py.test \-\-cov +$ pytest \-\-cov .ft P .fi .UNINDENT @@ -5267,7 +5139,7 @@ .sp .nf .ft C -$ py.test \-\-redis\-server=/path/to/custom/redis\-server +$ pytest \-\-redis\-server=/path/to/custom/redis\-server .ft P .fi .UNINDENT @@ -5281,7 +5153,7 @@ .nf .ft C $ pip install uvloop -$ py.test \-\-uvloop +$ pytest \-\-uvloop .ft P .fi .UNINDENT @@ -5299,20 +5171,6 @@ \fBaioredis\fP uses pytest tool. .sp Tests are located under \fB/tests\fP directory. 
-.sp -Pure Python 3.5 tests (ie the ones using \fBasync\fP/\fBawait\fP syntax) must be -prefixed with \fBpy35_\fP, for instance see: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -tests/py35_generic_commands_tests.py -tests/py35_pool_test.py -.ft P -.fi -.UNINDENT -.UNINDENT .SS Fixtures .sp There is a number of fixtures that can be used to write tests: @@ -5440,12 +5298,14 @@ \fI\%tuple\fP .UNINDENT .UNINDENT -.SS Helpers -.sp -\fBaioredis\fP also updates pytest\(aqs namespace with several helpers. -.INDENT 0.0 -.TP -.B pytest.redis_version(*version, reason) +.SS \fBredis_version\fP tests helper +.sp +In \fBtests\fP directory there is a \fB_testutils\fP module with a simple +helper \-\-\- \fBredis_version()\fP \-\-\- a function that add a pytest mark to a test +allowing to run it with requested Redis server versions. +.INDENT 0.0 +.TP +.B _testutils.redis_version(*version, reason) Marks test with minimum redis version to run. .sp Example: @@ -5454,7 +5314,9 @@ .sp .nf .ft C -@pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") +from _testutil import redis_version + +@redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") def test_hstrlen(redis): pass .ft P @@ -5462,39 +5324,457 @@ .UNINDENT .UNINDENT .UNINDENT -.INDENT 0.0 -.TP -.B pytest.logs(logger, level=None) -Adopted version of \fI\%unittest.TestCase.assertEqual()\fP, -see it for details. -.sp -Example: -.INDENT 7.0 -.INDENT 3.5 -.sp -.nf -.ft C -def test_logs(create_connection, server): - with pytest.logs(\(aqaioredis\(aq, \(aqDEBUG\(aq) as cm: - conn yield from create_connection(server.tcp_address) - assert cm.output[0].startswith( - \(aqDEBUG:aioredis:Creating tcp connection\(aq) -.ft P -.fi -.UNINDENT -.UNINDENT -.UNINDENT -.INDENT 0.0 -.TP -.B pytest.assert_almost_equal(first, second, places=None, msg=None, delta=None) -Adopted version of \fI\%unittest.TestCase.assertAlmostEqual()\fP\&. 
-.UNINDENT -.INDENT 0.0 -.TP -.B pytest.raises_regex(exc_type, message) -Adopted version of \fI\%unittest.TestCase.assertRaisesRegex()\fP\&. -.UNINDENT +.SH MIGRATING FROM V0.3 TO V1.0 +.SS API changes and backward incompatible changes: +.INDENT 0.0 +.IP \(bu 2 +\fI\%aioredis.create_pool\fP +.IP \(bu 2 +\fI\%aioredis.create_reconnecting_redis\fP +.IP \(bu 2 +\fI\%aioredis.Redis\fP +.IP \(bu 2 +\fI\%Blocking operations and connection sharing\fP +.IP \(bu 2 +\fI\%Sorted set commands return values\fP +.IP \(bu 2 +\fI\%Hash hscan command now returns list of tuples\fP +.UNINDENT + +.sp +.ce +---- + +.ce 0 +.sp +.SS aioredis.create_pool +.sp +\fBcreate_pool()\fP now returns \fBConnectionsPool\fP +instead of \fBRedisPool\fP\&. +.sp +This means that pool now operates with \fBRedisConnection\fP +objects and not \fBRedis\fP\&. +.TS +center; +|l|l|. +_ +T{ +v0.3 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) + +with await pool as redis: + # calling methods of Redis class + await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +T{ +v1.0 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) + +with await pool as conn: + # calling conn.lpush will raise AttributeError exception + await conn.execute(\(aqlpush\(aq, \(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +.TE +.SS aioredis.create_reconnecting_redis +.sp +\fBcreate_reconnecting_redis()\fP has been dropped. +.sp +\fBcreate_redis_pool()\fP can be used instead of former function. +.TS +center; +|l|l|. 
+_ +T{ +v0.3 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_reconnecting_redis( + (\(aqlocalhost\(aq, 6379)) + +await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +T{ +v1.0 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis_pool( + (\(aqlocalhost\(aq, 6379)) + +await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +.TE +.sp +\fBcreate_redis_pool\fP returns \fBRedis\fP initialized with +\fBConnectionsPool\fP which is responsible for reconnecting to server. +.sp +Also \fBcreate_reconnecting_redis\fP was patching the \fBRedisConnection\fP and +breaking \fBclosed\fP property (it was always \fBTrue\fP). +.SS aioredis.Redis +.sp +\fBRedis\fP class now operates with objects implementing +\fBaioredis.abc.AbcConnection\fP interface. +\fBRedisConnection\fP and \fBConnectionsPool\fP are +both implementing \fBAbcConnection\fP so it is become possible to use same API +when working with either single connection or connections pool. +.TS +center; +|l|l|. 
+_ +T{ +v0.3 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) +await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) + +pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) +redis = await pool.acquire() # get Redis object +await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +T{ +v1.0 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) +await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) + +redis = await aioredis.create_redis_pool((\(aqlocalhost\(aq, 6379)) +await redis.lpush(\(aqlist\-key\(aq, \(aqitem1\(aq, \(aqitem2\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +.TE +.SS Blocking operations and connection sharing +.sp +Current implementation of \fBConnectionsPool\fP by default \fBexecute +every command on random connection\fP\&. The \fIPros\fP of this is that it allowed +implementing \fBAbcConnection\fP interface and hide pool inside \fBRedis\fP class, +and also keep pipelining feature (like RedisConnection.execute). +The \fICons\fP of this is that \fBdifferent tasks may use same connection and block +it\fP with some long\-running command. +.sp +We can call it \fBShared Mode\fP \-\-\- commands are sent to random connections +in pool without need to lock [connection]: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis_pool( + (\(aqlocalhost\(aq, 6379), + minsize=1, + maxsize=1) + +async def task(): + # Shared mode + await redis.set(\(aqkey\(aq, \(aqval\(aq) + +asyncio.ensure_future(task()) +asyncio.ensure_future(task()) +# Both tasks will send commands through same connection +# without acquiring (locking) it first. 
+.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Blocking operations (like \fBblpop\fP, \fBbrpop\fP or long\-running LUA scripts) +in \fBshared mode\fP mode will block connection and thus may lead to whole +program malfunction. +.sp +This \fIblocking\fP issue can be easily solved by using exclusive connection +for such operations: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis_pool( + (\(aqlocalhost\(aq, 6379), + minsize=1, + maxsize=1) + +async def task(): + # Exclusive mode + with await redis as r: + await r.set(\(aqkey\(aq, \(aqval\(aq) +asyncio.ensure_future(task()) +asyncio.ensure_future(task()) +# Both tasks will first acquire connection. +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +We can call this \fBExclusive Mode\fP \-\-\- context manager is used to +acquire (lock) exclusive connection from pool and send all commands through it. +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +This technique is similar to v0.3 pool usage: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +# in aioredis v0.3 +pool = await aioredis.create_pool((\(aqlocalhost\(aq, 6379)) +with await pool as redis: + # Redis is bound to exclusive connection + redis.set(\(aqkey\(aq, \(aqval\(aq) +.ft P +.fi +.UNINDENT +.UNINDENT +.UNINDENT +.UNINDENT +.SS Sorted set commands return values +.sp +Sorted set commands (like \fBzrange\fP, \fBzrevrange\fP and others) that accept +\fBwithscores\fP argument now \fBreturn list of tuples\fP instead of plain list. +.TS +center; +|l|l|. 
+_ +T{ +v0.3 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) +await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) +res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) +assert res == [b\(aqone\(aq, 1, b\(aqtwo\(aq, 2] + +# not an easy way to make a dict +it = iter(res) +assert dict(zip(it, it)) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +T{ +v1.0 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) +await redis.zadd(\(aqzset\-key\(aq, 1, \(aqone\(aq, 2, \(aqtwo\(aq) +res = await redis.zrange(\(aqzset\-key\(aq, withscores=True) +assert res == [(b\(aqone\(aq, 1), (b\(aqtwo\(aq, 2)] + +# now its easier to make a dict of it +assert dict(res) == {b\(aqone\(aq: 1, b\(aqtwo\(aq: 2} +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +.TE +.SS Hash \fBhscan\fP command now returns list of tuples +.sp +\fBhscan\fP updated to return a list of tuples instead of plain +mixed key/value list. +.TS +center; +|l|l|. 
+_ +T{ +v0.3 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) +await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) +cur, data = await redis.hscan(\(aqhash\(aq) +assert data == [b\(aqone\(aq, b\(aq1\(aq, b\(aqtwo\(aq, b\(aq2\(aq] + +# not an easy way to make a dict +it = iter(data) +assert dict(zip(it, it)) == {b\(aqone\(aq: b\(aq1\(aq, b\(aqtwo\(aq: b\(aq2\(aq} +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +T{ +v1.0 +T} T{ +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +redis = await aioredis.create_redis((\(aqlocalhost\(aq, 6379)) +await redis.hmset(\(aqhash\(aq, \(aqone\(aq, 1, \(aqtwo\(aq, 2) +cur, data = await redis.hscan(\(aqhash\(aq) +assert data == [(b\(aqone\(aq, b\(aq1\(aq), (b\(aqtwo\(aq, b\(aq2\(aq)] + +# now its easier to make a dict of it +assert dict(data) == {b\(aqone\(aq: b\(aq1\(aq: b\(aqtwo\(aq: b\(aq2\(aq} +.ft P +.fi +.UNINDENT +.UNINDENT +T} +_ +.TE .SH RELEASES +.SS 1.3.1 (2019\-12\-02) +.SS Bugfixes +.INDENT 0.0 +.IP \(bu 2 +Fix transaction data decoding +(see \fI\%#657\fP); +.IP \(bu 2 +Fix duplicate calls to \fBpool.wait_closed()\fP upon \fBcreate_pool()\fP exception. +(see \fI\%#671\fP); +.UNINDENT +.SS Deprecations and Removals +.INDENT 0.0 +.IP \(bu 2 +Drop explicit loop requirement in API. +Deprecate \fBloop\fP argument. +Throw warning in Python 3.8+ if explicit \fBloop\fP is passed to methods. 
+(see \fI\%#666\fP); +.UNINDENT +.SS Misc +.INDENT 0.0 +.IP \(bu 2 +\fI\%#643\fP, +\fI\%#646\fP, +\fI\%#648\fP; +.UNINDENT +.SS 1.3.0 (2019\-09\-24) +.SS Features +.INDENT 0.0 +.IP \(bu 2 +Added \fBxdel\fP and \fBxtrim\fP method which missed in \fBcommands/streams.py\fP & also added unit test code for them +(see \fI\%#438\fP); +.IP \(bu 2 +Add \fBcount\fP argument to \fBspop\fP command +(see \fI\%#485\fP); +.IP \(bu 2 +Add support for \fBzpopmax\fP and \fBzpopmin\fP redis commands +(see \fI\%#550\fP); +.IP \(bu 2 +Add \fBtowncrier\fP: change notes are now stored in \fBCHANGES.txt\fP +(see \fI\%#576\fP); +.IP \(bu 2 +Type hints for the library +(see \fI\%#584\fP); +.IP \(bu 2 +A few additions to the sorted set commands: +.INDENT 2.0 +.IP \(bu 2 +the blocking pop commands: \fBBZPOPMAX\fP and \fBBZPOPMIN\fP +.IP \(bu 2 +the \fBCH\fP and \fBINCR\fP options of the \fBZADD\fP command +.UNINDENT +.sp +(see \fI\%#618\fP); +.IP \(bu 2 +Added \fBno_ack\fP parameter to \fBxread_group\fP streams method in \fBcommands/streams.py\fP +(see \fI\%#625\fP); +.UNINDENT +.SS Bugfixes +.INDENT 0.0 +.IP \(bu 2 +Fix for sensitive logging +(see \fI\%#459\fP); +.IP \(bu 2 +Fix slow memory leak in \fBwait_closed\fP implementation +(see \fI\%#498\fP); +.IP \(bu 2 +Fix handling of instances were Redis returns null fields for a stream message +(see \fI\%#605\fP); +.UNINDENT +.SS Improved Documentation +.INDENT 0.0 +.IP \(bu 2 +Rewrite "Getting started" documentation. 
+(see \fI\%#641\fP); +.UNINDENT +.SS Misc +.INDENT 0.0 +.IP \(bu 2 +\fI\%#585\fP, +\fI\%#611\fP, +\fI\%#612\fP, +\fI\%#619\fP, +\fI\%#620\fP, +\fI\%#642\fP; +.UNINDENT .SS 1.2.0 (2018\-10\-24) .sp \fBNEW\fP: @@ -5858,7 +6138,7 @@ Fixed cancellation of wait_closed (see \fI\%#118\fP); .IP \(bu 2 -Fixed \fBtime()\fP convertion to float +Fixed \fBtime()\fP conversion to float (see \fI\%#126\fP); .IP \(bu 2 Fixed \fBhmset()\fP method to return bool instead of \fBb\(aqOK\(aq\fP @@ -6156,6 +6436,6 @@ .SH AUTHOR Alexey Popravka .SH COPYRIGHT -2014-2018, Alexey Popravka +2014-2019, Alexey Popravka .\" Generated by docutils manpage writer. . diff --git a/docs/api_reference.rst b/docs/api_reference.rst index 44229be..fb7da4a 100644 --- a/docs/api_reference.rst +++ b/docs/api_reference.rst @@ -42,8 +42,8 @@ .. cofunction:: create_connection(address, \*, db=0, password=None, ssl=None,\ - encoding=None, parser=None, loop=None,\ - timeout=None) + encoding=None, parser=None,\ + timeout=None, connection_cls=None) Creates Redis connection. @@ -52,6 +52,9 @@ .. versionchanged:: v1.0 ``parser`` argument added. + + .. deprecated:: v1.3.1 + ``loop`` argument deprecated for Python 3.8 compatibility. :param address: An address where to connect. Can be one of the following: @@ -80,15 +83,14 @@ :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None - - :param loop: An optional *event loop* instance - (uses :func:`asyncio.get_event_loop` if not specified). - :type loop: :ref:`EventLoop` :param timeout: Max time to open a connection, otherwise raise :exc:`asyncio.TimeoutError` exception. ``None`` by default :type timeout: float greater than 0 or None + + :param connection_cls: Custom connection class. ``None`` by default. + :type connection_cls: :class:`abc.AbcConnection` or None :return: :class:`RedisConnection` instance. 
@@ -171,7 +173,7 @@ Method also accept :class:`aioredis.Channel` instances as command arguments:: - >>> ch1 = Channel('A', is_pattern=False, loop=loop) + >>> ch1 = Channel('A', is_pattern=False) >>> await conn.execute_pubsub('subscribe', ch1) [[b'subscribe', b'A', 1]] @@ -252,7 +254,7 @@ .. function:: create_pool(address, \*, db=0, password=None, ssl=None, \ encoding=None, minsize=1, maxsize=10, \ - parser=None, loop=None, \ + parser=None, \ create_connection_timeout=None, \ pool_cls=None, connection_cls=None) @@ -276,6 +278,9 @@ .. versionadded:: v1.0 ``parser``, ``pool_cls`` and ``connection_cls`` arguments added. + + .. deprecated:: v1.3.1 + ``loop`` argument deprecated for Python 3.8 compatibility. :param address: An address where to connect. Can be one of the following: @@ -310,10 +315,6 @@ :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None - - :param loop: An optional *event loop* instance - (uses :func:`asyncio.get_event_loop` if not specified). - :type loop: :ref:`EventLoop` :param create_connection_timeout: Max time to open a connection, otherwise raise an :exc:`asyncio.TimeoutError`. ``None`` by default. @@ -449,83 +450,6 @@ Wait until pool gets closed (when all connections are closed). .. versionadded:: v0.2.8 - - ----- - -.. _aioredis-channel: - -Pub/Sub Channel object ----------------------- - -`Channel` object is a wrapper around queue for storing received pub/sub messages. - - -.. class:: Channel(name, is_pattern, loop=None) - - Bases: :class:`abc.AbcChannel` - - Object representing Pub/Sub messages queue. - It's basically a wrapper around :class:`asyncio.Queue`. - - .. attribute:: name - - Holds encoded channel/pattern name. - - .. attribute:: is_pattern - - Set to True for pattern channels. - - .. attribute:: is_active - - Set to True if there are messages in queue and connection is still - subscribed to this channel. - - .. 
comethod:: get(\*, encoding=None, decoder=None) - - Coroutine that waits for and returns a message. - - Return value is message received or ``None`` signifying that channel has - been unsubscribed and no more messages will be received. - - :param str encoding: If not None used to decode resulting bytes message. - - :param callable decoder: If specified used to decode message, - ex. :func:`json.loads()` - - :raise aioredis.ChannelClosedError: If channel is unsubscribed and - has no more messages. - - .. method:: get_json(\*, encoding="utf-8") - - Shortcut to ``get(encoding="utf-8", decoder=json.loads)`` - - .. comethod:: wait_message() - - Waits for message to become available in channel - or channel is closed (unsubscribed). - - Main idea is to use it in loops: - - >>> ch = redis.channels['channel:1'] - >>> while await ch.wait_message(): - ... msg = await ch.get() - - :rtype: bool - - .. comethod:: iter(, \*, encoding=None, decoder=None) - :async-for: - :coroutine: - - Same as :meth:`~.get` method but it is a native coroutine. - - Usage example:: - - >>> async for msg in ch.iter(): - ... print(msg) - - .. versionadded:: 0.2.5 - Available for Python 3.5 only ---- @@ -670,6 +594,84 @@ MasterReplyError SlaveReplyError + +---- + +.. _aioredis-channel: + +Pub/Sub Channel object +---------------------- + +`Channel` object is a wrapper around queue for storing received pub/sub messages. + + +.. class:: Channel(name, is_pattern) + + Bases: :class:`abc.AbcChannel` + + Object representing Pub/Sub messages queue. + It's basically a wrapper around :class:`asyncio.Queue`. + + .. attribute:: name + + Holds encoded channel/pattern name. + + .. attribute:: is_pattern + + Set to True for pattern channels. + + .. attribute:: is_active + + Set to True if there are messages in queue and connection is still + subscribed to this channel. + + .. comethod:: get(\*, encoding=None, decoder=None) + + Coroutine that waits for and returns a message. 
+ + Return value is message received or ``None`` signifying that channel has + been unsubscribed and no more messages will be received. + + :param str encoding: If not None used to decode resulting bytes message. + + :param callable decoder: If specified used to decode message, + ex. :func:`json.loads()` + + :raise aioredis.ChannelClosedError: If channel is unsubscribed and + has no more messages. + + .. method:: get_json(\*, encoding="utf-8") + + Shortcut to ``get(encoding="utf-8", decoder=json.loads)`` + + .. comethod:: wait_message() + + Waits for message to become available in channel + or channel is closed (unsubscribed). + + Main idea is to use it in loops: + + >>> ch = redis.channels['channel:1'] + >>> while await ch.wait_message(): + ... msg = await ch.get() + + :rtype: bool + + .. comethod:: iter(, \*, encoding=None, decoder=None) + :async-for: + :coroutine: + + Same as :meth:`~.get` method but it is a native coroutine. + + Usage example:: + + >>> async for msg in ch.iter(): + ... print(msg) + + .. versionadded:: 0.2.5 + Available for Python 3.5 only + + ---- .. _aioredis-redis: @@ -712,7 +714,7 @@ .. cofunction:: create_redis(address, \*, db=0, password=None, ssl=None,\ encoding=None, commands_factory=Redis,\ parser=None, timeout=None,\ - connection_cls=None, loop=None) + connection_cls=None) This :ref:`coroutine` creates high-level Redis interface instance bound to single Redis connection @@ -720,6 +722,9 @@ .. versionadded:: v1.0 ``parser``, ``timeout`` and ``connection_cls`` arguments added. + + .. deprecated:: v1.3.1 + ``loop`` argument deprecated for Python 3.8 compatibility. See also :class:`~aioredis.RedisConnection` for parameters description. @@ -760,10 +765,6 @@ :class:`~aioredis.abc.AbcConnection`. :type connection_cls: aioredis.abc.AbcConnection - :param loop: An optional *event loop* instance - (uses :func:`asyncio.get_event_loop` if not specified). 
- :type loop: :ref:`EventLoop` - - :returns: Redis client (result of ``commands_factory`` call), :class:`Redis` by default. @@ -773,7 +774,7 @@ minsize=1, maxsize=10,\ parser=None, timeout=None,\ pool_cls=None, connection_cls=None,\ - loop=None) + ) This :ref:`coroutine` create high-level Redis client instance bound to connections pool (this allows auto-reconnect and simple pub/sub @@ -784,6 +785,9 @@ .. versionchanged:: v1.0 ``parser``, ``timeout``, ``pool_cls`` and ``connection_cls`` arguments added. + + .. deprecated:: v1.3.1 + ``loop`` argument deprecated for Python 3.8 compatibility. :param address: An address where to connect. Can be a (host, port) tuple, unix domain socket path string or a Redis URI string. @@ -831,9 +835,5 @@ :class:`~aioredis.abc.AbcConnection`. :type connection_cls: aioredis.abc.AbcConnection - :param loop: An optional *event loop* instance - (uses :func:`asyncio.get_event_loop` if not specified). - :type loop: :ref:`EventLoop` - :returns: Redis client (result of ``commands_factory`` call), :class:`Redis` by default. diff --git a/docs/devel.rst b/docs/devel.rst index cfbe255..7f1bdbe 100644 --- a/docs/devel.rst +++ b/docs/devel.rst @@ -23,6 +23,29 @@ * ``flake8`` for code linting; * and few other packages. +Make sure you have provided a ``towncrier`` note. +Just add a short description by running the following commands:: + + $ echo "Short description" > CHANGES/filename.type + +This will create a new file in the ``CHANGES`` directory. +Filename should consist of the ticket ID or other unique identifier. 
+Five default types are: + +* .feature - signifying new feature +* .bugfix - signifying a bug fix +* .doc - documentation improvement +* .removal - deprecation or removal of public API +* .misc - a ticket has been closed, but not in interest of users + +You can check if everything is correct by typing:: + + $ towncrier --draft + +To produce the news file:: + + $ towncrier + Code style ---------- @@ -41,13 +64,15 @@ # will run tests in a verbose mode $ make test # or - $ py.test + $ pytest + + # or with particular Redis server + $ pytest --redis-server=/usr/local/bin/redis-server tests/errors_test.py # will run tests with coverage report $ make cov # or - $ py.test --cov - + $ pytest --cov SSL tests ~~~~~~~~~ @@ -69,7 +94,7 @@ To run tests against different redises use ``--redis-server`` command line option:: - $ py.test --redis-server=/path/to/custom/redis-server + $ pytest --redis-server=/path/to/custom/redis-server UVLoop ~~~~~~ @@ -77,7 +102,7 @@ To run tests with :term:`uvloop`:: $ pip install uvloop - $ py.test --uvloop + $ pytest --uvloop .. note:: Until Python 3.5.2 EventLoop has no ``create_future`` method so aioredis won't benefit from uvloop's futures. @@ -89,12 +114,6 @@ :mod:`aioredis` uses :term:`pytest` tool. Tests are located under ``/tests`` directory. - -Pure Python 3.5 tests (ie the ones using ``async``/``await`` syntax) must be -prefixed with ``py35_``, for instance see:: - - tests/py35_generic_commands_tests.py - tests/py35_pool_test.py Fixtures @@ -187,12 +206,14 @@ :rtype: tuple -Helpers -~~~~~~~ - -:mod:`aioredis` also updates :term:`pytest`'s namespace with several helpers. - -.. function:: pytest.redis_version(\*version, reason) +``redis_version`` tests helper +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In ``tests`` directory there is a :mod:`_testutils` module with a simple +helper --- :func:`redis_version` --- a function that adds a pytest mark to a test +allowing to run it with requested Redis server versions. + +.. 
function:: _testutils.redis_version(\*version, reason) Marks test with minimum redis version to run. @@ -200,33 +221,8 @@ .. code-block:: python - @pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") + from _testutils import redis_version + + @redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") def test_hstrlen(redis): pass - - -.. function:: pytest.logs(logger, level=None) - - Adopted version of :meth:`unittest.TestCase.assertEqual`, - see it for details. - - Example: - - .. code-block:: python - - def test_logs(create_connection, server): - with pytest.logs('aioredis', 'DEBUG') as cm: - conn yield from create_connection(server.tcp_address) - assert cm.output[0].startswith( - 'DEBUG:aioredis:Creating tcp connection') - - -.. function:: pytest.assert_almost_equal(first, second, places=None, \ - msg=None, delta=None) - - Adopted version of :meth:`unittest.TestCase.assertAlmostEqual`. - - -.. function:: pytest.raises_regex(exc_type, message) - - Adopted version of :meth:`unittest.TestCase.assertRaisesRegex`. diff --git a/docs/examples.rst b/docs/examples.rst index 34c2312..af77f07 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -6,23 +6,6 @@ (see for more). Every example is a correct python program that can be executed. - -.. _aioredis-examples-simple: - -Low-level connection usage example -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:download:`get source code<../examples/connection.py>` - -.. literalinclude:: ../examples/connection.py - - -Connections pool example -~~~~~~~~~~~~~~~~~~~~~~~~ - -:download:`get source code<../examples/pool.py>` - -.. literalinclude:: ../examples/pool.py Commands example @@ -63,3 +46,20 @@ :download:`get source code<../examples/sentinel.py>` .. literalinclude:: ../examples/sentinel.py + +.. _aioredis-examples-simple: + +Low-level connection usage example +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:download:`get source code<../examples/connection.py>` + +.. 
literalinclude:: ../examples/connection.py + + +Connections pool example +~~~~~~~~~~~~~~~~~~~~~~~~ + +:download:`get source code<../examples/pool.py>` + +.. literalinclude:: ../examples/pool.py diff --git a/docs/index.rst b/docs/index.rst index e046848..c9af42b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -22,19 +22,16 @@ Connections Pool Yes Pipelining support Yes Pub/Sub support Yes -Sentinel support Yes [1]_ +Sentinel support Yes Redis Cluster support WIP Trollius (python 2.7) No -Tested CPython versions `3.5, 3.6 `_ [2]_ -Tested PyPy3 versions `5.9.0 `_ -Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 `_ +Tested CPython versions `3.5.3, 3.6, 3.7 `_ [1]_ +Tested PyPy3 versions `pypy3.5-7.0 pypy3.6-7.1.1 `_ +Tested for Redis server `2.6, 2.8, 3.0, 3.2, 4.0 5.0 `_ Support for dev Redis server through low-level API ================================ ============================== -.. [1] Sentinel support is available in master branch. - This feature is not yet stable and may have some issues. - -.. [2] For Python 3.3, 3.4 support use aioredis v0.3. +.. [1] For Python 3.3, 3.4 support use aioredis v0.3. Installation ------------ @@ -58,6 +55,8 @@ ---------- - Issue Tracker: https://github.com/aio-libs/aioredis/issues +- Google Group: https://groups.google.com/forum/#!forum/aio-libs +- Gitter: https://gitter.im/aio-libs/Lobby - Source Code: https://github.com/aio-libs/aioredis - Contributor's guide: :doc:`devel` @@ -78,7 +77,6 @@ :maxdepth: 3 start - migration api_reference mixins abc @@ -86,8 +84,13 @@ sentinel examples devel + migration releases glossary + +.. :: + todo insert after start + advanced Indices and tables ================== @@ -97,4 +100,4 @@ * :ref:`search` .. _MIT license: https://github.com/aio-libs/aioredis/blob/master/LICENSE -.. _travis: https://travis-ci.org/aio-libs/aioredis +.. 
_travis: https://travis-ci.com/aio-libs/aioredis diff --git a/docs/migration.rst b/docs/migration.rst index a1e3999..4d33ef4 100644 --- a/docs/migration.rst +++ b/docs/migration.rst @@ -182,10 +182,10 @@ | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.zadd('zset-key', 1, 'one', 2, 'two') | -| | res = await redis.zrage('zset-key', withscores=True) | +| | res = await redis.zrange('zset-key', withscores=True) | | | assert res == [b'one', 1, b'two', 2] | | | | -| | # not an esiest way to make a dict | +| | # not an easy way to make a dict | | | it = iter(res) | | | assert dict(zip(it, it)) == {b'one': 1, b'two': 2} | | | | @@ -195,7 +195,7 @@ | | | | | redis = await aioredis.create_redis(('localhost', 6379)) | | | await redis.zadd('zset-key', 1, 'one', 2, 'two') | -| | res = await redis.zrage('zset-key', withscores=True) | +| | res = await redis.zrange('zset-key', withscores=True) | | | assert res == [(b'one', 1), (b'two', 2)] | | | | | | # now its easier to make a dict of it | @@ -219,7 +219,7 @@ | | cur, data = await redis.hscan('hash') | | | assert data == [b'one', b'1', b'two', b'2'] | | | | -| | # not an esiest way to make a dict | +| | # not an easy way to make a dict | | | it = iter(data) | | | assert dict(zip(it, it)) == {b'one': b'1', b'two': b'2'} | | | | diff --git a/docs/mixins.rst b/docs/mixins.rst index d3a81b3..8c239d3 100644 --- a/docs/mixins.rst +++ b/docs/mixins.rst @@ -119,8 +119,7 @@ .. autoclass:: TransactionsCommandsMixin :members: -.. class:: Pipeline(connection, commands_factory=lambda conn: conn, \*,\ - loop=None) +.. class:: Pipeline(connection, commands_factory=lambda conn: conn) Commands pipeline. @@ -129,14 +128,13 @@ This class implements `__getattr__` method allowing to call methods on instance created with ``commands_factory``. + .. deprecated:: v1.3.1 + ``loop`` argument deprecated for Python 3.8 compatibility. 
+ :param connection: Redis connection :type connection: aioredis.RedisConnection :param callable commands_factory: Commands factory to get methods from. - - :param loop: An optional *event loop* instance - (uses :func:`asyncio.get_event_loop` if not specified). - :type loop: :ref:`EventLoop` .. comethod:: execute(\*, return_exceptions=False) @@ -154,14 +152,16 @@ :raise aioredis.PipelineError: Raised when any command caused error. -.. class:: MultiExec(connection, commands_factory=lambda conn: conn, \*,\ - loop=None) +.. class:: MultiExec(connection, commands_factory=lambda conn: conn) Bases: :class:`~Pipeline`. Multi/Exec pipeline wrapper. See :class:`~Pipeline` for parameters description. + + .. deprecated:: v1.3.1 + ``loop`` argument deprecated for Python 3.8 compatibility. .. comethod:: execute(\*, return_exceptions=False) diff --git a/docs/sentinel.rst b/docs/sentinel.rst index fa519a8..6f817ef 100644 --- a/docs/sentinel.rst +++ b/docs/sentinel.rst @@ -28,9 +28,12 @@ .. corofunction:: create_sentinel(sentinels, \*, db=None, password=None,\ encoding=None, minsize=1, maxsize=10,\ ssl=None, parser=None,\ - loop=None) + ) Creates Redis Sentinel client. + + .. deprecated:: v1.3.1 + ``loop`` argument deprecated for Python 3.8 compatibility. :param sentinels: A list of Sentinel node addresses. :type sentinels: list[tuple] @@ -58,10 +61,6 @@ :param parser: Protocol parser class. Can be used to set custom protocol reader; expected same interface as :class:`hiredis.Reader`. :type parser: callable or None - - :param loop: An optional *event loop* instance - (uses :func:`asyncio.get_event_loop` if not specified). - :type loop: :ref:`EventLoop` :rtype: RedisSentinel diff --git a/docs/start.rst b/docs/start.rst index 904149c..9358526 100644 --- a/docs/start.rst +++ b/docs/start.rst @@ -4,66 +4,156 @@ Getting started =============== - -Commands Pipelining -------------------- - -Commands pipelining is built-in. 
- -Every command is sent to transport at-once -(ofcourse if no ``TypeError``/``ValueError`` was raised) - -When you making a call with ``await`` / ``yield from`` you will be waiting result, -and then gather results. - -Simple example show both cases (:download:`get source code<../examples/pipeline.py>`): - -.. literalinclude:: ../examples/pipeline.py - :language: python3 - :lines: 9-21 - :dedent: 4 +Installation +------------ + +.. code-block:: bash + + $ pip install aioredis + +This will install aioredis along with its dependencies: + +* hiredis protocol parser; + +* async-timeout --- used in Sentinel client. + +Without dependencies +~~~~~~~~~~~~~~~~~~~~ + +In some cases [1]_ you might need to install :mod:`aioredis` without ``hiredis``, +it is achievable with the following command: + +.. code-block:: bash + + $ pip install --no-deps aioredis async-timeout + +Installing latest version from Git +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + $ pip install git+https://github.com/aio-libs/aioredis@master#egg=aioredis + +Connecting +---------- + +:download:`get source code<../examples/getting_started/00_connect.py>` + +.. literalinclude:: ../examples/getting_started/00_connect.py + :language: python3 + +:func:`aioredis.create_redis_pool` creates a Redis client backed by a pool of +connections. The only required argument is the address of Redis server. +Redis server address can be either host and port tuple +(ex: ``('localhost', 6379)``), or a string which will be parsed into +TCP or UNIX socket address (ex: ``'unix://var/run/redis.sock'``, +``'//var/run/redis.sock'``, ``redis://redis-host-or-ip:6379/1``). + +Closing the client. Calling ``redis.close()`` and then ``redis.wait_closed()`` +is strongly encouraged as these methods will shut down all open connections +and clean up resources. + +See the :doc:`commands reference ` for the full list of supported commands. 
+ +Connecting to specific DB +~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are several ways you can specify database index to select on connection: + +#. explicitly pass db index as ``db`` argument: + + .. code-block:: python + + redis = await aioredis.create_redis_pool( + 'redis://localhost', db=1) + +#. pass db index in URI as path component: + + .. code-block:: python + + redis = await aioredis.create_redis_pool( + 'redis://localhost/2') + + .. note:: + + DB index specified in URI will take precedence over + ``db`` keyword argument. + +#. call :meth:`~aioredis.Redis.select` method: + + .. code-block:: python + + redis = await aioredis.create_redis_pool( + 'redis://localhost/') + await redis.select(3) + + +Connecting to password-protected Redis instance +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The password can be specified either in keyword argument or in address URI: + +.. code-block:: python + + redis = await aioredis.create_redis_pool( + 'redis://localhost', password='sEcRet') + + redis = await aioredis.create_redis_pool( + 'redis://:sEcRet@localhost/') + + redis = await aioredis.create_redis_pool( + 'redis://localhost/?password=sEcRet') .. note:: - - For convenience :mod:`aioredis` provides - :meth:`~TransactionsCommandsMixin.pipeline` - method allowing to execute bulk of commands as one - (:download:`get source code<../examples/pipeline.py>`): - - .. literalinclude:: ../examples/pipeline.py - :language: python3 - :lines: 23-31 - :dedent: 4 + Password specified in URI will take precedence over password keyword. + + Also specifying both password as authentication component and + query parameter in URI is forbidden. + + .. code-block:: python + + # This will cause assertion error + await aioredis.create_redis_pool( + 'redis://:sEcRet@localhost/?password=SeCreT') + +Result messages decoding +------------------------ + +By default :mod:`aioredis` will return :class:`bytes` for most Redis +commands that return string replies. 
Redis error replies are known to be +valid UTF-8 strings so error messages are decoded automatically. + +If you know that data in Redis is valid string you can tell :mod:`aioredis` +to decode result by passing keyword-only argument ``encoding`` +in a command call: + +:download:`get source code<../examples/getting_started/01_decoding.py>` + +.. literalinclude:: ../examples/getting_started/01_decoding.py + :language: python3 + + +:mod:`aioredis` can decode messages for all Redis data types like +lists, hashes, sorted sets, etc: + +:download:`get source code<../examples/getting_started/02_decoding.py>` + +.. literalinclude:: ../examples/getting_started/02_decoding.py + :language: python3 Multi/Exec transactions ----------------------- -:mod:`aioredis` provides several ways for executing transactions: - -* when using raw connection you can issue ``Multi``/``Exec`` commands - manually; - -* when using :class:`aioredis.Redis` instance you can use - :meth:`~TransactionsCommandsMixin.multi_exec` transaction pipeline. +:download:`get source code<../examples/getting_started/03_multiexec.py>` + +.. literalinclude:: ../examples/getting_started/03_multiexec.py + :language: python3 :meth:`~TransactionsCommandsMixin.multi_exec` method creates and returns new :class:`~aioredis.commands.MultiExec` object which is used for buffering commands and then executing them inside MULTI/EXEC block. -Here is a simple example -(:download:`get source code<../examples/transaction2.py>`): - -.. literalinclude:: ../examples/transaction2.py - :language: python3 - :lines: 9-15 - :linenos: - :emphasize-lines: 5 - :dedent: 4 - -As you can notice ``await`` is **only** used at line 5 with ``tr.execute`` -and **not with** ``tr.set(...)`` calls. - .. warning:: It is very important not to ``await`` buffered command @@ -80,78 +170,53 @@ :mod:`aioredis` provides support for Redis Publish/Subscribe messaging. 
-To switch connection to subscribe mode you must execute ``subscribe`` command -by yield'ing from :meth:`~PubSubCommandsMixin.subscribe` it returns a list of -:class:`~aioredis.Channel` objects representing subscribed channels. - -As soon as connection is switched to subscribed mode the channel will receive -and store messages +To start listening for messages you must call either +:meth:`~PubSubCommandsMixin.subscribe` or +:meth:`~PubSubCommandsMixin.psubscribe` method. +Both methods return list of :class:`~aioredis.Channel` objects representing +subscribed channels. + +Right after that the channel will receive and store messages (the ``Channel`` object is basically a wrapper around :class:`asyncio.Queue`). To read messages from channel you need to use :meth:`~aioredis.Channel.get` or :meth:`~aioredis.Channel.get_json` coroutines. -.. note:: - In Pub/Sub mode redis connection can only receive messages or issue - (P)SUBSCRIBE / (P)UNSUBSCRIBE commands. - -Pub/Sub example (:download:`get source code<../examples/pubsub2.py>`): - -.. literalinclude:: ../examples/pubsub2.py - :language: python3 - :lines: 6-31 - :dedent: 4 - -.. .. warning:: - Using Pub/Sub mode with :class:`~aioredis.Pool` is possible but - only within ``with`` block or by explicitly ``acquiring/releasing`` - connection. See example below. - -Pub/Sub example (:download:`get source code<../examples/pool_pubsub.py>`): - -.. literalinclude:: ../examples/pool_pubsub.py - :language: python3 - :lines: 13-36 - :dedent: 4 - - -Python 3.5 ``async with`` / ``async for`` support -------------------------------------------------- - -:mod:`aioredis` is compatible with :pep:`492`. - -:class:`~aioredis.Pool` can be used with :ref:`async with` -(:download:`get source code<../examples/pool2.py>`): - -.. literalinclude:: ../examples/pool2.py - :language: python3 - :lines: 7-8,20-22 - :dedent: 4 - - -It also can be used with ``await``: - -.. 
literalinclude:: ../examples/pool2.py - :language: python3 - :lines: 7-8,26-30 - :dedent: 4 - - -New ``scan``-family commands added with support of :ref:`async for` -(:download:`get source code<../examples/iscan.py>`): - -.. literalinclude:: ../examples/iscan.py - :language: python3 - :lines: 7-9,29-31,34-36,39-41,44-45 - :dedent: 4 - - -SSL/TLS support +Example subscribing and reading channels: + +:download:`get source code<../examples/getting_started/04_pubsub.py>` + +.. literalinclude:: ../examples/getting_started/04_pubsub.py + :language: python3 + +Subscribing and reading patterns: + +:download:`get source code<../examples/getting_started/05_pubsub.py>` + +.. literalinclude:: ../examples/getting_started/05_pubsub.py + :language: python3 + +Sentinel client --------------- -Though Redis server `does not support data encryption `_ -it is still possible to setup Redis server behind SSL proxy. For such cases -:mod:`aioredis` library support secure connections through :mod:`asyncio` -SSL support. See `BaseEventLoop.create_connection`_ for details. - -.. _data_encryption: http://redis.io/topics/security#data-encryption-support -.. _BaseEventLoop.create_connection: https://docs.python.org/3/library/asyncio-eventloop.html#creating-connections +:download:`get source code<../examples/getting_started/06_sentinel.py>` + +.. literalinclude:: ../examples/getting_started/06_sentinel.py + :language: python3 + +Sentinel client requires a list of Redis Sentinel addresses to connect to +and start discovering services. + +Calling :meth:`~aioredis.sentinel.SentinelPool.master_for` or +:meth:`~aioredis.sentinel.SentinelPool.slave_for` methods will return +Redis clients connected to specified services monitored by Sentinel. + +Sentinel client will detect failover and reconnect Redis clients automatically. + +See detailed reference :doc:`here ` + +---- + +.. 
[1] + Celery hiredis issues + (`#197 `_, + `#317 `_) diff --git a/examples/commands.py b/examples/commands.py index 558ab9f..0312efe 100644 --- a/examples/commands.py +++ b/examples/commands.py @@ -29,5 +29,5 @@ if __name__ == '__main__': - asyncio.get_event_loop().run_until_complete(main()) - asyncio.get_event_loop().run_until_complete(redis_pool()) + asyncio.run(main()) + asyncio.run(redis_pool()) diff --git a/examples/connection.py b/examples/connection.py index 9266c30..c1be1cd 100644 --- a/examples/connection.py +++ b/examples/connection.py @@ -23,4 +23,4 @@ if __name__ == '__main__': - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) diff --git a/examples/getting_started/00_connect.py b/examples/getting_started/00_connect.py new file mode 100644 index 0000000..a0823c1 --- /dev/null +++ b/examples/getting_started/00_connect.py @@ -0,0 +1,14 @@ +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool('redis://localhost') + await redis.set('my-key', 'value') + value = await redis.get('my-key', encoding='utf-8') + print(value) + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) diff --git a/examples/getting_started/01_decoding.py b/examples/getting_started/01_decoding.py new file mode 100644 index 0000000..67f839d --- /dev/null +++ b/examples/getting_started/01_decoding.py @@ -0,0 +1,17 @@ +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool('redis://localhost') + await redis.set('key', 'string-value') + bin_value = await redis.get('key') + assert bin_value == b'string-value' + + str_value = await redis.get('key', encoding='utf-8') + assert str_value == 'string-value' + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) diff --git a/examples/getting_started/02_decoding.py b/examples/getting_started/02_decoding.py new file mode 100644 index 0000000..2d988a9 --- /dev/null +++ b/examples/getting_started/02_decoding.py @@ 
-0,0 +1,23 @@ +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool('redis://localhost') + + await redis.hmset_dict('hash', + key1='value1', + key2='value2', + key3=123) + + result = await redis.hgetall('hash', encoding='utf-8') + assert result == { + 'key1': 'value1', + 'key2': 'value2', + 'key3': '123', # note that Redis returns int as string + } + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) diff --git a/examples/getting_started/03_multiexec.py b/examples/getting_started/03_multiexec.py new file mode 100644 index 0000000..450cc7a --- /dev/null +++ b/examples/getting_started/03_multiexec.py @@ -0,0 +1,15 @@ +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool('redis://localhost') + + tr = redis.multi_exec() + tr.set('key1', 'value1') + tr.set('key2', 'value2') + ok1, ok2 = await tr.execute() + assert ok1 + assert ok2 + +asyncio.run(main()) diff --git a/examples/getting_started/04_pubsub.py b/examples/getting_started/04_pubsub.py new file mode 100644 index 0000000..fbd8828 --- /dev/null +++ b/examples/getting_started/04_pubsub.py @@ -0,0 +1,24 @@ +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool('redis://localhost') + + ch1, ch2 = await redis.subscribe('channel:1', 'channel:2') + assert isinstance(ch1, aioredis.Channel) + assert isinstance(ch2, aioredis.Channel) + + async def reader(channel): + async for message in channel.iter(): + print("Got message:", message) + asyncio.get_running_loop().create_task(reader(ch1)) + asyncio.get_running_loop().create_task(reader(ch2)) + + await redis.publish('channel:1', 'Hello') + await redis.publish('channel:2', 'World') + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) diff --git a/examples/getting_started/05_pubsub.py b/examples/getting_started/05_pubsub.py new file mode 100644 index 0000000..cbdd089 --- /dev/null +++ 
b/examples/getting_started/05_pubsub.py @@ -0,0 +1,22 @@ +import asyncio +import aioredis + + +async def main(): + redis = await aioredis.create_redis_pool('redis://localhost') + + ch, = await redis.psubscribe('channel:*') + assert isinstance(ch, aioredis.Channel) + + async def reader(channel): + async for ch, message in channel.iter(): + print("Got message in channel:", ch, ":", message) + asyncio.get_running_loop().create_task(reader(ch)) + + await redis.publish('channel:1', 'Hello') + await redis.publish('channel:2', 'World') + + redis.close() + await redis.wait_closed() + +asyncio.run(main()) diff --git a/examples/getting_started/06_sentinel.py b/examples/getting_started/06_sentinel.py new file mode 100644 index 0000000..7a74f45 --- /dev/null +++ b/examples/getting_started/06_sentinel.py @@ -0,0 +1,15 @@ +import asyncio +import aioredis + + +async def main(): + sentinel = await aioredis.create_sentinel( + ['redis://localhost:26379', 'redis://sentinel2:26379']) + redis = sentinel.master_for('mymaster') + + ok = await redis.set('key', 'value') + assert ok + val = await redis.get('key', encoding='utf-8') + assert val == 'value' + +asyncio.run(main()) diff --git a/examples/iscan.py b/examples/iscan.py deleted file mode 100644 index 51a1c19..0000000 --- a/examples/iscan.py +++ /dev/null @@ -1,52 +0,0 @@ -import asyncio -import aioredis - - -async def main(): - - redis = await aioredis.create_redis( - 'redis://localhost') - - await redis.delete('something:hash', - 'something:set', - 'something:zset') - await redis.mset('something', 'value', - 'something:else', 'else') - await redis.hmset('something:hash', - 'something:1', 'value:1', - 'something:2', 'value:2') - await redis.sadd('something:set', 'something:1', - 'something:2', 'something:else') - await redis.zadd('something:zset', 1, 'something:1', - 2, 'something:2', 3, 'something:else') - - await go(redis) - redis.close() - await redis.wait_closed() - - -async def go(redis): - async for key in 
redis.iscan(match='something*'): - print('Matched:', key) - - key = 'something:hash' - - async for name, val in redis.ihscan(key, match='something*'): - print('Matched:', name, '->', val) - - key = 'something:set' - - async for val in redis.isscan(key, match='something*'): - print('Matched:', val) - - key = 'something:zset' - - async for val, score in redis.izscan(key, match='something*'): - print('Matched:', val, ':', score) - - -if __name__ == '__main__': - import os - if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) diff --git a/examples/pipeline.py b/examples/pipeline.py index 21f77e0..757a3f3 100644 --- a/examples/pipeline.py +++ b/examples/pipeline.py @@ -42,5 +42,4 @@ if __name__ == '__main__': - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) + asyncio.run(main()) diff --git a/examples/pool.py b/examples/pool.py index 04485fa..207c7cb 100644 --- a/examples/pool.py +++ b/examples/pool.py @@ -15,4 +15,4 @@ if __name__ == '__main__': - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) diff --git a/examples/pool2.py b/examples/pool2.py deleted file mode 100644 index 39e0e35..0000000 --- a/examples/pool2.py +++ /dev/null @@ -1,35 +0,0 @@ -import asyncio -import aioredis - - -async def main(): - - pool = await aioredis.create_pool( - 'redis://localhost') - - # async with pool.get() as conn: - await pool.execute('set', 'my-key', 'value') - - await async_with(pool) - await with_await(pool) - pool.close() - await pool.wait_closed() - - -async def async_with(pool): - async with pool.get() as conn: - value = await conn.execute('get', 'my-key') - print('raw value:', value) - - -async def with_await(pool): - # This is exactly the same as: - # with (yield from pool) as conn: - with (await pool) as conn: - value = await conn.execute('get', 'my-key') - print('raw value:', value) - - -if __name__ == '__main__': - loop = asyncio.get_event_loop() - 
loop.run_until_complete(main()) diff --git a/examples/pubsub.py b/examples/pubsub.py index 0ec43d0..5b9aab3 100644 --- a/examples/pubsub.py +++ b/examples/pubsub.py @@ -28,4 +28,4 @@ if __name__ == '__main__': - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) diff --git a/examples/pubsub2.py b/examples/pubsub2.py index 895a668..a901de4 100644 --- a/examples/pubsub2.py +++ b/examples/pubsub2.py @@ -41,9 +41,9 @@ for msg in ("Hello", ",", "world!"): for ch in ('channel:1', 'channel:2'): await pub.publish(ch, msg) - asyncio.get_event_loop().call_soon(pub.close) - asyncio.get_event_loop().call_soon(sub.close) - await asyncio.sleep(0) + await asyncio.sleep(0.1) + pub.close() + sub.close() await pub.wait_closed() await sub.wait_closed() await asyncio.gather(tsk1, tsk2) @@ -52,5 +52,4 @@ if __name__ == '__main__': import os if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): - loop = asyncio.get_event_loop() - loop.run_until_complete(pubsub()) + asyncio.run(pubsub()) diff --git a/examples/scan.py b/examples/scan.py index 40b960b..e5364a4 100644 --- a/examples/scan.py +++ b/examples/scan.py @@ -20,4 +20,4 @@ if __name__ == '__main__': import os if 'redis_version:2.6' not in os.environ.get('REDIS_VERSION', ''): - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) diff --git a/examples/sentinel.py b/examples/sentinel.py index ddfb968..f92be88 100644 --- a/examples/sentinel.py +++ b/examples/sentinel.py @@ -16,4 +16,4 @@ if __name__ == '__main__': - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) diff --git a/examples/transaction.py b/examples/transaction.py index 920ca3a..e911131 100644 --- a/examples/transaction.py +++ b/examples/transaction.py @@ -19,4 +19,4 @@ if __name__ == '__main__': - asyncio.get_event_loop().run_until_complete(main()) + asyncio.run(main()) diff --git a/examples/transaction2.py b/examples/transaction2.py index becc7a8..de85d63 100644 --- 
a/examples/transaction2.py +++ b/examples/transaction2.py @@ -20,5 +20,4 @@ if __name__ == '__main__': - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) + asyncio.run(main()) diff --git a/setup.cfg b/setup.cfg index 24d0160..0de80f8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,11 +1,15 @@ [tool:pytest] minversion = 2.9.1 -addopts = --cov-report=term --cov-report=html +addopts = -r a --cov-report=term --cov-report=html restpaths = tests markers = - run_loop: Mark coroutine to be run with asyncio loop. + timeout: Set coroutine execution timeout (default is 15 seconds). redis_version(*version, reason): Mark test expecting minimum Redis version skip(reason): Skip test +python_files = + test_*.py + *_test.py + _testutils.py [coverage:run] branch = true diff --git a/setup.py b/setup.py index 10c2f77..c03e8c9 100644 --- a/setup.py +++ b/setup.py @@ -29,8 +29,7 @@ match = regexp.match(line) if match is not None: return match.group(1) - else: - raise RuntimeError('Cannot find version in aioredis/__init__.py') + raise RuntimeError('Cannot find version in {}'.format(init_py)) classifiers = [ diff --git a/tests/_testutils.py b/tests/_testutils.py new file mode 100644 index 0000000..93f29a7 --- /dev/null +++ b/tests/_testutils.py @@ -0,0 +1,11 @@ +import pytest + +__all__ = [ + 'redis_version', +] + + +def redis_version(*version, reason): + assert 1 < len(version) <= 3, version + assert all(isinstance(v, int) for v in version), version + return pytest.mark.redis_version(version=version, reason=reason) diff --git a/tests/conftest.py b/tests/conftest.py index d82f029..1f41810 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,9 +7,9 @@ import os import ssl import time -import logging import tempfile import atexit +import inspect from collections import namedtuple from urllib.parse import urlencode, urlunparse @@ -34,7 +34,8 @@ def loop(): """Creates new event loop.""" loop = asyncio.new_event_loop() - asyncio.set_event_loop(None) + if sys.version_info 
< (3, 8): + asyncio.set_event_loop(loop) try: yield loop @@ -60,11 +61,10 @@ @pytest.fixture -def create_connection(_closable, loop): +def create_connection(_closable): """Wrapper around aioredis.create_connection.""" async def f(*args, **kw): - kw.setdefault('loop', loop) conn = await aioredis.create_connection(*args, **kw) _closable(conn) return conn @@ -75,12 +75,11 @@ aioredis.create_redis, aioredis.create_redis_pool], ids=['single', 'pool']) -def create_redis(_closable, loop, request): +def create_redis(_closable, request): """Wrapper around aioredis.create_redis.""" factory = request.param async def f(*args, **kw): - kw.setdefault('loop', loop) redis = await factory(*args, **kw) _closable(redis) return redis @@ -88,11 +87,10 @@ @pytest.fixture -def create_pool(_closable, loop): +def create_pool(_closable): """Wrapper around aioredis.create_pool.""" async def f(*args, **kw): - kw.setdefault('loop', loop) redis = await aioredis.create_pool(*args, **kw) _closable(redis) return redis @@ -100,11 +98,10 @@ @pytest.fixture -def create_sentinel(_closable, loop): +def create_sentinel(_closable): """Helper instantiating RedisSentinel client.""" async def f(*args, **kw): - kw.setdefault('loop', loop) # make it fail fast on slow CIs (if timeout argument is ommitted) kw.setdefault('timeout', .001) client = await aioredis.sentinel.create_sentinel(*args, **kw) @@ -116,17 +113,18 @@ @pytest.fixture def pool(create_pool, server, loop): """Returns RedisPool instance.""" - pool = loop.run_until_complete( - create_pool(server.tcp_address, loop=loop)) - return pool + return loop.run_until_complete(create_pool(server.tcp_address)) @pytest.fixture def redis(create_redis, server, loop): """Returns Redis client instance.""" redis = loop.run_until_complete( - create_redis(server.tcp_address, loop=loop)) - loop.run_until_complete(redis.flushall()) + create_redis(server.tcp_address)) + + async def clear(): + await redis.flushall() + loop.run_until_complete(clear()) return redis @@ 
-134,8 +132,11 @@ def redis_sentinel(create_sentinel, sentinel, loop): """Returns Redis Sentinel client instance.""" redis_sentinel = loop.run_until_complete( - create_sentinel([sentinel.tcp_address], timeout=2, loop=loop)) - assert loop.run_until_complete(redis_sentinel.ping()) == b'PONG' + create_sentinel([sentinel.tcp_address], timeout=2)) + + async def ping(): + return await redis_sentinel.ping() + assert loop.run_until_complete(ping()) == b'PONG' return redis_sentinel @@ -143,16 +144,18 @@ def _closable(loop): conns = [] - try: - yield conns.append - finally: + async def close(): waiters = [] while conns: conn = conns.pop(0) conn.close() waiters.append(conn.wait_closed()) if waiters: - loop.run_until_complete(asyncio.gather(*waiters, loop=loop)) + await asyncio.gather(*waiters) + try: + yield conns.append + finally: + loop.run_until_complete(close()) @pytest.fixture(scope='session') @@ -381,7 +384,9 @@ yield True raise RuntimeError("Redis startup timeout expired") - def maker(name, *masters, quorum=1, noslaves=False): + def maker(name, *masters, quorum=1, noslaves=False, + down_after_milliseconds=3000, + failover_timeout=1000): key = (name,) + masters if key in sentinels: return sentinels[key] @@ -410,8 +415,10 @@ for master in masters: write('sentinel monitor', master.name, '127.0.0.1', master.tcp_address.port, quorum) - write('sentinel down-after-milliseconds', master.name, '3000') - write('sentinel failover-timeout', master.name, '3000') + write('sentinel down-after-milliseconds', master.name, + down_after_milliseconds) + write('sentinel failover-timeout', master.name, + failover_timeout) write('sentinel auth-pass', master.name, master.password) f = open(stdout_file, 'w') @@ -518,44 +525,36 @@ @pytest.mark.tryfirst -def pytest_pycollect_makeitem(collector, name, obj): - if collector.funcnamefilter(name): - if not callable(obj): - return - item = pytest.Function(name, parent=collector) - if item.get_closest_marker('run_loop') is not None: - # TODO: re-wrap 
with asyncio.coroutine if not native coroutine - return list(collector._genfunctions(name, obj)) - - -@pytest.mark.tryfirst def pytest_pyfunc_call(pyfuncitem): """ Run asyncio marked test functions in an event loop instead of a normal function call. """ - marker = pyfuncitem.get_closest_marker('run_loop') - if marker is not None: + if inspect.iscoroutinefunction(pyfuncitem.obj): + marker = pyfuncitem.get_closest_marker('timeout') + if marker is not None and marker.args: + timeout = marker.args[0] + else: + timeout = 15 + funcargs = pyfuncitem.funcargs loop = funcargs['loop'] testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} loop.run_until_complete( - _wait_coro(pyfuncitem.obj, testargs, - timeout=marker.kwargs.get('timeout', 15), - loop=loop)) + _wait_coro(pyfuncitem.obj, testargs, timeout=timeout)) return True -async def _wait_coro(corofunc, kwargs, timeout, loop): - with async_timeout(timeout, loop=loop): +async def _wait_coro(corofunc, kwargs, timeout): + with async_timeout(timeout): return (await corofunc(**kwargs)) def pytest_runtest_setup(item): - run_loop = item.get_closest_marker('run_loop') - if run_loop and 'loop' not in item.fixturenames: + is_coro = inspect.iscoroutinefunction(item.obj) + if is_coro and 'loop' not in item.fixturenames: # inject an event loop fixture for all async tests item.fixturenames.append('loop') @@ -585,7 +584,17 @@ def pytest_configure(config): bins = config.getoption('--redis-server')[:] - REDIS_SERVERS[:] = bins or ['/usr/bin/redis-server'] + cmd = 'which redis-server' + if not bins: + with os.popen(cmd) as pipe: + path = pipe.read().rstrip() + assert path, ( + "There is no redis-server on your computer." 
+ " Please install it first") + REDIS_SERVERS[:] = [path] + else: + REDIS_SERVERS[:] = bins + VERSIONS.update({srv: _read_server_version(srv) for srv in REDIS_SERVERS}) assert VERSIONS, ("Expected to detect redis versions", REDIS_SERVERS) @@ -608,99 +617,3 @@ raise RuntimeError( "Can not import uvloop, make sure it is installed") asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) - - -def logs(logger, level=None): - """Catches logs for given logger and level. - - See unittest.TestCase.assertLogs for details. - """ - return _AssertLogsContext(logger, level) - - -_LoggingWatcher = namedtuple("_LoggingWatcher", ["records", "output"]) - - -class _CapturingHandler(logging.Handler): - """ - A logging handler capturing all (raw and formatted) logging output. - """ - - def __init__(self): - logging.Handler.__init__(self) - self.watcher = _LoggingWatcher([], []) - - def flush(self): - pass - - def emit(self, record): - self.watcher.records.append(record) - msg = self.format(record) - self.watcher.output.append(msg) - - -class _AssertLogsContext: - """Standard unittest's _AssertLogsContext context manager - adopted to raise pytest failure. 
- """ - LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s" - - def __init__(self, logger_name, level): - self.logger_name = logger_name - if level: - self.level = level - else: - self.level = logging.INFO - self.msg = None - - def __enter__(self): - if isinstance(self.logger_name, logging.Logger): - logger = self.logger = self.logger_name - else: - logger = self.logger = logging.getLogger(self.logger_name) - formatter = logging.Formatter(self.LOGGING_FORMAT) - handler = _CapturingHandler() - handler.setFormatter(formatter) - self.watcher = handler.watcher - self.old_handlers = logger.handlers[:] - self.old_level = logger.level - self.old_propagate = logger.propagate - logger.handlers = [handler] - logger.setLevel(self.level) - logger.propagate = False - return handler.watcher - - def __exit__(self, exc_type, exc_value, tb): - self.logger.handlers = self.old_handlers - self.logger.propagate = self.old_propagate - self.logger.setLevel(self.old_level) - if exc_type is not None: - # let unexpected exceptions pass through - return False - if len(self.watcher.records) == 0: - pytest.fail( - "no logs of level {} or higher triggered on {}" - .format(logging.getLevelName(self.level), self.logger.name)) - - -def redis_version(*version, reason): - assert 1 < len(version) <= 3, version - assert all(isinstance(v, int) for v in version), version - return pytest.mark.redis_version(version=version, reason=reason) - - -def assert_almost_equal(first, second, places=None, msg=None, delta=None): - assert not (places is None and delta is None), \ - "Both places and delta are not set, please set one" - if delta is not None: - assert abs(first - second) <= delta - else: - assert round(abs(first - second), places) == 0 - - -def pytest_namespace(): - return { - 'assert_almost_equal': assert_almost_equal, - 'redis_version': redis_version, - 'logs': logs, - } diff --git a/tests/connection_commands_test.py b/tests/connection_commands_test.py index d276dc3..6926846 100644 --- 
a/tests/connection_commands_test.py +++ b/tests/connection_commands_test.py @@ -4,33 +4,29 @@ from aioredis import ConnectionClosedError, ReplyError from aioredis.pool import ConnectionsPool from aioredis import Redis +from _testutils import redis_version -@pytest.mark.run_loop -async def test_repr(create_redis, loop, server): - redis = await create_redis( - server.tcp_address, db=1, loop=loop) +async def test_repr(create_redis, server): + redis = await create_redis(server.tcp_address, db=1) assert repr(redis) in { '>', '>', } - redis = await create_redis( - server.tcp_address, db=0, loop=loop) + redis = await create_redis(server.tcp_address, db=0) assert repr(redis) in { '>', '>', } -@pytest.mark.run_loop async def test_auth(redis): expected_message = "ERR Client sent AUTH, but no password is set" with pytest.raises(ReplyError, match=expected_message): await redis.auth('') -@pytest.mark.run_loop async def test_echo(redis): resp = await redis.echo('ECHO') assert resp == b'ECHO' @@ -39,13 +35,11 @@ await redis.echo(None) -@pytest.mark.run_loop async def test_ping(redis): assert await redis.ping() == b'PONG' -@pytest.mark.run_loop -async def test_quit(redis, loop): +async def test_quit(redis): expected = (ConnectionClosedError, ConnectionError) try: assert b'OK' == await redis.quit() @@ -62,14 +56,13 @@ assert False, "Cancelled error must not be raised" # wait one loop iteration until it get surely closed - await asyncio.sleep(0, loop=loop) + await asyncio.sleep(0) assert redis.connection.closed with pytest.raises(ConnectionClosedError): await redis.ping() -@pytest.mark.run_loop async def test_select(redis): assert redis.db == 0 @@ -79,18 +72,13 @@ assert redis.connection.db == 1 -@pytest.mark.run_loop -async def test_encoding(create_redis, loop, server): - redis = await create_redis( - server.tcp_address, - db=1, encoding='utf-8', - loop=loop) +async def test_encoding(create_redis, server): + redis = await create_redis(server.tcp_address, db=1, encoding='utf-8') 
assert redis.encoding == 'utf-8' -@pytest.mark.run_loop -async def test_yield_from_backwards_compatability(create_redis, server, loop): - redis = await create_redis(server.tcp_address, loop=loop) +async def test_yield_from_backwards_compatibility(create_redis, server): + redis = await create_redis(server.tcp_address) assert isinstance(redis, Redis) # TODO: there should not be warning @@ -101,12 +89,11 @@ assert await client.ping() -@pytest.redis_version(4, 0, 0, reason="SWAPDB is available since redis>=4.0.0") -@pytest.mark.run_loop -async def test_swapdb(create_redis, start_server, loop): +@redis_version(4, 0, 0, reason="SWAPDB is available since redis>=4.0.0") +async def test_swapdb(create_redis, start_server): server = start_server('swapdb_1') - cli1 = await create_redis(server.tcp_address, db=0, loop=loop) - cli2 = await create_redis(server.tcp_address, db=1, loop=loop) + cli1 = await create_redis(server.tcp_address, db=0) + cli2 = await create_redis(server.tcp_address, db=1) await cli1.flushall() assert await cli1.set('key', 'val') is True diff --git a/tests/connection_test.py b/tests/connection_test.py index 8a86236..40c69d6 100644 --- a/tests/connection_test.py +++ b/tests/connection_test.py @@ -14,20 +14,18 @@ Channel, MaxClientsError, ) - - -@pytest.mark.run_loop -async def test_connect_tcp(request, create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) +from _testutils import redis_version + + +async def test_connect_tcp(request, create_connection, server): + conn = await create_connection(server.tcp_address) assert conn.db == 0 assert isinstance(conn.address, tuple) assert conn.address[0] in ('127.0.0.1', '::1') assert conn.address[1] == server.tcp_address.port assert str(conn) == '' - conn = await create_connection( - ['localhost', server.tcp_address.port], loop=loop) + conn = await create_connection(['localhost', server.tcp_address.port]) assert conn.db == 0 assert isinstance(conn.address, tuple) assert 
conn.address[0] in ('127.0.0.1', '::1') @@ -35,120 +33,90 @@ assert str(conn) == '' -@pytest.mark.run_loop async def test_connect_inject_connection_cls( request, create_connection, - loop, server): class MyConnection(RedisConnection): pass conn = await create_connection( - server.tcp_address, loop=loop, connection_cls=MyConnection) + server.tcp_address, connection_cls=MyConnection) assert isinstance(conn, MyConnection) -@pytest.mark.run_loop async def test_connect_inject_connection_cls_invalid( request, create_connection, - loop, server): with pytest.raises(AssertionError): await create_connection( - server.tcp_address, loop=loop, connection_cls=type) - - -@pytest.mark.run_loop -async def test_connect_tcp_timeout(request, create_connection, loop, server): - with patch.object(loop, 'create_connection') as\ - open_conn_mock: - open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, - loop=loop) + server.tcp_address, connection_cls=type) + + +async def test_connect_tcp_timeout(request, create_connection, server): + with patch('aioredis.connection.open_connection') as open_conn_mock: + open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2) with pytest.raises(asyncio.TimeoutError): - await create_connection( - server.tcp_address, loop=loop, timeout=0.1) - - -@pytest.mark.run_loop + await create_connection(server.tcp_address, timeout=0.1) + + async def test_connect_tcp_invalid_timeout( - request, create_connection, loop, server): + request, create_connection, server): with pytest.raises(ValueError): await create_connection( - server.tcp_address, loop=loop, timeout=0) - - -@pytest.mark.run_loop + server.tcp_address, timeout=0) + + @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") -async def test_connect_unixsocket(create_connection, loop, server): - conn = await create_connection( - server.unixsocket, db=0, loop=loop) +async def test_connect_unixsocket(create_connection, server): + conn = await 
create_connection(server.unixsocket, db=0) assert conn.db == 0 assert conn.address == server.unixsocket assert str(conn) == '' -@pytest.mark.run_loop @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") -async def test_connect_unixsocket_timeout(create_connection, loop, server): - with patch.object(loop, 'create_unix_connection') as open_conn_mock: - open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, - loop=loop) +async def test_connect_unixsocket_timeout(create_connection, server): + with patch('aioredis.connection.open_unix_connection') as open_conn_mock: + open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2) with pytest.raises(asyncio.TimeoutError): - await create_connection( - server.unixsocket, db=0, loop=loop, timeout=0.1) - - -@pytest.mark.run_loop -@pytest.redis_version(2, 8, 0, reason="maxclients config setting") -async def test_connect_maxclients(create_connection, loop, start_server): + await create_connection(server.unixsocket, db=0, timeout=0.1) + + +@redis_version(2, 8, 0, reason="maxclients config setting") +async def test_connect_maxclients(create_connection, start_server): server = start_server('server-maxclients') - conn = await create_connection( - server.tcp_address, loop=loop) + conn = await create_connection(server.tcp_address) await conn.execute(b'CONFIG', b'SET', 'maxclients', 1) errors = (MaxClientsError, ConnectionClosedError, ConnectionError) with pytest.raises(errors): - conn2 = await create_connection( - server.tcp_address, loop=loop) + conn2 = await create_connection(server.tcp_address) await conn2.execute('ping') -def test_global_loop(create_connection, loop, server): - asyncio.set_event_loop(loop) - - conn = loop.run_until_complete(create_connection( - server.tcp_address, db=0)) +async def test_select_db(create_connection, server): + address = server.tcp_address + conn = await create_connection(address) assert conn.db == 0 - assert conn._loop is loop - - -@pytest.mark.run_loop 
-async def test_select_db(create_connection, loop, server): - address = server.tcp_address - conn = await create_connection(address, loop=loop) - assert conn.db == 0 with pytest.raises(ValueError): - await create_connection(address, db=-1, loop=loop) - with pytest.raises(TypeError): - await create_connection(address, db=1.0, loop=loop) - with pytest.raises(TypeError): - await create_connection( - address, db='bad value', loop=loop) - with pytest.raises(TypeError): - conn = await create_connection( - address, db=None, loop=loop) + await create_connection(address, db=-1) + with pytest.raises(TypeError): + await create_connection(address, db=1.0) + with pytest.raises(TypeError): + await create_connection(address, db='bad value') + with pytest.raises(TypeError): + conn = await create_connection(address, db=None) await conn.select(None) with pytest.raises(ReplyError): - await create_connection( - address, db=100000, loop=loop) + await create_connection(address, db=100000) await conn.select(1) assert conn.db == 1 @@ -160,10 +128,8 @@ assert conn.db == 1 -@pytest.mark.run_loop -async def test_protocol_error(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) +async def test_protocol_error(create_connection, server): + conn = await create_connection(server.tcp_address) reader = conn._reader @@ -175,49 +141,42 @@ def test_close_connection__tcp(create_connection, loop, server): - conn = loop.run_until_complete(create_connection( - server.tcp_address, loop=loop)) + conn = loop.run_until_complete(create_connection(server.tcp_address)) conn.close() with pytest.raises(ConnectionClosedError): loop.run_until_complete(conn.select(1)) - conn = loop.run_until_complete(create_connection( - server.tcp_address, loop=loop)) + conn = loop.run_until_complete(create_connection(server.tcp_address)) conn.close() fut = None with pytest.raises(ConnectionClosedError): fut = conn.select(1) assert fut is None - conn = 
loop.run_until_complete(create_connection( - server.tcp_address, loop=loop)) + conn = loop.run_until_complete(create_connection(server.tcp_address)) conn.close() with pytest.raises(ConnectionClosedError): conn.execute_pubsub('subscribe', 'channel:1') -@pytest.mark.run_loop @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") -async def test_close_connection__socket(create_connection, loop, server): - conn = await create_connection( - server.unixsocket, loop=loop) +async def test_close_connection__socket(create_connection, server): + conn = await create_connection(server.unixsocket) conn.close() with pytest.raises(ConnectionClosedError): await conn.select(1) - conn = await create_connection( - server.unixsocket, loop=loop) + conn = await create_connection(server.unixsocket) conn.close() with pytest.raises(ConnectionClosedError): await conn.execute_pubsub('subscribe', 'channel:1') -@pytest.mark.run_loop async def test_closed_connection_with_none_reader( - create_connection, loop, server): + create_connection, server): address = server.tcp_address - conn = await create_connection(address, loop=loop) + conn = await create_connection(address) stored_reader = conn._reader conn._reader = None with pytest.raises(ConnectionClosedError): @@ -225,7 +184,7 @@ conn._reader = stored_reader conn.close() - conn = await create_connection(address, loop=loop) + conn = await create_connection(address) stored_reader = conn._reader conn._reader = None with pytest.raises(ConnectionClosedError): @@ -234,10 +193,9 @@ conn.close() -@pytest.mark.run_loop -async def test_wait_closed(create_connection, loop, server): +async def test_wait_closed(create_connection, server): address = server.tcp_address - conn = await create_connection(address, loop=loop) + conn = await create_connection(address) reader_task = conn._reader_task conn.close() assert not reader_task.done() @@ -245,14 +203,13 @@ assert reader_task.done() -@pytest.mark.run_loop async def 
test_cancel_wait_closed(create_connection, loop, server): # Regression test: Don't throw error if wait_closed() is cancelled. address = server.tcp_address - conn = await create_connection(address, loop=loop) + conn = await create_connection(address) reader_task = conn._reader_task conn.close() - task = asyncio.ensure_future(conn.wait_closed(), loop=loop) + task = asyncio.ensure_future(conn.wait_closed()) # Make sure the task is cancelled # after it has been started by the loop. @@ -262,16 +219,13 @@ assert reader_task.done() -@pytest.mark.run_loop -async def test_auth(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) +async def test_auth(create_connection, server): + conn = await create_connection(server.tcp_address) res = await conn.execute('CONFIG', 'SET', 'requirepass', 'pass') assert res == b'OK' - conn2 = await create_connection( - server.tcp_address, loop=loop) + conn2 = await create_connection(server.tcp_address) with pytest.raises(ReplyError): await conn2.select(1) @@ -281,8 +235,7 @@ res = await conn2.select(1) assert res is True - conn3 = await create_connection( - server.tcp_address, password='pass', loop=loop) + conn3 = await create_connection(server.tcp_address, password='pass') res = await conn3.select(1) assert res is True @@ -291,10 +244,8 @@ assert res == b'OK' -@pytest.mark.run_loop -async def test_decoding(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, encoding='utf-8', loop=loop) +async def test_decoding(create_connection, server): + conn = await create_connection(server.tcp_address, encoding='utf-8') assert conn.encoding == 'utf-8' res = await conn.execute('set', '{prefix}:key1', 'value') assert res == 'OK' @@ -315,16 +266,13 @@ await conn.execute('set', '{prefix}:key1', 'значение') await conn.execute('get', '{prefix}:key1', encoding='ascii') - conn2 = await create_connection( - server.tcp_address, loop=loop) + conn2 = await 
create_connection(server.tcp_address) res = await conn2.execute('get', '{prefix}:key1', encoding='utf-8') assert res == 'значение' -@pytest.mark.run_loop -async def test_execute_exceptions(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) +async def test_execute_exceptions(create_connection, server): + conn = await create_connection(server.tcp_address) with pytest.raises(TypeError): await conn.execute(None) with pytest.raises(TypeError): @@ -334,10 +282,8 @@ assert len(conn._waiters) == 0 -@pytest.mark.run_loop -async def test_subscribe_unsubscribe(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) +async def test_subscribe_unsubscribe(create_connection, server): + conn = await create_connection(server.tcp_address) assert conn.in_pubsub == 0 @@ -365,19 +311,15 @@ assert conn.in_pubsub == 1 -@pytest.mark.run_loop -async def test_psubscribe_punsubscribe(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) +async def test_psubscribe_punsubscribe(create_connection, server): + conn = await create_connection(server.tcp_address) res = await conn.execute('psubscribe', 'chan:*') assert res == [[b'psubscribe', b'chan:*', 1]] assert conn.in_pubsub == 1 -@pytest.mark.run_loop -async def test_bad_command_in_pubsub(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) +async def test_bad_command_in_pubsub(create_connection, server): + conn = await create_connection(server.tcp_address) res = await conn.execute('subscribe', 'chan:1') assert res == [[b'subscribe', b'chan:1', 1]] @@ -389,12 +331,9 @@ conn.execute('get') -@pytest.mark.run_loop -async def test_pubsub_messages(create_connection, loop, server): - sub = await create_connection( - server.tcp_address, loop=loop) - pub = await create_connection( - server.tcp_address, loop=loop) +async def test_pubsub_messages(create_connection, 
server): + sub = await create_connection(server.tcp_address) + pub = await create_connection(server.tcp_address) res = await sub.execute('subscribe', 'chan:1') assert res == [[b'subscribe', b'chan:1', 1]] @@ -426,9 +365,8 @@ assert msg == b'Hello!' -@pytest.mark.run_loop -async def test_multiple_subscribe_unsubscribe(create_connection, loop, server): - sub = await create_connection(server.tcp_address, loop=loop) +async def test_multiple_subscribe_unsubscribe(create_connection, server): + sub = await create_connection(server.tcp_address) res = await sub.execute_pubsub('subscribe', 'chan:1') ch = sub.pubsub_channels['chan:1'] @@ -456,10 +394,8 @@ assert res == [[b'punsubscribe', b'chan:*', 0]] -@pytest.mark.run_loop -async def test_execute_pubsub_errors(create_connection, loop, server): - sub = await create_connection( - server.tcp_address, loop=loop) +async def test_execute_pubsub_errors(create_connection, server): + sub = await create_connection(server.tcp_address) with pytest.raises(TypeError): sub.execute_pubsub('subscribe', "chan:1", None) @@ -468,24 +404,23 @@ with pytest.raises(ValueError): sub.execute_pubsub( 'subscribe', - Channel('chan:1', is_pattern=True, loop=loop)) + Channel('chan:1', is_pattern=True)) with pytest.raises(ValueError): sub.execute_pubsub( 'unsubscribe', - Channel('chan:1', is_pattern=True, loop=loop)) + Channel('chan:1', is_pattern=True)) with pytest.raises(ValueError): sub.execute_pubsub( 'psubscribe', - Channel('chan:1', is_pattern=False, loop=loop)) + Channel('chan:1', is_pattern=False)) with pytest.raises(ValueError): sub.execute_pubsub( 'punsubscribe', - Channel('chan:1', is_pattern=False, loop=loop)) - - -@pytest.mark.run_loop -async def test_multi_exec(create_connection, loop, server): - conn = await create_connection(server.tcp_address, loop=loop) + Channel('chan:1', is_pattern=False)) + + +async def test_multi_exec(create_connection, server): + conn = await create_connection(server.tcp_address) ok = await conn.execute('set', 
'foo', 'bar') assert ok == b'OK' @@ -505,10 +440,8 @@ assert res == b'OK' -@pytest.mark.run_loop -async def test_multi_exec__enc(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop, encoding='utf-8') +async def test_multi_exec__enc(create_connection, server): + conn = await create_connection(server.tcp_address, encoding='utf-8') ok = await conn.execute('set', 'foo', 'bar') assert ok == 'OK' @@ -528,12 +461,10 @@ assert res == 'OK' -@pytest.mark.run_loop -async def test_connection_parser_argument(create_connection, server, loop): +async def test_connection_parser_argument(create_connection, server): klass = mock.MagicMock() klass.return_value = reader = mock.Mock() - conn = await create_connection(server.tcp_address, - parser=klass, loop=loop) + conn = await create_connection(server.tcp_address, parser=klass) assert klass.mock_calls == [ mock.call(protocolError=ProtocolError, replyError=ReplyError), @@ -549,14 +480,13 @@ assert b'+PONG\r\n' == await conn.execute('ping') -@pytest.mark.run_loop -async def test_connection_idle_close(create_connection, start_server, loop): +async def test_connection_idle_close(create_connection, start_server): server = start_server('idle') - conn = await create_connection(server.tcp_address, loop=loop) + conn = await create_connection(server.tcp_address) ok = await conn.execute("config", "set", "timeout", 1) assert ok == b'OK' - await asyncio.sleep(3, loop=loop) + await asyncio.sleep(3) with pytest.raises(ConnectionClosedError): assert await conn.execute('ping') is None @@ -567,13 +497,12 @@ {'db': 1}, {'encoding': 'utf-8'}, ], ids=repr) -@pytest.mark.run_loop async def test_create_connection__tcp_url( - create_connection, server_tcp_url, loop, kwargs): + create_connection, server_tcp_url, kwargs): url = server_tcp_url(**kwargs) db = kwargs.get('db', 0) enc = kwargs.get('encoding', None) - conn = await create_connection(url, loop=loop) + conn = await create_connection(url) pong = b'PONG' 
if not enc else b'PONG'.decode(enc) assert await conn.execute('ping') == pong assert conn.db == db @@ -587,13 +516,12 @@ {'db': 1}, {'encoding': 'utf-8'}, ], ids=repr) -@pytest.mark.run_loop async def test_create_connection__unix_url( - create_connection, server_unix_url, loop, kwargs): + create_connection, server_unix_url, kwargs): url = server_unix_url(**kwargs) db = kwargs.get('db', 0) enc = kwargs.get('encoding', None) - conn = await create_connection(url, loop=loop) + conn = await create_connection(url) pong = b'PONG' if not enc else b'PONG'.decode(enc) assert await conn.execute('ping') == pong assert conn.db == db diff --git a/tests/generic_commands_test.py b/tests/generic_commands_test.py index b631481..ab40ffa 100644 --- a/tests/generic_commands_test.py +++ b/tests/generic_commands_test.py @@ -7,6 +7,7 @@ from unittest import mock from aioredis import ReplyError +from _testutils import redis_version async def add(redis, key, value): @@ -14,7 +15,6 @@ assert ok == b'OK' -@pytest.mark.run_loop async def test_delete(redis): await add(redis, 'my-key', 123) await add(redis, 'other-key', 123) @@ -32,7 +32,6 @@ await redis.delete('my-key', 'my-key', None) -@pytest.mark.run_loop async def test_dump(redis): await add(redis, 'my-key', 123) @@ -48,7 +47,6 @@ await redis.dump(None) -@pytest.mark.run_loop async def test_exists(redis, server): await add(redis, 'my-key', 123) @@ -67,9 +65,8 @@ await redis.exists('key-1', 'key-2') -@pytest.redis_version( +@redis_version( 3, 0, 3, reason='Multi-key EXISTS available since redis>=2.8.0') -@pytest.mark.run_loop async def test_exists_multiple(redis): await add(redis, 'my-key', 123) @@ -86,7 +83,6 @@ assert res == 0 -@pytest.mark.run_loop async def test_expire(redis): await add(redis, 'my-key', 132) @@ -115,7 +111,6 @@ await redis.expire('my-key', 'timeout') -@pytest.mark.run_loop async def test_expireat(redis): await add(redis, 'my-key', 123) now = math.ceil(time.time()) @@ -152,7 +147,6 @@ await redis.expireat('my-key', 
'timestamp') -@pytest.mark.run_loop async def test_keys(redis): res = await redis.keys('*pattern*') assert res == [] @@ -177,8 +171,7 @@ await redis.keys(None) -@pytest.mark.run_loop -async def test_migrate(create_redis, loop, server, serverB): +async def test_migrate(create_redis, server, serverB): redisA = await create_redis(server.tcp_address) redisB = await create_redis(serverB.tcp_address, db=2) @@ -210,10 +203,9 @@ await redisA.migrate('host', 6379, 'key', 1, -1000) -@pytest.redis_version( +@redis_version( 3, 0, 0, reason="Copy/Replace flags available since Redis 3.0") -@pytest.mark.run_loop -async def test_migrate_copy_replace(create_redis, loop, server, serverB): +async def test_migrate_copy_replace(create_redis, server, serverB): redisA = await create_redis(server.tcp_address) redisB = await create_redis(serverB.tcp_address, db=0) @@ -233,12 +225,11 @@ assert (await redisB.get('my-key')) -@pytest.redis_version( +@redis_version( 3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6") @pytest.mark.skipif( sys.platform == 'win32', reason="Seems to be unavailable in win32 build") -@pytest.mark.run_loop -async def test_migrate_keys(create_redis, loop, server, serverB): +async def test_migrate_keys(create_redis, server, serverB): redisA = await create_redis(server.tcp_address) redisB = await create_redis(serverB.tcp_address, db=0) @@ -293,8 +284,7 @@ assert (await redisA.get('key3')) is None -@pytest.mark.run_loop -async def test_migrate__exceptions(redis, loop, server, unused_port): +async def test_migrate__exceptions(redis, server, unused_port): await add(redis, 'my-key', 123) assert (await redis.exists('my-key')) @@ -305,11 +295,10 @@ 'my-key', dest_db=30, timeout=10)) -@pytest.redis_version( +@redis_version( 3, 0, 6, reason="MIGRATE…KEYS available since Redis 3.0.6") @pytest.mark.skipif( sys.platform == 'win32', reason="Seems to be unavailable in win32 build") -@pytest.mark.run_loop async def test_migrate_keys__errors(redis): with 
pytest.raises(TypeError, match="host .* str"): await redis.migrate_keys(None, 1234, 'key', 1, 23) @@ -329,7 +318,6 @@ await redis.migrate_keys('host', '1234', (), 2, 123) -@pytest.mark.run_loop async def test_move(redis): await add(redis, 'my-key', 123) @@ -347,7 +335,6 @@ await redis.move('my-key', 'not db') -@pytest.mark.run_loop async def test_object_refcount(redis): await add(redis, 'foo', 'bar') @@ -360,21 +347,20 @@ await redis.object_refcount(None) -@pytest.mark.run_loop async def test_object_encoding(redis, server): await add(redis, 'foo', 'bar') res = await redis.object_encoding('foo') if server.version < (3, 0, 0): - assert res == b'raw' + assert res == 'raw' else: - assert res == b'embstr' + assert res == 'embstr' res = await redis.incr('key') assert res == 1 res = await redis.object_encoding('key') - assert res == b'int' + assert res == 'int' res = await redis.object_encoding('non-existent-key') assert res is None @@ -382,8 +368,10 @@ await redis.object_encoding(None) -@pytest.mark.run_loop(timeout=20) -async def test_object_idletime(redis, loop, server): +@redis_version( + 3, 0, 0, reason="Older Redis version has lower idle time resolution") +@pytest.mark.timeout(20) +async def test_object_idletime(redis, server): await add(redis, 'foo', 'bar') res = await redis.object_idletime('foo') @@ -393,7 +381,7 @@ res = 0 while not res: res = await redis.object_idletime('foo') - await asyncio.sleep(.5, loop=loop) + await asyncio.sleep(.5) assert res >= 1 res = await redis.object_idletime('non-existent-key') @@ -403,7 +391,6 @@ await redis.object_idletime(None) -@pytest.mark.run_loop async def test_persist(redis): await add(redis, 'my-key', 123) res = await redis.expire('my-key', 10) @@ -419,8 +406,7 @@ await redis.persist(None) -@pytest.mark.run_loop -async def test_pexpire(redis, loop): +async def test_pexpire(redis): await add(redis, 'my-key', 123) res = await redis.pexpire('my-key', 100) assert res is True @@ -435,7 +421,7 @@ assert res is True # XXX: tests 
now looks strange to me. - await asyncio.sleep(.2, loop=loop) + await asyncio.sleep(.2) res = await redis.exists('my-key') assert not res @@ -446,16 +432,15 @@ await redis.pexpire('my-key', 1.0) -@pytest.mark.run_loop async def test_pexpireat(redis): await add(redis, 'my-key', 123) - now = math.ceil((await redis.time()) * 1000) + now = int((await redis.time()) * 1000) fut1 = redis.pexpireat('my-key', now + 2000) fut2 = redis.ttl('my-key') fut3 = redis.pttl('my-key') - assert (await fut1) is True - assert (await fut2) == 2 - pytest.assert_almost_equal((await fut3), 2000, -3) + assert await fut1 is True + assert await fut2 == 2 + assert 1000 < await fut3 <= 2000 with pytest.raises(TypeError): await redis.pexpireat(None, 1234) @@ -465,7 +450,6 @@ await redis.pexpireat('key', 1000.0) -@pytest.mark.run_loop async def test_pttl(redis, server): await add(redis, 'key', 'val') res = await redis.pttl('key') @@ -478,13 +462,12 @@ await redis.pexpire('key', 500) res = await redis.pttl('key') - pytest.assert_almost_equal(res, 500, -2) + assert 400 < res <= 500 with pytest.raises(TypeError): await redis.pttl(None) -@pytest.mark.run_loop async def test_randomkey(redis): await add(redis, 'key:1', 123) await add(redis, 'key:2', 123) @@ -502,7 +485,6 @@ assert res is None -@pytest.mark.run_loop async def test_rename(redis, server): await add(redis, 'foo', 'bar') await redis.delete('bar') @@ -524,7 +506,6 @@ await redis.rename('bar', b'bar') -@pytest.mark.run_loop async def test_renamenx(redis, server): await redis.delete('foo', 'bar') await add(redis, 'foo', 123) @@ -550,7 +531,6 @@ await redis.renamenx('foo', b'foo') -@pytest.mark.run_loop async def test_restore(redis): ok = await redis.set('key', 'value') assert ok @@ -562,8 +542,7 @@ assert (await redis.get('key')) == b'value' -@pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') async def test_scan(redis): for 
i in range(1, 11): foo_or_bar = 'bar' if i % 3 else 'foo' @@ -603,7 +582,6 @@ assert len(test_values) == 10 -@pytest.mark.run_loop async def test_sort(redis): async def _make_list(key, items): await redis.delete(key) @@ -660,21 +638,20 @@ assert res == [b'10', b'30', b'20'] -@pytest.redis_version(3, 2, 1, reason="TOUCH is available since redis>=3.2.1") -@pytest.mark.run_loop(timeout=20) -async def test_touch(redis, loop): +@redis_version(3, 2, 1, reason="TOUCH is available since redis>=3.2.1") +@pytest.mark.timeout(20) +async def test_touch(redis): await add(redis, 'key', 'val') res = 0 while not res: res = await redis.object_idletime('key') - await asyncio.sleep(.5, loop=loop) + await asyncio.sleep(.5) assert res > 0 assert await redis.touch('key', 'key', 'key') == 3 res2 = await redis.object_idletime('key') assert 0 <= res2 < res -@pytest.mark.run_loop async def test_ttl(redis, server): await add(redis, 'key', 'val') res = await redis.ttl('key') @@ -693,7 +670,6 @@ await redis.ttl(None) -@pytest.mark.run_loop async def test_type(redis): await add(redis, 'key', 'val') res = await redis.type('key') @@ -716,8 +692,7 @@ await redis.type(None) -@pytest.redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='SCAN is available since redis>=2.8.0') async def test_iscan(redis): full = set() foo = set() @@ -761,8 +736,7 @@ assert set(ret) == full -@pytest.redis_version(4, 0, 0, reason="UNLINK is available since redis>=4.0.0") -@pytest.mark.run_loop +@redis_version(4, 0, 0, reason="UNLINK is available since redis>=4.0.0") async def test_unlink(redis): await add(redis, 'my-key', 123) await add(redis, 'other-key', 123) @@ -780,9 +754,8 @@ await redis.unlink('my-key', 'my-key', None) -@pytest.redis_version(3, 0, 0, reason="WAIT is available since redis>=3.0.0") -@pytest.mark.run_loop -async def test_wait(redis, loop): +@redis_version(3, 0, 0, reason="WAIT is available since redis>=3.0.0") +async def 
test_wait(redis): await add(redis, 'key', 'val1') start = await redis.time() res = await redis.wait(1, 400) diff --git a/tests/geo_commands_test.py b/tests/geo_commands_test.py index 2b39a46..4e1307f 100644 --- a/tests/geo_commands_test.py +++ b/tests/geo_commands_test.py @@ -1,10 +1,10 @@ import pytest from aioredis import GeoPoint, GeoMember - - -@pytest.mark.run_loop -@pytest.redis_version( +from _testutils import redis_version + + +@redis_version( 3, 2, 0, reason='GEOADD is available since redis >= 3.2.0') async def test_geoadd(redis): res = await redis.geoadd('geodata', 13.361389, 38.115556, 'Palermo') @@ -18,8 +18,7 @@ assert res == 2 -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEODIST is available since redis >= 3.2.0') async def test_geodist(redis): res = await redis.geoadd( @@ -36,8 +35,7 @@ assert res == 166.2742 -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEOHASH is available since redis >= 3.2.0') async def test_geohash(redis): res = await redis.geoadd( @@ -58,8 +56,7 @@ assert res == ['sqc8b49rny0', 'sqdtr74hyu0'] -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEOPOS is available since redis >= 3.2.0') async def test_geopos(redis): res = await redis.geoadd( @@ -81,8 +78,7 @@ ] -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEO* is available since redis >= 3.2.0') async def test_geo_not_exist_members(redis): res = await redis.geoadd('geodata', 13.361389, 38.115556, 'Palermo') @@ -116,8 +112,7 @@ ] -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') async def test_georadius_validation(redis): res = await redis.geoadd( @@ -144,8 +139,7 @@ ) -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') async def test_georadius(redis): res = await redis.geoadd( @@ -263,8 +257,7 @@ 
] -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEORADIUSBYMEMBER is available since redis >= 3.2.0') async def test_georadiusbymember(redis): res = await redis.geoadd( @@ -317,8 +310,7 @@ ] -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEOHASH is available since redis >= 3.2.0') async def test_geohash_binary(redis): res = await redis.geoadd( @@ -339,8 +331,7 @@ assert res == [b'sqc8b49rny0', b'sqdtr74hyu0'] -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEORADIUS is available since redis >= 3.2.0') async def test_georadius_binary(redis): res = await redis.geoadd( @@ -458,8 +449,7 @@ ] -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='GEORADIUSBYMEMBER is available since redis >= 3.2.0') async def test_georadiusbymember_binary(redis): res = await redis.geoadd( diff --git a/tests/hash_commands_test.py b/tests/hash_commands_test.py index a087891..afd0b8d 100644 --- a/tests/hash_commands_test.py +++ b/tests/hash_commands_test.py @@ -1,6 +1,7 @@ import pytest from aioredis import ReplyError +from _testutils import redis_version async def add(redis, key, field, value): @@ -9,7 +10,6 @@ assert ok == 1 -@pytest.mark.run_loop async def test_hdel(redis): key, field, value = b'key:hdel', b'bar', b'zap' await add(redis, key, field, value) @@ -24,7 +24,6 @@ await redis.hdel(None, field) -@pytest.mark.run_loop async def test_hexists(redis): key, field, value = b'key:hexists', b'bar', b'zap' await add(redis, key, field, value) @@ -42,7 +41,6 @@ await redis.hexists(None, field) -@pytest.mark.run_loop async def test_hget(redis): key, field, value = b'key:hget', b'bar', b'zap' @@ -65,7 +63,6 @@ await redis.hget(None, field) -@pytest.mark.run_loop async def test_hgetall(redis): await add(redis, 'key:hgetall', 'foo', 'baz') await add(redis, 'key:hgetall', 'bar', 'zap') @@ -86,7 +83,6 @@ await redis.hgetall(None) -@pytest.mark.run_loop async def 
test_hincrby(redis): key, field, value = b'key:hincrby', b'bar', 1 await add(redis, key, field, value) @@ -121,7 +117,6 @@ await redis.hincrby(None, field, 2) -@pytest.mark.run_loop async def test_hincrbyfloat(redis): key, field, value = b'key:hincrbyfloat', b'bar', 2.71 await add(redis, key, field, value) @@ -146,7 +141,6 @@ await redis.hincrbyfloat(None, field, 2) -@pytest.mark.run_loop async def test_hkeys(redis): key = b'key:hkeys' field1, field2 = b'foo', b'bar' @@ -167,7 +161,6 @@ await redis.hkeys(None) -@pytest.mark.run_loop async def test_hlen(redis): key = b'key:hlen' field1, field2 = b'foo', b'bar' @@ -185,7 +178,6 @@ await redis.hlen(None) -@pytest.mark.run_loop async def test_hmget(redis): key = b'key:hmget' field1, field2 = b'foo', b'bar' @@ -210,7 +202,6 @@ await redis.hmget(None, field1, field2) -@pytest.mark.run_loop async def test_hmset(redis): key, field, value = b'key:hmset', b'bar', b'zap' await add(redis, key, field, value) @@ -248,7 +239,6 @@ await redis.hmset(key) -@pytest.mark.run_loop async def test_hmset_dict(redis): key = 'key:hmset' @@ -300,7 +290,6 @@ await redis.hmset_dict(key, {'a': 1}, {'b': 2}, 'c', 3, d=4) -@pytest.mark.run_loop async def test_hset(redis): key, field, value = b'key:hset', b'bar', b'zap' test_value = await redis.hset(key, field, value) @@ -319,7 +308,6 @@ await redis.hset(None, field, value) -@pytest.mark.run_loop async def test_hsetnx(redis): key, field, value = b'key:hsetnx', b'bar', b'zap' # field does not exists, operation should be successful @@ -339,7 +327,6 @@ await redis.hsetnx(None, field, value) -@pytest.mark.run_loop async def test_hvals(redis): key = b'key:hvals' field1, field2 = b'foo', b'bar' @@ -359,8 +346,7 @@ await redis.hvals(None) -@pytest.redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') async def test_hscan(redis): key = b'key:hscan' # setup initial values 3 "field:foo:*" items 
and 7 "field:bar:*" items @@ -404,10 +390,8 @@ await redis.hscan(None) -@pytest.mark.run_loop -async def test_hgetall_enc(create_redis, loop, server): - redis = await create_redis( - server.tcp_address, loop=loop, encoding='utf-8') +async def test_hgetall_enc(create_redis, server): + redis = await create_redis(server.tcp_address, encoding='utf-8') TEST_KEY = 'my-key-nx' await redis.hmset(TEST_KEY, 'foo', 'bar', 'baz', 'bad') @@ -417,8 +401,7 @@ assert res == [{'foo': 'bar', 'baz': 'bad'}] -@pytest.mark.run_loop -@pytest.redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") +@redis_version(3, 2, 0, reason="HSTRLEN new in redis 3.2.0") async def test_hstrlen(redis): ok = await redis.hset('myhash', 'str_field', 'some value') assert ok == 1 @@ -442,8 +425,7 @@ assert l == 0 -@pytest.redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='HSCAN is available since redis>=2.8.0') async def test_ihscan(redis): key = b'key:hscan' # setup initial values 3 "field:foo:*" items and 7 "field:bar:*" items diff --git a/tests/hyperloglog_commands_test.py b/tests/hyperloglog_commands_test.py index dfd659c..5361c06 100644 --- a/tests/hyperloglog_commands_test.py +++ b/tests/hyperloglog_commands_test.py @@ -1,11 +1,11 @@ import pytest +from _testutils import redis_version -pytestmark = pytest.redis_version( +pytestmark = redis_version( 2, 8, 9, reason='HyperLogLog works only with redis>=2.8.9') -@pytest.mark.run_loop async def test_pfcount(redis): key = 'hll_pfcount' other_key = 'some-other-hll' @@ -42,7 +42,6 @@ await redis.pfcount(key, key, None) -@pytest.mark.run_loop async def test_pfadd(redis): key = 'hll_pfadd' values = ['a', 's', 'y', 'n', 'c', 'i', 'o'] @@ -54,13 +53,11 @@ assert is_changed == 0 -@pytest.mark.run_loop async def test_pfadd_wrong_input(redis): with pytest.raises(TypeError): await redis.pfadd(None, 'value') -@pytest.mark.run_loop async def test_pfmerge(redis): key = 'hll_asyncio' 
key_other = 'hll_aioredis' @@ -96,7 +93,6 @@ await redis.pfmerge(key_dest, key, None) -@pytest.mark.run_loop async def test_pfmerge_wrong_input(redis): with pytest.raises(TypeError): await redis.pfmerge(None, 'value') diff --git a/tests/integration_test.py b/tests/integration_test.py index 2b310a2..47e4df8 100644 --- a/tests/integration_test.py +++ b/tests/integration_test.py @@ -5,7 +5,7 @@ @pytest.fixture -def pool_or_redis(_closable, server, loop): +def pool_or_redis(_closable, server): version = tuple(map(int, aioredis.__version__.split('.')[:2])) if version >= (1, 0): factory = aioredis.create_redis_pool @@ -13,14 +13,14 @@ factory = aioredis.create_pool async def redis_factory(maxsize): - redis = await factory(server.tcp_address, loop=loop, + redis = await factory(server.tcp_address, minsize=1, maxsize=maxsize) _closable(redis) return redis return redis_factory -async def simple_get_set(pool, idx, loop): +async def simple_get_set(pool, idx): """A simple test to make sure Redis(pool) can be used as old Pool(Redis). 
""" val = 'val:{}'.format(idx) @@ -29,15 +29,15 @@ await redis.get('key', encoding='utf-8') -async def pipeline(pool, val, loop): +async def pipeline(pool, val): val = 'val:{}'.format(val) with await pool as redis: f1 = redis.set('key', val) f2 = redis.get('key', encoding='utf-8') - ok, res = await asyncio.gather(f1, f2, loop=loop) + ok, res = await asyncio.gather(f1, f2) -async def transaction(pool, val, loop): +async def transaction(pool, val): val = 'val:{}'.format(val) with await pool as redis: tr = redis.multi_exec() @@ -48,12 +48,12 @@ assert res == val -async def blocking_pop(pool, val, loop): +async def blocking_pop(pool, val): async def lpush(): with await pool as redis: # here v0.3 has bound connection, v1.0 does not; - await asyncio.sleep(.1, loop=loop) + await asyncio.sleep(.1) await redis.lpush('list-key', 'val') async def blpop(): @@ -62,10 +62,9 @@ res = await redis.blpop( 'list-key', timeout=2, encoding='utf-8') assert res == ['list-key', 'val'], res - await asyncio.gather(blpop(), lpush(), loop=loop) + await asyncio.gather(blpop(), lpush()) -@pytest.mark.run_loop @pytest.mark.parametrize('test_case,pool_size', [ (simple_get_set, 1), (pipeline, 1), @@ -80,12 +79,12 @@ (transaction, 10), (blocking_pop, 10), ], ids=lambda o: getattr(o, '__name__', repr(o))) -async def test_operations(pool_or_redis, test_case, pool_size, loop): +async def test_operations(pool_or_redis, test_case, pool_size): repeat = 100 redis = await pool_or_redis(pool_size) done, pending = await asyncio.wait( - [asyncio.ensure_future(test_case(redis, i, loop), loop=loop) - for i in range(repeat)], loop=loop) + [asyncio.ensure_future(test_case(redis, i)) + for i in range(repeat)]) assert not pending success = 0 diff --git a/tests/list_commands_test.py b/tests/list_commands_test.py index 0b571a6..78a7f7f 100644 --- a/tests/list_commands_test.py +++ b/tests/list_commands_test.py @@ -4,13 +4,12 @@ from aioredis import ReplyError -async def push_data_with_sleep(redis, loop, key, *values): 
- await asyncio.sleep(0.2, loop=loop) +async def push_data_with_sleep(redis, key, *values): + await asyncio.sleep(0.2) result = await redis.lpush(key, *values) return result -@pytest.mark.run_loop async def test_blpop(redis): key1, value1 = b'key:blpop:1', b'blpop:value:1' key2, value2 = b'key:blpop:2', b'blpop:value:2' @@ -40,21 +39,18 @@ assert test_value == ['key:blpop:2', 'blpop:value:1'] -@pytest.mark.run_loop -async def test_blpop_blocking_features(redis, create_redis, loop, server): +async def test_blpop_blocking_features(redis, create_redis, server): key1, key2 = b'key:blpop:1', b'key:blpop:2' value = b'blpop:value:2' - other_redis = await create_redis( - server.tcp_address, loop=loop) + other_redis = await create_redis(server.tcp_address) # create blocking task in separate connection consumer = other_redis.blpop(key1, key2) - producer_task = asyncio.Task( - push_data_with_sleep(redis, loop, key2, value), loop=loop) - results = await asyncio.gather( - consumer, producer_task, loop=loop) + producer_task = asyncio.ensure_future( + push_data_with_sleep(redis, key2, value)) + results = await asyncio.gather(consumer, producer_task) assert results[0] == [key2, value] assert results[1] == 1 @@ -67,7 +63,6 @@ other_redis.close() -@pytest.mark.run_loop async def test_brpop(redis): key1, value1 = b'key:brpop:1', b'brpop:value:1' key2, value2 = b'key:brpop:2', b'brpop:value:2' @@ -97,21 +92,19 @@ assert test_value == ['key:brpop:2', 'brpop:value:1'] -@pytest.mark.run_loop -async def test_brpop_blocking_features(redis, create_redis, server, loop): +async def test_brpop_blocking_features(redis, create_redis, server): key1, key2 = b'key:brpop:1', b'key:brpop:2' value = b'brpop:value:2' other_redis = await create_redis( - server.tcp_address, loop=loop) + server.tcp_address) # create blocking task in separate connection consumer_task = other_redis.brpop(key1, key2) - producer_task = asyncio.Task( - push_data_with_sleep(redis, loop, key2, value), loop=loop) - - results = 
await asyncio.gather( - consumer_task, producer_task, loop=loop) + producer_task = asyncio.ensure_future( + push_data_with_sleep(redis, key2, value)) + + results = await asyncio.gather(consumer_task, producer_task) assert results[0] == [key2, value] assert results[1] == 1 @@ -123,7 +116,6 @@ assert test_value is None -@pytest.mark.run_loop async def test_brpoplpush(redis): key = b'key:brpoplpush:1' value1, value2 = b'brpoplpush:value:1', b'brpoplpush:value:2' @@ -162,19 +154,17 @@ assert result == 'brpoplpush:value:2' -@pytest.mark.run_loop -async def test_brpoplpush_blocking_features(redis, create_redis, server, loop): +async def test_brpoplpush_blocking_features(redis, create_redis, server): source = b'key:brpoplpush:12' value = b'brpoplpush:value:2' destkey = b'destkey:brpoplpush:2' other_redis = await create_redis( - server.tcp_address, loop=loop) + server.tcp_address) # create blocking task consumer_task = other_redis.brpoplpush(source, destkey) - producer_task = asyncio.Task( - push_data_with_sleep(redis, loop, source, value), loop=loop) - results = await asyncio.gather( - consumer_task, producer_task, loop=loop) + producer_task = asyncio.ensure_future( + push_data_with_sleep(redis, source, value)) + results = await asyncio.gather(consumer_task, producer_task) assert results[0] == value assert results[1] == 1 @@ -190,7 +180,6 @@ other_redis.close() -@pytest.mark.run_loop async def test_lindex(redis): key, value = b'key:lindex:1', 'value:{}' # setup list @@ -223,7 +212,6 @@ await redis.lindex(key, b'one') -@pytest.mark.run_loop async def test_linsert(redis): key = b'key:linsert:1' value1, value2, value3, value4 = b'Hello', b'World', b'foo', b'bar' @@ -252,7 +240,6 @@ await redis.linsert(None, value1, value3) -@pytest.mark.run_loop async def test_llen(redis): key = b'key:llen:1' value1, value2 = b'Hello', b'World' @@ -268,7 +255,6 @@ await redis.llen(None) -@pytest.mark.run_loop async def test_lpop(redis): key = b'key:lpop:1' value1, value2 = b'lpop:value:1', 
b'lpop:value:2' @@ -295,7 +281,6 @@ await redis.lpop(None) -@pytest.mark.run_loop async def test_lpush(redis): key = b'key:lpush' value1, value2 = b'value:1', b'value:2' @@ -316,7 +301,6 @@ await redis.lpush(None, value1) -@pytest.mark.run_loop async def test_lpushx(redis): key = b'key:lpushx' value1, value2 = b'value:1', b'value:2' @@ -340,7 +324,6 @@ await redis.lpushx(None, value1) -@pytest.mark.run_loop async def test_lrange(redis): key, value = b'key:lrange:1', 'value:{}' values = [value.format(i).encode('utf-8') for i in range(0, 10)] @@ -369,7 +352,6 @@ await redis.lrange(key, 0, b'one') -@pytest.mark.run_loop async def test_lrem(redis): key, value = b'key:lrem:1', 'value:{}' values = [value.format(i % 2).encode('utf-8') for i in range(0, 10)] @@ -404,7 +386,6 @@ await redis.lrem(key, b'ten', b'value:0') -@pytest.mark.run_loop async def test_lset(redis): key, value = b'key:lset', 'value:{}' values = [value.format(i).encode('utf-8') for i in range(0, 3)] @@ -427,7 +408,6 @@ await redis.lset(key, b'one', b'value:0') -@pytest.mark.run_loop async def test_ltrim(redis): key, value = b'key:ltrim', 'value:{}' values = [value.format(i).encode('utf-8') for i in range(0, 10)] @@ -458,7 +438,6 @@ await redis.ltrim(key, 0, b'one') -@pytest.mark.run_loop async def test_rpop(redis): key = b'key:rpop:1' value1, value2 = b'rpop:value:1', b'rpop:value:2' @@ -485,7 +464,6 @@ await redis.rpop(None) -@pytest.mark.run_loop async def test_rpoplpush(redis): key = b'key:rpoplpush:1' value1, value2 = b'rpoplpush:value:1', b'rpoplpush:value:2' @@ -517,7 +495,6 @@ await redis.rpoplpush(key, None) -@pytest.mark.run_loop async def test_rpush(redis): key = b'key:rpush' value1, value2 = b'value:1', b'value:2' @@ -534,7 +511,6 @@ await redis.rpush(None, value1) -@pytest.mark.run_loop async def test_rpushx(redis): key = b'key:rpushx' value1, value2 = b'value:1', b'value:2' diff --git a/tests/locks_test.py b/tests/locks_test.py index 42024ff..eb56c8c 100644 --- a/tests/locks_test.py +++ 
b/tests/locks_test.py @@ -1,32 +1,30 @@ import asyncio -import pytest from aioredis.locks import Lock -@pytest.mark.run_loop -async def test_finished_waiter_cancelled(loop): - lock = Lock(loop=loop) +async def test_finished_waiter_cancelled(): + lock = Lock() - ta = asyncio.ensure_future(lock.acquire(), loop=loop) - await asyncio.sleep(0, loop=loop) + ta = asyncio.ensure_future(lock.acquire()) + await asyncio.sleep(0) assert lock.locked() - tb = asyncio.ensure_future(lock.acquire(), loop=loop) - await asyncio.sleep(0, loop=loop) + tb = asyncio.ensure_future(lock.acquire()) + await asyncio.sleep(0) assert len(lock._waiters) == 1 # Create a second waiter, wake up the first, and cancel it. # Without the fix, the second was not woken up and the lock # will never be locked - asyncio.ensure_future(lock.acquire(), loop=loop) - await asyncio.sleep(0, loop=loop) + asyncio.ensure_future(lock.acquire()) + await asyncio.sleep(0) lock.release() tb.cancel() - await asyncio.sleep(0, loop=loop) + await asyncio.sleep(0) assert ta.done() assert tb.cancelled() - await asyncio.sleep(0, loop=loop) + await asyncio.sleep(0) assert lock.locked() diff --git a/tests/multi_exec_test.py b/tests/multi_exec_test.py index 53c21b1..804dfc7 100644 --- a/tests/multi_exec_test.py +++ b/tests/multi_exec_test.py @@ -23,7 +23,7 @@ asyncio.set_event_loop(loop) tr = MultiExec(conn, commands_factory=Redis) - assert tr._loop is loop + # assert tr._loop is loop def make_fut(cmd, *args, **kw): fut = asyncio.get_event_loop().create_future() diff --git a/tests/pool_test.py b/tests/pool_test.py index cefd22d..b5693f5 100644 --- a/tests/pool_test.py +++ b/tests/pool_test.py @@ -1,6 +1,8 @@ import asyncio import pytest import async_timeout +import logging +import sys from unittest.mock import patch @@ -11,6 +13,9 @@ ConnectionsPool, MaxClientsError, ) +from _testutils import redis_version + +BPO_34638 = sys.version_info >= (3, 8) def _assert_defaults(pool): @@ -19,22 +24,13 @@ assert pool.maxsize == 10 assert 
pool.size == 1 assert pool.freesize == 1 - assert pool._close_waiter is None + assert not pool._close_state.is_set() def test_connect(pool): _assert_defaults(pool) -def test_global_loop(create_pool, loop, server): - asyncio.set_event_loop(loop) - - pool = loop.run_until_complete(create_pool( - server.tcp_address)) - _assert_defaults(pool) - - -@pytest.mark.run_loop async def test_clear(pool): _assert_defaults(pool) @@ -42,35 +38,31 @@ assert pool.freesize == 0 -@pytest.mark.run_loop @pytest.mark.parametrize('minsize', [None, -100, 0.0, 100]) -async def test_minsize(minsize, create_pool, loop, server): +async def test_minsize(minsize, create_pool, server): with pytest.raises(AssertionError): await create_pool( server.tcp_address, - minsize=minsize, maxsize=10, loop=loop) - - -@pytest.mark.run_loop + minsize=minsize, maxsize=10) + + @pytest.mark.parametrize('maxsize', [None, -100, 0.0, 1]) -async def test_maxsize(maxsize, create_pool, loop, server): +async def test_maxsize(maxsize, create_pool, server): with pytest.raises(AssertionError): await create_pool( server.tcp_address, - minsize=2, maxsize=maxsize, loop=loop) - - -@pytest.mark.run_loop -async def test_create_connection_timeout(create_pool, loop, server): - with patch.object(loop, 'create_connection') as\ + minsize=2, maxsize=maxsize) + + +async def test_create_connection_timeout(create_pool, server): + with patch('aioredis.connection.open_connection') as\ open_conn_mock: - open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2, - loop=loop) + open_conn_mock.side_effect = lambda *a, **kw: asyncio.sleep(0.2) with pytest.raises(asyncio.TimeoutError): await create_pool( - server.tcp_address, loop=loop, + server.tcp_address, create_connection_timeout=0.1) @@ -80,11 +72,10 @@ pass # pragma: no cover -@pytest.mark.run_loop -async def test_simple_command(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=10, loop=loop) +async def test_simple_command(create_pool, server): 
+ pool = await create_pool( + server.tcp_address, + minsize=10) with (await pool) as conn: msg = await conn.execute('echo', 'hello') @@ -95,11 +86,10 @@ assert pool.freesize == 10 -@pytest.mark.run_loop -async def test_create_new(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=1, loop=loop) +async def test_create_new(create_pool, server): + pool = await create_pool( + server.tcp_address, + minsize=1) assert pool.size == 1 assert pool.freesize == 1 @@ -115,11 +105,10 @@ assert pool.freesize == 2 -@pytest.mark.run_loop -async def test_create_constraints(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=1, maxsize=1, loop=loop) +async def test_create_constraints(create_pool, server): + pool = await create_pool( + server.tcp_address, + minsize=1, maxsize=1) assert pool.size == 1 assert pool.freesize == 1 @@ -129,15 +118,13 @@ with pytest.raises(asyncio.TimeoutError): await asyncio.wait_for(pool.acquire(), - timeout=0.2, - loop=loop) - - -@pytest.mark.run_loop -async def test_create_no_minsize(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=0, maxsize=1, loop=loop) + timeout=0.2) + + +async def test_create_no_minsize(create_pool, server): + pool = await create_pool( + server.tcp_address, + minsize=0, maxsize=1) assert pool.size == 0 assert pool.freesize == 0 @@ -147,40 +134,34 @@ with pytest.raises(asyncio.TimeoutError): await asyncio.wait_for(pool.acquire(), - timeout=0.2, - loop=loop) - assert pool.size == 1 - assert pool.freesize == 1 - - -@pytest.mark.run_loop -async def test_create_pool_cls(create_pool, loop, server): + timeout=0.2) + assert pool.size == 1 + assert pool.freesize == 1 + + +async def test_create_pool_cls(create_pool, server): class MyPool(ConnectionsPool): pass pool = await create_pool( server.tcp_address, - loop=loop, pool_cls=MyPool) assert isinstance(pool, MyPool) -@pytest.mark.run_loop -async def 
test_create_pool_cls_invalid(create_pool, loop, server): +async def test_create_pool_cls_invalid(create_pool, server): with pytest.raises(AssertionError): await create_pool( server.tcp_address, - loop=loop, pool_cls=type) -@pytest.mark.run_loop -async def test_release_closed(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=1, loop=loop) +async def test_release_closed(create_pool, server): + pool = await create_pool( + server.tcp_address, + minsize=1) assert pool.size == 1 assert pool.freesize == 1 @@ -191,15 +172,15 @@ assert pool.freesize == 0 -@pytest.mark.run_loop -async def test_release_pending(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=1, loop=loop) - assert pool.size == 1 - assert pool.freesize == 1 - - with pytest.logs('aioredis', 'WARNING') as cm: +async def test_release_pending(create_pool, server, caplog): + pool = await create_pool( + server.tcp_address, + minsize=1) + assert pool.size == 1 + assert pool.freesize == 1 + + caplog.clear() + with caplog.at_level('WARNING', 'aioredis'): with (await pool) as conn: try: await asyncio.wait_for( @@ -207,29 +188,24 @@ b'blpop', b'somekey:not:exists', b'0'), - 0.1, - loop=loop) + 0.05, + ) except asyncio.TimeoutError: pass assert pool.size == 0 assert pool.freesize == 0 - assert cm.output == [ - 'WARNING:aioredis:Connection ' - ' has pending commands, closing it.' 
+ assert caplog.record_tuples == [ + ('aioredis', logging.WARNING, 'Connection ' + ' has pending commands, closing it.'), ] -@pytest.mark.run_loop -async def test_release_bad_connection(create_pool, create_redis, loop, server): - pool = await create_pool( - server.tcp_address, - loop=loop) +async def test_release_bad_connection(create_pool, create_redis, server): + pool = await create_pool(server.tcp_address) conn = await pool.acquire() assert conn.address[0] in ('127.0.0.1', '::1') assert conn.address[1] == server.tcp_address.port - other_conn = await create_redis( - server.tcp_address, - loop=loop) + other_conn = await create_redis(server.tcp_address) with pytest.raises(AssertionError): pool.release(other_conn) @@ -238,23 +214,16 @@ await other_conn.wait_closed() -@pytest.mark.run_loop -async def test_select_db(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - loop=loop) +async def test_select_db(create_pool, server): + pool = await create_pool(server.tcp_address) await pool.select(1) with (await pool) as conn: assert conn.db == 1 -@pytest.mark.run_loop -async def test_change_db(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=1, db=0, - loop=loop) +async def test_change_db(create_pool, server): + pool = await create_pool(server.tcp_address, minsize=1, db=0) assert pool.size == 1 assert pool.freesize == 1 @@ -276,12 +245,8 @@ assert pool.db == 1 -@pytest.mark.run_loop -async def test_change_db_errors(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - minsize=1, db=0, - loop=loop) +async def test_change_db_errors(create_pool, server): + pool = await create_pool(server.tcp_address, minsize=1, db=0) with pytest.raises(TypeError): await pool.select(None) @@ -304,25 +269,23 @@ @pytest.mark.xfail(reason="Need to refactor this test") -@pytest.mark.run_loop -async def test_select_and_create(create_pool, loop, server): +async def test_select_and_create(create_pool, server): # 
trying to model situation when select and acquire # called simultaneously # but acquire freezes on _wait_select and - # then continues with propper db + # then continues with proper db # TODO: refactor this test as there's no _wait_select any more. - with async_timeout.timeout(10, loop=loop): + with async_timeout.timeout(10): pool = await create_pool( server.tcp_address, minsize=1, db=0, - loop=loop) + ) db = 0 while True: db = (db + 1) & 1 _, conn = await asyncio.gather(pool.select(db), - pool.acquire(), - loop=loop) + pool.acquire()) assert pool.db == db pool.release(conn) if conn.db == db: @@ -330,11 +293,8 @@ # await asyncio.wait_for(test(), 3, loop=loop) -@pytest.mark.run_loop -async def test_response_decoding(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - encoding='utf-8', loop=loop) +async def test_response_decoding(create_pool, server): + pool = await create_pool(server.tcp_address, encoding='utf-8') assert pool.encoding == 'utf-8' with (await pool) as conn: @@ -344,11 +304,8 @@ assert res == 'value' -@pytest.mark.run_loop -async def test_hgetall_response_decoding(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - encoding='utf-8', loop=loop) +async def test_hgetall_response_decoding(create_pool, server): + pool = await create_pool(server.tcp_address, encoding='utf-8') assert pool.encoding == 'utf-8' with (await pool) as conn: @@ -360,11 +317,10 @@ assert res == ['foo', 'bar', 'baz', 'zap'] -@pytest.mark.run_loop -async def test_crappy_multiexec(create_pool, loop, server): - pool = await create_pool( - server.tcp_address, - encoding='utf-8', loop=loop, +async def test_crappy_multiexec(create_pool, server): + pool = await create_pool( + server.tcp_address, + encoding='utf-8', minsize=1, maxsize=1) with (await pool) as conn: @@ -377,11 +333,9 @@ assert value == 'def' -@pytest.mark.run_loop -async def test_pool_size_growth(create_pool, server, loop): - pool = await create_pool( - server.tcp_address, 
- loop=loop, +async def test_pool_size_growth(create_pool, server): + pool = await create_pool( + server.tcp_address, minsize=1, maxsize=1) done = set() @@ -391,7 +345,7 @@ with (await pool): assert pool.size <= pool.maxsize assert pool.freesize == 0 - await asyncio.sleep(0.2, loop=loop) + await asyncio.sleep(0.2) done.add(i) async def task2(): @@ -401,16 +355,14 @@ assert done == {0, 1} for _ in range(2): - tasks.append(asyncio.ensure_future(task1(_), loop=loop)) - tasks.append(asyncio.ensure_future(task2(), loop=loop)) - await asyncio.gather(*tasks, loop=loop) - - -@pytest.mark.run_loop -async def test_pool_with_closed_connections(create_pool, server, loop): - pool = await create_pool( - server.tcp_address, - loop=loop, + tasks.append(asyncio.ensure_future(task1(_))) + tasks.append(asyncio.ensure_future(task2())) + await asyncio.gather(*tasks) + + +async def test_pool_with_closed_connections(create_pool, server): + pool = await create_pool( + server.tcp_address, minsize=1, maxsize=2) assert 1 == pool.freesize conn1 = pool._pool[0] @@ -422,10 +374,8 @@ assert conn1 is not conn2 -@pytest.mark.run_loop -async def test_pool_close(create_pool, server, loop): - pool = await create_pool( - server.tcp_address, loop=loop) +async def test_pool_close(create_pool, server): + pool = await create_pool(server.tcp_address) assert pool.closed is False @@ -441,10 +391,8 @@ assert (await conn.execute('ping')) == b'PONG' -@pytest.mark.run_loop -async def test_pool_close__used(create_pool, server, loop): - pool = await create_pool( - server.tcp_address, loop=loop) +async def test_pool_close__used(create_pool, server): + pool = await create_pool(server.tcp_address) assert pool.closed is False @@ -457,32 +405,34 @@ await conn.execute('ping') -@pytest.mark.run_loop -@pytest.redis_version(2, 8, 0, reason="maxclients config setting") +@redis_version(2, 8, 0, reason="maxclients config setting") async def test_pool_check_closed_when_exception( - create_pool, create_redis, start_server, 
loop): + create_pool, create_redis, start_server, caplog): server = start_server('server-small') - redis = await create_redis(server.tcp_address, loop=loop) + redis = await create_redis(server.tcp_address) await redis.config_set('maxclients', 2) errors = (MaxClientsError, ConnectionClosedError, ConnectionError) - with pytest.logs('aioredis', 'DEBUG') as cm: + caplog.clear() + with caplog.at_level('DEBUG', 'aioredis'): with pytest.raises(errors): await create_pool(address=tuple(server.tcp_address), - minsize=3, loop=loop) - - assert len(cm.output) >= 3 - connect_msg = ( - "DEBUG:aioredis:Creating tcp connection" - " to ('localhost', {})".format(server.tcp_address.port)) - assert cm.output[:2] == [connect_msg, connect_msg] - assert cm.output[-1] == "DEBUG:aioredis:Closed 1 connection(s)" - - -@pytest.mark.run_loop -async def test_pool_get_connection(create_pool, server, loop): - pool = await create_pool(server.tcp_address, minsize=1, maxsize=2, - loop=loop) + minsize=3) + + assert len(caplog.record_tuples) >= 3 + connect_msg = "Creating tcp connection to ('localhost', {})".format( + server.tcp_address.port) + assert caplog.record_tuples[:2] == [ + ('aioredis', logging.DEBUG, connect_msg), + ('aioredis', logging.DEBUG, connect_msg), + ] + assert caplog.record_tuples[-1] == ( + 'aioredis', logging.DEBUG, 'Closed 1 connection(s)' + ) + + +async def test_pool_get_connection(create_pool, server): + pool = await create_pool(server.tcp_address, minsize=1, maxsize=2) res = await pool.execute("set", "key", "val") assert res == b'OK' @@ -499,10 +449,8 @@ assert res == b'value' -@pytest.mark.run_loop -async def test_pool_get_connection_with_pipelining(create_pool, server, loop): - pool = await create_pool(server.tcp_address, minsize=1, maxsize=2, - loop=loop) +async def test_pool_get_connection_with_pipelining(create_pool, server): + pool = await create_pool(server.tcp_address, minsize=1, maxsize=2) fut1 = pool.execute('set', 'key', 'val') fut2 = 
pool.execute_pubsub("subscribe", "channel:1") fut3 = pool.execute('getset', 'key', 'next') @@ -520,46 +468,59 @@ assert res == b'next' -@pytest.mark.run_loop -async def test_pool_idle_close(create_pool, start_server, loop): +@pytest.mark.skipif(sys.platform == "win32", reason="flaky on windows") +async def test_pool_idle_close(create_pool, start_server, caplog): server = start_server('idle') - conn = await create_pool(server.tcp_address, minsize=2, loop=loop) + conn = await create_pool(server.tcp_address, minsize=2) ok = await conn.execute("config", "set", "timeout", 1) assert ok == b'OK' - await asyncio.sleep(2, loop=loop) - + caplog.clear() + with caplog.at_level('DEBUG', 'aioredis'): + # wait for either disconnection logged or test timeout reached. + while len(caplog.record_tuples) < 2: + await asyncio.sleep(.5) + expected = [ + ('aioredis', logging.DEBUG, + 'Connection has been closed by server, response: None'), + ('aioredis', logging.DEBUG, + 'Connection has been closed by server, response: None'), + ] + if BPO_34638: + expected += [ + ('asyncio', logging.ERROR, + 'An open stream object is being garbage collected; ' + 'call "stream.close()" explicitly.'), + ('asyncio', logging.ERROR, + 'An open stream object is being garbage collected; ' + 'call "stream.close()" explicitly.')] + # The order in which logs are collected differs each time. + assert sorted(caplog.record_tuples) == sorted(expected) + + # On CI this test fails from time to time. 
+ # It is possible to pick 'unclosed' connection and send command, + # however on the same loop iteration it gets closed and exception is raised assert (await conn.execute('ping')) == b'PONG' -@pytest.mark.run_loop -async def test_await(create_pool, server, loop): - pool = await create_pool( - server.tcp_address, - minsize=10, loop=loop) +async def test_await(create_pool, server): + pool = await create_pool(server.tcp_address, minsize=10) with (await pool) as conn: msg = await conn.execute('echo', 'hello') assert msg == b'hello' -@pytest.mark.run_loop -async def test_async_with(create_pool, server, loop): - pool = await create_pool( - server.tcp_address, - minsize=10, loop=loop) +async def test_async_with(create_pool, server): + pool = await create_pool(server.tcp_address, minsize=10) async with pool.get() as conn: msg = await conn.execute('echo', 'hello') assert msg == b'hello' -@pytest.mark.run_loop -async def test_pool__drop_closed(create_pool, server, loop): - pool = await create_pool(server.tcp_address, - minsize=3, - maxsize=3, - loop=loop) +async def test_pool__drop_closed(create_pool, server): + pool = await create_pool(server.tcp_address, minsize=3, maxsize=3) assert pool.size == 3 assert pool.freesize == 3 assert not pool._pool[0].closed diff --git a/tests/pubsub_commands_test.py b/tests/pubsub_commands_test.py index d7fa18b..e8754e5 100644 --- a/tests/pubsub_commands_test.py +++ b/tests/pubsub_commands_test.py @@ -1,6 +1,8 @@ import asyncio import pytest import aioredis + +from _testutils import redis_version async def _reader(channel, output, waiter, conn): @@ -12,13 +14,11 @@ await output.put(msg) -@pytest.mark.run_loop async def test_publish(create_connection, redis, server, loop): - out = asyncio.Queue(loop=loop) + out = asyncio.Queue() fut = loop.create_future() - conn = await create_connection( - server.tcp_address, loop=loop) - sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop) + conn = await 
create_connection(server.tcp_address) + sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn)) await fut await redis.publish('chan:1', 'Hello') @@ -28,25 +28,22 @@ sub.cancel() -@pytest.mark.run_loop async def test_publish_json(create_connection, redis, server, loop): - out = asyncio.Queue(loop=loop) + out = asyncio.Queue() fut = loop.create_future() - conn = await create_connection( - server.tcp_address, loop=loop) - sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn), loop=loop) + conn = await create_connection(server.tcp_address) + sub = asyncio.ensure_future(_reader('chan:1', out, fut, conn)) await fut res = await redis.publish_json('chan:1', {"Hello": "world"}) - assert res == 1 # recievers + assert res == 1 # receivers msg = await out.get() assert msg == b'{"Hello": "world"}' sub.cancel() -@pytest.mark.run_loop async def test_subscribe(redis): res = await redis.subscribe('chan:1', 'chan:2') assert redis.in_pubsub == 2 @@ -66,9 +63,8 @@ @pytest.mark.parametrize('create_redis', [ pytest.param(aioredis.create_redis_pool, id='pool'), ]) -@pytest.mark.run_loop -async def test_subscribe_empty_pool(create_redis, server, loop, _closable): - redis = await create_redis(server.tcp_address, loop=loop) +async def test_subscribe_empty_pool(create_redis, server, _closable): + redis = await create_redis(server.tcp_address) _closable(redis) await redis.connection.clear() @@ -87,8 +83,7 @@ [b'unsubscribe', b'chan:2', 0]] -@pytest.mark.run_loop -async def test_psubscribe(redis, create_redis, server, loop): +async def test_psubscribe(redis, create_redis, server): sub = redis res = await sub.psubscribe('patt:*', 'chan:*') assert sub.in_pubsub == 2 @@ -97,8 +92,7 @@ pat2 = sub.patterns['chan:*'] assert res == [pat1, pat2] - pub = await create_redis( - server.tcp_address, loop=loop) + pub = await create_redis(server.tcp_address) await pub.publish_json('chan:123', {"Hello": "World"}) res = await pat2.get_json() assert res == (b'chan:123', {"Hello": "World"}) @@ 
-113,10 +107,9 @@ @pytest.mark.parametrize('create_redis', [ pytest.param(aioredis.create_redis_pool, id='pool'), ]) -@pytest.mark.run_loop -async def test_psubscribe_empty_pool(create_redis, server, loop, _closable): - sub = await create_redis(server.tcp_address, loop=loop) - pub = await create_redis(server.tcp_address, loop=loop) +async def test_psubscribe_empty_pool(create_redis, server, _closable): + sub = await create_redis(server.tcp_address) + pub = await create_redis(server.tcp_address) _closable(sub) _closable(pub) await sub.connection.clear() @@ -138,20 +131,17 @@ ] -@pytest.redis_version( +@redis_version( 2, 8, 0, reason='PUBSUB CHANNELS is available since redis>=2.8.0') -@pytest.mark.run_loop -async def test_pubsub_channels(create_redis, server, loop): - redis = await create_redis( - server.tcp_address, loop=loop) +async def test_pubsub_channels(create_redis, server): + redis = await create_redis(server.tcp_address) res = await redis.pubsub_channels() assert res == [] res = await redis.pubsub_channels('chan:*') assert res == [] - sub = await create_redis( - server.tcp_address, loop=loop) + sub = await create_redis(server.tcp_address) await sub.subscribe('chan:1') res = await redis.pubsub_channels() @@ -167,20 +157,17 @@ assert res == [] -@pytest.redis_version( +@redis_version( 2, 8, 0, reason='PUBSUB NUMSUB is available since redis>=2.8.0') -@pytest.mark.run_loop -async def test_pubsub_numsub(create_redis, server, loop): - redis = await create_redis( - server.tcp_address, loop=loop) +async def test_pubsub_numsub(create_redis, server): + redis = await create_redis(server.tcp_address) res = await redis.pubsub_numsub() assert res == {} res = await redis.pubsub_numsub('chan:1') assert res == {b'chan:1': 0} - sub = await create_redis( - server.tcp_address, loop=loop) + sub = await create_redis(server.tcp_address) await sub.subscribe('chan:1') res = await redis.pubsub_numsub() @@ -202,12 +189,10 @@ assert res == {} -@pytest.redis_version( +@redis_version( 2, 
8, 0, reason='PUBSUB NUMPAT is available since redis>=2.8.0') -@pytest.mark.run_loop -async def test_pubsub_numpat(create_redis, server, loop, redis): - sub = await create_redis( - server.tcp_address, loop=loop) +async def test_pubsub_numpat(create_redis, server, redis): + sub = await create_redis(server.tcp_address) res = await redis.pubsub_numpat() assert res == 0 @@ -221,51 +206,45 @@ assert res == 1 -@pytest.mark.run_loop -async def test_close_pubsub_channels(redis, loop): +async def test_close_pubsub_channels(redis): ch, = await redis.subscribe('chan:1') async def waiter(ch): assert not await ch.wait_message() - tsk = asyncio.ensure_future(waiter(ch), loop=loop) + tsk = asyncio.ensure_future(waiter(ch)) redis.close() await redis.wait_closed() await tsk -@pytest.mark.run_loop -async def test_close_pubsub_patterns(redis, loop): +async def test_close_pubsub_patterns(redis): ch, = await redis.psubscribe('chan:*') async def waiter(ch): assert not await ch.wait_message() - tsk = asyncio.ensure_future(waiter(ch), loop=loop) + tsk = asyncio.ensure_future(waiter(ch)) redis.close() await redis.wait_closed() await tsk -@pytest.mark.run_loop -async def test_close_cancelled_pubsub_channel(redis, loop): +async def test_close_cancelled_pubsub_channel(redis): ch, = await redis.subscribe('chan:1') async def waiter(ch): with pytest.raises(asyncio.CancelledError): await ch.wait_message() - tsk = asyncio.ensure_future(waiter(ch), loop=loop) - await asyncio.sleep(0, loop=loop) + tsk = asyncio.ensure_future(waiter(ch)) + await asyncio.sleep(0) tsk.cancel() -@pytest.mark.run_loop async def test_channel_get_after_close(create_redis, loop, server): - sub = await create_redis( - server.tcp_address, loop=loop) - pub = await create_redis( - server.tcp_address, loop=loop) + sub = await create_redis(server.tcp_address) + pub = await create_redis(server.tcp_address) ch, = await sub.subscribe('chan:1') await pub.publish('chan:1', 'message') @@ -276,25 +255,22 @@ assert await ch.get() 
-@pytest.mark.run_loop -async def test_subscribe_concurrency(create_redis, server, loop): - sub = await create_redis( - server.tcp_address, loop=loop) - pub = await create_redis( - server.tcp_address, loop=loop) +async def test_subscribe_concurrency(create_redis, server): + sub = await create_redis(server.tcp_address) + pub = await create_redis(server.tcp_address) async def subscribe(*args): return await sub.subscribe(*args) async def publish(*args): - await asyncio.sleep(0, loop=loop) + await asyncio.sleep(0) return await pub.publish(*args) res = await asyncio.gather( subscribe('channel:0'), publish('channel:0', 'Hello'), subscribe('channel:1'), - loop=loop) + ) (ch1,), subs, (ch2,) = res assert ch1.name == b'channel:0' @@ -302,9 +278,8 @@ assert ch2.name == b'channel:1' -@pytest.redis_version( +@redis_version( 3, 2, 0, reason='PUBSUB PING is available since redis>=3.2.0') -@pytest.mark.run_loop async def test_pubsub_ping(redis): await redis.subscribe('chan:1', 'chan:2') @@ -318,10 +293,9 @@ await redis.unsubscribe('chan:1', 'chan:2') -@pytest.mark.run_loop -async def test_pubsub_channel_iter(create_redis, server, loop): - sub = await create_redis(server.tcp_address, loop=loop) - pub = await create_redis(server.tcp_address, loop=loop) +async def test_pubsub_channel_iter(create_redis, server): + sub = await create_redis(server.tcp_address) + pub = await create_redis(server.tcp_address) ch, = await sub.subscribe('chan:1') @@ -331,9 +305,33 @@ lst.append(msg) return lst - tsk = asyncio.ensure_future(coro(ch), loop=loop) + tsk = asyncio.ensure_future(coro(ch)) await pub.publish_json('chan:1', {'Hello': 'World'}) await pub.publish_json('chan:1', ['message']) - await asyncio.sleep(0, loop=loop) + await asyncio.sleep(0.1) ch.close() assert await tsk == [b'{"Hello": "World"}', b'["message"]'] + + +@redis_version( + 2, 8, 12, reason="extended `client kill` format required") +async def test_pubsub_disconnection_notification(create_redis, server): + sub = await 
create_redis(server.tcp_address) + pub = await create_redis(server.tcp_address) + + async def coro(ch): + lst = [] + async for msg in ch.iter(): + assert ch.is_active + lst.append(msg) + return lst + + ch, = await sub.subscribe('chan:1') + tsk = asyncio.ensure_future(coro(ch)) + assert ch.is_active + await pub.publish_json('chan:1', {'Hello': 'World'}) + assert ch.is_active + assert await pub.execute('client', 'kill', 'type', 'pubsub') >= 1 + assert await pub.publish_json('chan:1', ['message']) == 0 + assert await tsk == [b'{"Hello": "World"}'] + assert not ch.is_active diff --git a/tests/pubsub_receiver_test.py b/tests/pubsub_receiver_test.py index 634947b..48aa69d 100644 --- a/tests/pubsub_receiver_test.py +++ b/tests/pubsub_receiver_test.py @@ -2,6 +2,7 @@ import asyncio import json import sys +import logging from unittest import mock @@ -10,8 +11,8 @@ from aioredis.pubsub import Receiver, _Sender -def test_listener_channel(loop): - mpsc = Receiver(loop=loop) +def test_listener_channel(): + mpsc = Receiver() assert not mpsc.is_active ch_a = mpsc.channel("channel:1") @@ -36,8 +37,8 @@ assert dict(mpsc.patterns) == {} -def test_listener_pattern(loop): - mpsc = Receiver(loop=loop) +def test_listener_pattern(): + mpsc = Receiver() assert not mpsc.is_active ch_a = mpsc.pattern("*") @@ -62,8 +63,7 @@ assert dict(mpsc.patterns) == {b'*': ch} -@pytest.mark.run_loop -async def test_sender(loop): +async def test_sender(): receiver = mock.Mock() sender = _Sender(receiver, 'name', is_pattern=False) @@ -95,12 +95,11 @@ assert receiver.mock_calls == [] -@pytest.mark.run_loop -async def test_subscriptions(create_connection, server, loop): - sub = await create_connection(server.tcp_address, loop=loop) - pub = await create_connection(server.tcp_address, loop=loop) - - mpsc = Receiver(loop=loop) +async def test_subscriptions(create_connection, server): + sub = await create_connection(server.tcp_address) + pub = await create_connection(server.tcp_address) + + mpsc = Receiver() 
await sub.execute_pubsub('subscribe', mpsc.channel('channel:1'), mpsc.channel('channel:3')) @@ -121,12 +120,11 @@ assert msg == b"Hello world" -@pytest.mark.run_loop -async def test_unsubscribe(create_connection, server, loop): - sub = await create_connection(server.tcp_address, loop=loop) - pub = await create_connection(server.tcp_address, loop=loop) - - mpsc = Receiver(loop=loop) +async def test_unsubscribe(create_connection, server): + sub = await create_connection(server.tcp_address) + pub = await create_connection(server.tcp_address) + + mpsc = Receiver() await sub.execute_pubsub('subscribe', mpsc.channel('channel:1'), mpsc.channel('channel:3')) @@ -159,34 +157,36 @@ assert not ch.is_pattern assert msg == b"message" - waiter = asyncio.ensure_future(mpsc.get(), loop=loop) + waiter = asyncio.ensure_future(mpsc.get()) await sub.execute_pubsub('unsubscribe', 'channel:3') assert not mpsc.is_active assert await waiter is None -@pytest.mark.run_loop -async def test_stopped(create_connection, server, loop): - sub = await create_connection(server.tcp_address, loop=loop) - pub = await create_connection(server.tcp_address, loop=loop) - - mpsc = Receiver(loop=loop) +async def test_stopped(create_connection, server, caplog): + sub = await create_connection(server.tcp_address) + pub = await create_connection(server.tcp_address) + + mpsc = Receiver() await sub.execute_pubsub('subscribe', mpsc.channel('channel:1')) assert mpsc.is_active mpsc.stop() - with pytest.logs('aioredis', 'DEBUG') as cm: + caplog.clear() + with caplog.at_level('DEBUG', 'aioredis'): await pub.execute('publish', 'channel:1', b'Hello') - await asyncio.sleep(0, loop=loop) - - assert len(cm.output) == 1 + await asyncio.sleep(0) + + assert len(caplog.record_tuples) == 1 # Receiver must have 1 EndOfStream message - warn_messaege = ( - "WARNING:aioredis:Pub/Sub listener message after stop: " + message = ( + "Pub/Sub listener message after stop: " "sender: <_Sender name:b'channel:1', is_pattern:False, 
receiver:" ">, data: b'Hello'" ) - assert cm.output == [warn_messaege] + assert caplog.record_tuples == [ + ('aioredis', logging.WARNING, message), + ] # assert (await mpsc.get()) is None with pytest.raises(ChannelClosedError): @@ -195,29 +195,27 @@ assert res is False -@pytest.mark.run_loop -async def test_wait_message(create_connection, server, loop): - sub = await create_connection(server.tcp_address, loop=loop) - pub = await create_connection(server.tcp_address, loop=loop) - - mpsc = Receiver(loop=loop) +async def test_wait_message(create_connection, server): + sub = await create_connection(server.tcp_address) + pub = await create_connection(server.tcp_address) + + mpsc = Receiver() await sub.execute_pubsub('subscribe', mpsc.channel('channel:1')) - fut = asyncio.ensure_future(mpsc.wait_message(), loop=loop) + fut = asyncio.ensure_future(mpsc.wait_message()) assert not fut.done() - await asyncio.sleep(0, loop=loop) + await asyncio.sleep(0) assert not fut.done() await pub.execute('publish', 'channel:1', 'hello') - await asyncio.sleep(0, loop=loop) # read in connection - await asyncio.sleep(0, loop=loop) # call Future.set_result + await asyncio.sleep(0) # read in connection + await asyncio.sleep(0) # call Future.set_result assert fut.done() res = await fut assert res is True -@pytest.mark.run_loop -async def test_decode_message(loop): - mpsc = Receiver(loop) +async def test_decode_message(): + mpsc = Receiver() ch = mpsc.channel('channel:1') ch.put_nowait(b'Some data') @@ -238,9 +236,8 @@ @pytest.mark.skipif(sys.version_info >= (3, 6), reason="json.loads accept bytes since Python 3.6") -@pytest.mark.run_loop -async def test_decode_message_error(loop): - mpsc = Receiver(loop) +async def test_decode_message_error(): + mpsc = Receiver() ch = mpsc.channel('channel:1') ch.put_nowait(b'{"hello": "world"}') @@ -255,9 +252,8 @@ assert (await mpsc.get(decoder=json.loads)) == unexpected -@pytest.mark.run_loop -async def test_decode_message_for_pattern(loop): - mpsc = 
Receiver(loop) +async def test_decode_message_for_pattern(): + mpsc = Receiver() ch = mpsc.pattern('*') ch.put_nowait((b'channel', b'Some data')) @@ -276,12 +272,11 @@ assert res[1] == (b'channel', {'hello': 'world'}) -@pytest.mark.run_loop async def test_pubsub_receiver_iter(create_redis, server, loop): - sub = await create_redis(server.tcp_address, loop=loop) - pub = await create_redis(server.tcp_address, loop=loop) - - mpsc = Receiver(loop=loop) + sub = await create_redis(server.tcp_address) + pub = await create_redis(server.tcp_address) + + mpsc = Receiver() async def coro(mpsc): lst = [] @@ -289,7 +284,7 @@ lst.append(msg) return lst - tsk = asyncio.ensure_future(coro(mpsc), loop=loop) + tsk = asyncio.ensure_future(coro(mpsc)) snd1, = await sub.subscribe(mpsc.channel('chan:1')) snd2, = await sub.subscribe(mpsc.channel('chan:2')) snd3, = await sub.psubscribe(mpsc.pattern('chan:*')) @@ -299,7 +294,7 @@ subscribers = await pub.publish_json('chan:2', ['message']) assert subscribers > 1 loop.call_later(0, mpsc.stop) - # await asyncio.sleep(0, loop=loop) + await asyncio.sleep(0.01) assert await tsk == [ (snd1, b'{"Hello": "World"}'), (snd3, (b'chan:1', b'{"Hello": "World"}')), @@ -309,12 +304,12 @@ assert not mpsc.is_active -@pytest.mark.run_loop(timeout=5) +@pytest.mark.timeout(5) async def test_pubsub_receiver_call_stop_with_empty_queue( create_redis, server, loop): - sub = await create_redis(server.tcp_address, loop=loop) - - mpsc = Receiver(loop=loop) + sub = await create_redis(server.tcp_address) + + mpsc = Receiver() # FIXME: currently at least one subscriber is needed snd1, = await sub.subscribe(mpsc.channel('chan:1')) @@ -328,10 +323,9 @@ assert not mpsc.is_active -@pytest.mark.run_loop -async def test_pubsub_receiver_stop_on_disconnect(create_redis, server, loop): - pub = await create_redis(server.tcp_address, loop=loop) - sub = await create_redis(server.tcp_address, loop=loop) +async def test_pubsub_receiver_stop_on_disconnect(create_redis, server): + pub 
= await create_redis(server.tcp_address) + sub = await create_redis(server.tcp_address) sub_name = 'sub-{:X}'.format(id(sub)) await sub.client_setname(sub_name) for sub_info in await pub.client_list(): @@ -339,12 +333,12 @@ break assert sub_info.name == sub_name - mpsc = Receiver(loop=loop) + mpsc = Receiver() await sub.subscribe(mpsc.channel('channel:1')) await sub.subscribe(mpsc.channel('channel:2')) await sub.psubscribe(mpsc.pattern('channel:*')) - q = asyncio.Queue(loop=loop) + q = asyncio.Queue() EOF = object() async def reader(): @@ -352,7 +346,7 @@ await q.put((ch.name, msg)) await q.put(EOF) - tsk = asyncio.ensure_future(reader(), loop=loop) + tsk = asyncio.ensure_future(reader()) await pub.publish_json('channel:1', ['hello']) await pub.publish_json('channel:2', ['hello']) # receive all messages @@ -363,5 +357,5 @@ # XXX: need to implement `client kill` assert await pub.execute('client', 'kill', sub_info.addr) in (b'OK', 1) - await asyncio.wait_for(tsk, timeout=1, loop=loop) + await asyncio.wait_for(tsk, timeout=1) assert await q.get() is EOF diff --git a/tests/scripting_commands_test.py b/tests/scripting_commands_test.py index ea88e35..79251bb 100644 --- a/tests/scripting_commands_test.py +++ b/tests/scripting_commands_test.py @@ -4,7 +4,6 @@ from aioredis import ReplyError -@pytest.mark.run_loop async def test_eval(redis): await redis.delete('key:eval', 'value:eval') @@ -38,7 +37,6 @@ await redis.eval(None) -@pytest.mark.run_loop async def test_evalsha(redis): script = b"return 42" sha_hash = await redis.script_load(script) @@ -62,7 +60,6 @@ await redis.evalsha(None) -@pytest.mark.run_loop async def test_script_exists(redis): sha_hash1 = await redis.script_load(b'return 1') sha_hash2 = await redis.script_load(b'return 2') @@ -82,7 +79,6 @@ await redis.script_exists('123', None) -@pytest.mark.run_loop async def test_script_flush(redis): sha_hash1 = await redis.script_load(b'return 1') assert len(sha_hash1) == 40 @@ -94,7 +90,6 @@ assert res == [0] 
-@pytest.mark.run_loop async def test_script_load(redis): sha_hash1 = await redis.script_load(b'return 1') sha_hash2 = await redis.script_load(b'return 2') @@ -104,18 +99,16 @@ assert res == [1, 1] -@pytest.mark.run_loop -async def test_script_kill(create_redis, loop, server, redis): +async def test_script_kill(create_redis, server, redis): script = "while (1) do redis.call('TIME') end" - other_redis = await create_redis( - server.tcp_address, loop=loop) + other_redis = await create_redis(server.tcp_address) ok = await redis.set('key1', 'value') assert ok is True fut = other_redis.eval(script, keys=['non-existent-key'], args=[10]) - await asyncio.sleep(0.1, loop=loop) + await asyncio.sleep(0.1) resp = await redis.script_kill() assert resp is True diff --git a/tests/sentinel_commands_test.py b/tests/sentinel_commands_test.py index be386cc..bf7e0ac 100644 --- a/tests/sentinel_commands_test.py +++ b/tests/sentinel_commands_test.py @@ -1,20 +1,21 @@ import asyncio import pytest import sys +import logging from aioredis import RedisError, ReplyError, PoolClosedError from aioredis.errors import MasterReplyError from aioredis.sentinel.commands import RedisSentinel from aioredis.abc import AbcPool - -pytestmark = pytest.redis_version(2, 8, 12, reason="Sentinel v2 required") +from _testutils import redis_version + +pytestmark = redis_version(2, 8, 12, reason="Sentinel v2 required") if sys.platform == 'win32': pytestmark = pytest.mark.skip(reason="unstable on windows") BPO_30399 = sys.version_info >= (3, 7, 0, 'alpha', 3) -@pytest.mark.run_loop async def test_client_close(redis_sentinel): assert isinstance(redis_sentinel, RedisSentinel) assert not redis_sentinel.closed @@ -27,24 +28,10 @@ await redis_sentinel.wait_closed() -@pytest.mark.run_loop -async def test_global_loop(sentinel, create_sentinel, loop): - asyncio.set_event_loop(loop) - - # force global loop - client = await create_sentinel([sentinel.tcp_address], - timeout=1, loop=None) - assert client._pool._loop is loop 
- - asyncio.set_event_loop(None) - - -@pytest.mark.run_loop async def test_ping(redis_sentinel): assert b'PONG' == (await redis_sentinel.ping()) -@pytest.mark.run_loop async def test_master_info(redis_sentinel, sentinel): info = await redis_sentinel.master('master-no-fail') assert isinstance(info, dict) @@ -82,21 +69,18 @@ assert 'link-refcount' in info -@pytest.mark.run_loop -async def test_master__auth(create_sentinel, start_sentinel, - start_server, loop): +async def test_master__auth(create_sentinel, start_sentinel, start_server): master = start_server('master_1', password='123') start_server('slave_1', slaveof=master, password='123') sentinel = start_sentinel('auth_sentinel_1', master) client1 = await create_sentinel( - [sentinel.tcp_address], password='123', timeout=1, loop=loop) + [sentinel.tcp_address], password='123', timeout=1) client2 = await create_sentinel( - [sentinel.tcp_address], password='111', timeout=1, loop=loop) - - client3 = await create_sentinel( - [sentinel.tcp_address], timeout=1, loop=loop) + [sentinel.tcp_address], password='111', timeout=1) + + client3 = await create_sentinel([sentinel.tcp_address], timeout=1) m1 = client1.master_for(master.name) await m1.set('mykey', 'myval') @@ -117,35 +101,30 @@ await m3.set('mykey', 'myval') -@pytest.mark.run_loop -async def test_master__no_auth(create_sentinel, sentinel, loop): +async def test_master__no_auth(create_sentinel, sentinel): client = await create_sentinel( - [sentinel.tcp_address], password='123', timeout=1, loop=loop) + [sentinel.tcp_address], password='123', timeout=1) master = client.master_for('masterA') with pytest.raises(MasterReplyError): await master.set('mykey', 'myval') -@pytest.mark.run_loop async def test_master__unknown(redis_sentinel): with pytest.raises(ReplyError): await redis_sentinel.master('unknown-master') -@pytest.mark.run_loop async def test_master_address(redis_sentinel, sentinel): _, port = await redis_sentinel.master_address('master-no-fail') assert port == 
sentinel.masters['master-no-fail'].tcp_address.port -@pytest.mark.run_loop async def test_master_address__unknown(redis_sentinel): res = await redis_sentinel.master_address('unknown-master') assert res is None -@pytest.mark.run_loop async def test_masters(redis_sentinel): masters = await redis_sentinel.masters() assert isinstance(masters, dict) @@ -154,7 +133,6 @@ assert isinstance(masters['master-no-fail'], dict) -@pytest.mark.run_loop async def test_slave_info(sentinel, redis_sentinel): info = await redis_sentinel.slaves('master-no-fail') assert len(info) == 1 @@ -196,13 +174,11 @@ assert not missing -@pytest.mark.run_loop async def test_slave__unknown(redis_sentinel): with pytest.raises(ReplyError): await redis_sentinel.slaves('unknown-master') -@pytest.mark.run_loop async def test_sentinels_empty(redis_sentinel): res = await redis_sentinel.sentinels('master-no-fail') assert res == [] @@ -211,9 +187,9 @@ await redis_sentinel.sentinels('unknown-master') -@pytest.mark.run_loop(timeout=30) +@pytest.mark.timeout(30) async def test_sentinels__exist(create_sentinel, start_sentinel, - start_server, loop): + start_server): m1 = start_server('master-two-sentinels') s1 = start_sentinel('peer-sentinel-1', m1, quorum=2, noslaves=True) s2 = start_sentinel('peer-sentinel-2', m1, quorum=2, noslaves=True) @@ -226,14 +202,13 @@ info = await redis_sentinel.master('master-two-sentinels') if info['num-other-sentinels'] > 0: break - await asyncio.sleep(.2, loop=loop) + await asyncio.sleep(.2) info = await redis_sentinel.sentinels('master-two-sentinels') assert len(info) == 1 assert 'sentinel' in info[0]['flags'] assert info[0]['port'] in (s1.tcp_address.port, s2.tcp_address.port) -@pytest.mark.run_loop async def test_ckquorum(redis_sentinel): assert (await redis_sentinel.check_quorum('master-no-fail')) @@ -248,7 +223,6 @@ assert (await redis_sentinel.check_quorum('master-no-fail')) -@pytest.mark.run_loop async def test_set_option(redis_sentinel): assert (await 
redis_sentinel.set('master-no-fail', 'quorum', 10)) master = await redis_sentinel.master('master-no-fail') @@ -262,17 +236,16 @@ await redis_sentinel.set('masterA', 'foo', 'bar') -@pytest.mark.run_loop -async def test_sentinel_role(sentinel, create_redis, loop): - redis = await create_redis(sentinel.tcp_address, loop=loop) +async def test_sentinel_role(sentinel, create_redis): + redis = await create_redis(sentinel.tcp_address) info = await redis.role() assert info.role == 'sentinel' assert isinstance(info.masters, list) assert 'master-no-fail' in info.masters -@pytest.mark.run_loop(timeout=30) -async def test_remove(redis_sentinel, start_server, loop): +@pytest.mark.timeout(30) +async def test_remove(redis_sentinel, start_server): m1 = start_server('master-to-remove') ok = await redis_sentinel.monitor( m1.name, '127.0.0.1', m1.tcp_address.port, 1) @@ -285,8 +258,8 @@ await redis_sentinel.remove('unknown-master') -@pytest.mark.run_loop(timeout=30) -async def test_monitor(redis_sentinel, start_server, loop, unused_port): +@pytest.mark.timeout(30) +async def test_monitor(redis_sentinel, start_server, unused_port): m1 = start_server('master-to-monitor') ok = await redis_sentinel.monitor( m1.name, '127.0.0.1', m1.tcp_address.port, 1) @@ -296,20 +269,23 @@ assert port == m1.tcp_address.port -@pytest.mark.run_loop(timeout=5) -async def test_sentinel_master_pool_size(sentinel, create_sentinel): +@pytest.mark.timeout(5) +async def test_sentinel_master_pool_size(sentinel, create_sentinel, caplog): redis_s = await create_sentinel([sentinel.tcp_address], timeout=1, minsize=10, maxsize=10) master = redis_s.master_for('master-no-fail') assert isinstance(master.connection, AbcPool) assert master.connection.size == 0 - with pytest.logs('aioredis.sentinel', 'DEBUG') as cm: + caplog.clear() + with caplog.at_level('DEBUG', 'aioredis.sentinel'): assert await master.ping() - assert len(cm.output) == 1 - assert cm.output == [ - "DEBUG:aioredis.sentinel:Discoverred new address {}" - " 
for master-no-fail".format(master.address), + assert len(caplog.record_tuples) == 1 + assert caplog.record_tuples == [ + ('aioredis.sentinel', logging.DEBUG, + "Discoverred new address {} for master-no-fail".format( + master.address) + ), ] assert master.connection.size == 10 assert master.connection.freesize == 10 diff --git a/tests/sentinel_failover_test.py b/tests/sentinel_failover_test.py index 4ead538..c4c516f 100644 --- a/tests/sentinel_failover_test.py +++ b/tests/sentinel_failover_test.py @@ -6,23 +6,29 @@ SlaveNotFoundError, ReadOnlyError, ) - - -pytestmark = pytest.redis_version(2, 8, 12, reason="Sentinel v2 required") +from _testutils import redis_version + + +pytestmark = redis_version(2, 8, 12, reason="Sentinel v2 required") if sys.platform == 'win32': pytestmark = pytest.mark.skip(reason="unstable on windows") -@pytest.mark.xfail -@pytest.mark.run_loop(timeout=40) +@pytest.mark.timeout(40) async def test_auto_failover(start_sentinel, start_server, - create_sentinel, create_connection, loop): + create_sentinel, create_connection): server1 = start_server('master-failover', ['slave-read-only yes']) start_server('slave-failover1', ['slave-read-only yes'], slaveof=server1) start_server('slave-failover2', ['slave-read-only yes'], slaveof=server1) - sentinel1 = start_sentinel('sentinel-failover1', server1, quorum=2) - sentinel2 = start_sentinel('sentinel-failover2', server1, quorum=2) + sentinel1 = start_sentinel('sentinel-failover1', server1, quorum=2, + down_after_milliseconds=300, + failover_timeout=1000) + sentinel2 = start_sentinel('sentinel-failover2', server1, quorum=2, + down_after_milliseconds=300, + failover_timeout=1000) + # Wait a bit for sentinels to sync + await asyncio.sleep(3) sp = await create_sentinel([sentinel1.tcp_address, sentinel2.tcp_address], @@ -39,8 +45,7 @@ # wait failover conn = await create_connection(server1.tcp_address) - await conn.execute("debug", "sleep", 6) - await asyncio.sleep(3, loop=loop) + await conn.execute("debug", 
"sleep", 2) # _, new_port = await sp.master_address(server1.name) # assert new_port != old_port @@ -50,7 +55,6 @@ assert master.address[1] != old_port -@pytest.mark.run_loop async def test_sentinel_normal(sentinel, create_sentinel): redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) redis = redis_sentinel.master_for('masterA') @@ -71,7 +75,6 @@ @pytest.mark.xfail(reason="same sentinel; single master;") -@pytest.mark.run_loop async def test_sentinel_slave(sentinel, create_sentinel): redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) redis = redis_sentinel.slave_for('masterA') @@ -91,8 +94,7 @@ @pytest.mark.xfail(reason="Need proper sentinel configuration") -@pytest.mark.run_loop # (timeout=600) -async def test_sentinel_slave_fail(sentinel, create_sentinel, loop): +async def test_sentinel_slave_fail(sentinel, create_sentinel): redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) key, field, value = b'key:hset', b'bar', b'zap' @@ -108,17 +110,17 @@ ret = await redis_sentinel.failover('masterA') assert ret is True - await asyncio.sleep(2, loop=loop) + await asyncio.sleep(2) with pytest.raises(ReadOnlyError): await redis.hset(key, field, value) ret = await redis_sentinel.failover('masterA') assert ret is True - await asyncio.sleep(2, loop=loop) + await asyncio.sleep(2) while True: try: - await asyncio.sleep(1, loop=loop) + await asyncio.sleep(1) await redis.hset(key, field, value) except SlaveNotFoundError: continue @@ -127,8 +129,7 @@ @pytest.mark.xfail(reason="Need proper sentinel configuration") -@pytest.mark.run_loop -async def test_sentinel_normal_fail(sentinel, create_sentinel, loop): +async def test_sentinel_normal_fail(sentinel, create_sentinel): redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) key, field, value = b'key:hset', b'bar', b'zap' @@ -142,50 +143,60 @@ assert ret == 1 ret = await redis_sentinel.failover('masterA') assert ret is True - await asyncio.sleep(2, 
loop=loop) + await asyncio.sleep(2) ret = await redis.hset(key, field, value) assert ret == 0 ret = await redis_sentinel.failover('masterA') assert ret is True - await asyncio.sleep(2, loop=loop) + await asyncio.sleep(2) redis = redis_sentinel.slave_for('masterA') while True: try: await redis.hset(key, field, value) - await asyncio.sleep(1, loop=loop) + await asyncio.sleep(1) # redis = await get_slave_connection() except ReadOnlyError: break -@pytest.mark.xfail(reason="same sentinel; single master;") -@pytest.mark.run_loop -async def test_failover_command(sentinel, create_sentinel, loop): - master_name = 'masterA' - redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) - - orig_master = await redis_sentinel.master_address(master_name) - ret = await redis_sentinel.failover(master_name) - assert ret is True - await asyncio.sleep(2, loop=loop) - - new_master = await redis_sentinel.master_address(master_name) +@pytest.mark.timeout(30) +async def test_failover_command(start_server, start_sentinel, + create_sentinel): + server = start_server('master-failover-cmd', ['slave-read-only yes']) + start_server('slave-failover-cmd', ['slave-read-only yes'], slaveof=server) + + sentinel = start_sentinel('sentinel-failover-cmd', server, quorum=1, + down_after_milliseconds=300, + failover_timeout=1000) + + name = 'master-failover-cmd' + redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=1) + # Wait a bit for sentinels to sync + await asyncio.sleep(3) + + orig_master = await redis_sentinel.master_address(name) + assert await redis_sentinel.failover(name) is True + await asyncio.sleep(2) + + new_master = await redis_sentinel.master_address(name) assert orig_master != new_master - ret = await redis_sentinel.failover(master_name) - assert ret is True - await asyncio.sleep(2, loop=loop) - - new_master = await redis_sentinel.master_address(master_name) + ret = await redis_sentinel.failover(name) + assert ret is True + await asyncio.sleep(2) + + 
new_master = await redis_sentinel.master_address(name) assert orig_master == new_master - redis = redis_sentinel.slave_for(master_name) - key, field, value = b'key:hset', b'bar', b'zap' - while True: - try: - await asyncio.sleep(1, loop=loop) - await redis.hset(key, field, value) - except SlaveNotFoundError: - pass - except ReadOnlyError: - break + # This part takes almost 10 seconds (waiting for '+convert-to-slave'). + # Disabled for time being. + + # redis = redis_sentinel.slave_for(name) + # while True: + # try: + # await asyncio.sleep(.2) + # await redis.set('foo', 'bar') + # except SlaveNotFoundError: + # pass + # except ReadOnlyError: + # break diff --git a/tests/server_commands_test.py b/tests/server_commands_test.py index 2ae1b95..fb4b80f 100644 --- a/tests/server_commands_test.py +++ b/tests/server_commands_test.py @@ -5,9 +5,9 @@ from unittest import mock from aioredis import ReplyError - - -@pytest.mark.run_loop +from _testutils import redis_version + + async def test_client_list(redis, server, request): name = request.node.callspec.id assert (await redis.client_setname(name)) @@ -40,11 +40,10 @@ assert expected in res -@pytest.mark.run_loop @pytest.mark.skipif(sys.platform == 'win32', reason="No unixsocket on Windows") -async def test_client_list__unixsocket(create_redis, loop, server, request): - redis = await create_redis(server.unixsocket, loop=loop) +async def test_client_list__unixsocket(create_redis, server, request): + redis = await create_redis(server.unixsocket) name = request.node.callspec.id assert (await redis.client_setname(name)) res = await redis.client_list() @@ -75,15 +74,16 @@ assert expected in info -@pytest.mark.run_loop -@pytest.redis_version( +@redis_version( 2, 9, 50, reason='CLIENT PAUSE is available since redis >= 2.9.50') async def test_client_pause(redis): - ts = time.time() - res = await redis.client_pause(2000) - assert res is True - await redis.ping() - assert int(time.time() - ts) >= 2 + tr = redis.pipeline() + tr.time() + 
tr.client_pause(100) + tr.time() + t1, ok, t2 = await tr.execute() + assert ok + assert t2 - t1 >= .1 with pytest.raises(TypeError): await redis.client_pause(2.0) @@ -91,7 +91,6 @@ await redis.client_pause(-1) -@pytest.mark.run_loop async def test_client_getname(redis): res = await redis.client_getname() assert res is None @@ -104,23 +103,20 @@ assert res == 'TestClient' -@pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") -@pytest.mark.run_loop +@redis_version(2, 8, 13, reason="available since Redis 2.8.13") async def test_command(redis): res = await redis.command() assert isinstance(res, list) assert len(res) > 0 -@pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") -@pytest.mark.run_loop +@redis_version(2, 8, 13, reason="available since Redis 2.8.13") async def test_command_count(redis): res = await redis.command_count() assert res > 0 -@pytest.redis_version(3, 0, 0, reason="available since Redis 3.0.0") -@pytest.mark.run_loop +@redis_version(3, 0, 0, reason="available since Redis 3.0.0") async def test_command_getkeys(redis): res = await redis.command_getkeys('get', 'key') assert res == ['key'] @@ -137,8 +133,7 @@ assert not (await redis.command_getkeys(None)) -@pytest.redis_version(2, 8, 13, reason="available since Redis 2.8.13") -@pytest.mark.run_loop +@redis_version(2, 8, 13, reason="available since Redis 2.8.13") async def test_command_info(redis): res = await redis.command_info('get') assert res == [ @@ -151,7 +146,6 @@ assert res == [None, None] -@pytest.mark.run_loop async def test_config_get(redis, server): res = await redis.config_get('port') assert res == {'port': str(server.tcp_address.port)} @@ -166,13 +160,11 @@ await redis.config_get(b'port') -@pytest.mark.run_loop async def test_config_rewrite(redis): with pytest.raises(ReplyError): await redis.config_rewrite() -@pytest.mark.run_loop async def test_config_set(redis): cur_value = await redis.config_get('slave-read-only') res = await 
redis.config_set('slave-read-only', 'no') @@ -187,12 +179,10 @@ await redis.config_set(100, 'databases') -# @pytest.mark.run_loop # @pytest.mark.skip("Not implemented") # def test_config_resetstat(): # pass -@pytest.mark.run_loop async def test_debug_object(redis): with pytest.raises(ReplyError): assert (await redis.debug_object('key')) is None @@ -203,16 +193,14 @@ assert res is not None -@pytest.mark.run_loop async def test_debug_sleep(redis): t1 = await redis.time() - ok = await redis.debug_sleep(2) + ok = await redis.debug_sleep(.2) assert ok t2 = await redis.time() - assert t2 - t1 >= 2 - - -@pytest.mark.run_loop + assert t2 - t1 >= .2 + + async def test_dbsize(redis): res = await redis.dbsize() assert res == 0 @@ -230,7 +218,6 @@ assert res == 1 -@pytest.mark.run_loop async def test_info(redis): res = await redis.info() assert isinstance(res, dict) @@ -242,14 +229,12 @@ await redis.info('') -@pytest.mark.run_loop async def test_lastsave(redis): res = await redis.lastsave() assert res > 0 -@pytest.mark.run_loop -@pytest.redis_version(2, 8, 12, reason='ROLE is available since redis>=2.8.12') +@redis_version(2, 8, 12, reason='ROLE is available since redis>=2.8.12') async def test_role(redis): res = await redis.role() assert dict(res._asdict()) == { @@ -259,7 +244,6 @@ } -@pytest.mark.run_loop async def test_save(redis): res = await redis.dbsize() assert res == 0 @@ -270,29 +254,23 @@ assert t2 >= t1 -@pytest.mark.run_loop -async def test_time(redis): +@pytest.mark.parametrize('encoding', [ + pytest.param(None, id='no decoding'), + pytest.param('utf-8', id='with decoding'), +]) +async def test_time(create_redis, server, encoding): + redis = await create_redis(server.tcp_address, encoding='utf-8') + now = time.time() res = await redis.time() assert isinstance(res, float) - pytest.assert_almost_equal(int(res), int(time.time()), delta=10) - - -@pytest.mark.run_loop -async def test_time_with_encoding(create_redis, server, loop): - redis = await 
create_redis(server.tcp_address, loop=loop, - encoding='utf-8') - res = await redis.time() - assert isinstance(res, float) - pytest.assert_almost_equal(int(res), int(time.time()), delta=10) - - -@pytest.mark.run_loop + assert res == pytest.approx(now, abs=10) + + async def test_slowlog_len(redis): res = await redis.slowlog_len() assert res >= 0 -@pytest.mark.run_loop async def test_slowlog_get(redis): res = await redis.slowlog_get() assert isinstance(res, list) @@ -308,7 +286,6 @@ assert not (await redis.slowlog_get('1')) -@pytest.mark.run_loop async def test_slowlog_reset(redis): ok = await redis.slowlog_reset() assert ok is True diff --git a/tests/set_commands_test.py b/tests/set_commands_test.py index d88a129..43fd110 100644 --- a/tests/set_commands_test.py +++ b/tests/set_commands_test.py @@ -1,4 +1,7 @@ import pytest + +from aioredis import ReplyError +from _testutils import redis_version async def add(redis, key, members): @@ -6,7 +9,6 @@ assert ok == 1 -@pytest.mark.run_loop async def test_sadd(redis): key, member = b'key:sadd', b'hello' # add member to the set, expected result: 1 @@ -25,7 +27,6 @@ await redis.sadd(None, 10) -@pytest.mark.run_loop async def test_scard(redis): key, member = b'key:scard', b'hello' @@ -44,7 +45,6 @@ await redis.scard(None) -@pytest.mark.run_loop async def test_sdiff(redis): key1 = b'key:sdiff:1' key2 = b'key:sdiff:2' @@ -72,7 +72,6 @@ await redis.sdiff(key1, None) -@pytest.mark.run_loop async def test_sdiffstore(redis): key1 = b'key:sdiffstore:1' key2 = b'key:sdiffstore:2' @@ -104,7 +103,6 @@ await redis.sdiffstore(destkey, key1, None) -@pytest.mark.run_loop async def test_sinter(redis): key1 = b'key:sinter:1' key2 = b'key:sinter:2' @@ -132,7 +130,6 @@ await redis.sinter(key1, None) -@pytest.mark.run_loop async def test_sinterstore(redis): key1 = b'key:sinterstore:1' key2 = b'key:sinterstore:2' @@ -164,7 +161,6 @@ await redis.sinterstore(destkey, key1, None) -@pytest.mark.run_loop async def test_sismember(redis): key, member = 
b'key:sismember', b'hello' # add member to the set, expected result: 1 @@ -182,7 +178,6 @@ await redis.sismember(None, b'world') -@pytest.mark.run_loop async def test_smembers(redis): key = b'key:smembers' member1 = b'hello' @@ -207,7 +202,6 @@ await redis.smembers(None) -@pytest.mark.run_loop async def test_smove(redis): key1 = b'key:smove:1' key2 = b'key:smove:2' @@ -247,7 +241,6 @@ await redis.smove(key1, None, member1) -@pytest.mark.run_loop async def test_spop(redis): key = b'key:spop:1' members = b'one', b'two', b'three' @@ -277,7 +270,41 @@ await redis.spop(None) -@pytest.mark.run_loop +@redis_version( + 3, 2, 0, + reason="The count argument in SPOP is available since redis>=3.2.0" +) +async def test_spop_count(redis): + key = b'key:spop:1' + members1 = b'one', b'two', b'three' + await redis.sadd(key, *members1) + + # fetch 3 random members + test_result1 = await redis.spop(key, 3) + assert len(test_result1) == 3 + assert set(test_result1).issubset(members1) is True + + members2 = 'four', 'five', 'six' + await redis.sadd(key, *members2) + + # test with encoding, fetch 3 random members + test_result2 = await redis.spop(key, 3, encoding='utf-8') + assert len(test_result2) == 3 + assert set(test_result2).issubset(members2) is True + + # try to pop data from empty set + test_result = await redis.spop(b'not:' + key, 2) + assert len(test_result) == 0 + + # test with negative counter + with pytest.raises(ReplyError): + await redis.spop(key, -2) + + # test with counter is zero + test_result3 = await redis.spop(key, 0) + assert len(test_result3) == 0 + + async def test_srandmember(redis): key = b'key:srandmember:1' members = b'one', b'two', b'three', b'four', b'five', b'six', b'seven' @@ -315,7 +342,6 @@ await redis.srandmember(None) -@pytest.mark.run_loop async def test_srem(redis): key = b'key:srem:1' members = b'one', b'two', b'three', b'four', b'five', b'six', b'seven' @@ -340,7 +366,6 @@ await redis.srem(None, members) -@pytest.mark.run_loop async def 
test_sunion(redis): key1 = b'key:sunion:1' key2 = b'key:sunion:2' @@ -368,7 +393,6 @@ await redis.sunion(key1, None) -@pytest.mark.run_loop async def test_sunionstore(redis): key1 = b'key:sunionstore:1' key2 = b'key:sunionstore:2' @@ -400,8 +424,7 @@ await redis.sunionstore(destkey, key1, None) -@pytest.redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') async def test_sscan(redis): key = b'key:sscan' for i in range(1, 11): @@ -431,8 +454,7 @@ await redis.sscan(None) -@pytest.redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='SSCAN is available since redis>=2.8.0') async def test_isscan(redis): key = b'key:sscan' for i in range(1, 11): diff --git a/tests/sorted_set_commands_test.py b/tests/sorted_set_commands_test.py index 1dfde23..3a8eb82 100644 --- a/tests/sorted_set_commands_test.py +++ b/tests/sorted_set_commands_test.py @@ -1,8 +1,54 @@ import itertools + import pytest - -@pytest.mark.run_loop +from _testutils import redis_version + + +@redis_version(5, 0, 0, reason='BZPOPMAX is available since redis>=5.0.0') +async def test_bzpopmax(redis): + key1 = b'key:zpopmax:1' + key2 = b'key:zpopmax:2' + + pairs = [ + (0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') + ] + await redis.zadd(key1, *pairs[0]) + await redis.zadd(key2, *itertools.chain.from_iterable(pairs)) + + res = await redis.bzpopmax(key1, timeout=0) + assert res == [key1, b'a', b'0'] + res = await redis.bzpopmax(key1, key2, timeout=0) + assert res == [key2, b'f', b'9'] + + with pytest.raises(TypeError): + await redis.bzpopmax(key1, timeout=b'one') + with pytest.raises(ValueError): + await redis.bzpopmax(key2, timeout=-10) + + +@redis_version(5, 0, 0, reason='BZPOPMIN is available since redis>=5.0.0') +async def test_bzpopmin(redis): + key1 = b'key:zpopmin:1' + key2 = b'key:zpopmin:2' + + pairs = [ + 
(0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') + ] + await redis.zadd(key1, *pairs[0]) + await redis.zadd(key2, *itertools.chain.from_iterable(pairs)) + + res = await redis.bzpopmin(key1, timeout=0) + assert res == [key1, b'a', b'0'] + res = await redis.bzpopmin(key1, key2, timeout=0) + assert res == [key2, b'a', b'0'] + + with pytest.raises(TypeError): + await redis.bzpopmin(key1, timeout=b'one') + with pytest.raises(ValueError): + await redis.bzpopmin(key2, timeout=-10) + + async def test_zadd(redis): key = b'key:zadd' res = await redis.zadd(key, 1, b'one') @@ -29,10 +75,9 @@ await redis.zadd(key, 3, b'three', 'four', 4) -@pytest.redis_version( +@redis_version( 3, 0, 2, reason='ZADD options is available since redis>=3.0.2', ) -@pytest.mark.run_loop async def test_zadd_options(redis): key = b'key:zaddopt' @@ -66,8 +111,16 @@ res = await redis.zrange(key, 0, -1, withscores=False) assert res == [b'one', b'two'] - -@pytest.mark.run_loop + res = await redis.zadd(key, 1, b'two', changed=True) + assert res == 1 + + res = await redis.zadd(key, 1, b'two', incr=True) + assert int(res) == 2 + + with pytest.raises(ValueError): + await redis.zadd(key, 1, b'one', 2, b'two', incr=True) + + async def test_zcard(redis): key = b'key:zcard' pairs = [1, b'one', 2, b'two', 3, b'three'] @@ -84,7 +137,6 @@ await redis.zcard(None) -@pytest.mark.run_loop async def test_zcount(redis): key = b'key:zcount' pairs = [1, b'one', 1, b'uno', 2.5, b'two', 3, b'three', 7, b'seven'] @@ -128,7 +180,6 @@ await redis.zcount(key, 10, 1) -@pytest.mark.run_loop async def test_zincrby(redis): key = b'key:zincrby' pairs = [1, b'one', 1, b'uno', 2.5, b'two', 3, b'three'] @@ -148,7 +199,6 @@ await redis.zincrby(key, 'one', 5) -@pytest.mark.run_loop async def test_zinterstore(redis): zset1 = [2, 'one', 2, 'two'] zset2 = [3, 'one', 3, 'three'] @@ -196,9 +246,8 @@ assert res == [(b'one', 10)] -@pytest.redis_version( +@redis_version( 2, 8, 9, reason='ZLEXCOUNT is available since redis>=2.8.9') 
-@pytest.mark.run_loop async def test_zlexcount(redis): key = b'key:zlexcount' pairs = [0, b'a', 0, b'b', 0, b'c', 0, b'd', 0, b'e'] @@ -222,7 +271,6 @@ @pytest.mark.parametrize('encoding', [None, 'utf-8']) -@pytest.mark.run_loop async def test_zrange(redis, encoding): key = b'key:zrange' scores = [1, 1, 2.5, 3, 7] @@ -253,9 +301,8 @@ await redis.zrange(key, 0, 'last') -@pytest.redis_version( +@redis_version( 2, 8, 9, reason='ZRANGEBYLEX is available since redis>=2.8.9') -@pytest.mark.run_loop async def test_zrangebylex(redis): key = b'key:zrangebylex' scores = [0] * 5 @@ -299,7 +346,6 @@ offset=1, count='one') -@pytest.mark.run_loop async def test_zrank(redis): key = b'key:zrank' scores = [1, 1, 2.5, 3, 7] @@ -321,7 +367,6 @@ @pytest.mark.parametrize('encoding', [None, 'utf-8']) -@pytest.mark.run_loop async def test_zrangebyscore(redis, encoding): key = b'key:zrangebyscore' scores = [1, 1, 2.5, 3, 7] @@ -365,7 +410,6 @@ await redis.zrangebyscore(key, 1, 7, offset=1, count='one') -@pytest.mark.run_loop async def test_zrem(redis): key = b'key:zrem' scores = [1, 1, 2.5, 3, 7] @@ -391,9 +435,8 @@ await redis.zrem(None, b'one') -@pytest.redis_version( +@redis_version( 2, 8, 9, reason='ZREMRANGEBYLEX is available since redis>=2.8.9') -@pytest.mark.run_loop async def test_zremrangebylex(redis): key = b'key:zremrangebylex' members = [b'aaaa', b'b', b'c', b'd', b'e', b'foo', b'zap', b'zip', @@ -432,7 +475,6 @@ await redis.zremrangebylex(key, b'a', 20) -@pytest.mark.run_loop async def test_zremrangebyrank(redis): key = b'key:zremrangebyrank' scores = [0, 1, 2, 3, 4, 5] @@ -459,7 +501,6 @@ await redis.zremrangebyrank(key, 0, 'last') -@pytest.mark.run_loop async def test_zremrangebyscore(redis): key = b'key:zremrangebyscore' scores = [1, 1, 2.5, 3, 7] @@ -494,7 +535,6 @@ @pytest.mark.parametrize('encoding', [None, 'utf-8']) -@pytest.mark.run_loop async def test_zrevrange(redis, encoding): key = b'key:zrevrange' scores = [1, 1, 2.5, 3, 7] @@ -529,7 +569,6 @@ await 
redis.zrevrange(key, 0, 'last') -@pytest.mark.run_loop async def test_zrevrank(redis): key = b'key:zrevrank' scores = [1, 1, 2.5, 3, 7] @@ -550,7 +589,6 @@ await redis.zrevrank(None, b'one') -@pytest.mark.run_loop async def test_zscore(redis): key = b'key:zscore' scores = [1, 1, 2.5, 3, 7] @@ -570,7 +608,6 @@ assert res is None -@pytest.mark.run_loop async def test_zunionstore(redis): zset1 = [2, 'one', 2, 'two'] zset2 = [3, 'one', 3, 'three'] @@ -619,7 +656,6 @@ @pytest.mark.parametrize('encoding', [None, 'utf-8']) -@pytest.mark.run_loop async def test_zrevrangebyscore(redis, encoding): key = b'key:zrevrangebyscore' scores = [1, 1, 2.5, 3, 7] @@ -664,9 +700,8 @@ await redis.zrevrangebyscore(key, 1, 7, offset=1, count='one') -@pytest.redis_version( +@redis_version( 2, 8, 9, reason='ZREVRANGEBYLEX is available since redis>=2.8.9') -@pytest.mark.run_loop async def test_zrevrangebylex(redis): key = b'key:zrevrangebylex' scores = [0] * 5 @@ -712,8 +747,7 @@ offset=1, count='one') -@pytest.redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') async def test_zscan(redis): key = b'key:zscan' scores, members = [], [] @@ -746,8 +780,7 @@ await redis.zscan(None) -@pytest.redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='ZSCAN is available since redis>=2.8.0') async def test_izscan(redis): key = b'key:zscan' scores, members = [], [] @@ -784,3 +817,35 @@ with pytest.raises(TypeError): await redis.izscan(None) + + +@redis_version(5, 0, 0, reason='ZPOPMAX is available since redis>=5.0.0') +async def test_zpopmax(redis): + key = b'key:zpopmax' + + pairs = [ + (0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') + ] + await redis.zadd(key, *itertools.chain.from_iterable(pairs)) + + assert await redis.zpopmax(key) == [b'f', b'9'] + assert await redis.zpopmax(key, 3) == [b'e', b'8', 
b'c', b'5', b'g', b'3'] + + with pytest.raises(TypeError): + await redis.zpopmax(key, b'b') + + +@redis_version(5, 0, 0, reason='ZPOPMIN is available since redis>=5.0.0') +async def test_zpopmin(redis): + key = b'key:zpopmin' + + pairs = [ + (0, b'a'), (5, b'c'), (2, b'd'), (8, b'e'), (9, b'f'), (3, b'g') + ] + await redis.zadd(key, *itertools.chain.from_iterable(pairs)) + + assert await redis.zpopmin(key) == [b'a', b'0'] + assert await redis.zpopmin(key, 3) == [b'd', b'2', b'g', b'3', b'c', b'5'] + + with pytest.raises(TypeError): + await redis.zpopmin(key, b'b') diff --git a/tests/ssl_test.py b/tests/ssl_test.py index e31a99c..3ce1496 100644 --- a/tests/ssl_test.py +++ b/tests/ssl_test.py @@ -1,32 +1,27 @@ -import pytest - -@pytest.mark.run_loop -async def test_ssl_connection(create_connection, loop, server, ssl_proxy): +async def test_ssl_connection(create_connection, server, ssl_proxy): ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) conn = await create_connection( - ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) + ('localhost', ssl_port), ssl=ssl_ctx) res = await conn.execute('ping') assert res == b'PONG' -@pytest.mark.run_loop -async def test_ssl_redis(create_redis, loop, server, ssl_proxy): +async def test_ssl_redis(create_redis, server, ssl_proxy): ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) redis = await create_redis( - ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) + ('localhost', ssl_port), ssl=ssl_ctx) res = await redis.ping() assert res == b'PONG' -@pytest.mark.run_loop -async def test_ssl_pool(create_pool, server, loop, ssl_proxy): +async def test_ssl_pool(create_pool, server, ssl_proxy): ssl_port, ssl_ctx = ssl_proxy(server.tcp_address.port) pool = await create_pool( - ('localhost', ssl_port), ssl=ssl_ctx, loop=loop) + ('localhost', ssl_port), ssl=ssl_ctx) with (await pool) as conn: res = await conn.execute('PING') assert res == b'PONG' diff --git a/tests/stream_commands_test.py b/tests/stream_commands_test.py index 
a3c0fdd..ead9809 100644 --- a/tests/stream_commands_test.py +++ b/tests/stream_commands_test.py @@ -4,19 +4,20 @@ from collections import OrderedDict from unittest import mock -from aioredis import ReplyError - - -@asyncio.coroutine -async def add_message_with_sleep(redis, loop, stream, fields): - await asyncio.sleep(0.2, loop=loop) +from aioredis.commands.streams import parse_messages +from aioredis.errors import BusyGroupError +from _testutils import redis_version + +pytestmark = redis_version( + 5, 0, 0, reason="Streams only available since Redis 5.0.0") + + +async def add_message_with_sleep(redis, stream, fields): + await asyncio.sleep(0.2) result = await redis.xadd(stream, fields) return result -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xadd(redis, server_bin): fields = OrderedDict(( (b'field1', b'value1'), @@ -41,9 +42,6 @@ ) -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xadd_maxlen_exact(redis, server_bin): message_id1 = await redis.xadd('test_stream', {'f1': 'v1'}) # noqa @@ -70,9 +68,6 @@ assert message3[1] == OrderedDict([(b'f3', b'v3')]) -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xadd_manual_message_ids(redis, server_bin): await redis.xadd('test_stream', {'f1': 'v1'}, message_id='1515958771000-0') await redis.xadd('test_stream', {'f1': 'v1'}, message_id='1515958771000-1') @@ -87,9 +82,6 @@ ] -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xadd_maxlen_inexact(redis, server_bin): await redis.xadd('test_stream', {'f1': 'v1'}) # Ensure the millisecond-based message ID increments @@ -111,9 +103,6 @@ assert len(messages) < 1000 -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, 
reason="Streams only available on redis " - "unstable branch") async def test_xrange(redis, server_bin): stream = 'test_stream' fields = OrderedDict(( @@ -167,9 +156,6 @@ assert len(messages) == 2 -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xrevrange(redis, server_bin): stream = 'test_stream' fields = OrderedDict(( @@ -223,9 +209,6 @@ assert len(messages) == 2 -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xread_selection(redis, server_bin): """Test use of counts and starting IDs""" stream = 'test_stream' @@ -258,25 +241,21 @@ assert len(messages) == 2 -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") -async def test_xread_blocking(redis, create_redis, loop, server, server_bin): +async def test_xread_blocking(redis, create_redis, server, server_bin): """Test the blocking read features""" fields = OrderedDict(( (b'field1', b'value1'), (b'field2', b'value2'), )) other_redis = await create_redis( - server.tcp_address, loop=loop) + server.tcp_address) # create blocking task in separate connection consumer = other_redis.xread(['test_stream'], timeout=1000) producer_task = asyncio.Task( - add_message_with_sleep(redis, loop, 'test_stream', fields), loop=loop) - results = await asyncio.gather( - consumer, producer_task, loop=loop) + add_message_with_sleep(redis, 'test_stream', fields)) + results = await asyncio.gather(consumer, producer_task) received_messages, sent_message_id = results assert len(received_messages) == 1 @@ -296,13 +275,8 @@ other_redis.close() -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xgroup_create(redis, server_bin): # Also tests xinfo_groups() - # TODO: Remove xadd() if resolved: - # 
https://github.com/antirez/redis/issues/4824 await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group') info = await redis.xinfo_groups('test_stream') @@ -314,28 +288,30 @@ }] -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") +async def test_xgroup_create_mkstream(redis, server_bin): + await redis.xgroup_create('test_stream', 'test_group', mkstream=True) + info = await redis.xinfo_groups('test_stream') + assert info == [{ + b'name': b'test_group', + b'last-delivered-id': mock.ANY, + b'pending': 0, + b'consumers': 0 + }] + + async def test_xgroup_create_already_exists(redis, server_bin): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group') - with pytest.raises(ReplyError): + with pytest.raises(BusyGroupError): await redis.xgroup_create('test_stream', 'test_group') -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xgroup_setid(redis, server_bin): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group') await redis.xgroup_setid('test_stream', 'test_group', '$') -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xgroup_destroy(redis, server_bin): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group') @@ -344,16 +320,14 @@ assert not info -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xread_group(redis): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group', latest_id='0') + # read all pending messages messages = await redis.xread_group( 'test_group', 'test_consumer', ['test_stream'], - timeout=1000, latest_ids=[0] + timeout=1000, 
latest_ids=['>'] ) assert len(messages) == 1 stream, message_id, fields = messages[0] @@ -362,9 +336,22 @@ assert fields == {b'a': b'1'} -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") +async def test_xread_group_with_no_ack(redis): + await redis.xadd('test_stream', {'a': 1}) + await redis.xgroup_create('test_stream', 'test_group', latest_id='0') + + # read all pending messages + messages = await redis.xread_group( + 'test_group', 'test_consumer', ['test_stream'], + timeout=1000, latest_ids=['>'], no_ack=True + ) + assert len(messages) == 1 + stream, message_id, fields = messages[0] + assert stream == b'test_stream' + assert message_id + assert fields == {b'a': b'1'} + + async def test_xack_and_xpending(redis): # Test a full xread -> xack cycle, using xpending to check the status message_id = await redis.xadd('test_stream', {'a': 1}) @@ -378,7 +365,7 @@ # Read the message await redis.xread_group( 'test_group', 'test_consumer', ['test_stream'], - timeout=1000, latest_ids=[0] + timeout=1000, latest_ids=['>'] ) # It is now pending @@ -398,9 +385,6 @@ assert pending_count == 0 -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xpending_get_messages(redis): # Like test_xack_and_xpending(), but using the start/end xpending() # params to get the messages @@ -408,7 +392,7 @@ await redis.xgroup_create('test_stream', 'test_group', latest_id='0') await redis.xread_group( 'test_group', 'test_consumer', ['test_stream'], - timeout=1000, latest_ids=[0] + timeout=1000, latest_ids=['>'] ) await asyncio.sleep(0.05) @@ -426,9 +410,6 @@ assert num_deliveries == 1 -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xpending_start_of_zero(redis): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 
'test_group', latest_id='0') @@ -436,16 +417,13 @@ await redis.xpending('test_stream', 'test_group', 0, '+', 10) -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xclaim_simple(redis): # Put a message in a pending state then reclaim it is XCLAIM message_id = await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group', latest_id='0') await redis.xread_group( 'test_group', 'test_consumer', ['test_stream'], - timeout=1000, latest_ids=[0] + timeout=1000, latest_ids=['>'] ) # Message is now pending @@ -469,15 +447,12 @@ assert pel == [[b'new_consumer', b'1']] -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xclaim_min_idle_time_includes_messages(redis): message_id = await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group', latest_id='0') await redis.xread_group( 'test_group', 'test_consumer', ['test_stream'], - timeout=1000, latest_ids=[0] + timeout=1000, latest_ids=['>'] ) # Message is now pending. Wait 100ms @@ -489,15 +464,12 @@ assert result -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xclaim_min_idle_time_excludes_messages(redis): message_id = await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group', latest_id='0') await redis.xread_group( 'test_group', 'test_consumer', ['test_stream'], - timeout=1000, latest_ids=[0] + timeout=1000, latest_ids=['>'] ) # Message is now pending. 
Wait no time at all @@ -508,9 +480,6 @@ assert not result -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xgroup_delconsumer(redis, create_redis, server): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group') @@ -531,9 +500,26 @@ assert not info -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") +async def test_xdel_stream(redis): + message_id = await redis.xadd('test_stream', {'a': 1}) + response = await redis.xdel('test_stream', id=message_id) + assert response >= 0 + + +async def test_xtrim_stream(redis): + await redis.xadd('test_stream', {'a': 1}) + await redis.xadd('test_stream', {'b': 1}) + await redis.xadd('test_stream', {'c': 1}) + response = await redis.xtrim('test_stream', max_len=1, exact_len=False) + assert response >= 0 + + +async def test_xlen_stream(redis): + await redis.xadd('test_stream', {'a': 1}) + response = await redis.xlen('test_stream') + assert response >= 0 + + async def test_xinfo_consumers(redis): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group') @@ -551,9 +537,6 @@ assert isinstance(info[0], dict) -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xinfo_stream(redis): await redis.xadd('test_stream', {'a': 1}) await redis.xgroup_create('test_stream', 'test_group') @@ -575,9 +558,34 @@ assert isinstance(info, dict) -@pytest.mark.run_loop -@pytest.redis_version(999, 999, 999, reason="Streams only available on redis " - "unstable branch") async def test_xinfo_help(redis): info = await redis.xinfo_help() assert info + + +@pytest.mark.parametrize('param', [0.1, '1']) +async def test_xread_param_types(redis, param): + with pytest.raises(TypeError): + await redis.xread( + ["system_event_stream"], + 
timeout=param, latest_ids=[0] + ) + + +def test_parse_messages_ok(): + message = [(b'123', [b'f1', b'v1', b'f2', b'v2'])] + assert parse_messages(message) == [(b'123', {b'f1': b'v1', b'f2': b'v2'})] + + +def test_parse_messages_null_fields(): + # Redis can sometimes respond with a fields value of 'null', + # so ensure we handle that sensibly + message = [(b'123', None)] + assert parse_messages(message) == [] + + +def test_parse_messages_null_message(): + # Redis can sometimes respond with a fields value of 'null', + # so ensure we handle that sensibly + message = [None] + assert parse_messages(message) == [] diff --git a/tests/stream_test.py b/tests/stream_test.py index 39ff358..1634c90 100644 --- a/tests/stream_test.py +++ b/tests/stream_test.py @@ -17,13 +17,11 @@ return reader -@pytest.mark.run_loop async def test_feed_and_parse(reader): reader.feed_data(b'+PONG\r\n') assert (await reader.readobj()) == b'PONG' -@pytest.mark.run_loop async def test_buffer_available_after_RST(reader): reader.feed_data(b'+PONG\r\n') reader.set_exception(Exception()) @@ -46,7 +44,6 @@ 'read_method', ['read', 'readline', 'readuntil', 'readexactly'] ) -@pytest.mark.run_loop async def test_read_flavors_not_supported(reader, read_method): with pytest.raises(RuntimeError): await getattr(reader, read_method)() diff --git a/tests/string_commands_test.py b/tests/string_commands_test.py index 5e864c8..4792c88 100644 --- a/tests/string_commands_test.py +++ b/tests/string_commands_test.py @@ -2,6 +2,7 @@ import pytest from aioredis import ReplyError +from _testutils import redis_version async def add(redis, key, value): @@ -9,7 +10,6 @@ assert ok is True -@pytest.mark.run_loop async def test_append(redis): len_ = await redis.append('my-key', 'Hello') assert len_ == 5 @@ -25,7 +25,6 @@ await redis.append('none-key', None) -@pytest.mark.run_loop async def test_bitcount(redis): await add(redis, 'my-key', b'\x00\x10\x01') @@ -56,7 +55,6 @@ await redis.bitcount('my-key', 2, None) 
-@pytest.mark.run_loop async def test_bitop_and(redis): key1, value1 = b'key:bitop:and:1', 5 key2, value2 = b'key:bitop:and:2', 7 @@ -78,7 +76,6 @@ await redis.bitop_and(destkey, key1, None) -@pytest.mark.run_loop async def test_bitop_or(redis): key1, value1 = b'key:bitop:or:1', 5 key2, value2 = b'key:bitop:or:2', 7 @@ -100,7 +97,6 @@ await redis.bitop_or(destkey, key1, None) -@pytest.mark.run_loop async def test_bitop_xor(redis): key1, value1 = b'key:bitop:xor:1', 5 key2, value2 = b'key:bitop:xor:2', 7 @@ -122,7 +118,6 @@ await redis.bitop_xor(destkey, key1, None) -@pytest.mark.run_loop async def test_bitop_not(redis): key1, value1 = b'key:bitop:not:1', 5 await add(redis, key1, value1) @@ -139,8 +134,7 @@ await redis.bitop_not(destkey, None) -@pytest.redis_version(2, 8, 0, reason='BITPOS is available since redis>=2.8.0') -@pytest.mark.run_loop +@redis_version(2, 8, 0, reason='BITPOS is available since redis>=2.8.0') async def test_bitpos(redis): key, value = b'key:bitop', b'\xff\xf0\x00' await add(redis, key, value) @@ -173,7 +167,6 @@ test_value = await redis.bitpos(key, 7) -@pytest.mark.run_loop async def test_decr(redis): await redis.delete('key') @@ -192,7 +185,6 @@ await redis.decr(None) -@pytest.mark.run_loop async def test_decrby(redis): await redis.delete('key') @@ -215,7 +207,6 @@ await redis.decrby('key', None) -@pytest.mark.run_loop async def test_get(redis): await add(redis, 'my-key', 'value') ret = await redis.get('my-key') @@ -232,7 +223,6 @@ await redis.get(None) -@pytest.mark.run_loop async def test_getbit(redis): key, value = b'key:getbit', 10 await add(redis, key, value) @@ -260,7 +250,6 @@ await redis.getbit(key, -7) -@pytest.mark.run_loop async def test_getrange(redis): key, value = b'key:getrange', b'This is a string' await add(redis, key, value) @@ -294,7 +283,6 @@ await redis.getrange(key, 0, b'seven') -@pytest.mark.run_loop async def test_getset(redis): key, value = b'key:getset', b'hello' await add(redis, key, value) @@ -319,7 +307,6 @@ 
await redis.getset(None, b'asyncio') -@pytest.mark.run_loop async def test_incr(redis): await redis.delete('key') @@ -338,7 +325,6 @@ await redis.incr(None) -@pytest.mark.run_loop async def test_incrby(redis): await redis.delete('key') @@ -361,7 +347,6 @@ await redis.incrby('key', None) -@pytest.mark.run_loop async def test_incrbyfloat(redis): await redis.delete('key') @@ -388,7 +373,6 @@ await redis.incrbyfloat('key', '1.0') -@pytest.mark.run_loop async def test_mget(redis): key1, value1 = b'foo', b'bar' key2, value2 = b'baz', b'bzz' @@ -413,7 +397,6 @@ await redis.mget(key1, None) -@pytest.mark.run_loop async def test_mset(redis): key1, value1 = b'key:mset:1', b'hello' key2, value2 = b'key:mset:2', b'world' @@ -433,7 +416,19 @@ await redis.mset(key1, value1, key1) -@pytest.mark.run_loop +async def test_mset_with_dict(redis): + array = [str(n) for n in range(10)] + _dict = dict.fromkeys(array, 'default value', ) + + await redis.mset(_dict) + + test_values = await redis.mget(*_dict.keys()) + assert test_values == [str.encode(val) for val in _dict.values()] + + with pytest.raises(TypeError): + await redis.mset('param', ) + + async def test_msetnx(redis): key1, value1 = b'key:msetnx:1', b'Hello' key2, value2 = b'key:msetnx:2', b'there' @@ -454,8 +449,7 @@ await redis.msetnx(key1, value1, key2) -@pytest.mark.run_loop -async def test_psetex(redis, loop): +async def test_psetex(redis): key, value = b'key:psetex:1', b'Hello' # test expiration in milliseconds tr = redis.multi_exec() @@ -466,7 +460,7 @@ test_value = await fut2 assert test_value == value - await asyncio.sleep(0.050, loop=loop) + await asyncio.sleep(0.050) test_value = await redis.get(key) assert test_value is None @@ -476,7 +470,6 @@ await redis.psetex(key, 7.5, value) -@pytest.mark.run_loop async def test_set(redis): ok = await redis.set('my-key', 'value') assert ok is True @@ -491,8 +484,7 @@ await redis.set(None, 'value') -@pytest.mark.run_loop -async def test_set_expire(redis, loop): +async def 
test_set_expire(redis): key, value = b'key:set:expire', b'foo' # test expiration in milliseconds tr = redis.multi_exec() @@ -502,7 +494,7 @@ await fut1 result_1 = await fut2 assert result_1 == value - await asyncio.sleep(0.050, loop=loop) + await asyncio.sleep(0.050) result_2 = await redis.get(key) assert result_2 is None @@ -514,12 +506,11 @@ await fut1 result_3 = await fut2 assert result_3 == value - await asyncio.sleep(1.050, loop=loop) + await asyncio.sleep(1.050) result_4 = await redis.get(key) assert result_4 is None -@pytest.mark.run_loop async def test_set_only_if_not_exists(redis): key, value = b'key:set:only_if_not_exists', b'foo' await redis.set( @@ -535,7 +526,6 @@ assert result_2 == value -@pytest.mark.run_loop async def test_set_only_if_exists(redis): key, value = b'key:set:only_if_exists', b'only_if_exists:foo' # ensure that such key does not exits, and value not sets @@ -551,7 +541,6 @@ assert result_2 == b'foo' -@pytest.mark.run_loop async def test_set_wrong_input(redis): key, value = b'key:set:', b'foo' @@ -563,7 +552,6 @@ await redis.set(key, value, pexpire=7.8) -@pytest.mark.run_loop async def test_setbit(redis): key = b'key:setbit' result = await redis.setbit(key, 7, 1) @@ -581,8 +569,7 @@ await redis.setbit(key, 1, 7) -@pytest.mark.run_loop -async def test_setex(redis, loop): +async def test_setex(redis): key, value = b'key:setex:1', b'Hello' tr = redis.multi_exec() fut1 = tr.setex(key, 1, value) @@ -591,7 +578,7 @@ await fut1 test_value = await fut2 assert test_value == value - await asyncio.sleep(1.050, loop=loop) + await asyncio.sleep(1.050) test_value = await redis.get(key) assert test_value is None @@ -602,7 +589,7 @@ await fut1 test_value = await fut2 assert test_value == value - await asyncio.sleep(0.50, loop=loop) + await asyncio.sleep(0.50) test_value = await redis.get(key) assert test_value is None @@ -612,7 +599,6 @@ await redis.setex(key, b'one', value) -@pytest.mark.run_loop async def test_setnx(redis): key, value = 
b'key:setnx:1', b'Hello' # set fresh new value @@ -634,7 +620,6 @@ await redis.setnx(None, value) -@pytest.mark.run_loop async def test_setrange(redis): key, value = b'key:setrange', b'Hello World' await add(redis, key, value) @@ -656,7 +641,6 @@ await redis.setrange(key, -1, b'Redis') -@pytest.mark.run_loop async def test_strlen(redis): key, value = b'key:strlen', b'asyncio' await add(redis, key, value) @@ -670,7 +654,6 @@ await redis.strlen(None) -@pytest.mark.run_loop async def test_cancel_hang(redis): exists_coro = redis.execute("EXISTS", b"key:test1") exists_coro.cancel() @@ -678,10 +661,8 @@ assert not exists_check -@pytest.mark.run_loop -async def test_set_enc(create_redis, loop, server): - redis = await create_redis( - server.tcp_address, loop=loop, encoding='utf-8') +async def test_set_enc(create_redis, server): + redis = await create_redis(server.tcp_address, encoding='utf-8') TEST_KEY = 'my-key' ok = await redis.set(TEST_KEY, 'value') assert ok is True diff --git a/tests/task_cancellation_test.py b/tests/task_cancellation_test.py index 9a37652..9355d51 100644 --- a/tests/task_cancellation_test.py +++ b/tests/task_cancellation_test.py @@ -3,15 +3,13 @@ import asyncio -@pytest.mark.run_loop async def test_future_cancellation(create_connection, loop, server): - conn = await create_connection( - server.tcp_address, loop=loop) + conn = await create_connection(server.tcp_address) ts = loop.time() fut = conn.execute('BLPOP', 'some-list', 5) with pytest.raises(asyncio.TimeoutError): - await asyncio.wait_for(fut, 1, loop=loop) + await asyncio.wait_for(fut, 1) assert fut.cancelled() # NOTE: Connection becomes available only after timeout expires diff --git a/tests/transaction_commands_test.py b/tests/transaction_commands_test.py index 4582d63..1e9b4da 100644 --- a/tests/transaction_commands_test.py +++ b/tests/transaction_commands_test.py @@ -5,8 +5,7 @@ from aioredis import ConnectionClosedError -@pytest.mark.run_loop -async def test_multi_exec(redis, loop): 
+async def test_multi_exec(redis): await redis.delete('foo', 'bar') tr = redis.multi_exec() @@ -14,7 +13,7 @@ f2 = tr.incr('bar') res = await tr.execute() assert res == [1, 1] - res2 = await asyncio.gather(f1, f2, loop=loop) + res2 = await asyncio.gather(f1, f2) assert res == res2 tr = redis.multi_exec() @@ -29,7 +28,7 @@ f2 = tr.incrbyfloat('foo', 1.2) res = await tr.execute() assert res == [True, 2.2] - res2 = await asyncio.gather(f1, f2, loop=loop) + res2 = await asyncio.gather(f1, f2) assert res == res2 tr = redis.multi_exec() @@ -40,14 +39,12 @@ await f1 -@pytest.mark.run_loop async def test_empty(redis): tr = redis.multi_exec() res = await tr.execute() assert res == [] -@pytest.mark.run_loop async def test_double_execute(redis): tr = redis.multi_exec() await tr.execute() @@ -57,7 +54,6 @@ await tr.incr('foo') -@pytest.mark.run_loop async def test_connection_closed(redis): tr = redis.multi_exec() fut1 = tr.quit() @@ -89,7 +85,6 @@ (ConnectionClosedError, ConnectionError)) -@pytest.mark.run_loop async def test_discard(redis): await redis.delete('foo') tr = redis.multi_exec() @@ -108,7 +103,6 @@ assert res == 1 -@pytest.mark.run_loop async def test_exec_error(redis): tr = redis.multi_exec() fut = tr.connection.execute('INCRBY', 'key', '1.0') @@ -126,7 +120,6 @@ await fut -@pytest.mark.run_loop async def test_command_errors(redis): tr = redis.multi_exec() fut = tr.incrby('key', 1.0) @@ -136,7 +129,6 @@ await fut -@pytest.mark.run_loop async def test_several_command_errors(redis): tr = redis.multi_exec() fut1 = tr.incrby('key', 1.0) @@ -149,7 +141,6 @@ await fut2 -@pytest.mark.run_loop async def test_error_in_connection(redis): await redis.set('foo', 1) tr = redis.multi_exec() @@ -162,7 +153,6 @@ await fut2 -@pytest.mark.run_loop async def test_watch_unwatch(redis): res = await redis.watch('key') assert res is True @@ -180,7 +170,6 @@ assert res is True -@pytest.mark.run_loop async def test_encoding(redis): res = await redis.set('key', 'value') assert res is True 
@@ -201,11 +190,8 @@ assert res == {'foo': 'val1', 'bar': 'val2'} -@pytest.mark.run_loop -async def test_global_encoding(redis, create_redis, server, loop): - redis = await create_redis( - server.tcp_address, - loop=loop, encoding='utf-8') +async def test_global_encoding(redis, create_redis, server): + redis = await create_redis(server.tcp_address, encoding='utf-8') res = await redis.set('key', 'value') assert res is True res = await redis.hmset( @@ -215,20 +201,21 @@ tr = redis.multi_exec() fut1 = tr.get('key') fut2 = tr.get('key', encoding='utf-8') - fut3 = tr.hgetall('hash-key', encoding='utf-8') + fut3 = tr.get('key', encoding=None) + fut4 = tr.hgetall('hash-key', encoding='utf-8') await tr.execute() res = await fut1 assert res == 'value' res = await fut2 assert res == 'value' res = await fut3 + assert res == b'value' + res = await fut4 assert res == {'foo': 'val1', 'bar': 'val2'} -@pytest.mark.run_loop -async def test_transaction__watch_error(redis, create_redis, server, loop): - other = await create_redis( - server.tcp_address, loop=loop) +async def test_transaction__watch_error(redis, create_redis, server): + other = await create_redis(server.tcp_address) ok = await redis.set('foo', 'bar') assert ok is True @@ -250,7 +237,6 @@ await fut2 -@pytest.mark.run_loop async def test_multi_exec_and_pool_release(redis): # Test the case when pool connection is released before # `exec` result is received. @@ -271,7 +257,6 @@ assert (await fut1) is None -@pytest.mark.run_loop async def test_multi_exec_db_select(redis): await redis.set('foo', 'bar')