New Upstream Release - python-redis

Ready changes

Summary

Merged new upstream version: 4.5.1 (was: 4.3.4).
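
As a quick sanity check after this merge, the installed module can be asked for its version; a minimal sketch, assuming the package is importable under its upstream module name `redis`:

``` python
>>> import redis
>>> redis.__version__   # should report the new upstream version after the merge
'4.5.1'
```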

Diff

diff --git a/.github/release-drafter-config.yml b/.github/release-drafter-config.yml
index aab645f..9ccb28a 100644
--- a/.github/release-drafter-config.yml
+++ b/.github/release-drafter-config.yml
@@ -1,5 +1,5 @@
-name-template: 'Version $NEXT_PATCH_VERSION'
-tag-template: 'v$NEXT_PATCH_VERSION'
+name-template: '$NEXT_MINOR_VERSION'
+tag-template: 'v$NEXT_MINOR_VERSION'
 autolabeler:
   - label: 'maintenance'
     files:
@@ -15,9 +15,12 @@ autolabeler:
     branch:
       - '/feature-.+'
 categories:
-  - title: '🔥 Breaking Changes'
+  - title: 'Breaking Changes'
     labels:
       - 'breakingchange'
+  - title: '🧪 Experimental Features'
+    labels:
+      - 'experimental'
   - title: '🚀 New Features'
     labels:
       - 'feature'
@@ -27,13 +30,14 @@ categories:
       - 'fix'
       - 'bugfix'
       - 'bug'
+      - 'BUG'
   - title: '🧰 Maintenance'
     label: 'maintenance'
 change-template: '- $TITLE (#$NUMBER)'
 exclude-labels:
   - 'skip-changelog'
 template: |
-  ## Changes
+  # Changes
 
   $CHANGES
 
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index b92c52f..e82e7e1 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -36,11 +36,11 @@ jobs:
 
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v2
+      uses: actions/checkout@v3
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
+      uses: github/codeql-action/init@v2
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -51,7 +51,7 @@ jobs:
     # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v1
+      uses: github/codeql-action/autobuild@v2
 
     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 https://git.io/JvXDl
@@ -65,4 +65,4 @@ jobs:
     #   make release
 
     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1
+      uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml
index 59f6c7d..8d38cd4 100644
--- a/.github/workflows/integration.yaml
+++ b/.github/workflows/integration.yaml
@@ -13,16 +13,31 @@ on:
     branches:
       - master
       - '[0-9].[0-9]'
+  schedule:
+    - cron: '0 1 * * *' # nightly build
+
+permissions:
+  contents: read  #  to fetch code (actions/checkout)
 
 jobs:
 
+   dependency-audit:
+     name: Dependency audit
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+       - uses: pypa/gh-action-pip-audit@v1.0.0
+         with:
+           inputs: requirements.txt dev_requirements.txt
+           ignore-vulns: |
+             GHSA-w596-4wvx-j9j6  # subversion related git pull, dependency for pytest. There is no impact here.
+
    lint:
      name: Code linters
      runs-on: ubuntu-latest
      steps:
-       - uses: actions/checkout@v2
-       - name: install python
-         uses: actions/setup-python@v3
+       - uses: actions/checkout@v3
+       - uses: actions/setup-python@v4
          with:
            python-version: 3.9
            cache: 'pip'
@@ -37,16 +52,15 @@ jobs:
      strategy:
        max-parallel: 15
        matrix:
-         python-version: ['3.6', '3.7', '3.8', '3.9', '3.10', 'pypy-3.7']
+         python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', 'pypy-3.7', 'pypy-3.8']
          test-type: ['standalone', 'cluster']
          connection-type: ['hiredis', 'plain']
      env:
        ACTIONS_ALLOW_UNSECURE_COMMANDS: true
      name: Python ${{ matrix.python-version }} ${{matrix.test-type}}-${{matrix.connection-type}} tests
      steps:
-       - uses: actions/checkout@v2
-       - name: install python
-         uses: actions/setup-python@v3
+       - uses: actions/checkout@v3
+       - uses: actions/setup-python@v4
          with:
            python-version: ${{ matrix.python-version }}
            cache: 'pip'
@@ -55,11 +69,25 @@ jobs:
            pip install -U setuptools wheel
            pip install -r dev_requirements.txt
            tox -e ${{matrix.test-type}}-${{matrix.connection-type}}
+       - uses: actions/upload-artifact@v2
+         if: success() || failure()
+         with:
+           name: pytest-results-${{matrix.test-type}}
+           path: '${{matrix.test-type}}*results.xml'
        - name: Upload codecov coverage
-         uses: codecov/codecov-action@v2
+         uses: codecov/codecov-action@v3
          with:
            fail_ci_if_error: false
-           token: ${{ secrets.CODECOV_TOKEN }}
+      #  - name: View Test Results
+      #    uses: dorny/test-reporter@v1
+      #    if: success() || failure()
+      #    with:
+      #      name: Test Results ${{matrix.python-version}} ${{matrix.test-type}}-${{matrix.connection-type}}
+      #      path: '${{matrix.test-type}}*results.xml'
+      #      reporter: java-junit
+      #      list-suites: failed
+      #      list-tests: failed
+      #      max-annotations: 10
 
    build_and_test_package:
     name: Validate building and installing the package
@@ -68,9 +96,8 @@ jobs:
       matrix:
         extension: ['tar.gz', 'whl']
     steps:
-      - uses: actions/checkout@v2
-      - name: install python
-        uses: actions/setup-python@v3
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
           python-version: 3.9
       - name: Run installed unit tests
@@ -82,11 +109,10 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ['3.6', '3.7', '3.8', '3.9', '3.10', 'pypy-3.7']
+        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', 'pypy-3.7']
     steps:
-      - uses: actions/checkout@v2
-      - name: install python ${{ matrix.python-version }}
-        uses: actions/setup-python@v3
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
           cache: 'pip'
diff --git a/.github/workflows/pypi-publish.yaml b/.github/workflows/pypi-publish.yaml
index 0d100dd..50332c1 100644
--- a/.github/workflows/pypi-publish.yaml
+++ b/.github/workflows/pypi-publish.yaml
@@ -4,14 +4,17 @@ on:
   release:
     types: [published]
 
+permissions:
+  contents: read  #  to fetch code (actions/checkout)
+
 jobs:
 
   build_and_package:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: install python
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v4
         with:
           python-version: 3.9
       - name: Install dev tools
diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml
index ec2d88b..eebb3e6 100644
--- a/.github/workflows/release-drafter.yml
+++ b/.github/workflows/release-drafter.yml
@@ -6,8 +6,13 @@ on:
     branches:
       - master
 
+permissions: {}
 jobs:
   update_release_draft:
+    permissions:
+      pull-requests: write  #  to add label to PR (release-drafter/release-drafter)
+      contents: write  #  to create a github release (release-drafter/release-drafter)
+
     runs-on: ubuntu-latest
     steps:
       # Drafts your next Release notes as Pull Requests are merged into "master"
diff --git a/.github/workflows/stale-issues.yml b/.github/workflows/stale-issues.yml
index 562cd58..32fd9e8 100644
--- a/.github/workflows/stale-issues.yml
+++ b/.github/workflows/stale-issues.yml
@@ -3,8 +3,13 @@ on:
   schedule:
   - cron: "0 0 * * *"
 
+permissions: {}
 jobs:
   stale:
+    permissions:
+      issues: write  #  to close stale issues (actions/stale)
+      pull-requests: write  #  to close stale PRs (actions/stale)
+
     runs-on: ubuntu-latest
     steps:
     - uses: actions/stale@v3
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 80b9738..800cb14 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -3,6 +3,7 @@ version: 2
 python:
   install:
     - requirements: ./docs/requirements.txt
+    - requirements: requirements.txt
 
 build:
   os: ubuntu-20.04
diff --git a/CHANGES b/CHANGES
index 0af421b..e83660d 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,4 +1,18 @@
-
+    * Add test and fix async HiredisParser when reading during a disconnect() (#2349)
+    * Use hiredis-py pack_command if available.
+    * Support `.unlink()` in ClusterPipeline
+    * Simplify synchronous SocketBuffer state management
+    * Fix string cleanse in Redis Graph
+    * Make PythonParser resumable in case of error (#2510)
+    * Add `timeout=None` in `SentinelConnectionManager.read_response`
+    * Documentation fix: password protected socket connection (#2374)
+    * Allow `timeout=None` in `PubSub.get_message()` to wait forever
+    * add `nowait` flag to `asyncio.Connection.disconnect()`
+    * Update README.md links
+    * Fix timezone handling for datetime to unixtime conversions
+    * Fix start_id type for XAUTOCLAIM
+    * Remove verbose logging from cluster.py
+    * Add retry mechanism to async version of Connection
     * Compare commands case-insensitively in the asyncio command parser
     * Allow negative `retries` for `Retry` class to retry forever
     * Add `items` parameter to `hset` signature
@@ -14,6 +28,17 @@
     * Added dynaminc_startup_nodes configuration to RedisCluster
     * Fix reusing the old nodes' connections when cluster topology refresh is being done
     * Fix RedisCluster to immediately raise AuthenticationError without a retry
+    * ClusterPipeline Doesn't Handle ConnectionError for Dead Hosts (#2225)
+    * Remove compatibility code for old versions of Hiredis, drop Packaging dependency
+    * The `deprecated` library is no longer a dependency
+    * Failover handling improvements for RedisCluster and Async RedisCluster (#2377)
+    * Fixed "cannot pickle '_thread.lock' object" bug (#2354, #2297)
+    * Added CredentialsProvider class to support password rotation
+    * Enable Lock for asyncio cluster mode
+    * Fix Sentinel.execute_command doesn't execute across the entire sentinel cluster bug (#2458)
+    * Added a replacement for the default cluster node in the event of failure (#2463)
+    * Fix for Unhandled exception related to self.host with unix socket (#2496)
+
 * 4.1.3 (Feb 8, 2022)
     * Fix flushdb and flushall (#1926)
     * Add redis5 and redis4 dockers (#1871)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 827a25f..e31ec34 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -126,7 +126,7 @@ this, instead of using make test, you need to pass
 Our test suite uses `pytest`. You can run a specific test suite against
 a specific Python version like this:
 
-`$ docker-compose run test tox -e py36 -- --redis-url=redis://master:6379/9 tests/test_commands.py`
+`$ docker-compose run test tox -e py37 -- --redis-url=redis://master:6379/9 tests/test_commands.py`
 
 ### Troubleshooting
 
diff --git a/LICENSE b/LICENSE
index 29a3fe3..00aee10 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,22 +1,21 @@
-Copyright (c) 2012 Andy McCurdy
+MIT License
 
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
+Copyright (c) 2022, Redis, inc.
 
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
 
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index 5a4bb71..c02483f 100644
--- a/README.md
+++ b/README.md
@@ -6,33 +6,26 @@ The Python interface to the Redis key-value store.
 [![docs](https://readthedocs.org/projects/redis/badge/?version=stable&style=flat)](https://redis-py.readthedocs.io/en/stable/)
 [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE)
 [![pypi](https://badge.fury.io/py/redis.svg)](https://pypi.org/project/redis/)
+[![pre-release](https://img.shields.io/github/v/release/redis/redis-py?include_prereleases&label=latest-prerelease)](https://github.com/redis/redis-py/releases)
 [![codecov](https://codecov.io/gh/redis/redis-py/branch/master/graph/badge.svg?token=yenl5fzxxr)](https://codecov.io/gh/redis/redis-py)
-[![Total alerts](https://img.shields.io/lgtm/alerts/g/redis/redis-py.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/redis/redis-py/alerts/)
 
-[Installation](#installation) | [Contributing](#contributing) |  [Getting Started](#getting-started) | [Connecting To Redis](#connecting-to-redis)
+[Installation](#installation) |  [Usage](#usage) | [Advanced Topics](#advanced-topics) | [Contributing](https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md)
 
 ---------------------------------------------
 
 ## Python Notice
 
-redis-py 4.2.x will be the last generation of redis-py to support python 3.6 as it has been [End of Life'd](https://www.python.org/dev/peps/pep-0494/#schedule-last-security-only-release).  Async support was introduced in redis-py 4.2.x thanks to [aioredis](https://github.com/aio-libs/aioredis-py), which necessitates this change. We will continue to maintain 3.6 support as long as possible - but the plan is for redis-py version 5+ to officially remove 3.6.
+redis-py 4.3.x will be the last generation of redis-py to support python 3.6 as it has been [End of Life'd](https://www.python.org/dev/peps/pep-0494/#schedule-last-security-only-release).  Async support was introduced in redis-py 4.2.x thanks to [aioredis](https://github.com/aio-libs/aioredis-py), which necessitates this change. We will continue to maintain 3.6 support as long as possible - but the plan is for redis-py version 4.4+ to officially remove 3.6.
 
 ---------------------------
 
 ## Installation
 
-redis-py requires a running Redis server. See [Redis's
-quickstart](https://redis.io/topics/quickstart) for installation
-instructions.
+Start a redis via docker:
 
-redis-py can be installed using pip similar to other
-Python packages. Do not use sudo with pip.
-It is usually good to work in a
-[virtualenv](https://virtualenv.pypa.io/en/latest/) or
-[venv](https://docs.python.org/3/library/venv.html) to avoid conflicts
-with other package managers and Python projects. For a quick
-introduction see [Python Virtual Environments in Five
-Minutes](https://bit.ly/py-env).
+``` bash
+docker run -p 6379:6379 -it redis/redis-stack:latest
+```
 
 To install redis-py, simply:
 
@@ -40,25 +33,20 @@ To install redis-py, simply:
 $ pip install redis
 ```
 
-or from source:
+For faster performance, install redis with hiredis support, this provides a compiled response parser, and *for most cases* requires zero code changes.
+By default, if hiredis >= 1.0 is available, redis-py will attempt to use it for response parsing.
 
 ``` bash
-$ python setup.py install
+$ pip install redis[hiredis]
 ```
 
-View the current documentation [here](https://readthedocs.org/projects/redis/).
-
-## Contributing
+Looking for a high-level library to handle object mapping? See [redis-om-python](https://github.com/redis/redis-om-python)!
 
-Want to contribute a feature, bug fix, or report an issue? Check out
-our [guide to
-contributing](https://github.com/redis/redis-py/blob/master/CONTRIBUTING.md).
+## Usage
 
-## Getting Started
+### Basic Example
 
-redis-py supports Python 3.7+.
-
-``` pycon
+``` python
 >>> import redis
 >>> r = redis.Redis(host='localhost', port=6379, db=0)
 >>> r.set('foo', 'bar')
@@ -67,119 +55,34 @@ True
 b'bar'
 ```
 
-By default, all responses are returned as bytes in Python
-3.
-
-If **all** string responses from a client should be decoded, the user
-can specify *decode_responses=True* in
-```Redis.__init__```. In this case, any Redis command that
-returns a string type will be decoded with the encoding
-specified.
-
-The default encoding is utf-8, but this can be customized by specifying the
-encoding argument for the redis.Redis class.
-The encoding will be used to automatically encode any
-strings passed to commands, such as key names and values.
-
-
---------------------
-
-### MSET, MSETNX and ZADD
-
-These commands all accept a mapping of key/value pairs. In redis-py 2.X
-this mapping could be specified as **args* or as `**kwargs`. Both of
-these styles caused issues when Redis introduced optional flags to ZADD.
-Relying on `*args` caused issues with the optional argument order,
-especially in Python 2.7. Relying on `**kwargs` caused potential
-collision issues of user keys with the argument names in the method
-signature.
-
-To resolve this, redis-py 3.0 has changed these three commands to all
-accept a single positional argument named mapping that is expected to be
-a dict. For MSET and MSETNX, the dict is a mapping of key-names -\>
-values. For ZADD, the dict is a mapping of element-names -\> score.
-
-MSET, MSETNX and ZADD now look like:
-
-``` pycon
-def mset(self, mapping):
-def msetnx(self, mapping):
-def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False):
-```
-
-All 2.X users that use these commands must modify their code to supply
-keys and values as a dict to these commands.
+The above code connects to localhost on port 6379, sets a value in Redis, and retrieves it. All responses are returned as bytes in Python, to receive decoded strings, set *decode_responses=True*.  For this, and more connection options, see [these examples](https://redis.readthedocs.io/en/stable/examples.html).
 
-### ZINCRBY
+### Connection Pools
 
-redis-py 2.X accidentally modified the argument order of ZINCRBY,
-swapping the order of value and amount. ZINCRBY now looks like:
+By default, redis-py uses a connection pool to manage connections. Each instance of a Redis class receives its own connection pool. You can however define your own [redis.ConnectionPool](https://redis.readthedocs.io/en/stable/connections.html#connection-pools).
 
 ``` python
-def zincrby(self, name, amount, value):
+>>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
+>>> r = redis.Redis(connection_pool=pool)
 ```
 
-All 2.X users that rely on ZINCRBY must swap the order of amount and
-value for the command to continue to work as intended.
-
-### Encoding of User Input
-
-redis-py 3.0 only accepts user data as bytes, strings or numbers (ints,
-longs and floats). Attempting to specify a key or a value as any other
-type will raise a DataError exception.
-
-redis-py 2.X attempted to coerce any type of input into a string. While
-occasionally convenient, this caused all sorts of hidden errors when
-users passed boolean values (which were coerced to \'True\' or
-\'False\'), a None value (which was coerced to \'None\') or other
-values, such as user defined types.
-
-All 2.X users should make sure that the keys and values they pass into
-redis-py are either bytes, strings or numbers.
-
-### Locks
-
-redis-py 3.0 drops support for the pipeline-based Lock and now only
-supports the Lua-based lock. In doing so, LuaLock has been renamed to
-Lock. This also means that redis-py Lock objects require Redis server
-2.6 or greater.
-
-2.X users that were explicitly referring to *LuaLock* will have to now
-refer to *Lock* instead.
-
-### Locks as Context Managers
+Alternatively, you might want to look at [Async connections](https://redis.readthedocs.io/en/stable/examples/asyncio_examples.html), or [Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#cluster-client), or even [Async Cluster connections](https://redis.readthedocs.io/en/stable/connections.html#async-cluster-client).
 
-redis-py 3.0 now raises a LockError when using a lock as a context
-manager and the lock cannot be acquired within the specified timeout.
-This is more of a bug fix than a backwards incompatible change. However,
-given an error is now raised where none was before, this might alarm
-some users.
+### Redis Commands
 
-2.X users should make sure they're wrapping their lock code in a
-try/catch like this:
+There is built-in support for all of the [out-of-the-box Redis commands](https://redis.io/commands). They are exposed using the raw Redis command names (`HSET`, `HGETALL`, etc.) except where a word (i.e. del) is reserved by the language. The complete set of commands can be found [here](https://github.com/redis/redis-py/tree/master/redis/commands), or [the documentation](https://redis.readthedocs.io/en/stable/commands.html).
 
-``` python
-try:
-    with r.lock('my-lock-key', blocking_timeout=5) as lock:
-        # code you want executed only after the lock has been acquired
-except LockError:
-    # the lock wasn't acquired
-```
-
-## API Reference
+## Advanced Topics
 
 The [official Redis command documentation](https://redis.io/commands)
 does a great job of explaining each command in detail. redis-py attempts
 to adhere to the official command syntax. There are a few exceptions:
 
--   **SELECT**: Not implemented. See the explanation in the Thread
-    Safety section below.
--   **DEL**: *del* is a reserved keyword in the Python syntax.
-    Therefore redis-py uses *delete* instead.
 -   **MULTI/EXEC**: These are implemented as part of the Pipeline class.
     The pipeline is wrapped with the MULTI and EXEC statements by
     default when it is executed, which can be disabled by specifying
     transaction=False. See more about Pipelines below.
+
 -   **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as
     a separate class as it places the underlying connection in a state
     where it can\'t execute non-pubsub commands. Calling the pubsub
@@ -188,1112 +91,37 @@ to adhere to the official command syntax. There are a few exceptions:
     PUBLISH from the Redis client (see [this comment on issue
     #151](https://github.com/redis/redis-py/issues/151#issuecomment-1545015)
     for details).
--   **SCAN/SSCAN/HSCAN/ZSCAN**: The *SCAN commands are implemented as
-    they exist in the Redis documentation. In addition, each command has
-    an equivalent iterator method. These are purely for convenience so
-    the user doesn't have to keep track of the cursor while iterating.
-    Use the scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this
-    behavior.
-
-## Connecting to Redis
-
-### Client Classes: Redis and StrictRedis
-
-redis-py 3.0 drops support for the legacy *Redis* client class.
-*StrictRedis* has been renamed to *Redis* and an alias named
-*StrictRedis* is provided so that users previously using
-*StrictRedis* can continue to run unchanged.
-
-The 2.X *Redis* class provided alternative implementations of a few
-commands. This confused users (rightfully so) and caused a number of
-support issues. To make things easier going forward, it was decided to
-drop support for these alternate implementations and instead focus on a
-single client class.
-
-2.X users that are already using StrictRedis don\'t have to change the
-class name. StrictRedis will continue to work for the foreseeable
-future.
 
-2.X users that are using the Redis class will have to make changes if
-they use any of the following commands:
-
--   SETEX: The argument order has changed. The new order is (name, time,
-    value).
--   LREM: The argument order has changed. The new order is (name, num,
-    value).
--   TTL and PTTL: The return value is now always an int and matches the
-    official Redis command (>0 indicates the timeout, -1 indicates that
-    the key exists but that it has no expire time set, -2 indicates that
-    the key does not exist)
-
-
-### Connection Pools
-
-Behind the scenes, redis-py uses a connection pool to manage connections
-to a Redis server. By default, each Redis instance you create will in
-turn create its own connection pool. You can override this behavior and
-use an existing connection pool by passing an already created connection
-pool instance to the connection_pool argument of the Redis class. You
-may choose to do this in order to implement client side sharding or have
-fine-grain control of how connections are managed.
-
-``` pycon
->>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
->>> r = redis.Redis(connection_pool=pool)
-```
-
-### Connections
-
-ConnectionPools manage a set of Connection instances. redis-py ships
-with two types of Connections. The default, Connection, is a normal TCP
-socket based connection. The UnixDomainSocketConnection allows for
-clients running on the same device as the server to connect via a unix
-domain socket. To use a UnixDomainSocketConnection connection, simply
-pass the unix_socket_path argument, which is a string to the unix domain
-socket file. Additionally, make sure the unixsocket parameter is defined
-in your redis.conf file. It\'s commented out by default.
-
-``` pycon
->>> r = redis.Redis(unix_socket_path='/tmp/redis.sock')
-```
-
-You can create your own Connection subclasses as well. This may be
-useful if you want to control the socket behavior within an async
-framework. To instantiate a client class using your own connection, you
-need to create a connection pool, passing your class to the
-connection_class argument. Other keyword parameters you pass to the pool
-will be passed to the class specified during initialization.
-
-``` pycon
->>> pool = redis.ConnectionPool(connection_class=YourConnectionClass,
-                                your_arg='...', ...)
-```
-
-Connections maintain an open socket to the Redis server. Sometimes these
-sockets are interrupted or disconnected for a variety of reasons. For
-example, network appliances, load balancers and other services that sit
-between clients and servers are often configured to kill connections
-that remain idle for a given threshold.
-
-When a connection becomes disconnected, the next command issued on that
-connection will fail and redis-py will raise a ConnectionError to the
-caller. This allows each application that uses redis-py to handle errors
-in a way that\'s fitting for that specific application. However,
-constant error handling can be verbose and cumbersome, especially when
-socket disconnections happen frequently in many production environments.
-
-To combat this, redis-py can issue regular health checks to assess the
-liveliness of a connection just before issuing a command. Users can pass
-`health_check_interval=N` to the Redis or ConnectionPool classes or as a
-query argument within a Redis URL. The value of `health_check_interval`
-must be an integer. A value of `0`, the default, disables health checks.
-Any positive integer will enable health checks. Health checks are
-performed just before a command is executed if the underlying connection
-has been idle for more than `health_check_interval` seconds. For
-example, `health_check_interval=30` will ensure that a health check is
-run on any connection that has been idle for 30 or more seconds just
-before a command is executed on that connection.
-
-If your application is running in an environment that disconnects idle
-connections after 30 seconds you should set the `health_check_interval`
-option to a value less than 30.
-
-This option also works on any PubSub connection that is created from a
-client with `health_check_interval` enabled. PubSub users need to ensure
-that *get_message()* or `listen()` are called more frequently than
-`health_check_interval` seconds. It is assumed that most workloads
-already do this.
-
-If your PubSub use case doesn\'t call `get_message()` or `listen()`
-frequently, you should call `pubsub.check_health()` explicitly on a
-regularly basis.
-
-### SSL Connections
-
-redis-py 3.0 changes the default value of the
-ssl_cert_reqs option from None to
-\'required\'. See [Issue
-1016](https://github.com/redis/redis-py/issues/1016). This change
-enforces hostname validation when accepting a cert from a remote SSL
-terminator. If the terminator doesn\'t properly set the hostname on the
-cert this will cause redis-py 3.0 to raise a ConnectionError.
-
-This check can be disabled by setting ssl_cert_reqs to
-None. Note that doing so removes the security check. Do so
-at your own risk.
-
-Example with hostname verification using a local certificate bundle
-(linux):
-
-``` pycon
->>> import redis
->>> r = redis.Redis(host='xxxxxx.cache.amazonaws.com', port=6379, db=0,
-                    ssl=True,
-                    ssl_ca_certs='/etc/ssl/certs/ca-certificates.crt')
->>> r.set('foo', 'bar')
-True
->>> r.get('foo')
-b'bar'
-```
-
-Example with hostname verification using
-[certifi](https://pypi.org/project/certifi/):
-
-``` pycon
->>> import redis, certifi
->>> r = redis.Redis(host='xxxxxx.cache.amazonaws.com', port=6379, db=0,
-                    ssl=True, ssl_ca_certs=certifi.where())
->>> r.set('foo', 'bar')
-True
->>> r.get('foo')
-b'bar'
-```
-
-Example turning off hostname verification (not recommended):
-
-``` pycon
->>> import redis
->>> r = redis.Redis(host='xxxxxx.cache.amazonaws.com', port=6379, db=0,
-                    ssl=True, ssl_cert_reqs=None)
->>> r.set('foo', 'bar')
-True
->>> r.get('foo')
-b'bar'
-```
-
-### Sentinel support
-
-redis-py can be used together with [Redis
-Sentinel](https://redis.io/topics/sentinel) to discover Redis nodes. You
-need to have at least one Sentinel daemon running in order to use
-redis-py's Sentinel support.
-
-Connecting redis-py to the Sentinel instance(s) is easy. You can use a
-Sentinel connection to discover the master and slaves network addresses:
-
-``` pycon
->>> from redis import Sentinel
->>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
->>> sentinel.discover_master('mymaster')
-('127.0.0.1', 6379)
->>> sentinel.discover_slaves('mymaster')
-[('127.0.0.1', 6380)]
-```
-
-To connect to a sentinel which uses SSL ([see SSL
-connections](#ssl-connections) for more examples of SSL configurations):
-
-``` pycon
->>> from redis import Sentinel
->>> sentinel = Sentinel([('localhost', 26379)],
-                        ssl=True,
-                        ssl_ca_certs='/etc/ssl/certs/ca-certificates.crt')
->>> sentinel.discover_master('mymaster')
-('127.0.0.1', 6379)
-```
-
-You can also create Redis client connections from a Sentinel instance.
-You can connect to either the master (for write operations) or a slave
-(for read-only operations).
-
-``` pycon
->>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
->>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
->>> master.set('foo', 'bar')
->>> slave.get('foo')
-b'bar'
-```
-
-The master and slave objects are normal Redis instances with their
-connection pool bound to the Sentinel instance. When a Sentinel backed
-client attempts to establish a connection, it first queries the Sentinel
-servers to determine an appropriate host to connect to. If no server is
-found, a MasterNotFoundError or SlaveNotFoundError is raised. Both
-exceptions are subclasses of ConnectionError.
-
-When trying to connect to a slave client, the Sentinel connection pool
-will iterate over the list of slaves until it finds one that can be
-connected to. If no slaves can be connected to, a connection will be
-established with the master.
-
-See [Guidelines for Redis clients with support for Redis
-Sentinel](https://redis.io/topics/sentinel-clients) to learn more about
-Redis Sentinel.
-
---------------------------
-
-### Parsers
-
-Parser classes provide a way to control how responses from the Redis
-server are parsed. redis-py ships with two parser classes, the
-PythonParser and the HiredisParser. By default, redis-py will attempt to
-use the HiredisParser if you have the hiredis module installed and will
-fallback to the PythonParser otherwise.
-
-Hiredis is a C library maintained by the core Redis team. Pieter
-Noordhuis was kind enough to create Python bindings. Using Hiredis can
-provide up to a 10x speed improvement in parsing responses from the
-Redis server. The performance increase is most noticeable when
-retrieving many pieces of data, such as from LRANGE or SMEMBERS
-operations.
-
-Hiredis is available on PyPI, and can be installed via pip just like
-redis-py.
-
-``` bash
-$ pip install hiredis
-```
-
-### Response Callbacks
-
-The client class uses a set of callbacks to cast Redis responses to the
-appropriate Python type. There are a number of these callbacks defined
-on the Redis client class in a dictionary called RESPONSE_CALLBACKS.
-
-Custom callbacks can be added on a per-instance basis using the
-set_response_callback method. This method accepts two arguments: a
-command name and the callback. Callbacks added in this manner are only
-valid on the instance the callback is added to. If you want to define or
-override a callback globally, you should make a subclass of the Redis
-client and add your callback to its RESPONSE_CALLBACKS class dictionary.
-
-Response callbacks take at least one parameter: the response from the
-Redis server. Keyword arguments may also be accepted in order to further
-control how to interpret the response. These keyword arguments are
-specified during the command\'s call to execute_command. The ZRANGE
-implementation demonstrates the use of response callback keyword
-arguments with its \"withscores\" argument.
-
-### Thread Safety
-
-Redis client instances can safely be shared between threads. Internally,
-connection instances are only retrieved from the connection pool during
-command execution, and returned to the pool directly after. Command
-execution never modifies state on the client instance.
-
-However, there is one caveat: the Redis SELECT command. The SELECT
-command allows you to switch the database currently in use by the
-connection. That database remains selected until another is selected or
-until the connection is closed. This creates an issue in that
-connections could be returned to the pool that are connected to a
-different database.
-
-As a result, redis-py does not implement the SELECT command on client
-instances. If you use multiple Redis databases within the same
-application, you should create a separate client instance (and possibly
-a separate connection pool) for each database.
-
-It is not safe to pass PubSub or Pipeline objects between threads.
+For more details, please see the documentation on [advanced topics page](https://redis.readthedocs.io/en/stable/advanced_features.html).
 
 ### Pipelines
 
-Pipelines are a subclass of the base Redis class that provide support
-for buffering multiple commands to the server in a single request. They
-can be used to dramatically increase the performance of groups of
-commands by reducing the number of back-and-forth TCP packets between
-the client and server.
+The following is a basic example of a [Redis pipeline](https://redis.io/docs/manual/pipelining/), a method to optimize round-trip calls, by batching Redis commands, and receiving their results as a list.
 
-Pipelines are quite simple to use:
 
-``` pycon
->>> r = redis.Redis(...)
->>> r.set('bing', 'baz')
->>> # Use the pipeline() method to create a pipeline instance
+``` python
 >>> pipe = r.pipeline()
->>> # The following SET commands are buffered
->>> pipe.set('foo', 'bar')
->>> pipe.get('bing')
->>> # the EXECUTE call sends all buffered commands to the server, returning
->>> # a list of responses, one for each command.
+>>> pipe.set('foo', 5)
+>>> pipe.set('bar', 18.5)
+>>> pipe.set('blee', "hello world!")
 >>> pipe.execute()
-[True, b'baz']
-```
-
-For ease of use, all commands being buffered into the pipeline return
-the pipeline object itself. Therefore calls can be chained like:
-
-``` pycon
->>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute()
-[True, True, 6]
-```
-
-In addition, pipelines can also ensure the buffered commands are
-executed atomically as a group. This happens by default. If you want to
-disable the atomic nature of a pipeline but still want to buffer
-commands, you can turn off transactions.
-
-``` pycon
->>> pipe = r.pipeline(transaction=False)
+[True, True, True]
 ```
 
-A common issue occurs when requiring atomic transactions but needing to
-retrieve values in Redis prior for use within the transaction. For
-instance, let\'s assume that the INCR command didn\'t exist and we need
-to build an atomic version of INCR in Python.
+### PubSub
 
-The completely naive implementation could GET the value, increment it in
-Python, and SET the new value back. However, this is not atomic because
-multiple clients could be doing this at the same time, each getting the
-same value from GET.
+The following example shows how to utilize [Redis Pub/Sub](https://redis.io/docs/manual/pubsub/) to subscribe to specific channels.
 
-Enter the WATCH command. WATCH provides the ability to monitor one or
-more keys prior to starting a transaction. If any of those keys change
-prior the execution of that transaction, the entire transaction will be
-canceled and a WatchError will be raised. To implement our own
-client-side INCR command, we could do something like this:
-
-``` pycon
->>> with r.pipeline() as pipe:
-...     while True:
-...         try:
-...             # put a WATCH on the key that holds our sequence value
-...             pipe.watch('OUR-SEQUENCE-KEY')
-...             # after WATCHing, the pipeline is put into immediate execution
-...             # mode until we tell it to start buffering commands again.
-...             # this allows us to get the current value of our sequence
-...             current_value = pipe.get('OUR-SEQUENCE-KEY')
-...             next_value = int(current_value) + 1
-...             # now we can put the pipeline back into buffered mode with MULTI
-...             pipe.multi()
-...             pipe.set('OUR-SEQUENCE-KEY', next_value)
-...             # and finally, execute the pipeline (the set command)
-...             pipe.execute()
-...             # if a WatchError wasn't raised during execution, everything
-...             # we just did happened atomically.
-...             break
-...        except WatchError:
-...             # another client must have changed 'OUR-SEQUENCE-KEY' between
-...             # the time we started WATCHing it and the pipeline's execution.
-...             # our best bet is to just retry.
-...             continue
-```
-
-Note that, because the Pipeline must bind to a single connection for the
-duration of a WATCH, care must be taken to ensure that the connection is
-returned to the connection pool by calling the reset() method. If the
-Pipeline is used as a context manager (as in the example above) reset()
-will be called automatically. Of course you can do this the manual way
-by explicitly calling reset():
-
-``` pycon
->>> pipe = r.pipeline()
->>> while True:
-...     try:
-...         pipe.watch('OUR-SEQUENCE-KEY')
-...         ...
-...         pipe.execute()
-...         break
-...     except WatchError:
-...         continue
-...     finally:
-...         pipe.reset()
-```
-
-A convenience method named \"transaction\" exists for handling all the
-boilerplate of handling and retrying watch errors. It takes a callable
-that should expect a single parameter, a pipeline object, and any number
-of keys to be WATCHed. Our client-side INCR command above can be written
-like this, which is much easier to read:
-
-``` pycon
->>> def client_side_incr(pipe):
-...     current_value = pipe.get('OUR-SEQUENCE-KEY')
-...     next_value = int(current_value) + 1
-...     pipe.multi()
-...     pipe.set('OUR-SEQUENCE-KEY', next_value)
->>>
->>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
-[True]
-```
-
-Be sure to call pipe.multi() in the callable passed to
-Redis.transaction prior to any write commands.
-
-### Publish / Subscribe
-
-redis-py includes a PubSub object that subscribes to
-channels and listens for new messages. Creating a PubSub
-object is easy.
-
-``` pycon
+``` python
 >>> r = redis.Redis(...)
 >>> p = r.pubsub()
-```
-
-Once a PubSub instance is created, channels and patterns
-can be subscribed to.
-
-``` pycon
 >>> p.subscribe('my-first-channel', 'my-second-channel', ...)
->>> p.psubscribe('my-*', ...)
-```
-
-The PubSub instance is now subscribed to those
-channels/patterns. The subscription confirmations can be seen by reading
-messages from the PubSub instance.
-
-``` pycon
 >>> p.get_message()
 {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
->>> p.get_message()
-{'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2}
->>> p.get_message()
-{'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3}
-```
-
-Every message read from a PubSub instance will be a
-dictionary with the following keys.
-
--   **type**: One of the following: \'subscribe\', \'unsubscribe\',
-    \'psubscribe\', \'punsubscribe\', \'message\', \'pmessage\'
--   **channel**: The channel \[un\]subscribed to or the channel a
-    message was published to
--   **pattern**: The pattern that matched a published message\'s
-    channel. Will be None in all cases except for
-    \'pmessage\' types.
--   **data**: The message data. With \[un\]subscribe messages, this
-    value will be the number of channels and patterns the connection is
-    currently subscribed to. With \[p\]message messages, this value will
-    be the actual published message.
-
-Let\'s send a message now.
-
-``` pycon
-# the publish method returns the number matching channel and pattern
-# subscriptions. 'my-first-channel' matches both the 'my-first-channel'
-# subscription and the 'my-*' pattern subscription, so this message will
-# be delivered to 2 channels/patterns
->>> r.publish('my-first-channel', 'some data')
-2
->>> p.get_message()
-{'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'}
->>> p.get_message()
-{'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'}
-```
-
-Unsubscribing works just like subscribing. If no arguments are passed to
-\[p\]unsubscribe, all channels or patterns will be unsubscribed from.
-
-``` pycon
->>> p.unsubscribe()
->>> p.punsubscribe('my-*')
->>> p.get_message()
-{'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'}
->>> p.get_message()
-{'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'}
->>> p.get_message()
-{'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'}
-```
-
-redis-py also allows you to register callback functions to handle
-published messages. Message handlers take a single argument, the
-message, which is a dictionary just like the examples above. To
-subscribe to a channel or pattern with a message handler, pass the
-channel or pattern name as a keyword argument with its value being the
-callback function.
-
-When a message is read on a channel or pattern with a message handler,
-the message dictionary is created and passed to the message handler. In
-this case, a None value is returned from get_message()
-since the message was already handled.
-
-``` pycon
->>> def my_handler(message):
-...     print('MY HANDLER: ', message['data'])
->>> p.subscribe(**{'my-channel': my_handler})
-# read the subscribe confirmation message
->>> p.get_message()
-{'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1}
->>> r.publish('my-channel', 'awesome data')
-1
-# for the message handler to work, we need tell the instance to read data.
-# this can be done in several ways (read more below). we'll just use
-# the familiar get_message() function for now
->>> message = p.get_message()
-MY HANDLER:  awesome data
-# note here that the my_handler callback printed the string above.
-# `message` is None because the message was handled by our handler.
->>> print(message)
-None
-```
-
-If your application is not interested in the (sometimes noisy)
-subscribe/unsubscribe confirmation messages, you can ignore them by
-passing ignore_subscribe_messages=True to
-r.pubsub(). This will cause all subscribe/unsubscribe
-messages to be read, but they won\'t bubble up to your application.
-
-``` pycon
->>> p = r.pubsub(ignore_subscribe_messages=True)
->>> p.subscribe('my-channel')
->>> p.get_message()  # hides the subscribe message and returns None
->>> r.publish('my-channel', 'my data')
-1
->>> p.get_message()
-{'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'}
-```
-
-There are three different strategies for reading messages.
-
-The examples above have been using pubsub.get_message().
-Behind the scenes, get_message() uses the system\'s
-\'select\' module to quickly poll the connection\'s socket. If there\'s
-data available to be read, get_message() will read it,
-format the message and return it or pass it to a message handler. If
-there\'s no data to be read, get_message() will
-immediately return None. This makes it trivial to integrate into an
-existing event loop inside your application.
-
-``` pycon
->>> while True:
->>>     message = p.get_message()
->>>     if message:
->>>         # do something with the message
->>>     time.sleep(0.001)  # be nice to the system :)
-```
-
-Older versions of redis-py only read messages with
-pubsub.listen(). listen() is a generator that blocks until
-a message is available. If your application doesn\'t need to do anything
-else but receive and act on messages received from redis, listen() is an
-easy way to get up an running.
-
-``` pycon
->>> for message in p.listen():
-...     # do something with the message
-```
-
-The third option runs an event loop in a separate thread.
-pubsub.run_in_thread() creates a new thread and starts the
-event loop. The thread object is returned to the caller of
-[un_in_thread(). The caller can use the
-thread.stop() method to shut down the event loop and
-thread. Behind the scenes, this is simply a wrapper around
-get_message() that runs in a separate thread, essentially
-creating a tiny non-blocking event loop for you.
-run_in_thread() takes an optional sleep_time
-argument. If specified, the event loop will call
-time.sleep() with the value in each iteration of the loop.
-
-Note: Since we\'re running in a separate thread, there\'s no way to
-handle messages that aren\'t automatically handled with registered
-message handlers. Therefore, redis-py prevents you from calling
-run_in_thread() if you\'re subscribed to patterns or
-channels that don\'t have message handlers attached.
-
-``` pycon
->>> p.subscribe(**{'my-channel': my_handler})
->>> thread = p.run_in_thread(sleep_time=0.001)
-# the event loop is now running in the background processing messages
-# when it's time to shut it down...
->>> thread.stop()
-```
-
-run_in_thread also supports an optional exception handler,
-which lets you catch exceptions that occur within the worker thread and
-handle them appropriately. The exception handler will take as arguments
-the exception itself, the pubsub object, and the worker thread returned
-by run_in_thread.
-
-``` pycon
->>> p.subscribe(**{'my-channel': my_handler})
->>> def exception_handler(ex, pubsub, thread):
->>>     print(ex)
->>>     thread.stop()
->>>     thread.join(timeout=1.0)
->>>     pubsub.close()
->>> thread = p.run_in_thread(exception_handler=exception_handler)
-```
-
-A PubSub object adheres to the same encoding semantics as the client
-instance it was created from. Any channel or pattern that\'s unicode
-will be encoded using the charset specified on the client
-before being sent to Redis. If the client\'s
-decode_responses flag is set the False (the default), the
-\'channel\', \'pattern\' and \'data\' values in message dictionaries
-will be byte strings (str on Python 2, bytes on Python 3). If the
-client\'s decode_responses is True, then the \'channel\',
-\'pattern\' and \'data\' values will be automatically decoded to unicode
-strings using the client\'s charset.
-
-PubSub objects remember what channels and patterns they are subscribed
-to. In the event of a disconnection such as a network error or timeout,
-the PubSub object will re-subscribe to all prior channels and patterns
-when reconnecting. Messages that were published while the client was
-disconnected cannot be delivered. When you\'re finished with a PubSub
-object, call its .close() method to shutdown the
-connection.
-
-``` pycon
->>> p = r.pubsub()
->>> ...
->>> p.close()
-```
-
-The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also
-supported:
-
-``` pycon
->>> r.pubsub_channels()
-[b'foo', b'bar']
->>> r.pubsub_numsub('foo', 'bar')
-[(b'foo', 9001), (b'bar', 42)]
->>> r.pubsub_numsub('baz')
-[(b'baz', 0)]
->>> r.pubsub_numpat()
-1204
-```
-
-### Monitor
-
-redis-py includes a Monitor object that streams every
-command processed by the Redis server. Use listen() on the
-Monitor object to block until a command is received.
-
-``` pycon
->>> r = redis.Redis(...)
->>> with r.monitor() as m:
->>>     for command in m.listen():
->>>         print(command)
 ```
 
-### Lua Scripting
 
-redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there
-are a number of edge cases that make these commands tedious to use in
-real world scenarios. Therefore, redis-py exposes a Script object that
-makes scripting much easier to use. (RedisClusters have limited support for
-scripting.)
-
-To create a Script instance, use the register_script
-function on a client instance passing the Lua code as the first
-argument. register_script returns a Script instance that
-you can use throughout your code.
-
-The following trivial Lua script accepts two parameters: the name of a
-key and a multiplier value. The script fetches the value stored in the
-key, multiplies it with the multiplier value and returns the result.
-
-``` pycon
->>> r = redis.Redis()
->>> lua = """
-... local value = redis.call('GET', KEYS[1])
-... value = tonumber(value)
-... return value * ARGV[1]"""
->>> multiply = r.register_script(lua)
-```
-
-multiply is now a Script instance that is invoked by
-calling it like a function. Script instances accept the following
-optional arguments:
-
--   **keys**: A list of key names that the script will access. This
-    becomes the KEYS list in Lua.
--   **args**: A list of argument values. This becomes the ARGV list in
-    Lua.
--   **client**: A redis-py Client or Pipeline instance that will invoke
-    the script. If client isn\'t specified, the client that initially
-    created the Script instance (the one that
-    register_script was invoked from) will be used.
-
-Continuing the example from above:
-
-``` pycon
->>> r.set('foo', 2)
->>> multiply(keys=['foo'], args=[5])
-10
-```
-
-The value of key \'foo\' is set to 2. When multiply is invoked, the
-\'foo\' key is passed to the script along with the multiplier value of
-5. Lua executes the script and returns the result, 10.
-
-Script instances can be executed using a different client instance, even
-one that points to a completely different Redis server.
-
-``` pycon
->>> r2 = redis.Redis('redis2.example.com')
->>> r2.set('foo', 3)
->>> multiply(keys=['foo'], args=[5], client=r2)
-15
-```
-
-The Script object ensures that the Lua script is loaded into Redis\'s
-script cache. In the event of a NOSCRIPT error, it will load the script
-and retry executing it.
-
-Script objects can also be used in pipelines. The pipeline instance
-should be passed as the client argument when calling the script. Care is
-taken to ensure that the script is registered in Redis\'s script cache
-just prior to pipeline execution.
-
-``` pycon
->>> pipe = r.pipeline()
->>> pipe.set('foo', 5)
->>> multiply(keys=['foo'], args=[5], client=pipe)
->>> pipe.execute()
-[True, 25]
-```
-
-
-### Scan Iterators
-
-The \*SCAN commands introduced in Redis 2.8 can be cumbersome to use.
-While these commands are fully supported, redis-py also exposes the
-following methods that return Python iterators for convenience:
-scan_iter, hscan_iter,
-sscan_iter and zscan_iter.
-
-``` pycon
->>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')):
-...     r.set(key, value)
->>> for key in r.scan_iter():
-...     print(key, r.get(key))
-A 1
-B 2
-C 3
-```
-
-### Cluster Mode
-
-redis-py now supports cluster mode and provides a client for
-[Redis Cluster](<https://redis.io/topics/cluster-tutorial>).
-
-The cluster client is based on Grokzen's
-[redis-py-cluster](https://github.com/Grokzen/redis-py-cluster), has added bug
-fixes, and now supersedes that library. Support for these changes is thanks to
-his contributions.
-
-To learn more about Redis Cluster, see
-[Redis Cluster specifications](https://redis.io/topics/cluster-spec).
-
-**Create RedisCluster:**
-
-Connecting redis-py to a Redis Cluster instance(s) requires at a minimum a
-single node for cluster discovery. There are multiple ways in which a cluster
-instance can be created:
-
-- Using 'host' and 'port' arguments:
-
-``` pycon
->>> from redis.cluster import RedisCluster as Redis
->>> rc = Redis(host='localhost', port=6379)
->>> print(rc.get_nodes())
-    [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6378,db=0>>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6377,db=0>>>]]
-```
-- Using the Redis URL specification:
-
-``` pycon
->>> from redis.cluster import RedisCluster as Redis
->>> rc = Redis.from_url("redis://localhost:6379/0")
-```
-
-- Directly, via the ClusterNode class:
-
-``` pycon
->>> from redis.cluster import RedisCluster as Redis
->>> from redis.cluster import ClusterNode
->>> nodes = [ClusterNode('localhost', 6379), ClusterNode('localhost', 6378)]
->>> rc = Redis(startup_nodes=nodes)
-```
-
-When a RedisCluster instance is being created it first attempts to establish a
-connection to one of the provided startup nodes. If none of the startup nodes
-are reachable, a 'RedisClusterException' will be thrown.
-After a connection to the one of the cluster's nodes is established, the
-RedisCluster instance will be initialized with 3 caches:
-a slots cache which maps each of the 16384 slots to the node/s handling them,
-a nodes cache that contains ClusterNode objects (name, host, port, redis connection)
-for all of the cluster's nodes, and a commands cache contains all the server
-supported commands that were retrieved using the Redis 'COMMAND' output.
-See *RedisCluster specific options* below for more.
-
-RedisCluster instance can be directly used to execute Redis commands. When a
-command is being executed through the cluster instance, the target node(s) will
-be internally determined. When using a key-based command, the target node will
-be the node that holds the key's slot.
-Cluster management commands and other commands that are not key-based have a
-parameter called 'target_nodes' where you can specify which nodes to execute
-the command on. In the absence of target_nodes, the command will be executed
-on the default cluster node. As part of cluster instance initialization, the
-cluster's default node is randomly selected from the cluster's primaries, and
-will be updated upon reinitialization. Using r.get_default_node(), you can
-get the cluster's default node, or you can change it using the
-'set_default_node' method.
-
-The 'target_nodes' parameter is explained in the following section,
-'Specifying Target Nodes'.
-
-``` pycon
->>> # target-nodes: the node that holds 'foo1's key slot
->>> rc.set('foo1', 'bar1')
->>> # target-nodes: the node that holds 'foo2's key slot
->>> rc.set('foo2', 'bar2')
->>> # target-nodes: the node that holds 'foo1's key slot
->>> print(rc.get('foo1'))
-b'bar'
->>> # target-node: default-node
->>> print(rc.keys())
-[b'foo1']
->>> # target-node: default-node
->>> rc.ping()
-```
-
-**Specifying Target Nodes:**
-
-As mentioned above, all non key-based RedisCluster commands accept the kwarg
-parameter 'target_nodes' that specifies the node/nodes that the command should
-be executed on.
-The best practice is to specify target nodes using RedisCluster class's node
-flags: PRIMARIES, REPLICAS, ALL_NODES, RANDOM. When a nodes flag is passed
-along with a command, it will be internally resolved to the relevant node/s.
-If the nodes topology of the cluster changes during the execution of a command,
-the client will be able to resolve the nodes flag again with the new topology
-and attempt to retry executing the command.
-
-``` pycon
->>> from redis.cluster import RedisCluster as Redis
->>> # run cluster-meet command on all of the cluster's nodes
->>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Redis.ALL_NODES)
->>> # ping all replicas
->>> rc.ping(target_nodes=Redis.REPLICAS)
->>> # ping a random node
->>> rc.ping(target_nodes=Redis.RANDOM)
->>> # get the keys from all cluster nodes
->>> rc.keys(target_nodes=Redis.ALL_NODES)
-[b'foo1', b'foo2']
->>> # execute bgsave in all primaries
->>> rc.bgsave(Redis.PRIMARIES)
-```
-
-You could also pass ClusterNodes directly if you want to execute a command on a
-specific node / node group that isn't addressed by the nodes flag. However, if
-the command execution fails due to cluster topology changes, a retry attempt
-will not be made, since the passed target node/s may no longer be valid, and
-the relevant cluster or connection error will be returned.
-
-``` pycon
->>> node = rc.get_node('localhost', 6379)
->>> # Get the keys only for that specific node
->>> rc.keys(target_nodes=node)
->>> # get Redis info from a subset of primaries
->>> subset_primaries = [node for node in rc.get_primaries() if node.port > 6378]
->>> rc.info(target_nodes=subset_primaries)
-```
-
-In addition, the RedisCluster instance can query the Redis instance of a
-specific node and execute commands on that node directly. The Redis client,
-however, does not handle cluster failures and retries.
-
-``` pycon
->>> cluster_node = rc.get_node(host='localhost', port=6379)
->>> print(cluster_node)
-[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>]
->>> r = cluster_node.redis_connection
->>> r.client_list()
-[{'id': '276', 'addr': '127.0.0.1:64108', 'fd': '16', 'name': '', 'age': '0', 'idle': '0', 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '26', 'qbuf-free': '32742', 'argv-mem': '10', 'obl': '0', 'oll': '0', 'omem': '0', 'tot-mem': '54298', 'events': 'r', 'cmd': 'client', 'user': 'default'}]
->>> # Get the keys only for that specific node
->>> r.keys()
-[b'foo1']
-```
-
-**Multi-key commands:**
-
-Redis supports multi-key commands in Cluster Mode, such as Set type unions or
-intersections, mset and mget, as long as the keys all hash to the same slot.
-By using RedisCluster client, you can use the known functions (e.g. mget, mset)
-to perform an atomic multi-key operation. However, you must ensure all keys are
-mapped to the same slot, otherwise a RedisClusterException will be thrown.
-Redis Cluster implements a concept called hash tags that can be used in order
-to force certain keys to be stored in the same hash slot, see
-[Keys hash tag](https://redis.io/topics/cluster-spec#keys-hash-tags).
-You can also use nonatomic for some of the multikey operations, and pass keys
-that aren't mapped to the same slot. The client will then map the keys to the
-relevant slots, sending the commands to the slots' node owners. Non-atomic
-operations batch the keys according to their hash value, and then each batch is
-sent separately to the slot's owner.
-
-``` pycon
-# Atomic operations can be used when all keys are mapped to the same slot
->>> rc.mset({'{foo}1': 'bar1', '{foo}2': 'bar2'})
->>> rc.mget('{foo}1', '{foo}2')
-[b'bar1', b'bar2']
-# Non-atomic multi-key operations splits the keys into different slots
->>> rc.mset_nonatomic({'foo': 'value1', 'bar': 'value2', 'zzz': 'value3')
->>> rc.mget_nonatomic('foo', 'bar', 'zzz')
-[b'value1', b'value2', b'value3']
-```
-
-**Cluster PubSub:**
-
-When a ClusterPubSub instance is created without specifying a node, a single
-node will be transparently chosen for the pubsub connection on the
-first command execution. The node will be determined by:
- 1. Hashing the channel name in the request to find its keyslot
- 2. Selecting a node that handles the keyslot: If read_from_replicas is
-    set to true, a replica can be selected.
-
-*Known limitations with pubsub:*
-
-Pattern subscribe and publish do not currently work properly due to key slots.
-If we hash a pattern like fo* we will receive a keyslot for that string but
-there are endless possibilities for channel names based on this pattern -
-unknowable in advance. This feature is not disabled but the commands are not
-currently recommended for use.
-See [redis-py-cluster documentation](https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html)
- for more.
-
-``` pycon
->>> p1 = rc.pubsub()
-# p1 connection will be set to the node that holds 'foo' keyslot
->>> p1.subscribe('foo')
-# p2 connection will be set to node 'localhost:6379'
->>> p2 = rc.pubsub(rc.get_node('localhost', 6379))
-```
-
-**Read Only Mode**
-
-By default, Redis Cluster always returns MOVE redirection response on accessing
-a replica node. You can overcome this limitation and scale read commands by
-triggering READONLY mode.
-
-To enable READONLY mode pass read_from_replicas=True to RedisCluster
-constructor. When set to true, read commands will be assigned between the
-primary and its replications in a Round-Robin manner.
-
-READONLY mode can be set at runtime by calling the readonly() method with
-target_nodes='replicas', and read-write access can be restored by calling the
-readwrite() method.
-
-``` pycon
->>> from cluster import RedisCluster as Redis
-# Use 'debug' log level to print the node that the command is executed on
->>> rc_readonly = Redis(startup_nodes=startup_nodes,
-...                     read_from_replicas=True)
->>> rc_readonly.set('{foo}1', 'bar1')
->>> for i in range(0, 4):
-...     # Assigns read command to the slot's hosts in a Round-Robin manner
-...     rc_readonly.get('{foo}1')
-# set command would be directed only to the slot's primary node
->>> rc_readonly.set('{foo}2', 'bar2')
-# reset READONLY flag
->>> rc_readonly.readwrite(target_nodes='replicas')
-# now the get command would be directed only to the slot's primary node
->>> rc_readonly.get('{foo}1')
-```
-
-**Cluster Pipeline**
-
-ClusterPipeline is a subclass of RedisCluster that provides support for Redis
-pipelines in cluster mode.
-When calling the execute() command, all the commands are grouped by the node
-on which they will be executed, and are then executed by the respective nodes
-in parallel. The pipeline instance will wait for all the nodes to respond
-before returning the result to the caller. Command responses are returned as a
-list sorted in the same order in which they were sent.
-Pipelines can be used to dramatically increase the throughput of Redis Cluster
-by significantly reducing the the number of network round trips between the
-client and the server.
-
-``` pycon
->>> with rc.pipeline() as pipe:
-...     pipe.set('foo', 'value1')
-...     pipe.set('bar', 'value2')
-...     pipe.get('foo')
-...     pipe.get('bar')
-...     print(pipe.execute())
-[True, True, b'value1', b'value2']
-...     pipe.set('foo1', 'bar1').get('foo1').execute()
-[True, b'bar1']
-```
-
-Please note:
-- RedisCluster pipelines currently only support key-based commands.
-- The pipeline gets its 'read_from_replicas' value from the cluster's parameter.
-Thus, if read from replications is enabled in the cluster instance, the pipeline
-will also direct read commands to replicas.
-- The 'transaction' option is NOT supported in cluster-mode. In non-cluster mode,
-the 'transaction' option is available when executing pipelines. This wraps the
-pipeline commands with MULTI/EXEC commands, and effectively turns the pipeline
-commands into a single transaction block. This means that all commands are
-executed sequentially without any interruptions from other clients. However,
-in cluster-mode this is not possible, because commands are partitioned
-according to their respective destination nodes. This means that we can not
-turn the pipeline commands into one transaction block, because in most cases
-they are split up into several smaller pipelines.
-
-**Lua Scripting in Cluster Mode**
-
-Cluster mode has limited support for lua scripting.
-
-The following commands are supported, with caveats:
-- `EVAL` and `EVALSHA`: The command is sent to the relevant node, depending on
-the keys (i.e., in `EVAL "<script>" num_keys key_1 ... key_n ...`). The keys
-_must_ all be on the same node. If the script requires 0 keys, _the command is
-sent to a random (primary) node_.
-- `SCRIPT EXISTS`: The command is sent to all primaries. The result is a list
-of booleans corresponding to the input SHA hashes. Each boolean is an AND of
-"does the script exist on each node?". In other words, each boolean is True iff
-the script exists on all nodes.
-- `SCRIPT FLUSH`: The command is sent to all primaries. The result is a bool
-AND over all nodes' responses.
-- `SCRIPT LOAD`: The command is sent to all primaries. The result is the SHA1
-digest.
-
-The following commands are not supported:
-- `EVAL_RO`
-- `EVALSHA_RO`
-
-Using scripting within pipelines in cluster mode is **not supported**.
-
-
-**RedisCluster specific options**
-
- require_full_coverage: (default=False)
- 
-    When set to False (default value): the client will not require a
-    full coverage of the slots. However, if not all slots are covered,
-    and at least one node has 'cluster-require-full-coverage' set to
-    'yes,' the server will throw a ClusterDownError for some key-based
-    commands. See -
-    https://redis.io/topics/cluster-tutorial#redis-cluster-configuration-parameters
-    When set to True: all slots must be covered to construct the
-    cluster client. If not all slots are covered, RedisClusterException
-    will be thrown.
-    
- read_from_replicas: (default=False)
-
-     Enable read from replicas in READONLY mode. You can read possibly
-     stale data.
-     When set to true, read commands will be assigned between the
-     primary and its replications in a Round-Robin manner.
-     
- dynamic_startup_nodes: (default=True)
-
-     Set the RedisCluster's startup nodes to all of the discovered nodes.
-     If true, the cluster's discovered nodes will be used to determine the
-     cluster nodes-slots mapping in the next topology refresh.
-     It will remove the initial passed startup nodes if their endpoints aren't
-     listed in the CLUSTER SLOTS output.
-     If you use dynamic DNS endpoints for startup nodes but CLUSTER SLOTS lists
-     specific IP addresses, it is best to set it to false.
-     
- cluster_error_retry_attempts: (default=3)
-
-     Retry command execution attempts when encountering ClusterDownError
-     or ConnectionError
-     
- reinitialize_steps: (default=10)
-
-    Specifies the number of MOVED errors that need to occur before
-    reinitializing the whole cluster topology. If a MOVED error occurs
-    and the cluster does not need to be reinitialized on this current
-    error handling, only the MOVED slot will be patched with the
-    redirected node.
-    To reinitialize the cluster on every MOVED error, set
-    reinitialize_steps to 1.
-    To avoid reinitializing the cluster on moved errors, set
-    reinitialize_steps to 0.
+--------------------------
 
 ### Author
 
diff --git a/benchmarks/basic_operations.py b/benchmarks/basic_operations.py
index 1dc4a87..c9f5853 100644
--- a/benchmarks/basic_operations.py
+++ b/benchmarks/basic_operations.py
@@ -13,7 +13,7 @@ def parse_args():
     parser.add_argument(
         "-P",
         type=int,
-        help=("Pipeline <numreq> requests." " Default 1 (no pipeline)."),
+        help=("Pipeline <numreq> requests. Default 1 (no pipeline)."),
         default=1,
     )
     parser.add_argument(
diff --git a/debian/changelog b/debian/changelog
index a7d4919..36ef0e7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+python-redis (4.5.1-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Mon, 27 Feb 2023 05:50:50 -0000
+
 python-redis (4.3.4-3) unstable; urgency=medium
 
   * Skip a bunch of known-to-fail autopkgtests, especially ones that require
diff --git a/dev_requirements.txt b/dev_requirements.txt
index 31ae26e..8285b04 100644
--- a/dev_requirements.txt
+++ b/dev_requirements.txt
@@ -1,16 +1,17 @@
 click==8.0.4
 black==22.3.0
-flake8==4.0.1
+flake8==5.0.4
 flynt~=0.69.0
 isort==5.10.1
 mock==4.0.3
-pytest==6.2.5
+packaging>=20.4
+pytest==7.2.0
 pytest-timeout==2.0.1
-pytest-asyncio>=0.16.0
-tox==3.24.4
+pytest-asyncio>=0.20.2
+tox==3.27.1
 tox-docker==3.1.0
-invoke==1.6.0
-pytest-cov>=3.0.0
+invoke==1.7.3
+pytest-cov>=4.0.0
 vulture>=2.3.0
 ujson>=4.2.0
 wheel>=0.30.0
diff --git a/docs/_static/redis-cube-red-white-rgb.svg b/docs/_static/redis-cube-red-white-rgb.svg
new file mode 100644
index 0000000..936eb23
--- /dev/null
+++ b/docs/_static/redis-cube-red-white-rgb.svg
@@ -0,0 +1,30 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" id="body_1" width="70" height="52">
+
+<defs>
+                <clipPath  id="1">
+
+                    <path clip-rule="evenodd" transform="matrix(1 0 0 1 0.01 0.01)"  d="M0 0L146.42 0L146.42 125.48L0 125.48z" />                </clipPath>
+</defs>
+
+<g transform="matrix(0.41404575 0 0 0.41404575 4.689784 -0)">
+	<g>
+		<g>
+			<g clip-path="url(#1)" >
+                <path transform="matrix(1 0 0 1 0.01 0.01)"  d="M140.66 96.74C 132.86 100.82 92.380005 117.47 83.66 122.03999C 74.94 126.609985 70.270004 126.479996 63.450005 123.21999C 56.630005 119.95999 13.7 102.5 5.92 98.78C 2 96.94 0 95.37 0 93.9L0 93.9L0 79.09C 0 79.09 56.08 66.92 65.13 63.64C 74.17999 60.36 77.31 60.28 85 63.1C 92.69 65.92 138.79 74.24 146.4 77L146.4 77L146.4 91.6C 146.4 93.119995 144.65 94.6 140.65999 96.729996L140.65999 96.729996L140.66 96.74z" stroke="none" fill="#A32422" fill-rule="nonzero" />
+                <path transform="matrix(1 0 0 1 0.01 0.01)"  d="M140.66 82C 132.86 86.06 92.380005 102.71 83.66 107.2C 74.94 111.689995 70.270004 111.649994 63.450005 108.399994C 56.630005 105.149994 13.7 87.69 5.92 84C -1.8599997 80.31 -2.0099998 77.73 5.62 74.75C 13.25 71.77 56.08 55 65.13 51.7C 74.17999 48.4 77.31 48.33 85 51.16C 92.69 53.989998 133 70 140.57 72.79C 148.14001 75.58 148.49 77.87 140.66 81.92L140.66 81.92L140.66 82z" stroke="none" fill="#DC382C" fill-rule="nonzero" />
+                <path transform="matrix(1 0 0 1 0.01 0.01)"  d="M140.66 72.62C 132.86 76.69 92.380005 93.33 83.66 97.82001C 74.94 102.31001 70.31 102.27 63.49 99C 56.670006 95.73 13.7 78.37 5.92 74.66C 2 72.8 0 71.24 0 69.76L0 69.76L0 55C 0 55 56.08 42.79 65.13 39.51C 74.17999 36.229996 77.31 36.14 85 39C 92.69 41.86 138.79 50.1 146.4 52.88L146.4 52.88L146.4 67.48C 146.4 69 144.65 70.52 140.66 72.62z" stroke="none" fill="#A32422" fill-rule="nonzero" />
+                <path transform="matrix(1 0 0 1 0.01 0.01)"  d="M140.66 57.81C 132.86 61.89 92.380005 78.53 83.66 83.020004C 74.94 87.51001 70.270004 87.48 63.450005 84.22C 56.630005 80.96 13.7 63.57 5.92 59.85C -1.8599997 56.129997 -2 53.6 5.62 50.62C 13.24 47.64 56.079998 30.829998 65.13 27.619999C 74.17999 24.41 77.31 24.21 85 27C 92.69 29.79 133 45.94 140.57 48.65C 148.14001 51.360004 148.49 53.74 140.66 57.780003L140.66 57.780003L140.66 57.81z" stroke="none" fill="#DC382C" fill-rule="nonzero" />
+                <path transform="matrix(1 0 0 1 0.01 0.01)"  d="M140.66 47.59C 132.86 51.67 92.380005 68.32 83.66 72.8C 74.94 77.28001 70.31 77.25 63.49 74C 56.670006 70.75 13.7 53.34 5.92 49.63C 2 47.79 0 46.22 0 44.74L0 44.74L0 29.93C 0 29.93 56.08 17.76 65.13 14.49C 74.17999 11.219999 77.31 11.12 85 13.94C 92.69 16.759998 138.77 25.08 146.38 27.86L146.38 27.86L146.38 42.46C 146.4 44 144.65 45.5 140.66 47.59z" stroke="none" fill="#A32422" fill-rule="nonzero" />
+                <path transform="matrix(1 0 0 1 0.01 0.01)"  d="M140.66 32.8C 132.86 36.8 92.380005 53.55 83.66 58C 74.94 62.45 70.270004 62.44 63.450005 59.2C 56.630005 55.960003 13.7 38.53 5.92 34.83C -1.8599997 31.130005 -2.0099998 28.560001 5.62 25.580002C 13.25 22.600002 56.08 5.8 65.13 2.54C 74.17999 -0.72000027 77.31 -0.82 85 2C 92.69 4.8199997 133 20.85 140.57 23.63C 148.14001 26.409998 148.49 28.72 140.66 32.77L140.66 32.77L140.66 32.8z" stroke="none" fill="#DC382C" fill-rule="nonzero" />
+			</g>
+            <path d="M75.51 11.78L85.17 8.61L82.55 14.87L92.38 18.55L79.71 19.87L76.86 26.71L72.28 19.08L57.63 17.76L68.57 13.82L65.28 7.76L75.51 11.78L75.51 11.78L75.51 11.78L75.51 11.78" stroke="none" fill="#FFFFFF" fill-rule="nonzero" />
+            <path d="M76.12 51.71L52.44 41.88L86.36 36.67L76.12 51.71L76.12 51.71L76.12 51.71L76.12 51.71" stroke="none" fill="#FFFFFF" fill-rule="nonzero" />
+            <path transform="matrix(1 0 0 1 0.01 0.01)"  d="M43.28 22.34C 53.28 22.34 61.409996 25.49 61.409996 29.34C 61.409996 33.190002 53.259995 36.34 43.28 36.34C 33.300003 36.34 25.14 33.19 25.14 29.34C 25.14 25.490002 33.27 22.34 43.28 22.34z" stroke="none" fill="#FFFFFF" fill-rule="nonzero" />
+            <path d="M107.39 20.42L127.46 28.35L107.41 36.28L107.39 20.42L107.39 20.42L107.39 20.42L107.39 20.42" stroke="none" fill="#741113" fill-rule="nonzero" />
+            <path d="M107.39 20.42L107.41 36.28L105.23 37.13L85.17 29.2L107.39 20.42L107.39 20.42L107.39 20.42L107.39 20.42" stroke="none" fill="#AC2724" fill-rule="nonzero" />
+		</g>
+	</g>
+</g>
+</svg>
\ No newline at end of file
diff --git a/docs/advanced_features.rst b/docs/advanced_features.rst
new file mode 100644
index 0000000..5fd20c2
--- /dev/null
+++ b/docs/advanced_features.rst
@@ -0,0 +1,436 @@
+Advanced Features
+=================
+
+A note about threading
+----------------------
+
+Redis client instances can safely be shared between threads. Internally,
+connection instances are only retrieved from the connection pool during
+command execution, and returned to the pool directly after. Command
+execution never modifies state on the client instance.
+
+However, there is one caveat: the Redis SELECT command. The SELECT
+command allows you to switch the database currently in use by the
+connection. That database remains selected until another is selected or
+until the connection is closed. This creates an issue in that
+connections could be returned to the pool that are connected to a
+different database.
+
+As a result, redis-py does not implement the SELECT command on client
+instances. If you use multiple Redis databases within the same
+application, you should create a separate client instance (and possibly
+a separate connection pool) for each database.
+
+It is not safe to pass PubSub or Pipeline objects between threads.
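+
+As a rough sketch of the per-database pattern described above (the
+database numbers and key names are purely illustrative):
+
+.. code:: pycon
+
+   >>> import redis
+   >>> # one client (and connection pool) per logical database, instead of
+   >>> # issuing SELECT on a shared connection
+   >>> cache = redis.Redis(host='localhost', port=6379, db=0)
+   >>> sessions = redis.Redis(host='localhost', port=6379, db=1)
+   >>> cache.set('greeting', 'hello')
+   True
+   >>> print(sessions.get('greeting'))  # a different database, so the key is absent
+   None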
+
+Pipelines
+---------
+
+Default pipelines
+~~~~~~~~~~~~~~~~~
+
+Pipelines are a subclass of the base Redis class that provide support
+for buffering multiple commands to the server in a single request. They
+can be used to dramatically increase the performance of groups of
+commands by reducing the number of back-and-forth TCP packets between
+the client and server.
+
+Pipelines are quite simple to use:
+
+.. code:: pycon
+
+   >>> r = redis.Redis(...)
+   >>> r.set('bing', 'baz')
+   >>> # Use the pipeline() method to create a pipeline instance
+   >>> pipe = r.pipeline()
+   >>> # The following SET commands are buffered
+   >>> pipe.set('foo', 'bar')
+   >>> pipe.get('bing')
+   >>> # the EXECUTE call sends all buffered commands to the server, returning
+   >>> # a list of responses, one for each command.
+   >>> pipe.execute()
+   [True, b'baz']
+
+For ease of use, all commands being buffered into the pipeline return
+the pipeline object itself. Therefore calls can be chained like:
+
+.. code:: pycon
+
+   >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute()
+   [True, 1, 6]
+
+In addition, pipelines can also ensure the buffered commands are
+executed atomically as a group. This happens by default. If you want to
+disable the atomic nature of a pipeline but still want to buffer
+commands, you can turn off transactions.
+
+.. code:: pycon
+
+   >>> pipe = r.pipeline(transaction=False)
+
+A common issue occurs when requiring atomic transactions but needing to
+retrieve values from Redis beforehand for use within the transaction. For
+instance, let's assume that the INCR command didn't exist and we needed to
+build an atomic version of INCR in Python.
+
+The completely naive implementation could GET the value, increment it in
+Python, and SET the new value back. However, this is not atomic because
+multiple clients could be doing this at the same time, each getting the
+same value from GET.
+
+Enter the WATCH command. WATCH provides the ability to monitor one or
+more keys prior to starting a transaction. If any of those keys change
+prior to the execution of that transaction, the entire transaction will be
+canceled and a WatchError will be raised. To implement our own
+client-side INCR command, we could do something like this:
+
+.. code:: pycon
+
+   >>> with r.pipeline() as pipe:
+   ...     while True:
+   ...         try:
+   ...             # put a WATCH on the key that holds our sequence value
+   ...             pipe.watch('OUR-SEQUENCE-KEY')
+   ...             # after WATCHing, the pipeline is put into immediate execution
+   ...             # mode until we tell it to start buffering commands again.
+   ...             # this allows us to get the current value of our sequence
+   ...             current_value = pipe.get('OUR-SEQUENCE-KEY')
+   ...             next_value = int(current_value) + 1
+   ...             # now we can put the pipeline back into buffered mode with MULTI
+   ...             pipe.multi()
+   ...             pipe.set('OUR-SEQUENCE-KEY', next_value)
+   ...             # and finally, execute the pipeline (the set command)
+   ...             pipe.execute()
+   ...             # if a WatchError wasn't raised during execution, everything
+   ...             # we just did happened atomically.
+   ...             break
+   ...         except WatchError:
+   ...             # another client must have changed 'OUR-SEQUENCE-KEY' between
+   ...             # the time we started WATCHing it and the pipeline's execution.
+   ...             # our best bet is to just retry.
+   ...             continue
+
+Note that, because the Pipeline must bind to a single connection for the
+duration of a WATCH, care must be taken to ensure that the connection is
+returned to the connection pool by calling the reset() method. If the
+Pipeline is used as a context manager (as in the example above) reset()
+will be called automatically. Of course, you can also do this manually
+by explicitly calling reset():
+
+.. code:: pycon
+
+   >>> pipe = r.pipeline()
+   >>> while True:
+   ...     try:
+   ...         pipe.watch('OUR-SEQUENCE-KEY')
+   ...         ...
+   ...         pipe.execute()
+   ...         break
+   ...     except WatchError:
+   ...         continue
+   ...     finally:
+   ...         pipe.reset()
+
+A convenience method named "transaction" exists that handles all the
+boilerplate of watching keys and retrying on watch errors. It takes a callable
+that should expect a single parameter, a pipeline object, and any number
+of keys to be WATCHed. Our client-side INCR command above can be written
+like this, which is much easier to read:
+
+.. code:: pycon
+
+   >>> def client_side_incr(pipe):
+   ...     current_value = pipe.get('OUR-SEQUENCE-KEY')
+   ...     next_value = int(current_value) + 1
+   ...     pipe.multi()
+   ...     pipe.set('OUR-SEQUENCE-KEY', next_value)
+   >>>
+   >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
+   [True]
+
+Be sure to call pipe.multi() in the callable passed to Redis.transaction
+prior to any write commands.
+
+Pipelines in clusters
+~~~~~~~~~~~~~~~~~~~~~
+
+ClusterPipeline is a subclass of RedisCluster that provides support for
+Redis pipelines in cluster mode. When calling the execute() command, all
+the commands are grouped by the node on which they will be executed, and
+are then executed by the respective nodes in parallel. The pipeline
+instance will wait for all the nodes to respond before returning the
+result to the caller. Command responses are returned as a list sorted in
+the same order in which they were sent. Pipelines can be used to
+dramatically increase the throughput of Redis Cluster by significantly
+reducing the number of network round trips between the client and
+the server.
+
+.. code:: pycon
+
+   >>> with rc.pipeline() as pipe:
+   ...     pipe.set('foo', 'value1')
+   ...     pipe.set('bar', 'value2')
+   ...     pipe.get('foo')
+   ...     pipe.get('bar')
+   ...     print(pipe.execute())
+   [True, True, b'value1', b'value2']
+   ...     pipe.set('foo1', 'bar1').get('foo1').execute()
+   [True, b'bar1']
+
+Please note:
+
+-  RedisCluster pipelines currently only support key-based commands.
+-  The pipeline gets its ‘read_from_replicas’ value from the cluster
+   client’s parameter. Thus, if reading from replicas is enabled in the
+   cluster instance, the pipeline will also direct read commands to
+   replicas (a sketch follows below).
+-  The ‘transaction’ option is NOT supported in cluster-mode. In
+   non-cluster mode, the ‘transaction’ option is available when
+   executing pipelines. This wraps the pipeline commands with MULTI/EXEC
+   commands, and effectively turns the pipeline commands into a single
+   transaction block. This means that all commands are executed
+   sequentially without any interruptions from other clients. However,
+   in cluster-mode this is not possible, because commands are
+   partitioned according to their respective destination nodes. This
+   means that we cannot turn the pipeline commands into one transaction
+   block, because in most cases they are split up into several smaller
+   pipelines.
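+
+A minimal sketch of the ‘read_from_replicas’ behaviour described above
+(host, port and key names are illustrative only; it assumes a reachable
+test cluster):
+
+.. code:: pycon
+
+   >>> from redis.cluster import RedisCluster
+   >>> rc = RedisCluster(host='localhost', port=16379, read_from_replicas=True)
+   >>> with rc.pipeline() as pipe:
+   ...     # the pipeline inherits read_from_replicas=True from the cluster
+   ...     # client, so the GET below may be served by a replica of the slot
+   ...     # owner rather than by the primary
+   ...     pipe.set('{user}:name', 'alice')
+   ...     pipe.get('{user}:name')
+   ...     print(pipe.execute())
+   [True, b'alice']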
+
+Publish / Subscribe
+-------------------
+
+redis-py includes a PubSub object that subscribes to channels and
+listens for new messages. Creating a PubSub object is easy.
+
+.. code:: pycon
+
+   >>> r = redis.Redis(...)
+   >>> p = r.pubsub()
+
+Once a PubSub instance is created, channels and patterns can be
+subscribed to.
+
+.. code:: pycon
+
+   >>> p.subscribe('my-first-channel', 'my-second-channel', ...)
+   >>> p.psubscribe('my-*', ...)
+
+The PubSub instance is now subscribed to those channels/patterns. The
+subscription confirmations can be seen by reading messages from the
+PubSub instance.
+
+.. code:: pycon
+
+   >>> p.get_message()
+   {'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
+   >>> p.get_message()
+   {'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2}
+   >>> p.get_message()
+   {'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3}
+
+Every message read from a PubSub instance will be a dictionary with the
+following keys.
+
+-  **type**: One of the following: 'subscribe', 'unsubscribe',
+   'psubscribe', 'punsubscribe', 'message', 'pmessage'
+-  **channel**: The channel [un]subscribed to or the channel a message
+   was published to
+-  **pattern**: The pattern that matched a published message's channel.
+   Will be None in all cases except for 'pmessage' types.
+-  **data**: The message data. With [un]subscribe messages, this value
+   will be the number of channels and patterns the connection is
+   currently subscribed to. With [p]message messages, this value will be
+   the actual published message.
+
+Let's send a message now.
+
+.. code:: pycon
+
+   # the publish method returns the number of matching channel and pattern
+   # subscriptions. 'my-first-channel' matches both the 'my-first-channel'
+   # subscription and the 'my-*' pattern subscription, so this message will
+   # be delivered to 2 channels/patterns
+   >>> r.publish('my-first-channel', 'some data')
+   2
+   >>> p.get_message()
+   {'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'}
+   >>> p.get_message()
+   {'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'}
+
+Unsubscribing works just like subscribing. If no arguments are passed to
+[p]unsubscribe, all channels or patterns will be unsubscribed from.
+
+.. code:: pycon
+
+   >>> p.unsubscribe()
+   >>> p.punsubscribe('my-*')
+   >>> p.get_message()
+   {'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'}
+   >>> p.get_message()
+   {'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'}
+   >>> p.get_message()
+   {'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'}
+
+redis-py also allows you to register callback functions to handle
+published messages. Message handlers take a single argument, the
+message, which is a dictionary just like the examples above. To
+subscribe to a channel or pattern with a message handler, pass the
+channel or pattern name as a keyword argument with its value being the
+callback function.
+
+When a message is read on a channel or pattern with a message handler,
+the message dictionary is created and passed to the message handler. In
+this case, a None value is returned from get_message() since the message
+was already handled.
+
+.. code:: pycon
+
+   >>> def my_handler(message):
+   ...     print('MY HANDLER: ', message['data'])
+   >>> p.subscribe(**{'my-channel': my_handler})
+   # read the subscribe confirmation message
+   >>> p.get_message()
+   {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1}
+   >>> r.publish('my-channel', 'awesome data')
+   1
+   # for the message handler to work, we need to tell the instance to read data.
+   # this can be done in several ways (read more below). we'll just use
+   # the familiar get_message() function for now
+   >>> message = p.get_message()
+   MY HANDLER:  awesome data
+   # note here that the my_handler callback printed the string above.
+   # `message` is None because the message was handled by our handler.
+   >>> print(message)
+   None
+
+If your application is not interested in the (sometimes noisy)
+subscribe/unsubscribe confirmation messages, you can ignore them by
+passing ignore_subscribe_messages=True to r.pubsub(). This will cause
+all subscribe/unsubscribe messages to be read, but they won't bubble up
+to your application.
+
+.. code:: pycon
+
+   >>> p = r.pubsub(ignore_subscribe_messages=True)
+   >>> p.subscribe('my-channel')
+   >>> p.get_message()  # hides the subscribe message and returns None
+   >>> r.publish('my-channel', 'my data')
+   1
+   >>> p.get_message()
+   {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'}
+
+There are three different strategies for reading messages.
+
+The examples above have been using pubsub.get_message(). Behind the
+scenes, get_message() uses the system's 'select' module to quickly poll
+the connection's socket. If there's data available to be read,
+get_message() will read it, format the message and return it or pass it
+to a message handler. If there's no data to be read, get_message() will
+immediately return None. This makes it trivial to integrate into an
+existing event loop inside your application.
+
+.. code:: pycon
+
+   >>> while True:
+   >>>     message = p.get_message()
+   >>>     if message:
+   >>>         print(message)  # do something with the message
+   >>>     time.sleep(0.001)  # be nice to the system :)
+
+Older versions of redis-py only read messages with pubsub.listen().
+listen() is a generator that blocks until a message is available. If
+your application doesn't need to do anything other than receive and act
+on messages from Redis, listen() is an easy way to get up and running.
+
+.. code:: pycon
+
+   >>> for message in p.listen():
+   ...     print(message)  # do something with the message
+
+The third option runs an event loop in a separate thread.
+pubsub.run_in_thread() creates a new thread and starts the event loop.
+The thread object is returned to the caller of run_in_thread(). The
+caller can use the thread.stop() method to shut down the event loop and
+thread. Behind the scenes, this is simply a wrapper around get_message()
+that runs in a separate thread, essentially creating a tiny non-blocking
+event loop for you. run_in_thread() takes an optional sleep_time
+argument. If specified, the event loop will call time.sleep() with the
+value in each iteration of the loop.
+
+Note: Since we're running in a separate thread, there's no way to handle
+messages that aren't automatically handled with registered message
+handlers. Therefore, redis-py prevents you from calling run_in_thread()
+if you're subscribed to patterns or channels that don't have message
+handlers attached.
+
+.. code:: pycon
+
+   >>> p.subscribe(**{'my-channel': my_handler})
+   >>> thread = p.run_in_thread(sleep_time=0.001)
+   # the event loop is now running in the background processing messages
+   # when it's time to shut it down...
+   >>> thread.stop()
+
+run_in_thread also supports an optional exception handler, which lets
+you catch exceptions that occur within the worker thread and handle them
+appropriately. The exception handler will take as arguments the
+exception itself, the pubsub object, and the worker thread returned by
+run_in_thread.
+
+.. code:: pycon
+
+   >>> p.subscribe(**{'my-channel': my_handler})
+   >>> def exception_handler(ex, pubsub, thread):
+   >>>     print(ex)
+   >>>     thread.stop()
+   >>>     thread.join(timeout=1.0)
+   >>>     pubsub.close()
+   >>> thread = p.run_in_thread(exception_handler=exception_handler)
+
+A PubSub object adheres to the same encoding semantics as the client
+instance it was created from. Any channel or pattern that's unicode will
+be encoded using the charset specified on the client before being sent
+to Redis. If the client's decode_responses flag is set to False (the
+default), the 'channel', 'pattern' and 'data' values in message
+dictionaries will be byte strings (bytes).
+If the client's decode_responses is True, then the 'channel', 'pattern'
+and 'data' values will be automatically decoded to unicode strings using
+the client's charset.
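+
+As a small sketch of the difference (it builds its own client; the
+channel name and payload are arbitrary):
+
+.. code:: pycon
+
+   >>> import redis
+   >>> r = redis.Redis(decode_responses=True)
+   >>> p = r.pubsub(ignore_subscribe_messages=True)
+   >>> p.subscribe('my-channel')
+   >>> p.get_message()  # swallow the subscribe confirmation
+   >>> r.publish('my-channel', 'hello')
+   1
+   >>> p.get_message(timeout=1.0)
+   {'type': 'message', 'pattern': None, 'channel': 'my-channel', 'data': 'hello'}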
+
+PubSub objects remember what channels and patterns they are subscribed
+to. In the event of a disconnection such as a network error or timeout,
+the PubSub object will re-subscribe to all prior channels and patterns
+when reconnecting. Messages that were published while the client was
+disconnected cannot be delivered. When you're finished with a PubSub
+object, call its .close() method to shut down the connection.
+
+.. code:: pycon
+
+   >>> p = r.pubsub()
+   >>> ...
+   >>> p.close()
+
+The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also
+supported:
+
+.. code:: pycon
+
+   >>> r.pubsub_channels()
+   [b'foo', b'bar']
+   >>> r.pubsub_numsub('foo', 'bar')
+   [(b'foo', 9001), (b'bar', 42)]
+   >>> r.pubsub_numsub('baz')
+   [(b'baz', 0)]
+   >>> r.pubsub_numpat()
+   1204
+
+Monitor
+~~~~~~~
+
+redis-py includes a Monitor object that streams every command processed
+by the Redis server. Use listen() on the Monitor object to block until a
+command is received.
+
+.. code:: pycon
+
+   >>> r = redis.Redis(...)
+   >>> with r.monitor() as m:
+   >>>     for command in m.listen():
+   >>>         print(command)
diff --git a/docs/backoff.rst b/docs/backoff.rst
index e640b56..c5ab01a 100644
--- a/docs/backoff.rst
+++ b/docs/backoff.rst
@@ -1,3 +1,5 @@
+.. _backoff-label:
+
 Backoff
 #############
 
diff --git a/docs/clustering.rst b/docs/clustering.rst
new file mode 100644
index 0000000..34cb7f1
--- /dev/null
+++ b/docs/clustering.rst
@@ -0,0 +1,242 @@
+Clustering
+==========
+
+redis-py now supports cluster mode and provides a client for `Redis
+Cluster <https://redis.io/topics/cluster-tutorial>`__.
+
+The cluster client is based on Grokzen’s
+`redis-py-cluster <https://github.com/Grokzen/redis-py-cluster>`__, has
+added bug fixes, and now supersedes that library. Support for these
+changes is thanks to his contributions.
+
+To learn more about Redis Cluster, see `Redis Cluster
+specifications <https://redis.io/topics/cluster-spec>`__.
+
+`Creating clusters <#creating-clusters>`__ \| `Specifying Target
+Nodes <#specifying-target-nodes>`__ \| `Multi-key
+Commands <#multi-key-commands>`__ \| `Known PubSub
+Limitations <#known-pubsub-limitations>`__
+
+Creating clusters
+-----------------
+
+Connecting redis-py to a Redis Cluster requires at least one node for
+cluster discovery. There are multiple ways in which a cluster client
+instance can be created:
+
+-  Using ‘host’ and ‘port’ arguments:
+
+.. code:: pycon
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> rc = Redis(host='localhost', port=6379)
+   >>> print(rc.get_nodes())
+       [[host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>], [host=127.0.0.1,port=6378,name=127.0.0.1:6378,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6378,db=0>>>], [host=127.0.0.1,port=6377,name=127.0.0.1:6377,server_type=replica,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6377,db=0>>>]]
+
+-  Using the Redis URL specification:
+
+.. code:: pycon
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> rc = Redis.from_url("redis://localhost:6379/0")
+
+-  Directly, via the ClusterNode class:
+
+.. code:: pycon
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> from redis.cluster import ClusterNode
+   >>> nodes = [ClusterNode('localhost', 6379), ClusterNode('localhost', 6378)]
+   >>> rc = Redis(startup_nodes=nodes)
+
+When a RedisCluster instance is created, it first attempts to establish
+a connection to one of the provided startup nodes. If none of the
+startup nodes are reachable, a ‘RedisClusterException’ will be thrown.
+After a connection to one of the cluster’s nodes is established, the
+RedisCluster instance will be initialized with 3 caches: a slots cache
+which maps each of the 16384 slots to the node/s handling them, a nodes
+cache that contains ClusterNode objects (name, host, port, redis
+connection) for all of the cluster’s nodes, and a commands cache that
+contains all the commands supported by the server, retrieved using the
+Redis ‘COMMAND’ output. See *RedisCluster specific options* below for
+more.
+
+A RedisCluster instance can be used directly to execute Redis commands.
+When a command is being executed through the cluster instance, the
+target node(s) will be internally determined. When using a key-based
+command, the target node will be the node that holds the key’s slot.
+Cluster management commands and other commands that are not key-based
+have a parameter called ‘target_nodes’ where you can specify which nodes
+to execute the command on. In the absence of target_nodes, the command
+will be executed on the default cluster node. As part of cluster
+instance initialization, the cluster’s default node is randomly selected
+from the cluster’s primaries, and will be updated upon reinitialization.
+Using r.get_default_node(), you can get the cluster’s default node, or
+you can change it using the ‘set_default_node’ method.
+
+The ‘target_nodes’ parameter is explained in the following section,
+‘Specifying Target Nodes’.
+
+.. code:: pycon
+
+   >>> # target-nodes: the node that holds 'foo1's key slot
+   >>> rc.set('foo1', 'bar1')
+   >>> # target-nodes: the node that holds 'foo2's key slot
+   >>> rc.set('foo2', 'bar2')
+   >>> # target-nodes: the node that holds 'foo1's key slot
+   >>> print(rc.get('foo1'))
+   b'bar1'
+   >>> # target-node: default-node
+   >>> print(rc.keys())
+   [b'foo1']
+   >>> # target-node: default-node
+   >>> rc.ping()
+
+Specifying Target Nodes
+-----------------------
+
+As mentioned above, all non key-based RedisCluster commands accept the
+kwarg parameter ‘target_nodes’ that specifies the node/nodes that the
+command should be executed on. The best practice is to specify target
+nodes using RedisCluster class’s node flags: PRIMARIES, REPLICAS,
+ALL_NODES, RANDOM. When a nodes flag is passed along with a command, it
+will be internally resolved to the relevant node/s. If the nodes
+topology of the cluster changes during the execution of a command, the
+client will be able to resolve the nodes flag again with the new
+topology and attempt to retry executing the command.
+
+.. code:: pycon
+
+   >>> from redis.cluster import RedisCluster as Redis
+   >>> # run cluster-meet command on all of the cluster's nodes
+   >>> rc.cluster_meet('127.0.0.1', 6379, target_nodes=Redis.ALL_NODES)
+   >>> # ping all replicas
+   >>> rc.ping(target_nodes=Redis.REPLICAS)
+   >>> # ping a random node
+   >>> rc.ping(target_nodes=Redis.RANDOM)
+   >>> # get the keys from all cluster nodes
+   >>> rc.keys(target_nodes=Redis.ALL_NODES)
+   [b'foo1', b'foo2']
+   >>> # execute bgsave in all primaries
+   >>> rc.bgsave(Redis.PRIMARIES)
+
+You could also pass ClusterNodes directly if you want to execute a
+command on a specific node / node group that isn’t addressed by the
+nodes flag. However, if the command execution fails due to cluster
+topology changes, a retry attempt will not be made, since the passed
+target node/s may no longer be valid, and the relevant cluster or
+connection error will be returned.
+
+.. code:: pycon
+
+   >>> node = rc.get_node('localhost', 6379)
+   >>> # Get the keys only for that specific node
+   >>> rc.keys(target_nodes=node)
+   >>> # get Redis info from a subset of primaries
+   >>> subset_primaries = [node for node in rc.get_primaries() if node.port > 6378]
+   >>> rc.info(target_nodes=subset_primaries)
+
+In addition, the RedisCluster instance can query the Redis instance of a
+specific node and execute commands on that node directly. The Redis
+client, however, does not handle cluster failures and retries.
+
+.. code:: pycon
+
+   >>> cluster_node = rc.get_node(host='localhost', port=6379)
+   >>> print(cluster_node)
+   [host=127.0.0.1,port=6379,name=127.0.0.1:6379,server_type=primary,redis_connection=Redis<ConnectionPool<Connection<host=127.0.0.1,port=6379,db=0>>>]
+   >>> r = cluster_node.redis_connection
+   >>> r.client_list()
+   [{'id': '276', 'addr': '127.0.0.1:64108', 'fd': '16', 'name': '', 'age': '0', 'idle': '0', 'flags': 'N', 'db': '0', 'sub': '0', 'psub': '0', 'multi': '-1', 'qbuf': '26', 'qbuf-free': '32742', 'argv-mem': '10', 'obl': '0', 'oll': '0', 'omem': '0', 'tot-mem': '54298', 'events': 'r', 'cmd': 'client', 'user': 'default'}]
+   >>> # Get the keys only for that specific node
+   >>> r.keys()
+   [b'foo1']
+
+Multi-key Commands
+------------------
+
+Redis supports multi-key commands in Cluster Mode, such as Set type
+unions or intersections, mset and mget, as long as the keys all hash to
+the same slot. With the RedisCluster client, you can use the familiar
+functions (e.g. mget, mset) to perform an atomic multi-key operation.
+However, you must ensure all keys are mapped to the same slot, otherwise
+a RedisClusterException will be thrown. Redis Cluster implements a
+concept called hash tags that can be used to force certain keys to be
+stored in the same hash slot, see `Keys hash
+tag <https://redis.io/topics/cluster-spec#keys-hash-tags>`__. You can
+also use the non-atomic variants (e.g. mget_nonatomic, mset_nonatomic)
+for some of the multi-key operations, and pass keys that aren’t mapped
+to the same slot. The client will then map the keys to the relevant
+slots, sending the commands to the slots’ node owners. Non-atomic
+operations batch the keys according to their hash value, and each batch
+is then sent separately to the slot’s owner.
+
+.. code:: pycon
+
+   # Atomic operations can be used when all keys are mapped to the same slot
+   >>> rc.mset({'{foo}1': 'bar1', '{foo}2': 'bar2'})
+   >>> rc.mget('{foo}1', '{foo}2')
+   [b'bar1', b'bar2']
+   # Non-atomic multi-key operations split the keys across different slots
+   >>> rc.mset_nonatomic({'foo': 'value1', 'bar': 'value2', 'zzz': 'value3'})
+   >>> rc.mget_nonatomic('foo', 'bar', 'zzz')
+   [b'value1', b'value2', b'value3']
+
+**Cluster PubSub:**
+
+When a ClusterPubSub instance is created without specifying a node, a
+single node will be transparently chosen for the pubsub connection on
+the first command execution. The node will be determined by:
+
+1. Hashing the channel name in the request to find its keyslot
+2. Selecting a node that handles the keyslot: if read_from_replicas is
+   set to true, a replica can be selected.
+
+Known PubSub Limitations
+------------------------
+
+Pattern subscribe and publish do not currently work properly due to key
+slots. If we hash a pattern like fo\* we will receive a keyslot for that
+string but there are endless possibilities for channel names based on
+this pattern - unknowable in advance. This feature is not disabled but
+the commands are not currently recommended for use. See
+`redis-py-cluster
+documentation <https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html>`__
+for more.
+
+.. code:: pycon
+
+   >>> p1 = rc.pubsub()
+   # p1 connection will be set to the node that holds 'foo' keyslot
+   >>> p1.subscribe('foo')
+   # p2 connection will be set to node 'localhost:6379'
+   >>> p2 = rc.pubsub(rc.get_node('localhost', 6379))
+
+**Read Only Mode**
+
+By default, Redis Cluster always returns a MOVED redirection response
+when accessing a replica node. You can overcome this limitation and
+scale read commands by triggering READONLY mode.
+
+To enable READONLY mode, pass read_from_replicas=True to the RedisCluster
+constructor. When set to true, read commands will be distributed between
+the primary and its replicas in a round-robin manner.
+
+READONLY mode can be set at runtime by calling the readonly() method
+with target_nodes=‘replicas’, and read-write access can be restored by
+calling the readwrite() method.
+
+.. code:: pycon
+
+   >>> from redis.cluster import RedisCluster as Redis
+   # Use 'debug' log level to print the node that the command is executed on
+   >>> rc_readonly = Redis(startup_nodes=startup_nodes,
+   ...                     read_from_replicas=True)
+   >>> rc_readonly.set('{foo}1', 'bar1')
+   >>> for i in range(0, 4):
+   ...     # Assigns read command to the slot's hosts in a Round-Robin manner
+   ...     rc_readonly.get('{foo}1')
+   # set command would be directed only to the slot's primary node
+   >>> rc_readonly.set('{foo}2', 'bar2')
+   # reset READONLY flag
+   >>> rc_readonly.readwrite(target_nodes='replicas')
+   # now the get command would be directed only to the slot's primary node
+   >>> rc_readonly.get('{foo}1')
diff --git a/docs/conf.py b/docs/conf.py
index 618d95a..cdbeb02 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -60,7 +60,7 @@ master_doc = "index"
 
 # General information about the project.
 project = "redis-py"
-copyright = "2021, Redis Inc"
+copyright = "2022, Redis Inc"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -104,7 +104,7 @@ exclude_patterns = ["_build", "**.ipynb_checkponts"]
 # show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = "sphinx"
+pygments_style = "tango"
 
 # A list of ignored prefixes for module index sorting.
 # modindex_common_prefix = []
@@ -116,19 +116,28 @@ nitpicky = True
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = "sphinx_rtd_theme"
+html_theme = "furo"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
 html_theme_options = {
     "display_version": True,
-    "prev_next_buttons_location": "bottom",
-    "style_external_links": False,
-    # Toc options
-    "collapse_navigation": True,
-    "sticky_navigation": True,
-    "navigation_depth": 4,
+    "footer_icons": [
+        {
+            "name": "GitHub",
+            "url": "https://github.com/redis/redis-py",
+            "html": """
+            <svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
+                <path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
+            </svg>
+        """,
+            "class": "",
+        },
+    ],
+    "source_repository": "https://github.com/redis/redis-py/",
+    "source_branch": "master",
+    "source_directory": "docs/",
 }
 
 # Add any paths that contain custom themes here, relative to this directory.
@@ -143,7 +152,7 @@ html_theme_options = {
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-# html_logo = None
+html_logo = "_static/redis-cube-red-white-rgb.svg"
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
@@ -153,7 +162,7 @@ html_theme_options = {
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = ["_static", "images"]
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
@@ -278,4 +287,4 @@ texinfo_documents = [
 epub_title = "redis-py"
 epub_author = "Redis Inc"
 epub_publisher = "Redis Inc"
-epub_copyright = "2021, Redis Inc"
+epub_copyright = "2022, Redis Inc"
diff --git a/docs/examples.rst b/docs/examples.rst
index 722fae2..47fdbdf 100644
--- a/docs/examples.rst
+++ b/docs/examples.rst
@@ -12,3 +12,6 @@ Examples
    examples/set_and_get_examples
    examples/search_vector_similarity_examples
    examples/pipeline_examples
+   examples/timeseries_examples
+   examples/redis-stream-example
+   examples/opentelemetry_api_examples
diff --git a/docs/examples/asyncio_examples.ipynb b/docs/examples/asyncio_examples.ipynb
index 66d4358..855255c 100644
--- a/docs/examples/asyncio_examples.ipynb
+++ b/docs/examples/asyncio_examples.ipynb
@@ -21,11 +21,6 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
    "outputs": [
     {
      "name": "stdout",
@@ -41,27 +36,29 @@
     "connection = redis.Redis()\n",
     "print(f\"Ping successful: {await connection.ping()}\")\n",
     "await connection.close()"
-   ]
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
   },
   {
    "cell_type": "markdown",
+   "source": [
+    "If you supply a custom `ConnectionPool` that is supplied to several `Redis` instances, you may want to disconnect the connection pool explicitly. Disconnecting the connection pool simply disconnects all connections hosted in the pool."
+   ],
    "metadata": {
+    "collapsed": false,
     "pycharm": {
      "name": "#%% md\n"
     }
-   },
-   "source": [
-    "If you supply a custom `ConnectionPool` that is supplied to several `Redis` instances, you may want to disconnect the connection pool explicitly. Disconnecting the connection pool simply disconnects all connections hosted in the pool."
-   ]
+   }
   },
   {
    "cell_type": "code",
    "execution_count": 2,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
    "outputs": [],
    "source": [
     "import redis.asyncio as redis\n",
@@ -70,15 +67,16 @@
     "await connection.close()\n",
     "# Or: await connection.close(close_connection_pool=False)\n",
     "await connection.connection_pool.disconnect()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
+   ],
    "metadata": {
+    "collapsed": false,
     "pycharm": {
-     "name": "#%% md\n"
+     "name": "#%%\n"
     }
-   },
+   }
+  },
+  {
+   "cell_type": "markdown",
    "source": [
     "## Transactions (Multi/Exec)\n",
     "\n",
@@ -87,16 +85,17 @@
     "The commands will not be reflected in Redis until execute() is called & awaited.\n",
     "\n",
     "Usually, when performing a bulk operation, taking advantage of a “transaction” (e.g., Multi/Exec) is to be desired, as it will also add a layer of atomicity to your bulk operation."
-   ]
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   }
   },
   {
    "cell_type": "code",
    "execution_count": 3,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
    "outputs": [],
    "source": [
     "import redis.asyncio as redis\n",
@@ -106,25 +105,31 @@
     "    ok1, ok2 = await (pipe.set(\"key1\", \"value1\").set(\"key2\", \"value2\").execute())\n",
     "assert ok1\n",
     "assert ok2"
-   ]
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
    "source": [
     "## Pub/Sub Mode\n",
     "\n",
     "Subscribing to specific channels:"
-   ]
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   }
   },
   {
    "cell_type": "code",
    "execution_count": 4,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
    "outputs": [
     {
      "name": "stdout",
@@ -140,8 +145,6 @@
    "source": [
     "import asyncio\n",
     "\n",
-    "import async_timeout\n",
-    "\n",
     "import redis.asyncio as redis\n",
     "\n",
     "STOPWORD = \"STOP\"\n",
@@ -149,46 +152,47 @@
     "\n",
     "async def reader(channel: redis.client.PubSub):\n",
     "    while True:\n",
-    "        try:\n",
-    "            async with async_timeout.timeout(1):\n",
-    "                message = await channel.get_message(ignore_subscribe_messages=True)\n",
-    "                if message is not None:\n",
-    "                    print(f\"(Reader) Message Received: {message}\")\n",
-    "                    if message[\"data\"].decode() == STOPWORD:\n",
-    "                        print(\"(Reader) STOP\")\n",
-    "                        break\n",
-    "                await asyncio.sleep(0.01)\n",
-    "        except asyncio.TimeoutError:\n",
-    "            pass\n",
+    "        message = await channel.get_message(ignore_subscribe_messages=True)\n",
+    "        if message is not None:\n",
+    "            print(f\"(Reader) Message Received: {message}\")\n",
+    "            if message[\"data\"].decode() == STOPWORD:\n",
+    "                print(\"(Reader) STOP\")\n",
+    "                break\n",
     "\n",
     "r = redis.from_url(\"redis://localhost\")\n",
-    "pubsub = r.pubsub()\n",
-    "await pubsub.subscribe(\"channel:1\", \"channel:2\")\n",
+    "async with r.pubsub() as pubsub:\n",
+    "    await pubsub.subscribe(\"channel:1\", \"channel:2\")\n",
     "\n",
-    "future = asyncio.create_task(reader(pubsub))\n",
+    "    future = asyncio.create_task(reader(pubsub))\n",
     "\n",
-    "await r.publish(\"channel:1\", \"Hello\")\n",
-    "await r.publish(\"channel:2\", \"World\")\n",
-    "await r.publish(\"channel:1\", STOPWORD)\n",
+    "    await r.publish(\"channel:1\", \"Hello\")\n",
+    "    await r.publish(\"channel:2\", \"World\")\n",
+    "    await r.publish(\"channel:1\", STOPWORD)\n",
     "\n",
-    "await future"
-   ]
+    "    await future"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
    "source": [
     "Subscribing to channels matching a glob-style pattern:"
-   ]
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   }
   },
   {
    "cell_type": "code",
    "execution_count": 5,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
    "outputs": [
     {
      "name": "stdout",
@@ -204,8 +208,6 @@
    "source": [
     "import asyncio\n",
     "\n",
-    "import async_timeout\n",
-    "\n",
     "import redis.asyncio as redis\n",
     "\n",
     "STOPWORD = \"STOP\"\n",
@@ -213,35 +215,35 @@
     "\n",
     "async def reader(channel: redis.client.PubSub):\n",
     "    while True:\n",
-    "        try:\n",
-    "            async with async_timeout.timeout(1):\n",
-    "                message = await channel.get_message(ignore_subscribe_messages=True)\n",
-    "                if message is not None:\n",
-    "                    print(f\"(Reader) Message Received: {message}\")\n",
-    "                    if message[\"data\"].decode() == STOPWORD:\n",
-    "                        print(\"(Reader) STOP\")\n",
-    "                        break\n",
-    "                await asyncio.sleep(0.01)\n",
-    "        except asyncio.TimeoutError:\n",
-    "            pass\n",
+    "        message = await channel.get_message(ignore_subscribe_messages=True)\n",
+    "        if message is not None:\n",
+    "            print(f\"(Reader) Message Received: {message}\")\n",
+    "            if message[\"data\"].decode() == STOPWORD:\n",
+    "                print(\"(Reader) STOP\")\n",
+    "                break\n",
     "\n",
     "\n",
     "r = await redis.from_url(\"redis://localhost\")\n",
-    "pubsub = r.pubsub()\n",
-    "await pubsub.psubscribe(\"channel:*\")\n",
+    "async with r.pubsub() as pubsub:\n",
+    "    await pubsub.psubscribe(\"channel:*\")\n",
     "\n",
-    "future = asyncio.create_task(reader(pubsub))\n",
+    "    future = asyncio.create_task(reader(pubsub))\n",
     "\n",
-    "await r.publish(\"channel:1\", \"Hello\")\n",
-    "await r.publish(\"channel:2\", \"World\")\n",
-    "await r.publish(\"channel:1\", STOPWORD)\n",
+    "    await r.publish(\"channel:1\", \"Hello\")\n",
+    "    await r.publish(\"channel:2\", \"World\")\n",
+    "    await r.publish(\"channel:1\", STOPWORD)\n",
     "\n",
-    "await future"
-   ]
+    "    await future"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
   },
   {
    "cell_type": "markdown",
-   "metadata": {},
    "source": [
     "## Sentinel Client\n",
     "\n",
@@ -250,16 +252,17 @@
     "Calling aioredis.sentinel.Sentinel.master_for or aioredis.sentinel.Sentinel.slave_for methods will return Redis clients connected to specified services monitored by Sentinel.\n",
     "\n",
     "Sentinel client will detect failover and reconnect Redis clients automatically."
-   ]
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   }
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "name": "#%%\n"
-    }
-   },
    "outputs": [],
    "source": [
     "import asyncio\n",
@@ -274,7 +277,13 @@
     "assert ok\n",
     "val = await r.get(\"key\")\n",
     "assert val == b\"value\""
-   ]
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
   }
  ],
  "metadata": {
diff --git a/docs/examples/connection_examples.ipynb b/docs/examples/connection_examples.ipynb
index b0084ff..a15b4c6 100644
--- a/docs/examples/connection_examples.ipynb
+++ b/docs/examples/connection_examples.ipynb
@@ -97,6 +97,192 @@
     "user_connection.ping()"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Connecting to a redis instance with username and password credential provider"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "import redis\n",
+    "\n",
+    "creds_provider = redis.UsernamePasswordCredentialProvider(\"username\", \"password\")\n",
+    "user_connection = redis.Redis(host=\"localhost\", port=6379, credential_provider=creds_provider)\n",
+    "user_connection.ping()"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Connecting to a redis instance with standard credential provider"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "from typing import Tuple\n",
+    "import redis\n",
+    "\n",
+    "creds_map = {\"user_1\": \"pass_1\",\n",
+    "             \"user_2\": \"pass_2\"}\n",
+    "\n",
+    "class UserMapCredentialProvider(redis.CredentialProvider):\n",
+    "    def __init__(self, username: str):\n",
+    "        self.username = username\n",
+    "\n",
+    "    def get_credentials(self) -> Tuple[str, str]:\n",
+    "        return self.username, creds_map.get(self.username)\n",
+    "\n",
+    "# Create a default connection to set the ACL user\n",
+    "default_connection = redis.Redis(host=\"localhost\", port=6379)\n",
+    "default_connection.acl_setuser(\n",
+    "    \"user_1\",\n",
+    "    enabled=True,\n",
+    "    passwords=[\"+\" + \"pass_1\"],\n",
+    "    keys=\"~*\",\n",
+    "    commands=[\"+ping\", \"+command\", \"+info\", \"+select\", \"+flushdb\"],\n",
+    ")\n",
+    "\n",
+    "# Create a UserMapCredentialProvider instance for user_1\n",
+    "creds_provider = UserMapCredentialProvider(\"user_1\")\n",
+    "# Initiate user connection with the credential provider\n",
+    "user_connection = redis.Redis(host=\"localhost\", port=6379,\n",
+    "                              credential_provider=creds_provider)\n",
+    "user_connection.ping()"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Connecting to a redis instance first with an initial credential set and then calling the credential provider"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "from typing import Union\n",
+    "import redis\n",
+    "\n",
+    "class InitCredsSetCredentialProvider(redis.CredentialProvider):\n",
+    "    def __init__(self, username, password):\n",
+    "        self.username = username\n",
+    "        self.password = password\n",
+    "        self.call_supplier = False\n",
+    "\n",
+    "    def call_external_supplier(self) -> Union[Tuple[str], Tuple[str, str]]:\n",
+    "        # Call to an external credential supplier\n",
+    "        raise NotImplementedError\n",
+    "\n",
+    "    def get_credentials(self) -> Union[Tuple[str], Tuple[str, str]]:\n",
+    "        if self.call_supplier:\n",
+    "            return self.call_external_supplier()\n",
+    "        # Use the init set only for the first time\n",
+    "        self.call_supplier = True\n",
+    "        return self.username, self.password\n",
+    "\n",
+    "cred_provider = InitCredsSetCredentialProvider(username=\"init_user\", password=\"init_pass\")"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "## Connecting to a redis instance with AWS Secrets Manager credential provider."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import redis\n",
+    "import boto3\n",
+    "import json\n",
+    "import cachetools.func\n",
+    "\n",
+    "sm_client = boto3.client('secretsmanager')\n",
+    "  \n",
+    "def sm_auth_provider(self, secret_id, version_id=None, version_stage='AWSCURRENT'):\n",
+    "    @cachetools.func.ttl_cache(maxsize=128, ttl=24 * 60 * 60) #24h\n",
+    "    def get_sm_user_credentials(secret_id, version_id, version_stage):\n",
+    "        secret = sm_client.get_secret_value(secret_id, version_id)\n",
+    "        return json.loads(secret['SecretString'])\n",
+    "    creds = get_sm_user_credentials(secret_id, version_id, version_stage)\n",
+    "    return creds['username'], creds['password']\n",
+    "\n",
+    "secret_id = \"EXAMPLE1-90ab-cdef-fedc-ba987SECRET1\"\n",
+    "creds_provider = redis.CredentialProvider(supplier=sm_auth_provider, secret_id=secret_id)\n",
+    "user_connection = redis.Redis(host=\"localhost\", port=6379, credential_provider=creds_provider)\n",
+    "user_connection.ping()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Connecting to a redis instance with ElastiCache IAM credential provider."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import redis\n",
+    "import boto3\n",
+    "import cachetools.func\n",
+    "\n",
+    "ec_client = boto3.client('elasticache')\n",
+    "\n",
+    "def iam_auth_provider(self, user, endpoint, port=6379, region=\"us-east-1\"):\n",
+    "    @cachetools.func.ttl_cache(maxsize=128, ttl=15 * 60) # 15m\n",
+    "    def get_iam_auth_token(user, endpoint, port, region):\n",
+    "        return ec_client.generate_iam_auth_token(user, endpoint, port, region)\n",
+    "    iam_auth_token = get_iam_auth_token(endpoint, port, user, region)\n",
+    "    return iam_auth_token\n",
+    "\n",
+    "username = \"barshaul\"\n",
+    "endpoint = \"test-001.use1.cache.amazonaws.com\"\n",
+    "creds_provider = redis.CredentialProvider(supplier=iam_auth_provider, user=username,\n",
+    "                                           endpoint=endpoint)\n",
+    "user_connection = redis.Redis(host=endpoint, port=6379, credential_provider=creds_provider)\n",
+    "user_connection.ping()"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -176,4 +362,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
+}
\ No newline at end of file
diff --git a/docs/examples/opentelemetry/README.md b/docs/examples/opentelemetry/README.md
new file mode 100644
index 0000000..a1d1c04
--- /dev/null
+++ b/docs/examples/opentelemetry/README.md
@@ -0,0 +1,47 @@
+# Example for redis-py OpenTelemetry instrumentation
+
+This example demonstrates how to monitor Redis using [OpenTelemetry](https://opentelemetry.io/) and
+[Uptrace](https://github.com/uptrace/uptrace). It requires Docker to start Redis Server and Uptrace.
+
+See
+[Monitoring redis-py performance with OpenTelemetry](https://redis-py.readthedocs.io/en/latest/opentelemetry.html)
+for details.
+
+**Step 1**. Download the example using Git:
+
+```shell
+git clone https://github.com/redis/redis-py.git
+cd redis-py/docs/examples/opentelemetry
+```
+
+**Step 2**. Optionally, create a virtualenv:
+
+```shell
+python3 -m venv .venv
+source .venv/bin/activate
+```
+
+**Step 3**. Install dependencies:
+
+```shell
+pip install -r requirements.txt
+```
+
+**Step 4**. Start the services using Docker and make sure Uptrace is running:
+
+```shell
+docker-compose up -d
+docker-compose logs uptrace
+```
+
+**Step 5**. Run the Redis client example and follow the link from the CLI to view the trace:
+
+```shell
+python3 main.py
+trace: http://localhost:14318/traces/ee029d8782242c8ed38b16d961093b35
+```
+
+![Redis trace](./image/redis-py-trace.png)
+
+You can also open Uptrace UI at [http://localhost:14318](http://localhost:14318) to view available
+spans, logs, and metrics.
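+
+If you want a quick sanity check of the instrumentation without Docker or Uptrace, a
+minimal sketch (assuming only `opentelemetry-sdk` and
+`opentelemetry-instrumentation-redis` are installed) exports the spans to the console
+instead of an OTLP collector:
+
+```python
+from opentelemetry import trace
+from opentelemetry.instrumentation.redis import RedisInstrumentor
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
+
+import redis
+
+# Export spans to stdout instead of an OTLP collector.
+trace.set_tracer_provider(TracerProvider())
+trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
+
+# Patch redis-py so every command creates a span.
+RedisInstrumentor().instrument()
+
+client = redis.Redis(host="localhost", port=6379)
+client.set("hello", "world")  # printed as a SET span when the batch is flushed
+```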
diff --git a/docs/examples/opentelemetry/config/alertmanager.yml b/docs/examples/opentelemetry/config/alertmanager.yml
new file mode 100644
index 0000000..ac3e340
--- /dev/null
+++ b/docs/examples/opentelemetry/config/alertmanager.yml
@@ -0,0 +1,53 @@
+# See https://prometheus.io/docs/alerting/latest/configuration/ for details.
+
+global:
+  # The smarthost and SMTP sender used for mail notifications.
+  smtp_smarthost: "mailhog:1025"
+  smtp_from: "alertmanager@example.com"
+  smtp_require_tls: false
+
+receivers:
+  - name: "team-X"
+    email_configs:
+      - to: "some-receiver@example.com"
+        send_resolved: true
+
+# The root route on which each incoming alert enters.
+route:
+  # The labels by which incoming alerts are grouped together. For example,
+  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
+  # be batched into a single group.
+  group_by: ["alertname", "cluster", "service"]
+
+  # When a new group of alerts is created by an incoming alert, wait at
+  # least 'group_wait' to send the initial notification.
+  # This ensures that multiple alerts for the same group that start firing
+  # shortly after one another are batched together in the first notification.
+  group_wait: 30s
+
+  # When the first notification was sent, wait 'group_interval' to send a batch
+  # of new alerts that started firing for that group.
+  group_interval: 5m
+
+  # If an alert has successfully been sent, wait 'repeat_interval' to
+  # resend them.
+  repeat_interval: 3h
+
+  # A default receiver
+  receiver: team-X
+
+  # All the above attributes are inherited by all child routes and can be
+  # overwritten on each.
+
+  # The child route trees.
+  routes:
+    # This route matches error alerts created from spans or logs.
+    - matchers:
+        - alert_kind="error"
+      group_interval: 24h
+      receiver: team-X
+
+# The directory from which notification templates are read.
+templates:
+  - "/etc/alertmanager/template/*.tmpl"
diff --git a/docs/examples/opentelemetry/config/otel-collector.yaml b/docs/examples/opentelemetry/config/otel-collector.yaml
new file mode 100644
index 0000000..b44dd1f
--- /dev/null
+++ b/docs/examples/opentelemetry/config/otel-collector.yaml
@@ -0,0 +1,68 @@
+extensions:
+  health_check:
+  pprof:
+    endpoint: 0.0.0.0:1777
+  zpages:
+    endpoint: 0.0.0.0:55679
+
+receivers:
+  otlp:
+    protocols:
+      grpc:
+      http:
+  hostmetrics:
+    collection_interval: 10s
+    scrapers:
+      cpu:
+      disk:
+      load:
+      filesystem:
+      memory:
+      network:
+      paging:
+  redis:
+    endpoint: "redis-server:6379"
+    collection_interval: 10s
+  jaeger:
+    protocols:
+      grpc:
+
+processors:
+  resourcedetection:
+    detectors: ["system"]
+  batch:
+    send_batch_size: 10000
+    timeout: 10s
+
+exporters:
+  logging:
+    logLevel: debug
+  otlp:
+    endpoint: uptrace:14317
+    tls:
+      insecure: true
+    headers: { "uptrace-dsn": "http://project2_secret_token@localhost:14317/2" }
+
+service:
+  # telemetry:
+  #   logs:
+  #     level: DEBUG
+  pipelines:
+    traces:
+      receivers: [otlp, jaeger]
+      processors: [batch]
+      exporters: [otlp, logging]
+    metrics:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+    metrics/hostmetrics:
+      receivers: [hostmetrics, redis]
+      processors: [batch, resourcedetection]
+      exporters: [otlp]
+    logs:
+      receivers: [otlp]
+      processors: [batch]
+      exporters: [otlp]
+
+  extensions: [health_check, pprof, zpages]
diff --git a/docs/examples/opentelemetry/config/vector.toml b/docs/examples/opentelemetry/config/vector.toml
new file mode 100644
index 0000000..10db91d
--- /dev/null
+++ b/docs/examples/opentelemetry/config/vector.toml
@@ -0,0 +1,39 @@
+[sources.syslog_logs]
+type = "demo_logs"
+format = "syslog"
+interval = 0.1
+
+[sources.apache_common_logs]
+type = "demo_logs"
+format = "apache_common"
+interval = 0.1
+
+[sources.apache_error_logs]
+type = "demo_logs"
+format = "apache_error"
+interval = 0.1
+
+[sources.json_logs]
+type = "demo_logs"
+format = "json"
+interval = 0.1
+
+# Parse Syslog logs
+# See the Vector Remap Language reference for more info: https://vrl.dev
+[transforms.parse_logs]
+type = "remap"
+inputs = ["syslog_logs"]
+source = '''
+. = parse_syslog!(string!(.message))
+'''
+
+# Export data to Uptrace.
+[sinks.uptrace]
+type = "http"
+inputs = ["parse_logs", "apache_common_logs", "apache_error_logs", "json_logs"]
+encoding.codec = "json"
+framing.method = "newline_delimited"
+compression = "gzip"
+uri = "http://uptrace:14318/api/v1/vector/logs"
+#uri = "https://api.uptrace.dev/api/v1/vector/logs"
+headers.uptrace-dsn = "http://project2_secret_token@localhost:14317/2"
diff --git a/docs/examples/opentelemetry/docker-compose.yml b/docs/examples/opentelemetry/docker-compose.yml
new file mode 100644
index 0000000..ea1d6dc
--- /dev/null
+++ b/docs/examples/opentelemetry/docker-compose.yml
@@ -0,0 +1,81 @@
+version: "3"
+
+services:
+  clickhouse:
+    image: clickhouse/clickhouse-server:22.7
+    restart: on-failure
+    environment:
+      CLICKHOUSE_DB: uptrace
+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
+      interval: 1s
+      timeout: 1s
+      retries: 30
+    volumes:
+      - ch_data:/var/lib/clickhouse
+    ports:
+      - "8123:8123"
+      - "9000:9000"
+
+  uptrace:
+    image: "uptrace/uptrace:1.2.0"
+    #image: 'uptrace/uptrace-dev:latest'
+    restart: on-failure
+    volumes:
+      - uptrace_data:/var/lib/uptrace
+      - ./uptrace.yml:/etc/uptrace/uptrace.yml
+    #environment:
+    #  - DEBUG=2
+    ports:
+      - "14317:14317"
+      - "14318:14318"
+    depends_on:
+      clickhouse:
+        condition: service_healthy
+
+  otel-collector:
+    image: otel/opentelemetry-collector-contrib:0.58.0
+    restart: on-failure
+    volumes:
+      - ./config/otel-collector.yaml:/etc/otelcol-contrib/config.yaml
+    ports:
+      - "4317:4317"
+      - "4318:4318"
+
+  vector:
+    image: timberio/vector:0.24.X-alpine
+    volumes:
+      - ./config/vector.toml:/etc/vector/vector.toml:ro
+
+  alertmanager:
+    image: prom/alertmanager:v0.24.0
+    restart: on-failure
+    volumes:
+      - ./config/alertmanager.yml:/etc/alertmanager/config.yml
+      - alertmanager_data:/alertmanager
+    ports:
+      - 9093:9093
+    command:
+      - "--config.file=/etc/alertmanager/config.yml"
+      - "--storage.path=/alertmanager"
+
+  mailhog:
+    image: mailhog/mailhog:v1.0.1
+    restart: on-failure
+    ports:
+      - "8025:8025"
+
+  redis-server:
+    image: redis
+    ports:
+      - "6379:6379"
+  redis-cli:
+    image: redis
+
+volumes:
+  uptrace_data:
+    driver: local
+  ch_data:
+    driver: local
+  alertmanager_data:
+    driver: local
diff --git a/docs/examples/opentelemetry/image/redis-py-trace.png b/docs/examples/opentelemetry/image/redis-py-trace.png
new file mode 100644
index 0000000..e443238
Binary files /dev/null and b/docs/examples/opentelemetry/image/redis-py-trace.png differ
diff --git a/docs/examples/opentelemetry/main.py b/docs/examples/opentelemetry/main.py
new file mode 100755
index 0000000..b140dd0
--- /dev/null
+++ b/docs/examples/opentelemetry/main.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+
+import time
+
+import uptrace
+from opentelemetry import trace
+from opentelemetry.instrumentation.redis import RedisInstrumentor
+
+import redis
+
+tracer = trace.get_tracer("app_or_package_name", "1.0.0")
+
+
+def main():
+    uptrace.configure_opentelemetry(
+        dsn="http://project2_secret_token@localhost:14317/2",
+        service_name="myservice",
+        service_version="1.0.0",
+    )
+    RedisInstrumentor().instrument()
+
+    client = redis.StrictRedis(host="localhost", port=6379)
+
+    span = handle_request(client)
+    print("trace:", uptrace.trace_url(span))
+
+    for i in range(10000):
+        handle_request(client)
+        time.sleep(1)
+
+
+def handle_request(client):
+    with tracer.start_as_current_span(
+        "handle-request", kind=trace.SpanKind.CLIENT
+    ) as span:
+        client.get("my-key")
+        client.set("hello", "world")
+        client.mset(
+            {
+                "employee_name": "Adam Adams",
+                "employee_age": 30,
+                "position": "Software Engineer",
+            }
+        )
+
+        pipe = client.pipeline()
+        pipe.set("foo", 5)
+        pipe.set("bar", 18.5)
+        pipe.set("blee", "hello world!")
+        pipe.execute()
+
+        return span
+
+
+if __name__ == "__main__":
+    main()
diff --git a/docs/examples/opentelemetry/requirements.txt b/docs/examples/opentelemetry/requirements.txt
new file mode 100644
index 0000000..2132801
--- /dev/null
+++ b/docs/examples/opentelemetry/requirements.txt
@@ -0,0 +1,3 @@
+redis==4.3.4
+uptrace==1.14.0
+opentelemetry-instrumentation-redis==0.35b0
diff --git a/docs/examples/opentelemetry/uptrace.yml b/docs/examples/opentelemetry/uptrace.yml
new file mode 100644
index 0000000..4cb39f8
--- /dev/null
+++ b/docs/examples/opentelemetry/uptrace.yml
@@ -0,0 +1,297 @@
+##
+## Uptrace configuration file.
+## See https://uptrace.dev/get/config.html for details.
+##
+## You can use environment variables anywhere in this file, for example:
+##
+##   foo: $FOO
+##   bar: ${BAR}
+##   baz: ${BAZ:default}
+##
+## To escape `$`, use `$$`, for example:
+##
+##   foo: $$FOO_BAR
+##
+
+##
+## ClickHouse database credentials.
+##
+ch:
+  # Connection string for ClickHouse database. For example:
+  # clickhouse://<user>:<password>@<host>:<port>/<database>?sslmode=disable
+  #
+  # See https://clickhouse.uptrace.dev/guide/golang-clickhouse.html#options
+  dsn: "clickhouse://default:@clickhouse:9000/uptrace?sslmode=disable"
+
+##
+## A list of pre-configured projects. Each project is fully isolated.
+##
+projects:
+  # Conventionally, the first project is used to monitor Uptrace itself.
+  - id: 1
+    name: Uptrace
+    # Token grants write access to the project. Keep it secret.
+    token: project1_secret_token
+    pinned_attrs:
+      - service.name
+      - host.name
+      - deployment.environment
+    # Group spans by deployment.environment attribute.
+    group_by_env: false
+    # Group funcs spans by service.name attribute.
+    group_funcs_by_service: false
+
+  # Other projects can be used to monitor your applications.
+  # To monitor micro-services or multiple related services, use a single project.
+  - id: 2
+    name: My project
+    token: project2_secret_token
+    pinned_attrs:
+      - service.name
+      - host.name
+      - deployment.environment
+    # Group spans by deployment.environment attribute.
+    group_by_env: false
+    # Group funcs spans by service.name attribute.
+    group_funcs_by_service: false
+
+##
+## Create metrics from spans and events.
+##
+metrics_from_spans:
+  - name: uptrace.tracing.spans_duration
+    description: Spans duration (excluding events)
+    instrument: histogram
+    unit: microseconds
+    value: span.duration / 1000
+    attrs:
+      - span.system as system
+      - service.name as service
+      - host.name as host
+      - span.status_code as status
+    where: not span.is_event
+
+  - name: uptrace.tracing.spans
+    description: Spans count (excluding events)
+    instrument: counter
+    unit: 1
+    value: span.count
+    attrs:
+      - span.system as system
+      - service.name as service
+      - host.name as host
+      - span.status_code as status
+    where: not span.is_event
+
+  - name: uptrace.tracing.events
+    description: Events count (excluding spans)
+    instrument: counter
+    unit: 1
+    value: span.count
+    attrs:
+      - span.system as system
+      - service.name as service
+      - host.name as host
+    where: span.is_event
+
+##
+## To require authentication, uncomment the following section.
+##
+auth:
+  # users:
+  #   - username: uptrace
+  #     password: uptrace
+  #   - username: admin
+  #     password: admin
+
+  # # Cloudflare user provider: uses Cloudflare Zero Trust Access (Identity)
+  # # See https://developers.cloudflare.com/cloudflare-one/identity/ for more info.
+  # cloudflare:
+  #   # The base URL of the Cloudflare Zero Trust team.
+  #   - team_url: https://myteam.cloudflareaccess.com
+  #     # The Application Audience (AUD) Tag for this application.
+  #     # You can retrieve this from the Cloudflare Zero Trust 'Access' Dashboard.
+  #     audience: bea6df23b944e4a0cd178609ba1bb64dc98dfe1f66ae7b918e563f6cf28b37e0
+
+  # # OpenID Connect (Single Sign-On)
+  # oidc:
+  #   # The ID is used in API endpoints, for example, in redirect URL
+  #   # `http://<uptrace-host>/api/v1/sso/<oidc-id>/callback`.
+  #   - id: keycloak
+  #     # Display name for the button in the login form.
+  #     # Default to 'OpenID Connect'
+  #     display_name: Keycloak
+  #     # The base URL for the OIDC provider.
+  #     issuer_url: http://localhost:8080/realms/uptrace
+  #     # The OAuth 2.0 Client ID
+  #     client_id: uptrace
+  #     # The OAuth 2.0 Client Secret
+  #     client_secret: ogbhd8Q0X0e5AZFGSG3m9oirPvnetqkA
+  #     # Additional OAuth 2.0 scopes to request from the OIDC provider.
+  #     # Defaults to 'profile'. 'openid' is requested by default and need not be specified.
+  #     scopes:
+  #       - profile
+  #     # The OIDC UserInfo claim to use as the user's username.
+  #     # Defaults to 'preferred_username'.
+  #     claim: preferred_username
+
+##
+## Alerting rules for monitoring metrics.
+##
+## See https://uptrace.dev/get/alerting.html for details.
+##
+alerting:
+  rules:
+    - name: Network errors
+      metrics:
+        - system.network.errors as $net_errors
+      query:
+        - $net_errors > 0 group by host.name
+      # for the last 5 minutes
+      for: 5m
+      annotations:
+        summary: "{{ $labels.host_name }} has high number of net errors: {{ $values.net_errors }}"
+
+    - name: Filesystem usage >= 90%
+      metrics:
+        - system.filesystem.usage as $fs_usage
+      query:
+        - group by host.name
+        - group by device
+        - where device !~ "loop"
+        - $fs_usage{state="used"} / $fs_usage >= 0.9
+      for: 5m
+      annotations:
+        summary: "{{ $labels.host_name }} has high FS usage: {{ $values.fs_usage }}"
+
+    - name: Uptrace is dropping spans
+      metrics:
+        - uptrace.projects.spans as $spans
+      query:
+        - $spans{type=dropped} > 0
+      for: 1m
+      annotations:
+        summary: "Uptrace has dropped {{ $values.spans }} spans"
+
+    - name: Always firing (for fun and testing)
+      metrics:
+        - process.runtime.go.goroutines as $goroutines
+      query:
+        - $goroutines >= 0 group by host.name
+      for: 1m
+      annotations:
+        summary: "{{ $labels.host_name }} has high number of goroutines: {{ $values.goroutines }}"
+
+  # Create alerts from error logs and span events.
+  create_alerts_from_spans:
+    enabled: true
+    labels:
+      alert_kind: error
+
+##
+## AlertManager client configuration.
+## See https://uptrace.dev/get/alerting.html for details.
+##
+## Note that this is NOT an AlertManager config and you need to configure AlertManager separately.
+## See https://prometheus.io/docs/alerting/latest/configuration/ for details.
+##
+alertmanager_client:
+  # AlertManager API endpoints that Uptrace uses to manage alerts.
+  urls:
+    - "http://alertmanager:9093/api/v2/alerts"
+
+##
+## Various options to tweak ClickHouse schema.
+## For changes to take effect, you need to reset the ClickHouse database with `ch reset`.
+##
+ch_schema:
+  # Compression codec, for example, LZ4, ZSTD(3), or Default.
+  compression: ZSTD(3)
+
+  # Whether to use ReplicatedMergeTree instead of MergeTree.
+  replicated: false
+
+  # Cluster name for Distributed tables and ON CLUSTER clause.
+  #cluster: uptrace1
+
+  spans:
+    storage_policy: "default"
+    # Delete spans data after 30 days.
+    ttl_delete: 30 DAY
+
+  metrics:
+    storage_policy: "default"
+    # Delete metrics data after 90 days.
+    ttl_delete: 90 DAY
+
+##
+## Addresses on which Uptrace receives gRPC and HTTP requests.
+##
+listen:
+  # OTLP/gRPC API.
+  grpc:
+    addr: ":14317"
+    # tls:
+    #   cert_file: config/tls/uptrace.crt
+    #   key_file: config/tls/uptrace.key
+
+  # OTLP/HTTP API and Uptrace API with UI.
+  http:
+    addr: ":14318"
+    # tls:
+    #   cert_file: config/tls/uptrace.crt
+    #   key_file: config/tls/uptrace.key
+
+##
+## Various options for Uptrace UI.
+##
+site:
+  # Overrides public URL for Vue-powered UI in case you put Uptrace behind a proxy.
+  #addr: 'https://uptrace.mydomain.com'
+
+##
+## Spans processing options.
+##
+spans:
+  # The size of the Go chan used to buffer incoming spans.
+  # If the buffer is full, Uptrace starts to drop spans.
+  #buffer_size: 100000
+
+  # The number of spans to insert in a single query.
+  #batch_size: 10000
+
+##
+## Metrics processing options.
+##
+metrics:
+  # List of attributes to drop for being noisy.
+  drop_attrs:
+    - telemetry.sdk.language
+    - telemetry.sdk.name
+    - telemetry.sdk.version
+
+  # The size of the Go chan used to buffer incoming measures.
+  # If the buffer is full, Uptrace starts to drop measures.
+  #buffer_size: 100000
+
+  # The number of measures to insert in a single query.
+  #batch_size: 10000
+
+##
+## SQLite/PostgreSQL db that is used to store metadata such as metric names, dashboards, alerts,
+## and so on.
+##
+db:
+  # Either sqlite or postgres.
+  driver: sqlite
+  # Database connection string.
+  #
+  # Uptrace automatically creates SQLite database file in the current working directory.
+  # Make sure the directory is writable by Uptrace process.
+  dsn: "file:uptrace.sqlite3?_pragma=foreign_keys(1)&_pragma=busy_timeout(1000)"
+
+# Secret key that is used to sign JWT tokens etc.
+secret_key: 102c1a557c314fc28198acd017960843
+
+# Enable to log HTTP requests and database queries.
+debug: false
diff --git a/docs/examples/opentelemetry_api_examples.ipynb b/docs/examples/opentelemetry_api_examples.ipynb
new file mode 100644
index 0000000..28fe758
--- /dev/null
+++ b/docs/examples/opentelemetry_api_examples.ipynb
@@ -0,0 +1,423 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "7b02ea52",
+   "metadata": {},
+   "source": [
+    "# OpenTelemetry Python API"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "56520927",
+   "metadata": {},
+   "source": [
+    "## Install OpenTelemetry"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "c0ed8440",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Defaulting to user installation because normal site-packages is not writeable\n",
+      "Requirement already satisfied: opentelemetry-api in /home/vmihailenco/.local/lib/python3.10/site-packages (1.14.0)\n",
+      "Requirement already satisfied: opentelemetry-sdk in /home/vmihailenco/.local/lib/python3.10/site-packages (1.14.0)\n",
+      "Requirement already satisfied: setuptools>=16.0 in /usr/lib/python3/dist-packages (from opentelemetry-api) (59.6.0)\n",
+      "Requirement already satisfied: deprecated>=1.2.6 in /home/vmihailenco/.local/lib/python3.10/site-packages (from opentelemetry-api) (1.2.13)\n",
+      "Requirement already satisfied: opentelemetry-semantic-conventions==0.35b0 in /home/vmihailenco/.local/lib/python3.10/site-packages (from opentelemetry-sdk) (0.35b0)\n",
+      "Requirement already satisfied: typing-extensions>=3.7.4 in /home/vmihailenco/.local/lib/python3.10/site-packages (from opentelemetry-sdk) (4.4.0)\n",
+      "Requirement already satisfied: wrapt<2,>=1.10 in /home/vmihailenco/.local/lib/python3.10/site-packages (from deprecated>=1.2.6->opentelemetry-api) (1.14.1)\n",
+      "Note: you may need to restart the kernel to use updated packages.\n"
+     ]
+    }
+   ],
+   "source": [
+    "pip install opentelemetry-api opentelemetry-sdk"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "861fa9cb",
+   "metadata": {},
+   "source": [
+    "### Configure OpenTelemetry with console exporter"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "c061b6cb",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from opentelemetry import trace\n",
+    "from opentelemetry.sdk.trace import TracerProvider\n",
+    "from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter\n",
+    "\n",
+    "trace.set_tracer_provider(TracerProvider())\n",
+    "trace.get_tracer_provider().add_span_processor(\n",
+    "    BatchSpanProcessor(ConsoleSpanExporter())\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ae4a626c",
+   "metadata": {},
+   "source": [
+    "### Create a span using the tracer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "f918501b",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\n",
+      "    \"name\": \"operation-name\",\n",
+      "    \"context\": {\n",
+      "        \"trace_id\": \"0xff14cec5f33afeca0d04ced2c2185b39\",\n",
+      "        \"span_id\": \"0xd06e73b03bd55b4a\",\n",
+      "        \"trace_state\": \"[]\"\n",
+      "    },\n",
+      "    \"kind\": \"SpanKind.INTERNAL\",\n",
+      "    \"parent_id\": null,\n",
+      "    \"start_time\": \"2022-12-07T13:46:11.050878Z\",\n",
+      "    \"end_time\": \"2022-12-07T13:46:12.051944Z\",\n",
+      "    \"status\": {\n",
+      "        \"status_code\": \"UNSET\"\n",
+      "    },\n",
+      "    \"attributes\": {},\n",
+      "    \"events\": [],\n",
+      "    \"links\": [],\n",
+      "    \"resource\": {\n",
+      "        \"attributes\": {\n",
+      "            \"telemetry.sdk.language\": \"python\",\n",
+      "            \"telemetry.sdk.name\": \"opentelemetry\",\n",
+      "            \"telemetry.sdk.version\": \"1.14.0\",\n",
+      "            \"service.name\": \"unknown_service\"\n",
+      "        },\n",
+      "        \"schema_url\": \"\"\n",
+      "    }\n",
+      "}\n"
+     ]
+    }
+   ],
+   "source": [
+    "import time\n",
+    "\n",
+    "tracer = trace.get_tracer(\"app_or_package_name\", \"1.0.0\")\n",
+    "\n",
+    "# measure the timing of the operation\n",
+    "with tracer.start_as_current_span(\"operation-name\") as span:\n",
+    "    time.sleep(1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ec4267aa",
+   "metadata": {},
+   "source": [
+    "### Record attributes"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "fa9d265f",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\n",
+      "    \"name\": \"operation-name\",\n",
+      "    \"context\": {\n",
+      "        \"trace_id\": \"0xfc11f0cc7afeefd79134eea639f5c78b\",\n",
+      "        \"span_id\": \"0xee791bf3cab65079\",\n",
+      "        \"trace_state\": \"[]\"\n",
+      "    },\n",
+      "    \"kind\": \"SpanKind.INTERNAL\",\n",
+      "    \"parent_id\": null,\n",
+      "    \"start_time\": \"2022-12-07T13:46:30.886188Z\",\n",
+      "    \"end_time\": \"2022-12-07T13:46:31.887323Z\",\n",
+      "    \"status\": {\n",
+      "        \"status_code\": \"UNSET\"\n",
+      "    },\n",
+      "    \"attributes\": {\n",
+      "        \"enduser.id\": \"jupyter\",\n",
+      "        \"enduser.email\": \"jupyter@redis-py\"\n",
+      "    },\n",
+      "    \"events\": [],\n",
+      "    \"links\": [],\n",
+      "    \"resource\": {\n",
+      "        \"attributes\": {\n",
+      "            \"telemetry.sdk.language\": \"python\",\n",
+      "            \"telemetry.sdk.name\": \"opentelemetry\",\n",
+      "            \"telemetry.sdk.version\": \"1.14.0\",\n",
+      "            \"service.name\": \"unknown_service\"\n",
+      "        },\n",
+      "        \"schema_url\": \"\"\n",
+      "    }\n",
+      "}\n"
+     ]
+    }
+   ],
+   "source": [
+    "with tracer.start_as_current_span(\"operation-name\") as span:\n",
+    "    if span.is_recording():\n",
+    "        span.set_attribute(\"enduser.id\", \"jupyter\")\n",
+    "        span.set_attribute(\"enduser.email\", \"jupyter@redis-py\")\n",
+    "    time.sleep(1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e40655de",
+   "metadata": {},
+   "source": [
+    "### Change the span kind"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "af2980ac",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\n",
+      "    \"name\": \"operation-name\",\n",
+      "    \"context\": {\n",
+      "        \"trace_id\": \"0x2b4d1ba36423e6c17067079f044b5b62\",\n",
+      "        \"span_id\": \"0x323d6107cfe594bd\",\n",
+      "        \"trace_state\": \"[]\"\n",
+      "    },\n",
+      "    \"kind\": \"SpanKind.SERVER\",\n",
+      "    \"parent_id\": null,\n",
+      "    \"start_time\": \"2022-12-07T13:53:20.538393Z\",\n",
+      "    \"end_time\": \"2022-12-07T13:53:20.638595Z\",\n",
+      "    \"status\": {\n",
+      "        \"status_code\": \"UNSET\"\n",
+      "    },\n",
+      "    \"attributes\": {},\n",
+      "    \"events\": [],\n",
+      "    \"links\": [],\n",
+      "    \"resource\": {\n",
+      "        \"attributes\": {\n",
+      "            \"telemetry.sdk.language\": \"python\",\n",
+      "            \"telemetry.sdk.name\": \"opentelemetry\",\n",
+      "            \"telemetry.sdk.version\": \"1.14.0\",\n",
+      "            \"service.name\": \"unknown_service\"\n",
+      "        },\n",
+      "        \"schema_url\": \"\"\n",
+      "    }\n",
+      "}\n"
+     ]
+    }
+   ],
+   "source": [
+    "with tracer.start_as_current_span(\"operation-name\", kind=trace.SpanKind.SERVER) as span:\n",
+    "    time.sleep(0.1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "2a9f1d99",
+   "metadata": {},
+   "source": [
+    "### Exceptions are automatically recorded"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "1b453d66",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "ValueError",
+     "evalue": "",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[6], line 3\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m tracer\u001b[38;5;241m.\u001b[39mstart_as_current_span(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124moperation-name\u001b[39m\u001b[38;5;124m\"\u001b[39m, kind\u001b[38;5;241m=\u001b[39mtrace\u001b[38;5;241m.\u001b[39mSpanKind\u001b[38;5;241m.\u001b[39mSERVER) \u001b[38;5;28;01mas\u001b[39;00m span:\n\u001b[1;32m      2\u001b[0m     time\u001b[38;5;241m.\u001b[39msleep(\u001b[38;5;241m0.1\u001b[39m)\n\u001b[0;32m----> 3\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m\n",
+      "\u001b[0;31mValueError\u001b[0m: "
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\n",
+      "    \"name\": \"operation-name\",\n",
+      "    \"context\": {\n",
+      "        \"trace_id\": \"0x20457d98d4456b99810163027c7899de\",\n",
+      "        \"span_id\": \"0xf16e4c1620091c72\",\n",
+      "        \"trace_state\": \"[]\"\n",
+      "    },\n",
+      "    \"kind\": \"SpanKind.SERVER\",\n",
+      "    \"parent_id\": null,\n",
+      "    \"start_time\": \"2022-12-07T13:55:24.108227Z\",\n",
+      "    \"end_time\": \"2022-12-07T13:55:24.208771Z\",\n",
+      "    \"status\": {\n",
+      "        \"status_code\": \"ERROR\",\n",
+      "        \"description\": \"ValueError: \"\n",
+      "    },\n",
+      "    \"attributes\": {},\n",
+      "    \"events\": [\n",
+      "        {\n",
+      "            \"name\": \"exception\",\n",
+      "            \"timestamp\": \"2022-12-07T13:55:24.208730Z\",\n",
+      "            \"attributes\": {\n",
+      "                \"exception.type\": \"ValueError\",\n",
+      "                \"exception.message\": \"\",\n",
+      "                \"exception.stacktrace\": \"Traceback (most recent call last):\\n  File \\\"/home/vmihailenco/.local/lib/python3.10/site-packages/opentelemetry/trace/__init__.py\\\", line 573, in use_span\\n    yield span\\n  File \\\"/home/vmihailenco/.local/lib/python3.10/site-packages/opentelemetry/sdk/trace/__init__.py\\\", line 1033, in start_as_current_span\\n    yield span_context\\n  File \\\"/tmp/ipykernel_241440/2787006841.py\\\", line 3, in <module>\\n    raise ValueError\\nValueError\\n\",\n",
+      "                \"exception.escaped\": \"False\"\n",
+      "            }\n",
+      "        }\n",
+      "    ],\n",
+      "    \"links\": [],\n",
+      "    \"resource\": {\n",
+      "        \"attributes\": {\n",
+      "            \"telemetry.sdk.language\": \"python\",\n",
+      "            \"telemetry.sdk.name\": \"opentelemetry\",\n",
+      "            \"telemetry.sdk.version\": \"1.14.0\",\n",
+      "            \"service.name\": \"unknown_service\"\n",
+      "        },\n",
+      "        \"schema_url\": \"\"\n",
+      "    }\n",
+      "}\n"
+     ]
+    }
+   ],
+   "source": [
+    "with tracer.start_as_current_span(\"operation-name\", kind=trace.SpanKind.SERVER) as span:\n",
+    "    time.sleep(0.1)\n",
+    "    raise ValueError"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "23708329",
+   "metadata": {},
+   "source": [
+    "### Use nested blocks to create child spans"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "9eb261d7",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\n",
+      "    \"name\": \"child-span\",\n",
+      "    \"context\": {\n",
+      "        \"trace_id\": \"0x5625fbd0a1be15b49cda0d2bb236d158\",\n",
+      "        \"span_id\": \"0xc13b2c102566ffaf\",\n",
+      "        \"trace_state\": \"[]\"\n",
+      "    },\n",
+      "    \"kind\": \"SpanKind.INTERNAL\",\n",
+      "    \"parent_id\": \"0xa5f1a9afdf26173c\",\n",
+      "    \"start_time\": \"2022-12-07T13:57:14.011221Z\",\n",
+      "    \"end_time\": \"2022-12-07T13:57:14.011279Z\",\n",
+      "    \"status\": {\n",
+      "        \"status_code\": \"UNSET\"\n",
+      "    },\n",
+      "    \"attributes\": {\n",
+      "        \"foo\": \"bar\"\n",
+      "    },\n",
+      "    \"events\": [],\n",
+      "    \"links\": [],\n",
+      "    \"resource\": {\n",
+      "        \"attributes\": {\n",
+      "            \"telemetry.sdk.language\": \"python\",\n",
+      "            \"telemetry.sdk.name\": \"opentelemetry\",\n",
+      "            \"telemetry.sdk.version\": \"1.14.0\",\n",
+      "            \"service.name\": \"unknown_service\"\n",
+      "        },\n",
+      "        \"schema_url\": \"\"\n",
+      "    }\n",
+      "}\n",
+      "{\n",
+      "    \"name\": \"operation-name\",\n",
+      "    \"context\": {\n",
+      "        \"trace_id\": \"0x5625fbd0a1be15b49cda0d2bb236d158\",\n",
+      "        \"span_id\": \"0xa5f1a9afdf26173c\",\n",
+      "        \"trace_state\": \"[]\"\n",
+      "    },\n",
+      "    \"kind\": \"SpanKind.INTERNAL\",\n",
+      "    \"parent_id\": null,\n",
+      "    \"start_time\": \"2022-12-07T13:57:13.910849Z\",\n",
+      "    \"end_time\": \"2022-12-07T13:57:14.011320Z\",\n",
+      "    \"status\": {\n",
+      "        \"status_code\": \"UNSET\"\n",
+      "    },\n",
+      "    \"attributes\": {},\n",
+      "    \"events\": [],\n",
+      "    \"links\": [],\n",
+      "    \"resource\": {\n",
+      "        \"attributes\": {\n",
+      "            \"telemetry.sdk.language\": \"python\",\n",
+      "            \"telemetry.sdk.name\": \"opentelemetry\",\n",
+      "            \"telemetry.sdk.version\": \"1.14.0\",\n",
+      "            \"service.name\": \"unknown_service\"\n",
+      "        },\n",
+      "        \"schema_url\": \"\"\n",
+      "    }\n",
+      "}\n"
+     ]
+    }
+   ],
+   "source": [
+    "with tracer.start_as_current_span(\"operation-name\") as span:\n",
+    "    time.sleep(0.1)\n",
+    "    with tracer.start_as_current_span(\"child-span\") as span:\n",
+    "        span.set_attribute(\"foo\", \"bar\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/examples/pipeline_examples.ipynb b/docs/examples/pipeline_examples.ipynb
index 490d221..4e20375 100644
--- a/docs/examples/pipeline_examples.ipynb
+++ b/docs/examples/pipeline_examples.ipynb
@@ -123,7 +123,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The responses of the three commands are stored in a list. In the above example, the two first boolean indicates that the the `set` commands were successfull and the last element of the list is the result of the `get(\"a\")` comand."
+    "The responses of the three commands are stored in a list. In the above example, the two first boolean indicates that the `set` commands were successfull and the last element of the list is the result of the `get(\"a\")` comand."
    ]
   },
   {
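The note above describes the shape of a pipeline reply; a rough sketch of that pattern (assuming a local Redis on the default port) is:

```python
import redis

r = redis.Redis(host="localhost", port=6379)

pipe = r.pipeline()
pipe.set("a", "foo")   # queued locally, not yet sent
pipe.set("b", "bar")
pipe.get("a")
responses = pipe.execute()  # sends all three commands in one round trip

# Two booleans for the SET commands, then the GET result,
# e.g. [True, True, b'foo']
print(responses)
```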
diff --git a/docs/examples/redis-stream-example.ipynb b/docs/examples/redis-stream-example.ipynb
new file mode 100644
index 0000000..9303b52
--- /dev/null
+++ b/docs/examples/redis-stream-example.ipynb
@@ -0,0 +1,754 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Redis Stream Examples"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## basic config"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "redis_host = \"redis\"\n",
+    "stream_key = \"skey\"\n",
+    "stream2_key = \"s2key\"\n",
+    "group1 = \"grp1\"\n",
+    "group2 = \"grp2\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## connection"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import redis\n",
+    "from time import time\n",
+    "from redis.exceptions import ConnectionError, DataError, NoScriptError, RedisError, ResponseError\n",
+    "\n",
+    "r = redis.Redis( redis_host )\n",
+    "r.ping()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## xadd and xread"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### add some data to the stream"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "stream length: 10\n"
+     ]
+    }
+   ],
+   "source": [
+    "for i in range(0,10):\n",
+    "    r.xadd( stream_key, { 'ts': time(), 'v': i } )\n",
+    "print( f\"stream length: {r.xlen( stream_key )}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### read some data from the stream"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[[b'skey', [(b'1657571033115-0', {b'ts': b'1657571033.1128936', b'v': b'0'}), (b'1657571033117-0', {b'ts': b'1657571033.1176307', b'v': b'1'})]]]\n"
+     ]
+    }
+   ],
+   "source": [
+    "## read 2 entries from stream_key\n",
+    "l = r.xread( count=2, streams={stream_key:0} )\n",
+    "print(l)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### extract data from the returned structure"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got data from stream: b'skey'\n",
+      "id: b'1657571033115-0' value: b'0'\n",
+      "id: b'1657571033117-0' value: b'1'\n"
+     ]
+    }
+   ],
+   "source": [
+    "first_stream = l[0]\n",
+    "print( f\"got data from stream: {first_stream[0]}\")\n",
+    "fs_data = first_stream[1]\n",
+    "for id, value in fs_data:\n",
+    "    print( f\"id: {id} value: {value[b'v']}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### read more data from the stream\n",
+    "if we call the `xread` with the same arguments we will get the same data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "id: b'1657571033115-0' value: b'0'\n",
+      "id: b'1657571033117-0' value: b'1'\n"
+     ]
+    }
+   ],
+   "source": [
+    "l = r.xread( count=2, streams={stream_key:0} )\n",
+    "for id, value in l[0][1]:\n",
+    "    print( f\"id: {id} value: {value[b'v']}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "to get new data we need to change the key passed to the call"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "id: b'1657571033118-0' value: b'2'\n",
+      "id: b'1657571033119-0' value: b'3'\n"
+     ]
+    }
+   ],
+   "source": [
+    "last_id_returned = l[0][1][-1][0]\n",
+    "l = r.xread( count=2, streams={stream_key: last_id_returned} )\n",
+    "for id, value in l[0][1]:\n",
+    "    print( f\"id: {id} value: {value[b'v']}\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "id: b'1657571033119-1' value: b'4'\n",
+      "id: b'1657571033121-0' value: b'5'\n"
+     ]
+    }
+   ],
+   "source": [
+    "last_id_returned = l[0][1][-1][0]\n",
+    "l = r.xread( count=2, streams={stream_key: last_id_returned} )\n",
+    "for id, value in l[0][1]:\n",
+    "    print( f\"id: {id} value: {value[b'v']}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "to get only newer entries"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "stream length: 10\n",
+      "after 5s block, got an empty list [], no *new* messages on the stream\n",
+      "stream length: 10\n"
+     ]
+    }
+   ],
+   "source": [
+    "print( f\"stream length: {r.xlen( stream_key )}\")\n",
+    "# wait for 5s for new messages\n",
+    "l = r.xread( count=1, block=5000, streams={stream_key: '$'} )\n",
+    "print( f\"after 5s block, got an empty list {l}, no *new* messages on the stream\")\n",
+    "print( f\"stream length: {r.xlen( stream_key )}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 2nd stream\n",
+    "Add some messages to a 2nd stream"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "stream length: 10\n"
+     ]
+    }
+   ],
+   "source": [
+    "for i in range(1000,1010):\n",
+    "    r.xadd( stream2_key, { 'v': i } )\n",
+    "print( f\"stream length: {r.xlen( stream2_key )}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "get messages from the 2 streams"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got from b'skey' the entry [(b'1657571033115-0', {b'ts': b'1657571033.1128936', b'v': b'0'})]\n",
+      "got from b's2key' the entry [(b'1657571042111-0', {b'v': b'1000'})]\n"
+     ]
+    }
+   ],
+   "source": [
+    "l = r.xread( count=1, streams={stream_key:0,stream2_key:0} )\n",
+    "for k,d in l:\n",
+    "    print(f\"got from {k} the entry {d}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# stream groups\n",
+    "With the groups is possible track, for many consumers, and at the Redis side, which message have been already consumed.\n",
+    "## add some data to streams\n",
+    "Creating 2 streams with 10 messages each."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "stream 'skey' length: 20\n",
+      "stream 's2key' length: 20\n"
+     ]
+    }
+   ],
+   "source": [
+    "def add_some_data_to_stream( sname, key_range ):\n",
+    "    for i in key_range:\n",
+    "        r.xadd( sname, { 'ts': time(), 'v': i } )\n",
+    "    print( f\"stream '{sname}' length: {r.xlen( stream_key )}\")\n",
+    "\n",
+    "add_some_data_to_stream( stream_key, range(0,10) )\n",
+    "add_some_data_to_stream( stream2_key, range(1000,1010) )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## use a group to read from the stream\n",
+    "* create a group `grp1` with the stream `skey`, and\n",
+    "* create a group `grp2` with the streams `skey` and `s2key`\n",
+    "\n",
+    "Use the `xinfo_group` to verify the result of the group creation."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "skey -> group name: b'grp1' with 0 consumers and b'0-0' as last read id\n",
+      "skey -> group name: b'grp2' with 0 consumers and b'0-0' as last read id\n",
+      "s2key -> group name: b'grp2' with 0 consumers and b'0-0' as last read id\n"
+     ]
+    }
+   ],
+   "source": [
+    "## create the group\n",
+    "def create_group( skey, gname ):\n",
+    "    try:\n",
+    "        r.xgroup_create( name=skey, groupname=gname, id=0 )\n",
+    "    except ResponseError as e:\n",
+    "        print(f\"raised: {e}\")\n",
+    "\n",
+    "# group1 read the stream 'skey'\n",
+    "create_group( stream_key, group1 )\n",
+    "# group2 read the streams 'skey' and 's2key'\n",
+    "create_group( stream_key, group2 )\n",
+    "create_group( stream2_key, group2 )\n",
+    "\n",
+    "def group_info( skey ):\n",
+    "    res = r.xinfo_groups( name=skey )\n",
+    "    for i in res:\n",
+    "        print( f\"{skey} -> group name: {i['name']} with {i['consumers']} consumers and {i['last-delivered-id']}\"\n",
+    "              + f\" as last read id\")\n",
+    "    \n",
+    "group_info( stream_key )\n",
+    "group_info( stream2_key )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## group read\n",
+    "The `xreadgroup` method permit to read from a stream group."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def print_xreadgroup_reply( reply, group = None, run = None):\n",
+    "    for d_stream in reply:\n",
+    "        for element in d_stream[1]:\n",
+    "            print(  f\"got element {element[0]}\"\n",
+    "                  + f\"from stream {d_stream[0]}\" )\n",
+    "            if run is not None:\n",
+    "                run( d_stream[0], group, element[0] )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got element b'1657571033115-0'from stream b'skey'\n",
+      "got element b'1657571033117-0'from stream b'skey'\n"
+     ]
+    }
+   ],
+   "source": [
+    "# read some messages on group1 with consumer 'c' \n",
+    "d = r.xreadgroup( groupname=group1, consumername='c', block=10,\n",
+    "                  count=2, streams={stream_key:'>'})\n",
+    "print_xreadgroup_reply( d )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A **2nd consumer** for the same stream group will get not delivered messages."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got element b'1657571033118-0'from stream b'skey'\n",
+      "got element b'1657571033119-0'from stream b'skey'\n"
+     ]
+    }
+   ],
+   "source": [
+    "# read some messages on group1 with consumer 'c' \n",
+    "d = r.xreadgroup( groupname=group1, consumername='c2', block=10,\n",
+    "                  count=2, streams={stream_key:'>'})\n",
+    "print_xreadgroup_reply( d )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "But a **2nd stream group** can read the already delivered messages again.\n",
+    "\n",
+    "Note that the 2nd stream group include also the 2nd stream.\n",
+    "That can be identified in the reply (1st element of the reply list)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got element b'1657571033115-0'from stream b'skey'\n",
+      "got element b'1657571033117-0'from stream b'skey'\n",
+      "got element b'1657571042111-0'from stream b's2key'\n",
+      "got element b'1657571042113-0'from stream b's2key'\n"
+     ]
+    }
+   ],
+   "source": [
+    "d2 = r.xreadgroup( groupname=group2, consumername='c', block=10,\n",
+    "                   count=2, streams={stream_key:'>',stream2_key:'>'})\n",
+    "print_xreadgroup_reply( d2 )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To check for pending messages (delivered messages without acknowledgment) we can use the `xpending`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "4 pending messages on 'skey' for group 'grp1'\n",
+      "2 pending messages on 'skey' for group 'grp2'\n",
+      "2 pending messages on 's2key' for group 'grp2'\n"
+     ]
+    }
+   ],
+   "source": [
+    "# check pending status (read messages without a ack)\n",
+    "def print_pending_info( key_group ):\n",
+    "    for s,k in key_group:\n",
+    "        pr = r.xpending( name=s, groupname=k )\n",
+    "        print( f\"{pr.get('pending')} pending messages on '{s}' for group '{k}'\" )\n",
+    "    \n",
+    "print_pending_info( ((stream_key,group1),(stream_key,group2),(stream2_key,group2)) )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## ack\n",
+    "Acknowledge some messages with `xack`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got element b'1657571033118-0'from stream b'skey'\n",
+      "got element b'1657571033119-0'from stream b'skey'\n"
+     ]
+    }
+   ],
+   "source": [
+    "# do acknowledges for group1\n",
+    "toack = lambda k,g,e: r.xack( k,g, e )\n",
+    "print_xreadgroup_reply( d, group=group1, run=toack )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2 pending messages on 'skey' for group 'grp1'\n",
+      "2 pending messages on 'skey' for group 'grp2'\n",
+      "2 pending messages on 's2key' for group 'grp2'\n"
+     ]
+    }
+   ],
+   "source": [
+    "# check pending again\n",
+    "print_pending_info( ((stream_key,group1),(stream_key,group2),(stream2_key,group2)) )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "ack all messages on the `group1`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got element b'1657571033119-1'from stream b'skey'\n",
+      "got element b'1657571033121-0'from stream b'skey'\n",
+      "got element b'1657571033121-1'from stream b'skey'\n",
+      "got element b'1657571033121-2'from stream b'skey'\n",
+      "got element b'1657571033122-0'from stream b'skey'\n",
+      "got element b'1657571033122-1'from stream b'skey'\n",
+      "got element b'1657571049557-0'from stream b'skey'\n",
+      "got element b'1657571049557-1'from stream b'skey'\n",
+      "got element b'1657571049558-0'from stream b'skey'\n",
+      "got element b'1657571049559-0'from stream b'skey'\n",
+      "got element b'1657571049559-1'from stream b'skey'\n",
+      "got element b'1657571049559-2'from stream b'skey'\n",
+      "got element b'1657571049560-0'from stream b'skey'\n",
+      "got element b'1657571049562-0'from stream b'skey'\n",
+      "got element b'1657571049563-0'from stream b'skey'\n",
+      "got element b'1657571049563-1'from stream b'skey'\n",
+      "2 pending messages on 'skey' for group 'grp1'\n"
+     ]
+    }
+   ],
+   "source": [
+    "d = r.xreadgroup( groupname=group1, consumername='c', block=10,\n",
+    "                      count=100, streams={stream_key:'>'})\n",
+    "print_xreadgroup_reply( d, group=group1, run=toack)\n",
+    "print_pending_info( ((stream_key,group1),) )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "But stream length will be the same after the `xack` of all messages on the `group1`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "20"
+      ]
+     },
+     "execution_count": 24,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "r.xlen(stream_key)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## delete all\n",
+    "To remove the messages with need to remote them explicitly with `xdel`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "s1 = r.xread( streams={stream_key:0} )\n",
+    "for streams in s1:\n",
+    "    stream_name, messages = streams\n",
+    "    # del all ids from the message list\n",
+    "    [ r.xdel( stream_name, i[0] ) for i in messages ]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "stream length"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0"
+      ]
+     },
+     "execution_count": 26,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "r.xlen(stream_key)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "But with the `xdel` the 2nd group can read any not processed message from the `skey`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got element b'1657571042113-1'from stream b's2key'\n",
+      "got element b'1657571042114-0'from stream b's2key'\n"
+     ]
+    }
+   ],
+   "source": [
+    "d2 = r.xreadgroup( groupname=group2, consumername='c', block=10,\n",
+    "                   count=2, streams={stream_key:'>',stream2_key:'>'})\n",
+    "print_xreadgroup_reply( d2 )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/docs/examples/search_vector_similarity_examples.ipynb b/docs/examples/search_vector_similarity_examples.ipynb
index 42c03f0..2b02610 100644
--- a/docs/examples/search_vector_similarity_examples.ipynb
+++ b/docs/examples/search_vector_similarity_examples.ipynb
@@ -74,7 +74,7 @@
     "r.hset(\"b\", \"v\", \"aaaabaaa\")\n",
     "r.hset(\"c\", \"v\", \"aaaaabaa\")\n",
     "\n",
-    "q = Query(\"*=>[KNN 2 @v $vec]\").return_field(\"__v_score\")\n",
+    "q = Query(\"*=>[KNN 2 @v $vec]\").return_field(\"__v_score\").dialect(2)\n",
     "r.ft().search(q, query_params={\"vec\": \"aaaaaaaa\"})"
    ]
   }
diff --git a/docs/examples/timeseries_examples.ipynb b/docs/examples/timeseries_examples.ipynb
new file mode 100644
index 0000000..691e133
--- /dev/null
+++ b/docs/examples/timeseries_examples.ipynb
@@ -0,0 +1,672 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Timeseries\n",
+    "\n",
+    "`redis-py` supports [RedisTimeSeries](https://github.com/RedisTimeSeries/RedisTimeSeries/) which is a time-series-database module for Redis.\n",
+    "\n",
+    "This example shows how to handle timeseries data with `redis-py`."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Health check"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import redis \n",
+    "\n",
+    "r = redis.Redis(decode_responses=True)\n",
+    "ts = r.ts()\n",
+    "\n",
+    "r.ping()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Simple example"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Create a timeseries"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.create(\"ts_key\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Add samples to the timeseries\n",
+    "\n",
+    "We can either set the timestamp with an UNIX timestamp in milliseconds or use * to set the timestamp based en server's clock."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "1657272304448"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.add(\"ts_key\", 1657265437756, 1)\n",
+    "ts.add(\"ts_key\", \"1657265437757\", 2)\n",
+    "ts.add(\"ts_key\", \"*\", 3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Get the last sample"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(1657272304448, 3.0)"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.get(\"ts_key\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Get samples between two timestamps\n",
+    "\n",
+    "The minimum and maximum possible timestamps can be expressed with respectfully - and +."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[(1657265437756, 1.0), (1657265437757, 2.0), (1657272304448, 3.0)]"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.range(\"ts_key\", \"-\", \"+\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[(1657265437756, 1.0), (1657265437757, 2.0)]"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.range(\"ts_key\", 1657265437756, 1657265437757)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Delete samples between two timestamps"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Before deletion:  [(1657265437756, 1.0), (1657265437757, 2.0), (1657272304448, 3.0)]\n",
+      "After deletion:   [(1657272304448, 3.0)]\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\"Before deletion: \", ts.range(\"ts_key\", \"-\", \"+\"))\n",
+    "ts.delete(\"ts_key\", 1657265437756, 1657265437757)\n",
+    "print(\"After deletion:  \", ts.range(\"ts_key\", \"-\", \"+\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Multiple timeseries with labels"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.create(\"ts_key1\")\n",
+    "ts.create(\"ts_key2\", labels={\"label1\": 1, \"label2\": 2})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Add samples to multiple timeseries"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[1657272306147, 1657272306147]"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.madd([(\"ts_key1\", \"*\", 1), (\"ts_key2\", \"*\", 2)])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Add samples with labels"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "1657272306457"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.add(\"ts_key2\", \"*\", 2,  labels={\"label1\": 1, \"label2\": 2})\n",
+    "ts.add(\"ts_key2\", \"*\", 2,  labels={\"label1\": 3, \"label2\": 4})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Get the last sample matching specific label\n",
+    "\n",
+    "Get the last sample that matches \"label1=1\", see [Redis documentation](https://redis.io/commands/ts.mget/) to see the posible filter values."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[{'ts_key2': [{}, 1657272306457, 2.0]}]"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.mget([\"label1=1\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Get also the label-value pairs of the sample:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[{'ts_key2': [{'label1': '1', 'label2': '2'}, 1657272306457, 2.0]}]"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.mget([\"label1=1\"], with_labels=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Retention period\n",
+    "\n",
+    "You can specify a retention period when creating timeseries objects or when adding a sample timeseries object. Once the retention period has elapsed, the sample is removed from the timeseries."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "retention_time = 1000\n",
+    "ts.create(\"ts_key_ret\", retention_msecs=retention_time)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Base timeseries:                      [(1657272307670, 1.0)]\n",
+      "Timeseries after 1000 milliseconds:   [(1657272307670, 1.0)]\n"
+     ]
+    }
+   ],
+   "source": [
+    "import time\n",
+    "# this will be deleted in 1000 milliseconds\n",
+    "ts.add(\"ts_key_ret\", \"*\", 1, retention_msecs=retention_time)\n",
+    "print(\"Base timeseries:                     \", ts.range(\"ts_key_ret\", \"-\", \"+\"))\n",
+    "# sleeping for 1000 milliseconds (1 second)\n",
+    "time.sleep(1)\n",
+    "print(\"Timeseries after 1000 milliseconds:  \", ts.range(\"ts_key_ret\", \"-\", \"+\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The two lists are the same, this is because the oldest values are deleted when a new sample is added."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "1657272308849"
+      ]
+     },
+     "execution_count": 15,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.add(\"ts_key_ret\", \"*\", 10)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[(1657272308849, 10.0)]"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.range(\"ts_key_ret\", \"-\", \"+\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Here the first sample has been deleted."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Specify duplicate policies\n",
+    "\n",
+    "By default, the policy for duplicates timestamp keys is set to \"BLOCK\", we cannot create two samples with the same timestamp:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "TSDB: Error at upsert, update is not supported when DUPLICATE_POLICY is set to BLOCK mode\n"
+     ]
+    }
+   ],
+   "source": [
+    "ts.add(\"ts_key\", 123456789, 1)\n",
+    "try:\n",
+    "    ts.add(\"ts_key\", 123456789, 2)\n",
+    "except Exception as err:\n",
+    "    print(err)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You can change this default behaviour using `duplicate_policy` parameter, for instance:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[(123456789, 2.0), (1657272304448, 3.0)]"
+      ]
+     },
+     "execution_count": 18,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# using policy \"LAST\", we keep the last added sample\n",
+    "ts.add(\"ts_key\", 123456789, 2, duplicate_policy=\"LAST\")\n",
+    "ts.range(\"ts_key\", \"-\", \"+\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For more informations about duplicate policies, see [Redis documentation](https://redis.io/commands/ts.add/)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using Redis TSDB to keep track of a value"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "1657272310241"
+      ]
+     },
+     "execution_count": 19,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.add(\"ts_key_incr\", \"*\", 0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Increment the value:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for _ in range(10):\n",
+    "    ts.incrby(\"ts_key_incr\", 1)\n",
+    "    # sleeping a bit so the timestamp are not duplicates\n",
+    "    time.sleep(0.01)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[(1657272310241, 0.0),\n",
+       " (1657272310533, 1.0),\n",
+       " (1657272310545, 2.0),\n",
+       " (1657272310556, 3.0),\n",
+       " (1657272310567, 4.0),\n",
+       " (1657272310578, 5.0),\n",
+       " (1657272310589, 6.0),\n",
+       " (1657272310600, 7.0),\n",
+       " (1657272310611, 8.0),\n",
+       " (1657272310622, 9.0),\n",
+       " (1657272310632, 10.0)]"
+      ]
+     },
+     "execution_count": 21,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "ts.range(\"ts_key_incr\", \"-\", \"+\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## How to execute multi-key commands on Open Source Redis Cluster"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "outputs": [
+    {
+     "data": {
+      "text/plain": "[{'ts_key1': [{}, 1670927124746, 2.0]}, {'ts_key2': [{}, 1670927124748, 10.0]}]"
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import redis\n",
+    "\n",
+    "r = redis.RedisCluster(host=\"localhost\", port=46379)\n",
+    "\n",
+    "# This command should be executed on all cluster nodes after creation and any re-sharding\n",
+    "# Please note that this command is internal and will be deprecated in the future\n",
+    "r.execute_command(\"timeseries.REFRESHCLUSTER\", target_nodes=\"primaries\")\n",
+    "\n",
+    "# Now multi-key commands can be executed\n",
+    "ts = r.ts()\n",
+    "ts.add(\"ts_key1\", \"*\", 2,  labels={\"label1\": 1, \"label2\": 2})\n",
+    "ts.add(\"ts_key2\", \"*\", 10,  labels={\"label1\": 1, \"label2\": 2})\n",
+    "ts.mget([\"label1=1\"])"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.9.2 64-bit",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.2"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/exceptions.rst b/docs/exceptions.rst
index b8aeb33..8a9fe45 100644
--- a/docs/exceptions.rst
+++ b/docs/exceptions.rst
@@ -1,4 +1,4 @@
-
+.. _exceptions-label:
 
 Exceptions
 ##########
diff --git a/docs/images/opentelemetry/distributed-tracing.png b/docs/images/opentelemetry/distributed-tracing.png
new file mode 100644
index 0000000..a011697
Binary files /dev/null and b/docs/images/opentelemetry/distributed-tracing.png differ
diff --git a/docs/images/opentelemetry/redis-metrics.png b/docs/images/opentelemetry/redis-metrics.png
new file mode 100644
index 0000000..7c2beb4
Binary files /dev/null and b/docs/images/opentelemetry/redis-metrics.png differ
diff --git a/docs/images/opentelemetry/redis-py-trace.png b/docs/images/opentelemetry/redis-py-trace.png
new file mode 100644
index 0000000..e443238
Binary files /dev/null and b/docs/images/opentelemetry/redis-py-trace.png differ
diff --git a/docs/images/opentelemetry/tree-of-spans.png b/docs/images/opentelemetry/tree-of-spans.png
new file mode 100644
index 0000000..399c8a0
Binary files /dev/null and b/docs/images/opentelemetry/tree-of-spans.png differ
diff --git a/docs/index.rst b/docs/index.rst
index 630bad4..a6ee05e 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -3,7 +3,7 @@
    You can adapt this file completely to your liking, but it should at least
    contain the root `toctree` directive.
 
-Welcome to redis-py's documentation!
+redis-py - Python Client for Redis
 ====================================
 
 Getting Started
@@ -69,6 +69,10 @@ Module Documentation
    exceptions
    lock
    retry
+   advanced_features
+   clustering
+   lua_scripting
+   opentelemetry
    examples
 
 Contributing
diff --git a/docs/lua_scripting.rst b/docs/lua_scripting.rst
new file mode 100644
index 0000000..8276dad
--- /dev/null
+++ b/docs/lua_scripting.rst
@@ -0,0 +1,110 @@
+Lua Scripting
+=============
+
+`Lua Scripting <#lua-scripting-in-default-connections>`__ \|
+`Pipelines <#pipelines>`__ \| `Cluster mode <#cluster-mode>`__
+
+--------------
+
+Lua Scripting in default connections
+------------------------------------
+
+redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there
+are a number of edge cases that make these commands tedious to use in
+real world scenarios. Therefore, redis-py exposes a Script object that
+makes scripting much easier to use. (RedisClusters have limited support
+for scripting.)
+
+To create a Script instance, use the register_script function on a
+client instance passing the Lua code as the first argument.
+register_script returns a Script instance that you can use throughout
+your code.
+
+The following trivial Lua script accepts two parameters: the name of a
+key and a multiplier value. The script fetches the value stored in the
+key, multiplies it with the multiplier value and returns the result.
+
+.. code:: pycon
+
+   >>> r = redis.Redis()
+   >>> lua = """
+   ... local value = redis.call('GET', KEYS[1])
+   ... value = tonumber(value)
+   ... return value * ARGV[1]"""
+   >>> multiply = r.register_script(lua)
+
+multiply is now a Script instance that is invoked by calling it like a
+function. Script instances accept the following optional arguments:
+
+-  **keys**: A list of key names that the script will access. This
+   becomes the KEYS list in Lua.
+-  **args**: A list of argument values. This becomes the ARGV list in
+   Lua.
+-  **client**: A redis-py Client or Pipeline instance that will invoke
+   the script. If client isn't specified, the client that initially
+   created the Script instance (the one that register_script was invoked
+   from) will be used.
+
+Continuing the example from above:
+
+.. code:: pycon
+
+   >>> r.set('foo', 2)
+   >>> multiply(keys=['foo'], args=[5])
+   10
+
+The value of key 'foo' is set to 2. When multiply is invoked, the 'foo'
+key is passed to the script along with the multiplier value of 5. Lua
+executes the script and returns the result, 10.
+
+Script instances can be executed using a different client instance, even
+one that points to a completely different Redis server.
+
+.. code:: pycon
+
+   >>> r2 = redis.Redis('redis2.example.com')
+   >>> r2.set('foo', 3)
+   >>> multiply(keys=['foo'], args=[5], client=r2)
+   15
+
+The Script object ensures that the Lua script is loaded into Redis's
+script cache. In the event of a NOSCRIPT error, it will load the script
+and retry executing it.
+
+Pipelines
+---------
+
+Script objects can also be used in pipelines. The pipeline instance
+should be passed as the client argument when calling the script. Care is
+taken to ensure that the script is registered in Redis's script cache
+just prior to pipeline execution.
+
+.. code:: pycon
+
+   >>> pipe = r.pipeline()
+   >>> pipe.set('foo', 5)
+   >>> multiply(keys=['foo'], args=[5], client=pipe)
+   >>> pipe.execute()
+   [True, 25]
+
+Cluster Mode
+------------
+
+Cluster mode has limited support for lua scripting.
+
+The following commands are supported, with caveats:
+
+- ``EVAL`` and ``EVALSHA``: The command is sent to the relevant node,
+  depending on the keys (i.e., in ``EVAL "<script>" num_keys key_1 ... key_n ...``).
+  The keys *must* all be on the same node. If the script requires 0 keys,
+  *the command is sent to a random (primary) node*.
+- ``SCRIPT EXISTS``: The command is sent to all primaries. The result is a
+  list of booleans corresponding to the input SHA hashes. Each boolean is an
+  AND of “does the script exist on each node?”. In other words, each boolean
+  is True iff the script exists on all nodes.
+- ``SCRIPT FLUSH``: The command is sent to all primaries. The result is a
+  bool AND over all nodes’ responses.
+- ``SCRIPT LOAD``: The command is sent to all primaries. The result is the
+  SHA1 digest.
+
+The following commands are not supported:
+
+- ``EVAL_RO``
+- ``EVALSHA_RO``
+
+Using scripting within pipelines in cluster mode is **not supported**.
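+
+As a rough, minimal sketch (not part of the upstream documentation), ``EVAL``
+against a cluster looks much like the standalone case, provided every key the
+script touches hashes to the same slot; the port ``16379`` and the hash-tagged
+key ``{tag}:foo`` below are illustrative assumptions only:
+
+.. code:: pycon
+
+   >>> from redis.cluster import RedisCluster
+   >>> rc = RedisCluster(host="localhost", port=16379)
+   >>> rc.set("{tag}:foo", 2)
+   >>> # one key, hash-tagged so the command is routed to a single node
+   >>> rc.eval("return tonumber(redis.call('GET', KEYS[1])) * ARGV[1]", 1, "{tag}:foo", 5)
+   10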
diff --git a/docs/opentelemetry.rst b/docs/opentelemetry.rst
new file mode 100644
index 0000000..9678102
--- /dev/null
+++ b/docs/opentelemetry.rst
@@ -0,0 +1,177 @@
+Integrating OpenTelemetry
+=========================
+
+What is OpenTelemetry?
+----------------------
+
+`OpenTelemetry <https://opentelemetry.io>`_ is an open-source observability framework for traces, metrics, and logs.
+
+OpenTelemetry allows developers to collect and export telemetry data in a vendor agnostic way. With OpenTelemetry, you can instrument your application once and then add or change vendors without changing the instrumentation, for example, here is a list of `popular DataDog competitors <https://uptrace.dev/get/compare/datadog-competitors.html>`_ that support OpenTelemetry.
+
+What is tracing?
+----------------
+
+`OpenTelemetry tracing <https://uptrace.dev/opentelemetry/distributed-tracing.html>`_ allows you to see how a request progresses through different services and systems, timings of each operation, any logs and errors as they occur.
+
+In a distributed environment, tracing also helps you understand relationships and interactions between microservices. Distributed tracing gives an insight into how a particular microservice is performing and how that service affects other microservices.
+
+.. image:: images/opentelemetry/distributed-tracing.png
+  :alt: Trace
+
+Using tracing, you can break down requests into spans. **Span** is an operation (unit of work) your app performs handling a request, for example, a database query or a network call.
+
+**Trace** is a tree of spans that shows the path that a request makes through an app. Root span is the first span in a trace.
+
+.. image:: images/opentelemetry/tree-of-spans.png
+  :alt: Trace
+
+To learn more about tracing, see `Distributed Tracing using OpenTelemetry <https://uptrace.dev/opentelemetry/distributed-tracing.html>`_.
+
+OpenTelemetry instrumentation
+-----------------------------
+
+Instrumentations are plugins for popular frameworks and libraries that use OpenTelemetry API to record important operations, for example, HTTP requests, DB queries, logs, errors, and more.
+
+To install OpenTelemetry `instrumentation <https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/redis/redis.html>`_ for redis-py:
+
+.. code-block:: shell
+
+   pip install opentelemetry-instrumentation-redis
+
+You can then use it to instrument code like this:
+
+.. code-block:: python
+
+   from opentelemetry.instrumentation.redis import RedisInstrumentor
+
+   RedisInstrumentor().instrument()
+
+Once the code is patched, you can use redis-py as usual:
+
+.. code-block:: python
+
+   # Sync client
+   client = redis.Redis()
+   client.get("my-key")
+
+   # Async client
+   client = redis.asyncio.Redis()
+   await client.get("my-key")
+
+OpenTelemetry API
+-----------------
+
+`OpenTelemetry <https://uptrace.dev/opentelemetry/>`_ API is a programming interface that you can use to instrument code and collect telemetry data such as traces, metrics, and logs.
+
+You can use OpenTelemetry API to measure important operations:
+
+.. code-block:: python
+
+   from opentelemetry import trace
+
+   tracer = trace.get_tracer("app_or_package_name", "1.0.0")
+
+   # Create a span with name "operation-name" and kind="client".
+   with tracer.start_as_current_span("operation-name", kind=trace.SpanKind.CLIENT) as span:
+       do_some_work()
+
+Record contextual information using attributes:
+
+.. code-block:: python
+
+   if span.is_recording():
+       span.set_attribute("http.method", "GET")
+       span.set_attribute("http.route", "/projects/:id")
+
+And monitor exceptions:
+
+.. code-block:: python
+
+   try:
+       do_some_work()
+   except ValueError as exc:
+       # Record the exception and update the span status.
+       span.record_exception(exc)
+       span.set_status(trace.Status(trace.StatusCode.ERROR, str(exc)))
+
+See `OpenTelemetry Python Tracing API <https://uptrace.dev/opentelemetry/python-tracing.html>`_ for details.
+
+Uptrace
+-------
+
+Uptrace is an `open-source APM <https://uptrace.dev/get/open-source-apm.html>`_ that supports distributed tracing, metrics, and logs. You can use it to monitor applications and set up automatic alerts to receive notifications via email, Slack, Telegram, and more.
+
+You can use Uptrace to monitor redis-py using this `GitHub example <https://github.com/redis/redis-py/tree/master/docs/examples/opentelemetry>`_ as a starting point.
+
+.. image:: images/opentelemetry/redis-py-trace.png
+  :alt: Redis-py trace
+
+You can `install Uptrace <https://uptrace.dev/get/install.html>`_ by downloading a DEB/RPM package or a pre-compiled binary.
+
+Monitoring Redis Server performance
+-----------------------------------
+
+In addition to monitoring redis-py client, you can also monitor Redis Server performance using OpenTelemetry Collector Agent.
+
+OpenTelemetry Collector is a proxy/middleman between your application and a `distributed tracing tool <https://uptrace.dev/get/compare/distributed-tracing-tools.html>`_ such as Uptrace or Jaeger. Collector receives telemetry data, processes it, and then exports the data to APM tools that can store it permanently.
+
+For example, you can use the Redis receiver provided by Otel Collector to `monitor Redis performance <https://uptrace.dev/opentelemetry/redis-monitoring.html>`_:
+
+.. image:: images/opentelemetry/redis-metrics.png
+  :alt: Redis metrics
+
+See introduction to `OpenTelemetry Collector <https://uptrace.dev/opentelemetry/collector.html>`_ for details.
+
+Alerting and notifications
+--------------------------
+
+Uptrace also allows you to monitor `OpenTelemetry metrics <https://uptrace.dev/opentelemetry/metrics.html>`_ using alerting rules. For example, the following rule uses the group by node expression to create an alert whenever an individual Redis shard is down:
+
+.. code-block:: yaml
+
+   # /etc/uptrace/uptrace.yml
+
+   alerting:
+     rules:
+       - name: Redis shard is down
+         metrics:
+           - redis_up as $redis_up
+         query:
+           - group by cluster # monitor each cluster,
+           - group by bdb # each database,
+           - group by node # and each shard
+           - $redis_up == 0
+         # shard should be down for 5 minutes to trigger an alert
+         for: 5m
+
+You can also create queries with more complex expressions. For example, the following rule creates an alert when the keyspace hit rate is lower than 75%:
+
+.. code-block:: yaml
+
+   # /etc/uptrace/uptrace.yml
+
+   alerting:
+     rules:
+       - name: Redis read hit rate < 75%
+         metrics:
+           - redis_keyspace_read_hits as $hits
+           - redis_keyspace_read_misses as $misses
+         query:
+           - group by cluster
+           - group by bdb
+           - group by node
+           - $hits / ($hits + $misses) < 0.75
+         for: 5m
+
+See `Alerting and Notifications <https://uptrace.dev/get/alerting.html>`_ for details.
+
+What's next?
+------------
+
+Next, you can learn how to configure `uptrace-python <https://uptrace.dev/get/uptrace-python.html>`_ to export spans, metrics, and logs to Uptrace.
+
+You may also be interested in the following guides:
+
+- `OpenTelemetry Django <https://uptrace.dev/opentelemetry/instrumentations/python-django.html>`_
+- `OpenTelemetry Flask <https://uptrace.dev/opentelemetry/instrumentations/python-flask.html>`_
+- `OpenTelemetry FastAPI <https://uptrace.dev/opentelemetry/instrumentations/python-fastapi.html>`_
+- `OpenTelemetry SQLAlchemy <https://uptrace.dev/opentelemetry/instrumentations/python-sqlalchemy.html>`_
+- `OpenTelemetry instrumentations <https://uptrace.dev/opentelemetry/instrumentations/>`_
diff --git a/docs/redismodules.rst b/docs/redismodules.rst
index 86a323a..2b0b3c6 100644
--- a/docs/redismodules.rst
+++ b/docs/redismodules.rst
@@ -14,23 +14,26 @@ These are the commands for interacting with the `RedisBloom module <https://redi
 .. code-block:: python
 
     import redis
-    filter = redis.bf().create("bloom", 0.01, 1000)
-    filter.add("bloom", "foo")
+    r = redis.Redis()
+    r.bf().create("bloom", 0.01, 1000)
+    r.bf().add("bloom", "foo")
 
 **Create and add to a cuckoo filter**
 
 .. code-block:: python
 
     import redis
-    filter = redis.cf().create("cuckoo", 1000)
-    filter.add("cuckoo", "filter")
+    r = redis.Redis()
+    r.cf().create("cuckoo", 1000)
+    r.cf().add("cuckoo", "filter")
 
 **Create Count-Min Sketch and get information**
 
 .. code-block:: python
 
     import redis
-    r = redis.cms().initbydim("dim", 1000, 5)
+    r = redis.Redis()
+    r.cms().initbydim("dim", 1000, 5)
     r.cms().incrby("dim", ["foo"], [5])
     r.cms().info("dim")
 
@@ -39,8 +42,9 @@ These are the commands for interacting with the `RedisBloom module <https://redi
 .. code-block:: python
 
     import redis
-    r = redis.topk().reserve("mytopk", 3, 50, 4, 0.9)
-    info = r.topk().info("mytopk)
+    r = redis.Redis()
+    r.topk().reserve("mytopk", 3, 50, 4, 0.9)
+    r.topk().info("mytopk)
 
 .. automodule:: redis.commands.bf.commands
     :members: BFCommands, CFCommands, CMSCommands, TOPKCommands
@@ -91,7 +95,7 @@ These are the commands for interacting with the `RedisJSON module <https://redis
 
     import redis
     r = redis.Redis()
-    r.json().set("mykey", ".", {"hello": "world", "i am": ["a", "json", "object!"]}
+    r.json().set("mykey", ".", {"hello": "world", "i am": ["a", "json", "object!"]})
 
 Examples of how to combine search and json can be found `here <examples/search_json_examples.html>`_.
 
@@ -103,7 +107,8 @@ Examples of how to combine search and json can be found `here <examples/search_j
 RediSearch Commands
 *******************
 
-These are the commands for interacting with the `RediSearch module <https://redisearch.io>`_. Below is a brief example, as well as documentation on the commands themselves.
+These are the commands for interacting with the `RediSearch module <https://redisearch.io>`_. Below is a brief example, as well as documentation on the commands themselves. In the example
+below, an index named *my_index* is being created. When an index name is not specified, an index named *idx* is created.
 
 **Create a search index, and display its information**
 
@@ -113,8 +118,9 @@ These are the commands for interacting with the `RediSearch module <https://redi
     from redis.commands.search.field import TextField
 
     r = redis.Redis()
-    r.ft().create_index(TextField("play", weight=5.0), TextField("ball"))
-    print(r.ft().info())
+    index_name = "my_index"
+    r.ft(index_name).create_index([TextField("play", weight=5.0), TextField("ball")])
+    print(r.ft(index_name).info())
 
 
 .. automodule:: redis.commands.search.commands
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 23ddc94..edecdff 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,7 +1,7 @@
-sphinx<5
+sphinx>=5.0,<7.0
 docutils<0.18
-sphinx-rtd-theme
 nbsphinx
 sphinx_gallery
 ipython
 sphinx-autodoc-typehints
+furo
diff --git a/docs/retry.rst b/docs/retry.rst
index 2b4f22c..acf198e 100644
--- a/docs/retry.rst
+++ b/docs/retry.rst
@@ -2,4 +2,69 @@ Retry Helpers
 #############
 
 .. automodule:: redis.retry
-    :members: 
\ No newline at end of file
+    :members:
+
+
+Retry in Redis Standalone
+**************************
+
+>>> from redis.backoff import ExponentialBackoff
+>>> from redis.retry import Retry
+>>> from redis.client import Redis
+>>> from redis.exceptions import (
+>>>    BusyLoadingError,
+>>>    ConnectionError,
+>>>    TimeoutError
+>>> )
+>>>
+>>> # Run 3 retries with exponential backoff strategy
+>>> retry = Retry(ExponentialBackoff(), 3)
+>>> # Redis client with retries on custom errors
+>>> r = Redis(host='localhost', port=6379, retry=retry, retry_on_error=[BusyLoadingError, ConnectionError, TimeoutError])
+>>> # Redis client with retries on TimeoutError only
+>>> r_only_timeout = Redis(host='localhost', port=6379, retry=retry, retry_on_timeout=True)
+
+As you can see from the example above, the Redis client supports 3 parameters to configure the retry behaviour:
+
+* ``retry``: :class:`~.Retry` instance with a :ref:`backoff-label` strategy and the max number of retries
+* ``retry_on_error``: list of :ref:`exceptions-label` to retry on
+* ``retry_on_timeout``: if ``True``, retry on :class:`~.TimeoutError` only
+
+If either ``retry_on_error`` or ``retry_on_timeout`` is passed and no ``retry`` is given,
+a ``Retry(NoBackoff(), 1)`` is used by default (meaning 1 retry right after the first failure).
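+
+As a small illustrative sketch (assuming a local server on the default port, and
+not taken from the upstream docs), the two clients below end up with the same
+retry behaviour:
+
+>>> from redis.backoff import NoBackoff
+>>> r_implicit = Redis(host='localhost', port=6379, retry_on_timeout=True)
+>>> r_explicit = Redis(host='localhost', port=6379, retry=Retry(NoBackoff(), 1), retry_on_timeout=True)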
+
+
+Retry in Redis Cluster
+**************************
+
+>>> from redis.backoff import ExponentialBackoff
+>>> from redis.retry import Retry
+>>> from redis.cluster import RedisCluster
+>>>
+>>> # Run 3 retries with exponential backoff strategy
+>>> retry = Retry(ExponentialBackoff(), 3)
+>>> # Redis Cluster client with retries
+>>> rc = RedisCluster(host='localhost', port=6379, retry=retry, cluster_error_retry_attempts=2)
+
+Retry behaviour in Redis Cluster is a little bit different from Standalone:
+
+* ``retry``: :class:`~.Retry` instance with a :ref:`backoff-label` strategy and the max number of retries, default value is ``Retry(NoBackoff(), 0)``
+* ``cluster_error_retry_attempts``: number of times to retry before raising an error when :class:`~.TimeoutError` or :class:`~.ConnectionError` or :class:`~.ClusterDownError` are encountered, default value is ``3``
+
+Let's consider the following example:
+
+>>> from redis.backoff import ExponentialBackoff
+>>> from redis.retry import Retry
+>>> from redis.cluster import RedisCluster
+>>>
+>>> rc = RedisCluster(host='localhost', port=6379, retry=Retry(ExponentialBackoff(), 6), cluster_error_retry_attempts=1)
+>>> rc.set('foo', 'bar')
+
+#. the client library calculates the hash slot for key 'foo'.
+#. given the hash slot, it then determines which node to connect to, in order to execute the command.
+#. during the connection, a :class:`~.ConnectionError` is raised.
+#. because we set ``retry=Retry(ExponentialBackoff(), 6)``, the client tries to reconnect to the node up to 6 times, with an exponential backoff between each attempt.
+#. even after 6 retries, the client is still unable to connect.
+#. because we set ``cluster_error_retry_attempts=1``, before giving up, the client starts a cluster update, removes the failed node from the startup nodes, and re-initializes the cluster.
+#. after the cluster has been re-initialized, it starts a new cycle of retries, up to 6 retries, with an exponential backoff.
+#. if the client can connect, we're good. Otherwise, the exception is finally raised to the caller, because we've run out of attempts.
\ No newline at end of file
diff --git a/redis/__init__.py b/redis/__init__.py
index b7560a6..b8850ad 100644
--- a/redis/__init__.py
+++ b/redis/__init__.py
@@ -1,5 +1,6 @@
 import sys
 
+from redis.backoff import default_backoff
 from redis.client import Redis, StrictRedis
 from redis.cluster import RedisCluster
 from redis.connection import (
@@ -9,6 +10,7 @@ from redis.connection import (
     SSLConnection,
     UnixDomainSocketConnection,
 )
+from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
 from redis.exceptions import (
     AuthenticationError,
     AuthenticationWrongNumberOfArgsError,
@@ -51,7 +53,10 @@ except metadata.PackageNotFoundError:
     __version__ = "99.99.99"
 
 
-VERSION = tuple(map(int_or_str, __version__.split(".")))
+try:
+    VERSION = tuple(map(int_or_str, __version__.split(".")))
+except AttributeError:
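+    # __version__ is not a string we can parse; fall back to a placeholder version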
+    VERSION = tuple([99, 99, 99])
 
 __all__ = [
     "AuthenticationError",
@@ -62,8 +67,10 @@ __all__ = [
     "Connection",
     "ConnectionError",
     "ConnectionPool",
+    "CredentialProvider",
     "DataError",
     "from_url",
+    "default_backoff",
     "InvalidResponse",
     "PubSubError",
     "ReadOnlyError",
@@ -76,6 +83,7 @@ __all__ = [
     "SentinelManagedConnection",
     "SentinelManagedSSLConnection",
     "SSLConnection",
+    "UsernamePasswordCredentialProvider",
     "StrictRedis",
     "TimeoutError",
     "UnixDomainSocketConnection",
diff --git a/redis/asyncio/__init__.py b/redis/asyncio/__init__.py
index 598791a..bf90dde 100644
--- a/redis/asyncio/__init__.py
+++ b/redis/asyncio/__init__.py
@@ -15,6 +15,7 @@ from redis.asyncio.sentinel import (
     SentinelManagedSSLConnection,
 )
 from redis.asyncio.utils import from_url
+from redis.backoff import default_backoff
 from redis.exceptions import (
     AuthenticationError,
     AuthenticationWrongNumberOfArgsError,
@@ -43,6 +44,7 @@ __all__ = [
     "ConnectionPool",
     "DataError",
     "from_url",
+    "default_backoff",
     "InvalidResponse",
     "PubSubError",
     "ReadOnlyError",
diff --git a/redis/asyncio/client.py b/redis/asyncio/client.py
index 3d59016..3fc7fad 100644
--- a/redis/asyncio/client.py
+++ b/redis/asyncio/client.py
@@ -46,6 +46,7 @@ from redis.commands import (
     list_or_args,
 )
 from redis.compat import Protocol, TypedDict
+from redis.credentials import CredentialProvider
 from redis.exceptions import (
     ConnectionError,
     ExecAbortError,
@@ -106,7 +107,7 @@ class Redis(
 
             redis://[[username]:[password]]@localhost:6379/0
             rediss://[[username]:[password]]@localhost:6379/0
-            unix://[[username]:[password]]@/path/to/socket.sock?db=0
+            unix://[username@]/path/to/socket.sock?db=0[&password=password]
 
         Three URL schemes are supported:
 
@@ -174,6 +175,7 @@ class Redis(
         retry: Optional[Retry] = None,
         auto_close_connection_pool: bool = True,
         redis_connect_func=None,
+        credential_provider: Optional[CredentialProvider] = None,
     ):
         """
         Initialize a new Redis client.
@@ -199,6 +201,7 @@ class Redis(
                 "db": db,
                 "username": username,
                 "password": password,
+                "credential_provider": credential_provider,
                 "socket_timeout": socket_timeout,
                 "encoding": encoding,
                 "encoding_errors": encoding_errors,
@@ -250,6 +253,11 @@ class Redis(
 
         self.response_callbacks = CaseInsensitiveDict(self.__class__.RESPONSE_CALLBACKS)
 
+        # If using a single connection client, we need to lock creation-of and use-of
+        # the client in order to avoid race conditions such as using asyncio.gather
+        # on a set of redis commands
+        self._single_conn_lock = asyncio.Lock()
+
     def __repr__(self):
         return f"{self.__class__.__name__}<{self.connection_pool!r}>"
 
@@ -257,8 +265,10 @@ class Redis(
         return self.initialize().__await__()
 
     async def initialize(self: _RedisT) -> _RedisT:
-        if self.single_connection_client and self.connection is None:
-            self.connection = await self.connection_pool.get_connection("_")
+        if self.single_connection_client:
+            async with self._single_conn_lock:
+                if self.connection is None:
+                    self.connection = await self.connection_pool.get_connection("_")
         return self
 
     def set_response_callback(self, command: str, callback: ResponseCallbackT):
@@ -273,6 +283,13 @@ class Redis(
         """Get the connection's key-word arguments"""
         return self.connection_pool.connection_kwargs
 
+    def get_retry(self) -> Optional["Retry"]:
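+        """Return the client's retry configuration from its connection kwargs, if set."""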
+        return self.get_connection_kwargs().get("retry")
+
+    def set_retry(self, retry: "Retry") -> None:
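+        """Store ``retry`` in the connection kwargs and propagate it to the connection pool."""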
+        self.get_connection_kwargs().update({"retry": retry})
+        self.connection_pool.set_retry(retry)
+
     def load_external_module(self, funcname, func):
         """
         This function can be used to add externally defined redis modules,
@@ -344,6 +361,7 @@ class Redis(
         name: KeyT,
         timeout: Optional[float] = None,
         sleep: float = 0.1,
+        blocking: bool = True,
         blocking_timeout: Optional[float] = None,
         lock_class: Optional[Type[Lock]] = None,
         thread_local: bool = True,
@@ -359,6 +377,12 @@ class Redis(
         when the lock is in blocking mode and another client is currently
         holding the lock.
 
+        ``blocking`` indicates whether calling ``acquire`` should block until
+        the lock has been acquired or to fail immediately, causing ``acquire``
+        to return False and the lock not being acquired. Defaults to True.
+        Note this value can be overridden by passing a ``blocking``
+        argument to ``acquire``.
+
         ``blocking_timeout`` indicates the maximum amount of time in seconds to
         spend trying to acquire the lock. A value of ``None`` indicates
         continue trying forever. ``blocking_timeout`` can be specified as a
@@ -401,6 +425,7 @@ class Redis(
             name,
             timeout=timeout,
             sleep=sleep,
+            blocking=blocking,
             blocking_timeout=blocking_timeout,
             thread_local=thread_local,
         )
@@ -435,7 +460,7 @@ class Redis(
                 f"Unclosed client session {self!r}", ResourceWarning, source=self
             )
             context = {"client": self, "message": self._DEL_MESSAGE}
-            asyncio.get_event_loop().call_exception_handler(context)
+            asyncio.get_running_loop().call_exception_handler(context)
 
     async def close(self, close_connection_pool: Optional[bool] = None) -> None:
         """
@@ -465,8 +490,8 @@ class Redis(
     async def _disconnect_raise(self, conn: Connection, error: Exception):
         """
         Close the connection and raise an exception
-        if retry_on_timeout is not set or the error
-        is not a TimeoutError
+        if retry_on_error is not set or the error
+        is not one of the specified error types
         """
         await conn.disconnect()
         if (
@@ -483,6 +508,8 @@ class Redis(
         command_name = args[0]
         conn = self.connection or await pool.get_connection(command_name, **options)
 
+        if self.single_connection_client:
+            await self._single_conn_lock.acquire()
         try:
             return await conn.retry.call_with_retry(
                 lambda: self._send_command_parse_response(
@@ -491,6 +518,8 @@ class Redis(
                 lambda error: self._disconnect_raise(conn, error),
             )
         finally:
+            if self.single_connection_client:
+                self._single_conn_lock.release()
             if not self.connection:
                 await pool.release(conn)
 
@@ -501,12 +530,17 @@ class Redis(
         try:
             if NEVER_DECODE in options:
                 response = await connection.read_response(disable_decoding=True)
+                options.pop(NEVER_DECODE)
             else:
                 response = await connection.read_response()
         except ResponseError:
             if EMPTY_RESPONSE in options:
                 return options[EMPTY_RESPONSE]
             raise
+
+        if EMPTY_RESPONSE in options:
+            options.pop(EMPTY_RESPONSE)
+
         if command_name in self.response_callbacks:
             # Mypy bug: https://github.com/python/mypy/issues/10977
             command_name = cast(str, command_name)
@@ -754,9 +788,11 @@ class PubSub:
 
         await self.check_health()
 
-        if not block and not await self._execute(conn, conn.can_read, timeout=timeout):
-            return None
-        response = await self._execute(conn, conn.read_response)
+        if not conn.is_connected:
+            await conn.connect()
+
+        read_timeout = None if block else timeout
+        response = await self._execute(conn, conn.read_response, timeout=read_timeout)
 
         if conn.health_check_interval and response == self.health_check_response:
             # ignore the health check message as user might not expect it
@@ -773,7 +809,7 @@ class PubSub:
 
         if (
             conn.health_check_interval
-            and asyncio.get_event_loop().time() > conn.next_health_check
+            and asyncio.get_running_loop().time() > conn.next_health_check
         ):
             await conn.send_command(
                 "PING", self.HEALTH_CHECK_MESSAGE, check_health=False
@@ -868,16 +904,16 @@ class PubSub:
                 yield response
 
     async def get_message(
-        self, ignore_subscribe_messages: bool = False, timeout: float = 0.0
+        self, ignore_subscribe_messages: bool = False, timeout: Optional[float] = 0.0
     ):
         """
         Get the next message if one is available, otherwise None.
 
         If timeout is specified, the system will wait for `timeout` seconds
         before returning. Timeout should be specified as a floating point
-        number.
+        number or None to wait indefinitely.
         """
-        response = await self.parse_response(block=False, timeout=timeout)
+        response = await self.parse_response(block=(timeout is None), timeout=timeout)
         if response:
             return await self.handle_message(response, ignore_subscribe_messages)
         return None
@@ -1107,7 +1143,7 @@ class Pipeline(Redis):  # lgtm [py/init-calls-subclass]
             raise RedisError("Cannot issue nested calls to MULTI")
         if self.command_stack:
             raise RedisError(
-                "Commands without an initial WATCH have already " "been issued"
+                "Commands without an initial WATCH have already been issued"
             )
         self.explicit_transaction = True
 
@@ -1132,7 +1168,7 @@ class Pipeline(Redis):  # lgtm [py/init-calls-subclass]
         if self.watching:
             await self.reset()
             raise WatchError(
-                "A ConnectionError occurred on while " "watching one or more keys"
+                "A ConnectionError occurred on while watching one or more keys"
             )
         # if retry_on_timeout is not set, or the error is not
         # a TimeoutError, raise it
@@ -1320,7 +1356,7 @@ class Pipeline(Redis):  # lgtm [py/init-calls-subclass]
         # indicates the user should retry this transaction.
         if self.watching:
             raise WatchError(
-                "A ConnectionError occurred on while " "watching one or more keys"
+                "A ConnectionError occurred on while watching one or more keys"
             )
         # if retry_on_timeout is not set, or the error is not
         # a TimeoutError, raise it
diff --git a/redis/asyncio/cluster.py b/redis/asyncio/cluster.py
index 2894004..5a2dffd 100644
--- a/redis/asyncio/cluster.py
+++ b/redis/asyncio/cluster.py
@@ -17,8 +17,17 @@ from typing import (
 )
 
 from redis.asyncio.client import ResponseCallbackT
-from redis.asyncio.connection import Connection, DefaultParser, Encoder, parse_url
+from redis.asyncio.connection import (
+    Connection,
+    DefaultParser,
+    Encoder,
+    SSLConnection,
+    parse_url,
+)
+from redis.asyncio.lock import Lock
 from redis.asyncio.parser import CommandsParser
+from redis.asyncio.retry import Retry
+from redis.backoff import default_backoff
 from redis.client import EMPTY_RESPONSE, NEVER_DECODE, AbstractRedis
 from redis.cluster import (
     PIPELINE_BLOCKED_COMMANDS,
@@ -33,6 +42,7 @@ from redis.cluster import (
 )
 from redis.commands import READ_COMMANDS, AsyncRedisClusterCommands
 from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
+from redis.credentials import CredentialProvider
 from redis.exceptions import (
     AskError,
     BusyLoadingError,
@@ -42,6 +52,7 @@ from redis.exceptions import (
     ConnectionError,
     DataError,
     MasterDownError,
+    MaxConnectionsError,
     MovedError,
     RedisClusterException,
     ResponseError,
@@ -56,44 +67,17 @@ TargetNodesT = TypeVar(
     "TargetNodesT", str, "ClusterNode", List["ClusterNode"], Dict[Any, "ClusterNode"]
 )
 
-CONNECTION_ALLOWED_KEYS = (
-    "client_name",
-    "db",
-    "decode_responses",
-    "encoder_class",
-    "encoding",
-    "encoding_errors",
-    "health_check_interval",
-    "parser_class",
-    "password",
-    "redis_connect_func",
-    "retry",
-    "retry_on_timeout",
-    "socket_connect_timeout",
-    "socket_keepalive",
-    "socket_keepalive_options",
-    "socket_read_size",
-    "socket_timeout",
-    "socket_type",
-    "username",
-)
-
-
-def cleanup_kwargs(**kwargs: Any) -> Dict[str, Any]:
-    """Remove unsupported or disabled keys from kwargs."""
-    return {k: v for k, v in kwargs.items() if k in CONNECTION_ALLOWED_KEYS}
-
 
 class ClusterParser(DefaultParser):
     EXCEPTION_CLASSES = dict_merge(
         DefaultParser.EXCEPTION_CLASSES,
         {
             "ASK": AskError,
-            "TRYAGAIN": TryAgainError,
-            "MOVED": MovedError,
             "CLUSTERDOWN": ClusterDownError,
             "CROSSSLOT": ClusterCrossSlotError,
             "MASTERDOWN": MasterDownError,
+            "MOVED": MovedError,
+            "TRYAGAIN": TryAgainError,
         },
     )
 
@@ -104,7 +88,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
 
     Pass one of parameters:
 
-      - `url`
       - `host` & `port`
       - `startup_nodes`
 
@@ -128,19 +111,20 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         | Port used if **host** is provided
     :param startup_nodes:
         | :class:`~.ClusterNode` to used as a startup node
-    :param cluster_error_retry_attempts:
-        | Retry command execution attempts when encountering :class:`~.ClusterDownError`
-          or :class:`~.ConnectionError`
     :param require_full_coverage:
-        | When set to ``False``: the client will not require a full coverage of the
-          slots. However, if not all slots are covered, and at least one node has
-          ``cluster-require-full-coverage`` set to ``yes``, the server will throw a
-          :class:`~.ClusterDownError` for some key-based commands.
+        | When set to ``False``: the client will not require a full coverage of
+          the slots. However, if not all slots are covered, and at least one node
+          has ``cluster-require-full-coverage`` set to ``yes``, the server will throw
+          a :class:`~.ClusterDownError` for some key-based commands.
         | When set to ``True``: all slots must be covered to construct the cluster
           client. If not all slots are covered, :class:`~.RedisClusterException` will be
           thrown.
         | See:
           https://redis.io/docs/manual/scaling/#redis-cluster-configuration-parameters
+    :param read_from_replicas:
+        | Enable read from replicas in READONLY mode. You can read possibly stale data.
+          When set to true, read commands will be assigned between the primary and
+          its replicas in a round-robin manner.
     :param reinitialize_steps:
         | Specifies the number of MOVED errors that need to occur before reinitializing
           the whole cluster topology. If a MOVED error occurs and the cluster does not
@@ -149,23 +133,30 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
           To reinitialize the cluster on every MOVED error, set reinitialize_steps to 1.
           To avoid reinitializing the cluster on moved errors, set reinitialize_steps to
           0.
-    :param read_from_replicas:
-        | Enable read from replicas in READONLY mode. You can read possibly stale data.
-          When set to true, read commands will be assigned between the primary and
-          its replications in a Round-Robin manner.
-    :param url:
-        | See :meth:`.from_url`
-    :param kwargs:
-        | Extra arguments that will be passed to the
-          :class:`~redis.asyncio.connection.Connection` instances when created
+    :param cluster_error_retry_attempts:
+        | Number of times to retry before raising an error when :class:`~.TimeoutError`,
+          :class:`~.ConnectionError`, or :class:`~.ClusterDownError` is encountered
+    :param connection_error_retry_attempts:
+        | Number of times to retry before reinitializing when :class:`~.TimeoutError`
+          or :class:`~.ConnectionError` is encountered.
+          The default backoff strategy is used if no Retry object is passed (see
+          default_backoff in backoff.py). To change it, pass a custom Retry object
+          using the "retry" keyword.
+    :param max_connections:
+        | Maximum number of connections per node. If there are no free connections and
+          the maximum number of connections has already been created, a
+          :class:`~.MaxConnectionsError` is raised. This error may be retried as defined
+          by :attr:`connection_error_retry_attempts`
+
+    | The rest of the arguments will be passed to the
+      :class:`~redis.asyncio.connection.Connection` instances when created
 
     :raises RedisClusterException:
-        if any arguments are invalid. Eg:
+        if any arguments are invalid or unknown. Eg:
 
-        - db kwarg
-        - db != 0 in url
-        - unix socket connection
-        - none of host & url & startup_nodes were provided
+        - `db` != 0 or None
+        - `path` argument for unix socket connection
+        - none of the `host`/`port` & `startup_nodes` were provided
 
     """
 
@@ -178,7 +169,6 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
 
             redis://[[username]:[password]]@localhost:6379/0
             rediss://[[username]:[password]]@localhost:6379/0
-            unix://[[username]:[password]]@/path/to/socket.sock?db=0
 
         Three URL schemes are supported:
 
@@ -186,32 +176,22 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
           <https://www.iana.org/assignments/uri-schemes/prov/redis>
         - `rediss://` creates a SSL wrapped TCP socket connection. See more at:
           <https://www.iana.org/assignments/uri-schemes/prov/rediss>
-        - ``unix://``: creates a Unix Domain Socket connection.
-
-        The username, password, hostname, path and all querystring values
-        are passed through urllib.parse.unquote in order to replace any
-        percent-encoded values with their corresponding characters.
-
-        There are several ways to specify a database number. The first value
-        found will be used:
 
-            1. A ``db`` querystring option, e.g. redis://localhost?db=0
-            2. If using the redis:// or rediss:// schemes, the path argument
-               of the url, e.g. redis://localhost/0
-            3. A ``db`` keyword argument to this function.
-
-        If none of these options are specified, the default db=0 is used.
-
-        All querystring options are cast to their appropriate Python types.
-        Boolean arguments can be specified with string values "True"/"False"
-        or "Yes"/"No". Values that cannot be properly cast cause a
-        ``ValueError`` to be raised. Once parsed, the querystring arguments and
-        keyword arguments are passed to :class:`~redis.asyncio.connection.Connection`
-        when created. In the case of conflicting arguments, querystring
-        arguments always win.
+        The username, password, hostname, path and all querystring values are passed
+        through ``urllib.parse.unquote`` in order to replace any percent-encoded values
+        with their corresponding characters.
 
+        All querystring options are cast to their appropriate Python types. Boolean
+        arguments can be specified with string values "True"/"False" or "Yes"/"No".
+        Values that cannot be properly cast cause a ``ValueError`` to be raised. Once
+        parsed, the querystring arguments and keyword arguments are passed to
+        :class:`~redis.asyncio.connection.Connection` when created.
+        In the case of conflicting arguments, querystring arguments are used.
         """
-        return cls(url=url, **kwargs)
+        kwargs.update(parse_url(url))
+        if kwargs.pop("connection_class", None) is SSLConnection:
+            kwargs["ssl"] = True
+        return cls(**kwargs)
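# Illustrative sketch (not part of the upstream diff): from_url() now feeds the
# parsed URL straight into the constructor and maps an SSL connection class to
# the ``ssl=True`` keyword. The URLs below are placeholders.
from redis.asyncio.cluster import RedisCluster

rc_plain = RedisCluster.from_url("redis://localhost:6379/0")
rc_tls = RedisCluster.from_url("rediss://localhost:6379/0")  # becomes ssl=True internally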
 
     __slots__ = (
         "_initialize",
@@ -219,6 +199,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         "cluster_error_retry_attempts",
         "command_flags",
         "commands_parser",
+        "connection_error_retry_attempts",
         "connection_kwargs",
         "encoder",
         "node_flags",
@@ -233,93 +214,155 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
     def __init__(
         self,
         host: Optional[str] = None,
-        port: int = 6379,
+        port: Union[str, int] = 6379,
+        # Cluster related kwargs
         startup_nodes: Optional[List["ClusterNode"]] = None,
-        require_full_coverage: bool = False,
+        require_full_coverage: bool = True,
         read_from_replicas: bool = False,
+        reinitialize_steps: int = 5,
         cluster_error_retry_attempts: int = 3,
-        reinitialize_steps: int = 10,
-        url: Optional[str] = None,
-        **kwargs: Any,
+        connection_error_retry_attempts: int = 3,
+        max_connections: int = 2**31,
+        # Client related kwargs
+        db: Union[str, int] = 0,
+        path: Optional[str] = None,
+        credential_provider: Optional[CredentialProvider] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+        client_name: Optional[str] = None,
+        # Encoding related kwargs
+        encoding: str = "utf-8",
+        encoding_errors: str = "strict",
+        decode_responses: bool = False,
+        # Connection related kwargs
+        health_check_interval: float = 0,
+        socket_connect_timeout: Optional[float] = None,
+        socket_keepalive: bool = False,
+        socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
+        socket_timeout: Optional[float] = None,
+        retry: Optional["Retry"] = None,
+        retry_on_error: Optional[List[Exception]] = None,
+        # SSL related kwargs
+        ssl: bool = False,
+        ssl_ca_certs: Optional[str] = None,
+        ssl_ca_data: Optional[str] = None,
+        ssl_cert_reqs: str = "required",
+        ssl_certfile: Optional[str] = None,
+        ssl_check_hostname: bool = False,
+        ssl_keyfile: Optional[str] = None,
     ) -> None:
-        if not startup_nodes:
-            startup_nodes = []
+        if db:
+            raise RedisClusterException(
+                "Argument 'db' must be 0 or None in cluster mode"
+            )
 
-        if "db" in kwargs:
-            # Argument 'db' is not possible to use in cluster mode
+        if path:
             raise RedisClusterException(
-                "Argument 'db' is not possible to use in cluster mode"
+                "Unix domain socket is not supported in cluster mode"
             )
 
-        # Get the startup node(s)
-        if url:
-            url_options = parse_url(url)
-            if "path" in url_options:
-                raise RedisClusterException(
-                    "RedisCluster does not currently support Unix Domain "
-                    "Socket connections"
-                )
-            if "db" in url_options and url_options["db"] != 0:
-                # Argument 'db' is not possible to use in cluster mode
-                raise RedisClusterException(
-                    "A ``db`` querystring option can only be 0 in cluster mode"
-                )
-            kwargs.update(url_options)
-            host = kwargs.get("host")
-            port = kwargs.get("port", port)
-        elif (not host or not port) and not startup_nodes:
-            # No startup node was provided
+        if (not host or not port) and not startup_nodes:
             raise RedisClusterException(
-                "RedisCluster requires at least one node to discover the "
-                "cluster. Please provide one of the followings:\n"
-                "1. host and port, for example:\n"
-                " RedisCluster(host='localhost', port=6379)\n"
-                "2. list of startup nodes, for example:\n"
-                " RedisCluster(startup_nodes=[ClusterNode('localhost', 6379),"
-                " ClusterNode('localhost', 6378)])"
+                "RedisCluster requires at least one node to discover the cluster.\n"
+                "Please provide one of the following or use RedisCluster.from_url:\n"
+                '   - host and port: RedisCluster(host="localhost", port=6379)\n'
+                "   - startup_nodes: RedisCluster(startup_nodes=["
+                'ClusterNode("localhost", 6379), ClusterNode("localhost", 6380)])'
+            )
+
+        kwargs: Dict[str, Any] = {
+            "max_connections": max_connections,
+            "connection_class": Connection,
+            "parser_class": ClusterParser,
+            # Client related kwargs
+            "credential_provider": credential_provider,
+            "username": username,
+            "password": password,
+            "client_name": client_name,
+            # Encoding related kwargs
+            "encoding": encoding,
+            "encoding_errors": encoding_errors,
+            "decode_responses": decode_responses,
+            # Connection related kwargs
+            "health_check_interval": health_check_interval,
+            "socket_connect_timeout": socket_connect_timeout,
+            "socket_keepalive": socket_keepalive,
+            "socket_keepalive_options": socket_keepalive_options,
+            "socket_timeout": socket_timeout,
+            "retry": retry,
+        }
+
+        if ssl:
+            # SSL related kwargs
+            kwargs.update(
+                {
+                    "connection_class": SSLConnection,
+                    "ssl_ca_certs": ssl_ca_certs,
+                    "ssl_ca_data": ssl_ca_data,
+                    "ssl_cert_reqs": ssl_cert_reqs,
+                    "ssl_certfile": ssl_certfile,
+                    "ssl_check_hostname": ssl_check_hostname,
+                    "ssl_keyfile": ssl_keyfile,
+                }
+            )
+
+        if read_from_replicas:
+            # Call our on_connect function to configure READONLY mode
+            kwargs["redis_connect_func"] = self.on_connect
+
+        self.retry = retry
+        if retry or retry_on_error or connection_error_retry_attempts > 0:
+            # Set a retry object for all cluster nodes
+            self.retry = retry or Retry(
+                default_backoff(), connection_error_retry_attempts
             )
+            if not retry_on_error:
+                # Default errors for retrying
+                retry_on_error = [ConnectionError, TimeoutError]
+            self.retry.update_supported_errors(retry_on_error)
+            kwargs.update({"retry": self.retry})
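# Illustrative sketch (not part of the upstream diff): overriding the default
# backoff by passing a custom Retry object; connection_error_retry_attempts is
# only consulted when no Retry is supplied. Values shown are placeholders.
from redis.asyncio.cluster import RedisCluster
from redis.asyncio.retry import Retry
from redis.backoff import ExponentialBackoff
from redis.exceptions import ConnectionError, TimeoutError

rc = RedisCluster(
    host="localhost",
    port=7000,
    retry=Retry(ExponentialBackoff(cap=1.0, base=0.05), retries=5),
    retry_on_error=[ConnectionError, TimeoutError],
)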
+
+        kwargs["response_callbacks"] = self.__class__.RESPONSE_CALLBACKS.copy()
+        self.connection_kwargs = kwargs
 
-        # Update the connection arguments
-        # Whenever a new connection is established, RedisCluster's on_connect
-        # method should be run
-        kwargs["redis_connect_func"] = self.on_connect
-        self.connection_kwargs = kwargs = cleanup_kwargs(**kwargs)
-        self.response_callbacks = kwargs[
-            "response_callbacks"
-        ] = self.__class__.RESPONSE_CALLBACKS.copy()
+        if startup_nodes:
+            passed_nodes = []
+            for node in startup_nodes:
+                passed_nodes.append(
+                    ClusterNode(node.host, node.port, **self.connection_kwargs)
+                )
+            startup_nodes = passed_nodes
+        else:
+            startup_nodes = []
         if host and port:
             startup_nodes.append(ClusterNode(host, port, **self.connection_kwargs))
 
-        self.nodes_manager = NodesManager(
-            startup_nodes=startup_nodes,
-            require_full_coverage=require_full_coverage,
-            **self.connection_kwargs,
-        )
-        self.encoder = Encoder(
-            kwargs.get("encoding", "utf-8"),
-            kwargs.get("encoding_errors", "strict"),
-            kwargs.get("decode_responses", False),
-        )
-        self.cluster_error_retry_attempts = cluster_error_retry_attempts
+        self.nodes_manager = NodesManager(startup_nodes, require_full_coverage, kwargs)
+        self.encoder = Encoder(encoding, encoding_errors, decode_responses)
         self.read_from_replicas = read_from_replicas
         self.reinitialize_steps = reinitialize_steps
-
+        self.cluster_error_retry_attempts = cluster_error_retry_attempts
+        self.connection_error_retry_attempts = connection_error_retry_attempts
         self.reinitialize_counter = 0
         self.commands_parser = CommandsParser()
         self.node_flags = self.__class__.NODE_FLAGS.copy()
         self.command_flags = self.__class__.COMMAND_FLAGS.copy()
+        self.response_callbacks = kwargs["response_callbacks"]
         self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy()
         self.result_callbacks[
             "CLUSTER SLOTS"
         ] = lambda cmd, res, **kwargs: parse_cluster_slots(
             list(res.values())[0], **kwargs
         )
+
         self._initialize = True
-        self._lock = asyncio.Lock()
+        self._lock: Optional[asyncio.Lock] = None
 
     async def initialize(self) -> "RedisCluster":
         """Get all nodes from startup nodes & creates connections if not initialized."""
         if self._initialize:
+            if not self._lock:
+                self._lock = asyncio.Lock()
             async with self._lock:
                 if self._initialize:
                     try:
@@ -337,6 +380,8 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
     async def close(self) -> None:
         """Close all connections & client if initialized."""
         if not self._initialize:
+            if not self._lock:
+                self._lock = asyncio.Lock()
             async with self._lock:
                 if not self._initialize:
                     self._initialize = True
@@ -359,24 +404,21 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
             warnings.warn(f"{self._DEL_MESSAGE} {self!r}", ResourceWarning, source=self)
             try:
                 context = {"client": self, "message": self._DEL_MESSAGE}
-                # TODO: Change to get_running_loop() when dropping support for py3.6
-                asyncio.get_event_loop().call_exception_handler(context)
+                asyncio.get_running_loop().call_exception_handler(context)
             except RuntimeError:
                 ...
 
     async def on_connect(self, connection: Connection) -> None:
-        connection.set_parser(ClusterParser)
         await connection.on_connect()
 
-        if self.read_from_replicas:
-            # Sending READONLY command to server to configure connection as
-            # readonly. Since each cluster node may change its server type due
-            # to a failover, we should establish a READONLY connection
-            # regardless of the server type. If this is a primary connection,
-            # READONLY would not affect executing write commands.
-            await connection.send_command("READONLY")
-            if str_if_bytes(await connection.read_response_without_lock()) != "OK":
-                raise ConnectionError("READONLY command failed")
+        # Sending READONLY command to server to configure connection as
+        # readonly. Since each cluster node may change its server type due
+        # to a failover, we should establish a READONLY connection
+        # regardless of the server type. If this is a primary connection,
+        # READONLY would not affect executing write commands.
+        await connection.send_command("READONLY")
+        if str_if_bytes(await connection.read_response()) != "OK":
+            raise ConnectionError("READONLY command failed")
 
     def get_nodes(self) -> List["ClusterNode"]:
         """Get all nodes of the cluster."""
@@ -436,12 +478,12 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         slot_cache = self.nodes_manager.slots_cache.get(slot)
         if not slot_cache:
             raise SlotNotCoveredError(f'Slot "{slot}" is not covered by the cluster.')
-        if replica and len(self.nodes_manager.slots_cache[slot]) < 2:
-            return None
-        elif replica:
+
+        if replica:
+            if len(self.nodes_manager.slots_cache[slot]) < 2:
+                return None
             node_idx = 1
         else:
-            # primary
             node_idx = 0
 
         return slot_cache[node_idx]
@@ -462,6 +504,16 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         """Get the kwargs passed to :class:`~redis.asyncio.connection.Connection`."""
         return self.connection_kwargs
 
+    def get_retry(self) -> Optional["Retry"]:
+        return self.retry
+
+    def set_retry(self, retry: "Retry") -> None:
+        self.retry = retry
+        for node in self.get_nodes():
+            node.connection_kwargs.update({"retry": retry})
+            for conn in node._connections:
+                conn.retry = retry
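# Illustrative sketch (not part of the upstream diff): the new get_retry() /
# set_retry() accessors swap the retry policy on an existing client and push it
# down to every node's connection kwargs. Host/port are placeholders.
from redis.asyncio.cluster import RedisCluster
from redis.asyncio.retry import Retry
from redis.backoff import ConstantBackoff

rc = RedisCluster(host="localhost", port=7000)
rc.set_retry(Retry(ConstantBackoff(0.1), 3))
print(rc.get_retry())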
+
     def set_response_callback(self, command: str, callback: ResponseCallbackT) -> None:
         """Set a custom response callback."""
         self.response_callbacks[command] = callback
@@ -469,6 +521,8 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
     async def _determine_nodes(
         self, command: str, *args: Any, node_flag: Optional[str] = None
     ) -> List["ClusterNode"]:
+        # Determine which nodes the command should be executed on.
+        # Returns a list of target nodes.
         if not node_flag:
             # get the nodes group for this command if it was predefined
             node_flag = self.command_flags.get(command)
@@ -599,11 +653,19 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
         if passed_targets and not self._is_node_flag(passed_targets):
             target_nodes = self._parse_target_nodes(passed_targets)
             target_nodes_specified = True
-            retry_attempts = 1
+            retry_attempts = 0
 
-        for _ in range(retry_attempts):
+        # Add one for the first execution
+        execute_attempts = 1 + retry_attempts
+        for _ in range(execute_attempts):
             if self._initialize:
                 await self.initialize()
+                if (
+                    len(target_nodes) == 1
+                    and target_nodes[0] == self.get_default_node()
+                ):
+                    # Replace the default cluster node
+                    self.replace_default_node()
             try:
                 if not target_nodes_specified:
                     # Determine the nodes to execute the command on
@@ -627,7 +689,7 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
                     keys = [node.name for node in target_nodes]
                     values = await asyncio.gather(
                         *(
-                            asyncio.ensure_future(
+                            asyncio.create_task(
                                 self._execute_command(node, *args, **kwargs)
                             )
                             for node in target_nodes
@@ -638,26 +700,22 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
                             command, dict(zip(keys, values)), **kwargs
                         )
                     return dict(zip(keys, values))
-            except BaseException as e:
-                if type(e) in self.__class__.ERRORS_ALLOW_RETRY:
-                    # The nodes and slots cache were reinitialized.
+            except Exception as e:
+                if retry_attempts > 0 and type(e) in self.__class__.ERRORS_ALLOW_RETRY:
+                    # The nodes and slots cache should be reinitialized.
                     # Try again with the new cluster setup.
-                    exception = e
+                    retry_attempts -= 1
+                    continue
                 else:
-                    # All other errors should be raised.
+                    # raise the exception
                     raise e
 
-        # If it fails the configured number of times then raise exception back
-        # to caller of this method
-        raise exception
-
     async def _execute_command(
         self, target_node: "ClusterNode", *args: Union[KeyT, EncodableT], **kwargs: Any
     ) -> Any:
         asking = moved = False
         redirect_addr = None
         ttl = self.RedisClusterRequestTTL
-        connection_error_retry_counter = 0
 
         while ttl > 0:
             ttl -= 1
@@ -676,21 +734,25 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
                     moved = False
 
                 return await target_node.execute_command(*args, **kwargs)
-            except BusyLoadingError:
+            except (BusyLoadingError, MaxConnectionsError):
                 raise
             except (ConnectionError, TimeoutError):
-                # Give the node 0.25 seconds to get back up and retry again
-                # with same node and configuration. After 5 attempts then try
-                # to reinitialize the cluster and see if the nodes
-                # configuration has changed or not
-                connection_error_retry_counter += 1
-                if connection_error_retry_counter < 5:
-                    await asyncio.sleep(0.25)
-                else:
-                    # Hard force of reinitialize of the node/slots setup
-                    # and try again with the new setup
-                    await self.close()
-                    raise
+                # Connection retries are being handled in the node's
+                # Retry object.
+                # Remove the failed node from the startup nodes before we try
+                # to reinitialize the cluster
+                self.nodes_manager.startup_nodes.pop(target_node.name, None)
+                # Hard force of reinitialize of the node/slots setup
+                # and try again with the new setup
+                await self.close()
+                raise
+            except ClusterDownError:
+                # ClusterDownError can occur during a failover; to let the
+                # cluster self-heal, we reinitialize the cluster layout and
+                # retry executing the command
+                await self.close()
+                await asyncio.sleep(0.25)
+                raise
             except MovedError as e:
                 # First, we will try to patch the slots/nodes cache with the
                 # redirected node output and try again. If MovedError exceeds
@@ -711,19 +773,12 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
                 else:
                     self.nodes_manager._moved_exception = e
                 moved = True
-            except TryAgainError:
-                if ttl < self.RedisClusterRequestTTL / 2:
-                    await asyncio.sleep(0.05)
             except AskError as e:
                 redirect_addr = get_node_name(host=e.host, port=e.port)
                 asking = True
-            except ClusterDownError:
-                # ClusterDownError can occur during a failover and to get
-                # self-healed, we will try to reinitialize the cluster layout
-                # and retry executing the command
-                await asyncio.sleep(0.25)
-                await self.close()
-                raise
+            except TryAgainError:
+                if ttl < self.RedisClusterRequestTTL / 2:
+                    await asyncio.sleep(0.05)
 
         raise ClusterError("TTL exhausted.")
 
@@ -745,6 +800,80 @@ class RedisCluster(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommand
 
         return ClusterPipeline(self)
 
+    def lock(
+        self,
+        name: KeyT,
+        timeout: Optional[float] = None,
+        sleep: float = 0.1,
+        blocking: bool = True,
+        blocking_timeout: Optional[float] = None,
+        lock_class: Optional[Type[Lock]] = None,
+        thread_local: bool = True,
+    ) -> Lock:
+        """
+        Return a new Lock object using key ``name`` that mimics
+        the behavior of threading.Lock.
+
+        If specified, ``timeout`` indicates a maximum life for the lock.
+        By default, it will remain locked until release() is called.
+
+        ``sleep`` indicates the amount of time to sleep per loop iteration
+        when the lock is in blocking mode and another client is currently
+        holding the lock.
+
+        ``blocking`` indicates whether calling ``acquire`` should block until
+        the lock has been acquired or fail immediately, causing ``acquire``
+        to return False without acquiring the lock. Defaults to True.
+        Note this value can be overridden by passing a ``blocking``
+        argument to ``acquire``.
+
+        ``blocking_timeout`` indicates the maximum amount of time in seconds to
+        spend trying to acquire the lock. A value of ``None`` indicates
+        continue trying forever. ``blocking_timeout`` can be specified as a
+        float or integer, both representing the number of seconds to wait.
+
+        ``lock_class`` forces the specified lock implementation. Note that as
+        of redis-py 3.0, the only lock class we implement is ``Lock`` (which is
+        a Lua-based lock). So, it's unlikely you'll need this parameter, unless
+        you have created your own custom lock class.
+
+        ``thread_local`` indicates whether the lock token is placed in
+        thread-local storage. By default, the token is placed in thread local
+        storage so that a thread only sees its token, not a token set by
+        another thread. Consider the following timeline:
+
+            time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
+                     thread-1 sets the token to "abc"
+            time: 1, thread-2 blocks trying to acquire `my-lock` using the
+                     Lock instance.
+            time: 5, thread-1 has not yet completed. redis expires the lock
+                     key.
+            time: 5, thread-2 acquired `my-lock` now that it's available.
+                     thread-2 sets the token to "xyz"
+            time: 6, thread-1 finishes its work and calls release(). if the
+                     token is *not* stored in thread local storage, then
+                     thread-1 would see the token value as "xyz" and would be
+                     able to successfully release thread-2's lock.
+
+        In some use cases it's necessary to disable thread local storage. For
+        example, if you have code where one thread acquires a lock and passes
+        that lock instance to a worker thread to release later. If thread
+        local storage isn't disabled in this case, the worker thread won't see
+        the token set by the thread that acquired the lock. Our assumption
+        is that these cases aren't common and as such default to using
+        thread local storage."""
+        if lock_class is None:
+            lock_class = Lock
+        return lock_class(
+            self,
+            name,
+            timeout=timeout,
+            sleep=sleep,
+            blocking=blocking,
+            blocking_timeout=blocking_timeout,
+            thread_local=thread_local,
+        )
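# Illustrative sketch (not part of the upstream diff): using the new
# RedisCluster.lock() helper, which returns the asyncio Lock implementation.
# The key name and timings are placeholders.
import asyncio
from redis.asyncio.cluster import RedisCluster

async def lock_demo():
    rc = RedisCluster(host="localhost", port=7000)
    lock = rc.lock("locks:resource-42", timeout=10, blocking_timeout=5)
    if await lock.acquire():
        try:
            ...  # critical section while holding the lock
        finally:
            await lock.release()
    await rc.close()

asyncio.run(lock_demo())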
+
 
 class ClusterNode:
     """
@@ -755,7 +884,6 @@ class ClusterNode:
     """
 
     __slots__ = (
-        "_command_stack",
         "_connections",
         "_free",
         "connection_class",
@@ -771,8 +899,9 @@ class ClusterNode:
     def __init__(
         self,
         host: str,
-        port: int,
+        port: Union[str, int],
         server_type: Optional[str] = None,
+        *,
         max_connections: int = 2**31,
         connection_class: Type[Connection] = Connection,
         **connection_kwargs: Any,
@@ -790,13 +919,10 @@ class ClusterNode:
         self.max_connections = max_connections
         self.connection_class = connection_class
         self.connection_kwargs = connection_kwargs
-        self.response_callbacks = connection_kwargs.pop(
-            "response_callbacks", RedisCluster.RESPONSE_CALLBACKS
-        )
+        self.response_callbacks = connection_kwargs.pop("response_callbacks", {})
 
         self._connections: List[Connection] = []
         self._free: Deque[Connection] = collections.deque(maxlen=self.max_connections)
-        self._command_stack: List["PipelineCommand"] = []
 
     def __repr__(self) -> str:
         return (
@@ -817,8 +943,7 @@ class ClusterNode:
                 )
                 try:
                     context = {"client": self, "message": self._DEL_MESSAGE}
-                    # TODO: Change to get_running_loop() when dropping support for py3.6
-                    asyncio.get_event_loop().call_exception_handler(context)
+                    asyncio.get_running_loop().call_exception_handler(context)
                 except RuntimeError:
                     ...
                 break
@@ -826,7 +951,7 @@ class ClusterNode:
     async def disconnect(self) -> None:
         ret = await asyncio.gather(
             *(
-                asyncio.ensure_future(connection.disconnect())
+                asyncio.create_task(connection.disconnect())
                 for connection in self._connections
             ),
             return_exceptions=True,
@@ -836,37 +961,33 @@ class ClusterNode:
             raise exc
 
     def acquire_connection(self) -> Connection:
-        if self._free:
-            for _ in range(len(self._free)):
-                connection = self._free.popleft()
-                if connection.is_connected:
-                    return connection
-                self._free.append(connection)
-
+        try:
             return self._free.popleft()
+        except IndexError:
+            if len(self._connections) < self.max_connections:
+                connection = self.connection_class(**self.connection_kwargs)
+                self._connections.append(connection)
+                return connection
 
-        if len(self._connections) < self.max_connections:
-            connection = self.connection_class(**self.connection_kwargs)
-            self._connections.append(connection)
-            return connection
-
-        raise ConnectionError("Too many connections")
+            raise MaxConnectionsError()
 
     async def parse_response(
         self, connection: Connection, command: str, **kwargs: Any
     ) -> Any:
         try:
             if NEVER_DECODE in kwargs:
-                response = await connection.read_response_without_lock(
-                    disable_decoding=True
-                )
+                response = await connection.read_response(disable_decoding=True)
+                kwargs.pop(NEVER_DECODE)
             else:
-                response = await connection.read_response_without_lock()
+                response = await connection.read_response()
         except ResponseError:
             if EMPTY_RESPONSE in kwargs:
                 return kwargs[EMPTY_RESPONSE]
             raise
 
+        if EMPTY_RESPONSE in kwargs:
+            kwargs.pop(EMPTY_RESPONSE)
+
         # Return response
         if command in self.response_callbacks:
             return self.response_callbacks[command](response, **kwargs)
@@ -887,18 +1008,18 @@ class ClusterNode:
             # Release connection
             self._free.append(connection)
 
-    async def execute_pipeline(self) -> bool:
+    async def execute_pipeline(self, commands: List["PipelineCommand"]) -> bool:
         # Acquire connection
         connection = self.acquire_connection()
 
         # Execute command
         await connection.send_packed_command(
-            connection.pack_commands(cmd.args for cmd in self._command_stack), False
+            connection.pack_commands(cmd.args for cmd in commands), False
         )
 
         # Read responses
         ret = False
-        for cmd in self._command_stack:
+        for cmd in commands:
             try:
                 cmd.result = await self.parse_response(
                     connection, cmd.args[0], **cmd.kwargs
@@ -928,12 +1049,12 @@ class NodesManager:
     def __init__(
         self,
         startup_nodes: List["ClusterNode"],
-        require_full_coverage: bool = False,
-        **kwargs: Any,
+        require_full_coverage: bool,
+        connection_kwargs: Dict[str, Any],
     ) -> None:
         self.startup_nodes = {node.name: node for node in startup_nodes}
         self.require_full_coverage = require_full_coverage
-        self.connection_kwargs = kwargs
+        self.connection_kwargs = connection_kwargs
 
         self.default_node: "ClusterNode" = None
         self.nodes_cache: Dict[str, "ClusterNode"] = {}
@@ -970,13 +1091,13 @@ class NodesManager:
         if remove_old:
             for name in list(old.keys()):
                 if name not in new:
-                    asyncio.ensure_future(old.pop(name).disconnect())
+                    asyncio.create_task(old.pop(name).disconnect())
 
         for name, node in new.items():
             if name in old:
                 if old[name] is node:
                     continue
-                asyncio.ensure_future(old[name].disconnect())
+                asyncio.create_task(old[name].disconnect())
             old[name] = node
 
     def _update_moved_slots(self) -> None:
@@ -1052,6 +1173,7 @@ class NodesManager:
         disagreements = []
         startup_nodes_reachable = False
         fully_covered = False
+        exception = None
         for startup_node in self.startup_nodes.values():
             try:
                 # Make sure cluster mode is enabled on this node
@@ -1063,25 +1185,11 @@ class NodesManager:
                     )
                 cluster_slots = await startup_node.execute_command("CLUSTER SLOTS")
                 startup_nodes_reachable = True
-            except (ConnectionError, TimeoutError):
-                continue
-            except ResponseError as e:
-                # Isn't a cluster connection, so it won't parse these
-                # exceptions automatically
-                message = e.__str__()
-                if "CLUSTERDOWN" in message or "MASTERDOWN" in message:
-                    continue
-                else:
-                    raise RedisClusterException(
-                        'ERROR sending "cluster slots" command to redis '
-                        f"server: {startup_node}. error: {message}"
-                    )
             except Exception as e:
-                message = e.__str__()
-                raise RedisClusterException(
-                    'ERROR sending "cluster slots" command to redis '
-                    f"server {startup_node.name}. error: {message}"
-                )
+                # Try the next startup node.
+                # The exception is saved and raised only if we have no more nodes.
+                exception = e
+                continue
 
             # CLUSTER SLOTS command results in the following output:
             # [[slot_section[from_slot,to_slot,master,replica1,...,replicaN]]]
@@ -1162,9 +1270,9 @@ class NodesManager:
 
         if not startup_nodes_reachable:
             raise RedisClusterException(
-                "Redis Cluster cannot be connected. Please provide at least "
-                "one reachable node. "
-            )
+                f"Redis Cluster cannot be connected. Please provide at least "
+                f"one reachable node: {str(exception)}"
+            ) from exception
 
         # Check if the slots are not fully covered
         if not fully_covered and self.require_full_coverage:
@@ -1191,7 +1299,7 @@ class NodesManager:
         self.default_node = None
         await asyncio.gather(
             *(
-                asyncio.ensure_future(node.disconnect())
+                asyncio.create_task(node.disconnect())
                 for node in getattr(self, attr).values()
             )
         )
@@ -1329,7 +1437,7 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
                         await asyncio.sleep(0.25)
                     else:
                         # All other errors should be raised.
-                        raise e
+                        raise
 
             # If it fails the configured number of times then raise an exception
             raise exception
@@ -1362,15 +1470,16 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
                     )
             if len(target_nodes) > 1:
                 raise RedisClusterException(f"Too many targets for command {cmd.args}")
-
             node = target_nodes[0]
             if node.name not in nodes:
-                nodes[node.name] = node
-                node._command_stack = []
-            node._command_stack.append(cmd)
+                nodes[node.name] = (node, [])
+            nodes[node.name][1].append(cmd)
 
         errors = await asyncio.gather(
-            *(asyncio.ensure_future(node.execute_pipeline()) for node in nodes.values())
+            *(
+                asyncio.create_task(node[0].execute_pipeline(node[1]))
+                for node in nodes.values()
+            )
         )
 
         if any(errors):
@@ -1397,6 +1506,19 @@ class ClusterPipeline(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterComm
                         result.args = (msg,) + result.args[1:]
                         raise result
 
+            default_node = nodes.get(client.get_default_node().name)
+            if default_node is not None:
+                # This pipeline execution used the default node; check whether we
+                # need to replace it.
+                # Note: when the error is raised we'll reset the default node in the
+                # caller function.
+                for cmd in default_node[1]:
+                    # Check if it has a command that failed with a relevant
+                    # exception
+                    if type(cmd.result) in self.__class__.ERRORS_ALLOW_RETRY:
+                        client.replace_default_node()
+                        break
+
         return [cmd.result for cmd in stack]
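# Illustrative sketch (not part of the upstream diff): the async cluster
# pipeline groups queued commands per node and executes each group with a
# single round trip. The node address and keys are placeholders.
import asyncio
from redis.asyncio.cluster import RedisCluster

async def pipeline_demo():
    rc = RedisCluster(host="localhost", port=7000)
    pipe = rc.pipeline()
    pipe.set("counter", 1)
    pipe.get("counter")
    results = await pipe.execute()
    print(results)  # e.g. [True, b'1'] depending on decode_responses
    await rc.close()

asyncio.run(pipeline_demo())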
 
     def _split_command_across_slots(
diff --git a/redis/asyncio/connection.py b/redis/asyncio/connection.py
index 35536fc..e77fba3 100644
--- a/redis/asyncio/connection.py
+++ b/redis/asyncio/connection.py
@@ -1,13 +1,10 @@
 import asyncio
 import copy
 import enum
-import errno
 import inspect
-import io
 import os
 import socket
 import ssl
-import sys
 import threading
 import weakref
 from itertools import chain
@@ -32,6 +29,7 @@ import async_timeout
 from redis.asyncio.retry import Retry
 from redis.backoff import NoBackoff
 from redis.compat import Protocol, TypedDict
+from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
 from redis.exceptions import (
     AuthenticationError,
     AuthenticationWrongNumberOfArgsError,
@@ -56,16 +54,6 @@ hiredis = None
 if HIREDIS_AVAILABLE:
     import hiredis
 
-NONBLOCKING_EXCEPTION_ERROR_NUMBERS = {
-    BlockingIOError: errno.EWOULDBLOCK,
-    ssl.SSLWantReadError: 2,
-    ssl.SSLWantWriteError: 2,
-    ssl.SSLError: 2,
-}
-
-NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
-
-
 SYM_STAR = b"*"
 SYM_DOLLAR = b"$"
 SYM_CRLF = b"\r\n"
@@ -88,6 +76,15 @@ MODULE_EXPORTS_DATA_TYPES_ERROR = (
     "exports one or more module-side data "
     "types, can't unload"
 )
+# user sent an AUTH command to a server without a password configured
+NO_AUTH_SET_ERROR = {
+    # Redis >= 6.0
+    "AUTH <password> called without any password "
+    "configured for the default user. Are you sure "
+    "your configuration is correct?": AuthenticationError,
+    # Redis < 6.0
+    "Client sent AUTH, but no password is set": AuthenticationError,
+}
 
 
 class _HiredisReaderArgs(TypedDict, total=False):
@@ -144,7 +141,7 @@ ExceptionMappingT = Mapping[str, Union[Type[Exception], Mapping[str, Type[Except
 class BaseParser:
     """Plain Python parsing class"""
 
-    __slots__ = "_stream", "_buffer", "_read_size"
+    __slots__ = "_stream", "_read_size"
 
     EXCEPTION_CLASSES: ExceptionMappingT = {
         "ERR": {
@@ -161,7 +158,9 @@ class BaseParser:
             MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
             NO_SUCH_MODULE_ERROR: ModuleError,
             MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
+            **NO_AUTH_SET_ERROR,
         },
+        "WRONGPASS": AuthenticationError,
         "EXECABORT": ExecAbortError,
         "LOADING": BusyLoadingError,
         "NOSCRIPT": NoScriptError,
@@ -172,7 +171,6 @@ class BaseParser:
 
     def __init__(self, socket_read_size: int):
         self._stream: Optional[asyncio.StreamReader] = None
-        self._buffer: Optional[SocketBuffer] = None
         self._read_size = socket_read_size
 
     def __del__(self):
@@ -198,7 +196,7 @@ class BaseParser:
     def on_connect(self, connection: "Connection"):
         raise NotImplementedError()
 
-    async def can_read(self, timeout: float) -> bool:
+    async def can_read_destructive(self) -> bool:
         raise NotImplementedError()
 
     async def read_response(
@@ -207,154 +205,21 @@ class BaseParser:
         raise NotImplementedError()
 
 
-class SocketBuffer:
-    """Async-friendly re-impl of redis-py's SocketBuffer.
-
-    TODO: We're currently passing through two buffers,
-        the asyncio.StreamReader and this. I imagine we can reduce the layers here
-        while maintaining compliance with prior art.
-    """
-
-    def __init__(
-        self,
-        stream_reader: asyncio.StreamReader,
-        socket_read_size: int,
-        socket_timeout: Optional[float],
-    ):
-        self._stream: Optional[asyncio.StreamReader] = stream_reader
-        self.socket_read_size = socket_read_size
-        self.socket_timeout = socket_timeout
-        self._buffer: Optional[io.BytesIO] = io.BytesIO()
-        # number of bytes written to the buffer from the socket
-        self.bytes_written = 0
-        # number of bytes read from the buffer
-        self.bytes_read = 0
-
-    @property
-    def length(self):
-        return self.bytes_written - self.bytes_read
-
-    async def _read_from_socket(
-        self,
-        length: Optional[int] = None,
-        timeout: Union[float, None, _Sentinel] = SENTINEL,
-        raise_on_timeout: bool = True,
-    ) -> bool:
-        buf = self._buffer
-        if buf is None or self._stream is None:
-            raise RedisError("Buffer is closed.")
-        buf.seek(self.bytes_written)
-        marker = 0
-        timeout = timeout if timeout is not SENTINEL else self.socket_timeout
-
-        try:
-            while True:
-                async with async_timeout.timeout(timeout):
-                    data = await self._stream.read(self.socket_read_size)
-                # an empty string indicates the server shutdown the socket
-                if isinstance(data, bytes) and len(data) == 0:
-                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
-                buf.write(data)
-                data_length = len(data)
-                self.bytes_written += data_length
-                marker += data_length
-
-                if length is not None and length > marker:
-                    continue
-                return True
-        except (socket.timeout, asyncio.TimeoutError):
-            if raise_on_timeout:
-                raise TimeoutError("Timeout reading from socket")
-            return False
-        except NONBLOCKING_EXCEPTIONS as ex:
-            # if we're in nonblocking mode and the recv raises a
-            # blocking error, simply return False indicating that
-            # there's no data to be read. otherwise raise the
-            # original exception.
-            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
-            if not raise_on_timeout and ex.errno == allowed:
-                return False
-            raise ConnectionError(f"Error while reading from socket: {ex.args}")
-
-    async def can_read(self, timeout: float) -> bool:
-        return bool(self.length) or await self._read_from_socket(
-            timeout=timeout, raise_on_timeout=False
-        )
-
-    async def read(self, length: int) -> bytes:
-        length = length + 2  # make sure to read the \r\n terminator
-        # make sure we've read enough data from the socket
-        if length > self.length:
-            await self._read_from_socket(length - self.length)
-
-        if self._buffer is None:
-            raise RedisError("Buffer is closed.")
-
-        self._buffer.seek(self.bytes_read)
-        data = self._buffer.read(length)
-        self.bytes_read += len(data)
-
-        # purge the buffer when we've consumed it all so it doesn't
-        # grow forever
-        if self.bytes_read == self.bytes_written:
-            self.purge()
-
-        return data[:-2]
-
-    async def readline(self) -> bytes:
-        buf = self._buffer
-        if buf is None:
-            raise RedisError("Buffer is closed.")
-
-        buf.seek(self.bytes_read)
-        data = buf.readline()
-        while not data.endswith(SYM_CRLF):
-            # there's more data in the socket that we need
-            await self._read_from_socket()
-            buf.seek(self.bytes_read)
-            data = buf.readline()
-
-        self.bytes_read += len(data)
-
-        # purge the buffer when we've consumed it all so it doesn't
-        # grow forever
-        if self.bytes_read == self.bytes_written:
-            self.purge()
-
-        return data[:-2]
-
-    def purge(self):
-        if self._buffer is None:
-            raise RedisError("Buffer is closed.")
-
-        self._buffer.seek(0)
-        self._buffer.truncate()
-        self.bytes_written = 0
-        self.bytes_read = 0
-
-    def close(self):
-        try:
-            self.purge()
-            self._buffer.close()
-        except Exception:
-            # issue #633 suggests the purge/close somehow raised a
-            # BadFileDescriptor error. Perhaps the client ran out of
-            # memory or something else? It's probably OK to ignore
-            # any error being raised from purge/close since we're
-            # removing the reference to the instance below.
-            pass
-        self._buffer = None
-        self._stream = None
-
-
 class PythonParser(BaseParser):
     """Plain Python parsing class"""
 
-    __slots__ = BaseParser.__slots__ + ("encoder",)
+    __slots__ = BaseParser.__slots__ + ("encoder", "_buffer", "_pos", "_chunks")
 
     def __init__(self, socket_read_size: int):
         super().__init__(socket_read_size)
         self.encoder: Optional[Encoder] = None
+        self._buffer = b""
+        self._chunks = []
+        self._pos = 0
+
+    def _clear(self):
+        self._buffer = b""
+        self._chunks.clear()
 
     def on_connect(self, connection: "Connection"):
         """Called when the stream connects"""
@@ -362,31 +227,43 @@ class PythonParser(BaseParser):
         if self._stream is None:
             raise RedisError("Buffer is closed.")
 
-        self._buffer = SocketBuffer(
-            self._stream, self._read_size, connection.socket_timeout
-        )
         self.encoder = connection.encoder
 
     def on_disconnect(self):
         """Called when the stream disconnects"""
         if self._stream is not None:
             self._stream = None
-        if self._buffer is not None:
-            self._buffer.close()
-            self._buffer = None
         self.encoder = None
+        self._clear()
 
-    async def can_read(self, timeout: float):
-        return self._buffer and bool(await self._buffer.can_read(timeout))
+    async def can_read_destructive(self) -> bool:
+        if self._buffer:
+            return True
+        if self._stream is None:
+            raise RedisError("Buffer is closed.")
+        try:
+            async with async_timeout.timeout(0):
+                return await self._stream.read(1)
+        except asyncio.TimeoutError:
+            return False
 
-    async def read_response(
+    async def read_response(self, disable_decoding: bool = False):
+        if self._chunks:
+            # augment parsing buffer with previously read data
+            self._buffer += b"".join(self._chunks)
+            self._chunks.clear()
+        self._pos = 0
+        response = await self._read_response(disable_decoding=disable_decoding)
+        # Successfully parsing a response allows us to clear our parsing buffer
+        self._clear()
+        return response
+
+    async def _read_response(
         self, disable_decoding: bool = False
     ) -> Union[EncodableT, ResponseError, None]:
-        if not self._buffer or not self.encoder:
-            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
-        raw = await self._buffer.readline()
-        if not raw:
+        if not self._stream or not self.encoder:
             raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+        raw = await self._readline()
         response: Any
         byte, response = raw[:1], raw[1:]
 
@@ -400,6 +277,7 @@ class PythonParser(BaseParser):
             # if the error is a ConnectionError, raise immediately so the user
             # is notified
             if isinstance(error, ConnectionError):
+                self._clear()  # Successful parse
                 raise error
             # otherwise, we're dealing with a ResponseError that might belong
             # inside a pipeline response. the connection's read_response()
@@ -417,33 +295,69 @@ class PythonParser(BaseParser):
             length = int(response)
             if length == -1:
                 return None
-            response = await self._buffer.read(length)
+            response = await self._read(length)
         # multi-bulk response
         elif byte == b"*":
             length = int(response)
             if length == -1:
                 return None
             response = [
-                (await self.read_response(disable_decoding)) for _ in range(length)
+                (await self._read_response(disable_decoding)) for _ in range(length)
             ]
         if isinstance(response, bytes) and disable_decoding is False:
             response = self.encoder.decode(response)
         return response
 
+    async def _read(self, length: int) -> bytes:
+        """
+        Read `length` bytes of data.  These are assumed to be followed
+        by a '\r\n' terminator which is subsequently discarded.
+        """
+        want = length + 2
+        end = self._pos + want
+        if len(self._buffer) >= end:
+            result = self._buffer[self._pos : end - 2]
+        else:
+            tail = self._buffer[self._pos :]
+            try:
+                data = await self._stream.readexactly(want - len(tail))
+            except asyncio.IncompleteReadError as error:
+                raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from error
+            result = (tail + data)[:-2]
+            self._chunks.append(data)
+        self._pos += want
+        return result
+
+    async def _readline(self) -> bytes:
+        """
+        read an unknown number of bytes up to the next '\r\n'
+        line separator, which is discarded.
+        """
+        found = self._buffer.find(b"\r\n", self._pos)
+        if found >= 0:
+            result = self._buffer[self._pos : found]
+        else:
+            tail = self._buffer[self._pos :]
+            data = await self._stream.readline()
+            if not data.endswith(b"\r\n"):
+                raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+            result = (tail + data)[:-2]
+            self._chunks.append(data)
+        self._pos += len(result) + 2
+        return result
+
 
 class HiredisParser(BaseParser):
     """Parser class for connections using Hiredis"""
 
-    __slots__ = BaseParser.__slots__ + ("_next_response", "_reader", "_socket_timeout")
-
-    _next_response: bool
+    __slots__ = BaseParser.__slots__ + ("_reader", "_connected")
 
     def __init__(self, socket_read_size: int):
         if not HIREDIS_AVAILABLE:
             raise RedisError("Hiredis is not available.")
         super().__init__(socket_read_size=socket_read_size)
         self._reader: Optional[hiredis.Reader] = None
-        self._socket_timeout: Optional[float] = None
+        self._connected: bool = False
 
     def on_connect(self, connection: "Connection"):
         self._stream = connection._reader
@@ -456,71 +370,40 @@ class HiredisParser(BaseParser):
             kwargs["errors"] = connection.encoder.encoding_errors
 
         self._reader = hiredis.Reader(**kwargs)
-        self._next_response = False
-        self._socket_timeout = connection.socket_timeout
+        self._connected = True
 
     def on_disconnect(self):
-        self._stream = None
-        self._reader = None
-        self._next_response = False
+        self._connected = False
 
-    async def can_read(self, timeout: float):
-        if not self._stream or not self._reader:
+    async def can_read_destructive(self):
+        if not self._connected:
             raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
-
-        if self._next_response is False:
-            self._next_response = self._reader.gets()
-        if self._next_response is False:
-            return await self.read_from_socket(timeout=timeout, raise_on_timeout=False)
-        return True
-
-    async def read_from_socket(
-        self,
-        timeout: Union[float, None, _Sentinel] = SENTINEL,
-        raise_on_timeout: bool = True,
-    ):
-        timeout = self._socket_timeout if timeout is SENTINEL else timeout
-        try:
-            if timeout is None:
-                buffer = await self._stream.read(self._read_size)
-            else:
-                async with async_timeout.timeout(timeout):
-                    buffer = await self._stream.read(self._read_size)
-            if not buffer or not isinstance(buffer, bytes):
-                raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
-            self._reader.feed(buffer)
-            # data was read from the socket and added to the buffer.
-            # return True to indicate that data was read.
+        if self._reader.gets():
             return True
-        except asyncio.CancelledError:
-            raise
-        except (socket.timeout, asyncio.TimeoutError):
-            if raise_on_timeout:
-                raise TimeoutError("Timeout reading from socket") from None
+        try:
+            async with async_timeout.timeout(0):
+                return await self.read_from_socket()
+        except asyncio.TimeoutError:
             return False
-        except NONBLOCKING_EXCEPTIONS as ex:
-            # if we're in nonblocking mode and the recv raises a
-            # blocking error, simply return False indicating that
-            # there's no data to be read. otherwise raise the
-            # original exception.
-            allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
-            if not raise_on_timeout and ex.errno == allowed:
-                return False
-            raise ConnectionError(f"Error while reading from socket: {ex.args}")
+
+    async def read_from_socket(self):
+        buffer = await self._stream.read(self._read_size)
+        if not buffer or not isinstance(buffer, bytes):
+            raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
+        self._reader.feed(buffer)
+        # data was read from the socket and added to the buffer.
+        # return True to indicate that data was read.
+        return True
 
     async def read_response(
         self, disable_decoding: bool = False
     ) -> Union[EncodableT, List[EncodableT]]:
-        if not self._stream or not self._reader:
-            self.on_disconnect()
+        # If `on_disconnect()` has been called, prohibit any more reads
+        # even if they could happen because data might be present.
+        # We still allow reads in progress to finish
+        if not self._connected:
             raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
 
-        # _next_response might be cached from a can_read() call
-        if self._next_response is not False:
-            response = self._next_response
-            self._next_response = False
-            return response
-
         response = self._reader.gets()
         while response is False:
             await self.read_from_socket()
@@ -570,6 +453,7 @@ class Connection:
         "db",
         "username",
         "client_name",
+        "credential_provider",
         "password",
         "socket_timeout",
         "socket_connect_timeout",
@@ -619,14 +503,23 @@ class Connection:
         retry: Optional[Retry] = None,
         redis_connect_func: Optional[ConnectCallbackT] = None,
         encoder_class: Type[Encoder] = Encoder,
+        credential_provider: Optional[CredentialProvider] = None,
     ):
+        if (username or password) and credential_provider is not None:
+            raise DataError(
+                "'username' and 'password' cannot be passed along with 'credential_"
+                "provider'. Please provide only one of the following arguments: \n"
+                "1. 'password' and (optional) 'username'\n"
+                "2. 'credential_provider'"
+            )
         self.pid = os.getpid()
         self.host = host
         self.port = int(port)
         self.db = db
-        self.username = username
         self.client_name = client_name
+        self.credential_provider = credential_provider
         self.password = password
+        self.username = username
         self.socket_timeout = socket_timeout
         self.socket_connect_timeout = socket_connect_timeout or socket_timeout or None
         self.socket_keepalive = socket_keepalive
@@ -637,8 +530,10 @@ class Connection:
             retry_on_error = []
         if retry_on_timeout:
             retry_on_error.append(TimeoutError)
+            retry_on_error.append(socket.timeout)
+            retry_on_error.append(asyncio.TimeoutError)
         self.retry_on_error = retry_on_error
-        if retry_on_error:
+        if retry or retry_on_error:
             if not retry:
                 self.retry = Retry(NoBackoff(), 1)
             else:
@@ -659,7 +554,6 @@ class Connection:
         self.set_parser(parser_class)
         self._connect_callbacks: List[weakref.WeakMethod[ConnectCallbackT]] = []
         self._buffer_cutoff = 6000
-        self._lock = asyncio.Lock()
 
     def __repr__(self):
         repr_args = ",".join((f"{k}={v}" for k, v in self.repr_pieces()))
@@ -674,7 +568,7 @@ class Connection:
     def __del__(self):
         try:
             if self.is_connected:
-                loop = asyncio.get_event_loop()
+                loop = asyncio.get_running_loop()
                 coro = self.disconnect()
                 if loop.is_running():
                     loop.create_task(coro)
@@ -685,7 +579,7 @@ class Connection:
 
     @property
     def is_connected(self):
-        return self._reader and self._writer
+        return self._reader is not None and self._writer is not None
 
     def register_connect_callback(self, callback):
         self._connect_callbacks.append(weakref.WeakMethod(callback))
@@ -706,9 +600,11 @@ class Connection:
         if self.is_connected:
             return
         try:
-            await self._connect()
+            await self.retry.call_with_retry(
+                lambda: self._connect(), lambda error: self.disconnect()
+            )
         except asyncio.CancelledError:
-            raise
+            raise  # in 3.7 and earlier, this is an Exception, not BaseException
         except (socket.timeout, asyncio.TimeoutError):
             raise TimeoutError("Timeout connecting to server")
         except OSError as e:
@@ -767,7 +663,16 @@ class Connection:
     def _error_message(self, exception):
         # args for socket.error can either be (errno, "message")
         # or just "message"
-        if len(exception.args) == 1:
+        if not exception.args:
+            # asyncio has a bug where, on connection reset by peer, the
+            # exception is not instantiated, so args is empty. This is the
+            # workaround.
+            # See: https://github.com/redis/redis-py/issues/2237
+            # See: https://github.com/python/cpython/issues/94061
+            return (
+                f"Error connecting to {self.host}:{self.port}. Connection reset by peer"
+            )
+        elif len(exception.args) == 1:
             return f"Error connecting to {self.host}:{self.port}. {exception.args[0]}."
         else:
             return (
@@ -779,14 +684,13 @@ class Connection:
         """Initialize the connection, authenticate and select a database"""
         self._parser.on_connect(self)
 
-        # if username and/or password are set, authenticate
-        if self.username or self.password:
-            auth_args: Union[Tuple[str], Tuple[str, str]]
-            if self.username:
-                auth_args = (self.username, self.password or "")
-            else:
-                # Mypy bug: https://github.com/python/mypy/issues/10944
-                auth_args = (self.password or "",)
+        # if credential provider or username and/or password are set, authenticate
+        if self.credential_provider or (self.username or self.password):
+            cred_provider = (
+                self.credential_provider
+                or UsernamePasswordCredentialProvider(self.username, self.password)
+            )
+            auth_args = cred_provider.get_credentials()
             # avoid checking health here -- PING will fail if we try
             # to check the health prior to the AUTH
             await self.send_command("AUTH", *auth_args, check_health=False)
@@ -798,7 +702,7 @@ class Connection:
                 # server seems to be < 6.0.0 which expects a single password
                 # arg. retry auth with just the password.
                 # https://github.com/andymccurdy/redis-py/issues/1274
-                await self.send_command("AUTH", self.password, check_health=False)
+                await self.send_command("AUTH", auth_args[-1], check_health=False)
                 auth_response = await self.read_response()
 
             if str_if_bytes(auth_response) != "OK":
@@ -816,7 +720,7 @@ class Connection:
             if str_if_bytes(await self.read_response()) != "OK":
                 raise ConnectionError("Invalid Database")
 
-    async def disconnect(self) -> None:
+    async def disconnect(self, nowait: bool = False) -> None:
         """Disconnects from the Redis server"""
         try:
             async with async_timeout.timeout(self.socket_connect_timeout):
@@ -826,8 +730,9 @@ class Connection:
                 try:
                     if os.getpid() == self.pid:
                         self._writer.close()  # type: ignore[union-attr]
-                        # py3.6 doesn't have this method
-                        if hasattr(self._writer, "wait_closed"):
+                        # wait for close to finish, except when handling errors and
+                        # forcefully disconnecting.
+                        if not nowait:
                             await self._writer.wait_closed()  # type: ignore[union-attr]
                 except OSError:
                     pass
@@ -851,12 +756,10 @@ class Connection:
 
     async def check_health(self):
         """Check the health of the connection with a PING/PONG"""
-        if sys.version_info[0:2] == (3, 6):
-            func = asyncio.get_event_loop
-        else:
-            func = asyncio.get_running_loop
-
-        if self.health_check_interval and func().time() > self.next_health_check:
+        if (
+            self.health_check_interval
+            and asyncio.get_running_loop().time() > self.next_health_check
+        ):
             await self.retry.call_with_retry(self._send_ping, self._ping_failed)
 
     async def _send_packed_command(self, command: Iterable[bytes]) -> None:
@@ -884,10 +787,10 @@ class Connection:
                 self._writer.writelines(command)
                 await self._writer.drain()
         except asyncio.TimeoutError:
-            await self.disconnect()
+            await self.disconnect(nowait=True)
             raise TimeoutError("Timeout writing to socket") from None
         except OSError as e:
-            await self.disconnect()
+            await self.disconnect(nowait=True)
             if len(e.args) == 1:
                 err_no, errmsg = "UNKNOWN", e.args[0]
             else:
@@ -896,8 +799,8 @@ class Connection:
             raise ConnectionError(
                 f"Error {err_no} while writing to socket. {errmsg}."
             ) from e
-        except BaseException:
-            await self.disconnect()
+        except Exception:
+            await self.disconnect(nowait=True)
             raise
 
     async def send_command(self, *args: Any, **kwargs: Any) -> None:
@@ -906,59 +809,26 @@ class Connection:
             self.pack_command(*args), check_health=kwargs.get("check_health", True)
         )
 
-    async def can_read(self, timeout: float = 0):
+    async def can_read_destructive(self):
         """Poll the socket to see if there's data that can be read."""
-        if not self.is_connected:
-            await self.connect()
         try:
-            return await self._parser.can_read(timeout)
+            return await self._parser.can_read_destructive()
         except OSError as e:
-            await self.disconnect()
+            await self.disconnect(nowait=True)
             raise ConnectionError(
                 f"Error while reading from {self.host}:{self.port}: {e.args}"
             )
 
-    async def read_response(self, disable_decoding: bool = False):
-        """Read the response from a previously sent command"""
-        try:
-            async with self._lock:
-                if self.socket_timeout:
-                    async with async_timeout.timeout(self.socket_timeout):
-                        response = await self._parser.read_response(
-                            disable_decoding=disable_decoding
-                        )
-                else:
-                    response = await self._parser.read_response(
-                        disable_decoding=disable_decoding
-                    )
-        except asyncio.TimeoutError:
-            await self.disconnect()
-            raise TimeoutError(f"Timeout reading from {self.host}:{self.port}")
-        except OSError as e:
-            await self.disconnect()
-            raise ConnectionError(
-                f"Error while reading from {self.host}:{self.port} : {e.args}"
-            )
-        except BaseException:
-            await self.disconnect()
-            raise
-
-        if self.health_check_interval:
-            if sys.version_info[0:2] == (3, 6):
-                func = asyncio.get_event_loop
-            else:
-                func = asyncio.get_running_loop
-            self.next_health_check = func().time() + self.health_check_interval
-
-        if isinstance(response, ResponseError):
-            raise response from None
-        return response
-
-    async def read_response_without_lock(self, disable_decoding: bool = False):
+    async def read_response(
+        self,
+        disable_decoding: bool = False,
+        timeout: Optional[float] = None,
+    ):
         """Read the response from a previously sent command"""
+        read_timeout = timeout if timeout is not None else self.socket_timeout
         try:
-            if self.socket_timeout:
-                async with async_timeout.timeout(self.socket_timeout):
+            if read_timeout is not None:
+                async with async_timeout.timeout(read_timeout):
                     response = await self._parser.read_response(
                         disable_decoding=disable_decoding
                     )
@@ -967,23 +837,28 @@ class Connection:
                     disable_decoding=disable_decoding
                 )
         except asyncio.TimeoutError:
-            await self.disconnect()
+            if timeout is not None:
+                # user requested timeout, return None
+                return None
+            # it was a self.socket_timeout error.
+            await self.disconnect(nowait=True)
             raise TimeoutError(f"Timeout reading from {self.host}:{self.port}")
         except OSError as e:
-            await self.disconnect()
+            await self.disconnect(nowait=True)
             raise ConnectionError(
                 f"Error while reading from {self.host}:{self.port} : {e.args}"
             )
-        except BaseException:
-            await self.disconnect()
+        except asyncio.CancelledError:
+            # need this check for 3.7, where CancelledError
+            # is subclass of Exception, not BaseException
+            raise
+        except Exception:
+            await self.disconnect(nowait=True)
             raise
 
         if self.health_check_interval:
-            if sys.version_info[0:2] == (3, 6):
-                func = asyncio.get_event_loop
-            else:
-                func = asyncio.get_running_loop
-            self.next_health_check = func().time() + self.health_check_interval
+            next_time = asyncio.get_running_loop().time() + self.health_check_interval
+            self.next_health_check = next_time
 
         if isinstance(response, ResponseError):
             raise response from None
@@ -1050,7 +925,8 @@ class Connection:
                     or chunklen > buffer_cutoff
                     or isinstance(chunk, memoryview)
                 ):
-                    output.append(SYM_EMPTY.join(pieces))
+                    if pieces:
+                        output.append(SYM_EMPTY.join(pieces))
                     buffer_length = 0
                     pieces = []
 
@@ -1185,18 +1061,27 @@ class UnixDomainSocketConnection(Connection):  # lgtm [py/missing-call-to-init]
         client_name: str = None,
         retry: Optional[Retry] = None,
         redis_connect_func=None,
+        credential_provider: Optional[CredentialProvider] = None,
     ):
         """
         Initialize a new UnixDomainSocketConnection.
         To specify a retry policy, first set `retry_on_timeout` to `True`
         then set `retry` to a valid `Retry` object
         """
+        if (username or password) and credential_provider is not None:
+            raise DataError(
+                "'username' and 'password' cannot be passed along with 'credential_"
+                "provider'. Please provide only one of the following arguments: \n"
+                "1. 'password' and (optional) 'username'\n"
+                "2. 'credential_provider'"
+            )
         self.pid = os.getpid()
         self.path = path
         self.db = db
-        self.username = username
         self.client_name = client_name
+        self.credential_provider = credential_provider
         self.password = password
+        self.username = username
         self.socket_timeout = socket_timeout
         self.socket_connect_timeout = socket_connect_timeout or socket_timeout or None
         self.retry_on_timeout = retry_on_timeout
@@ -1226,7 +1111,6 @@ class UnixDomainSocketConnection(Connection):  # lgtm [py/missing-call-to-init]
         self.set_parser(parser_class)
         self._connect_callbacks = []
         self._buffer_cutoff = 6000
-        self._lock = asyncio.Lock()
 
     def repr_pieces(self) -> Iterable[Tuple[str, Union[str, int]]]:
         pieces = [("path", self.path), ("db", self.db)]
@@ -1366,7 +1250,7 @@ class ConnectionPool:
 
             redis://[[username]:[password]]@localhost:6379/0
             rediss://[[username]:[password]]@localhost:6379/0
-            unix://[[username]:[password]]@/path/to/socket.sock?db=0
+            unix://[username@]/path/to/socket.sock?db=0[&password=password]
 
         Three URL schemes are supported:
 
@@ -1519,12 +1403,12 @@ class ConnectionPool:
             # pool before all data has been read or the socket has been
             # closed. either way, reconnect and verify everything is good.
             try:
-                if await connection.can_read():
+                if await connection.can_read_destructive():
                     raise ConnectionError("Connection has data") from None
-            except ConnectionError:
+            except (ConnectionError, OSError):
                 await connection.disconnect()
                 await connection.connect()
-                if await connection.can_read():
+                if await connection.can_read_destructive():
                     raise ConnectionError("Connection not ready") from None
         except BaseException:
             # release the connection back to the pool so that we don't
@@ -1598,6 +1482,12 @@ class ConnectionPool:
             if exc:
                 raise exc
 
+    def set_retry(self, retry: "Retry") -> None:
+        for conn in self._available_connections:
+            conn.retry = retry
+        for conn in self._in_use_connections:
+            conn.retry = retry
+
 
 class BlockingConnectionPool(ConnectionPool):
     """
@@ -1720,12 +1610,12 @@ class BlockingConnectionPool(ConnectionPool):
             # pool before all data has been read or the socket has been
             # closed. either way, reconnect and verify everything is good.
             try:
-                if await connection.can_read():
+                if await connection.can_read_destructive():
                     raise ConnectionError("Connection has data") from None
-            except ConnectionError:
+            except (ConnectionError, OSError):
                 await connection.disconnect()
                 await connection.connect()
-                if await connection.can_read():
+                if await connection.can_read_destructive():
                     raise ConnectionError("Connection not ready") from None
         except BaseException:
             # release the connection back to the pool so that we don't leak it
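
The asyncio connection changes above introduce a `credential_provider` hook that replaces the plain username/password pair at AUTH time. A minimal sketch of how a caller might use the bundled `UsernamePasswordCredentialProvider`; the host, port and credentials are placeholders, and it assumes the async client forwards the argument to its connections the same way the sync client does:

    import asyncio

    from redis.asyncio import Redis
    from redis.credentials import UsernamePasswordCredentialProvider

    async def main():
        # Passing credential_provider together with username/password raises
        # DataError, so only the provider is supplied here.
        creds = UsernamePasswordCredentialProvider("myuser", "secret")
        client = Redis(host="localhost", port=6379, credential_provider=creds)
        print(await client.ping())
        await client.close()

    asyncio.run(main())
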
diff --git a/redis/asyncio/lock.py b/redis/asyncio/lock.py
index fc7df37..e1d11a8 100644
--- a/redis/asyncio/lock.py
+++ b/redis/asyncio/lock.py
@@ -1,5 +1,4 @@
 import asyncio
-import sys
 import threading
 import uuid
 from types import SimpleNamespace
@@ -8,7 +7,7 @@ from typing import TYPE_CHECKING, Awaitable, Optional, Union
 from redis.exceptions import LockError, LockNotOwnedError
 
 if TYPE_CHECKING:
-    from redis.asyncio import Redis
+    from redis.asyncio import Redis, RedisCluster
 
 
 class Lock:
@@ -78,7 +77,7 @@ class Lock:
 
     def __init__(
         self,
-        redis: "Redis",
+        redis: Union["Redis", "RedisCluster"],
         name: Union[str, bytes, memoryview],
         timeout: Optional[float] = None,
         sleep: float = 0.1,
@@ -186,16 +185,15 @@ class Lock:
         object with the default encoding. If a token isn't specified, a UUID
         will be generated.
         """
-        if sys.version_info[0:2] != (3, 6):
-            loop = asyncio.get_running_loop()
-        else:
-            loop = asyncio.get_event_loop()
-
         sleep = self.sleep
         if token is None:
             token = uuid.uuid1().hex.encode()
         else:
-            encoder = self.redis.connection_pool.get_encoder()
+            try:
+                encoder = self.redis.connection_pool.get_encoder()
+            except AttributeError:
+                # Cluster
+                encoder = self.redis.get_encoder()
             token = encoder.encode(token)
         if blocking is None:
             blocking = self.blocking
@@ -203,14 +201,14 @@ class Lock:
             blocking_timeout = self.blocking_timeout
         stop_trying_at = None
         if blocking_timeout is not None:
-            stop_trying_at = loop.time() + blocking_timeout
+            stop_trying_at = asyncio.get_running_loop().time() + blocking_timeout
         while True:
             if await self.do_acquire(token):
                 self.local.token = token
                 return True
             if not blocking:
                 return False
-            next_try_at = loop.time() + sleep
+            next_try_at = asyncio.get_running_loop().time() + sleep
             if stop_trying_at is not None and next_try_at > stop_trying_at:
                 return False
             await asyncio.sleep(sleep)
@@ -239,7 +237,11 @@ class Lock:
         # need to always compare bytes to bytes
         # TODO: this can be simplified when the context manager is finished
         if stored_token and not isinstance(stored_token, bytes):
-            encoder = self.redis.connection_pool.get_encoder()
+            try:
+                encoder = self.redis.connection_pool.get_encoder()
+            except AttributeError:
+                # Cluster
+                encoder = self.redis.get_encoder()
             stored_token = encoder.encode(stored_token)
         return self.local.token is not None and stored_token == self.local.token
 
@@ -257,7 +259,7 @@ class Lock:
                 keys=[self.name], args=[expected_token], client=self.redis
             )
         ):
-            raise LockNotOwnedError("Cannot release a lock" " that's no longer owned")
+            raise LockNotOwnedError("Cannot release a lock that's no longer owned")
 
     def extend(
         self, additional_time: float, replace_ttl: bool = False
@@ -287,7 +289,7 @@ class Lock:
                 client=self.redis,
             )
         ):
-            raise LockNotOwnedError("Cannot extend a lock that's" " no longer owned")
+            raise LockNotOwnedError("Cannot extend a lock that's no longer owned")
         return True
 
     def reacquire(self) -> Awaitable[bool]:
@@ -307,5 +309,5 @@ class Lock:
                 keys=[self.name], args=[self.local.token, timeout], client=self.redis
             )
         ):
-            raise LockNotOwnedError("Cannot reacquire a lock that's" " no longer owned")
+            raise LockNotOwnedError("Cannot reacquire a lock that's no longer owned")
         return True
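
The lock changes above widen the accepted client type to `Union[Redis, RedisCluster]` and fall back to `redis.get_encoder()` when the client has no connection pool. A short usage sketch with the plain asyncio client; the key name and timeouts are illustrative, and a `RedisCluster` instance could be passed in the same position:

    import asyncio

    from redis.asyncio import Redis
    from redis.asyncio.lock import Lock

    async def main():
        client = Redis(host="localhost", port=6379)
        # blocking_timeout bounds how long acquire() keeps retrying.
        lock = Lock(client, "resource-key", timeout=5, blocking_timeout=2)
        if await lock.acquire():
            try:
                ...  # critical section
            finally:
                await lock.release()
        await client.close()

    asyncio.run(main())
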
diff --git a/redis/asyncio/sentinel.py b/redis/asyncio/sentinel.py
index 5aefd09..ec17886 100644
--- a/redis/asyncio/sentinel.py
+++ b/redis/asyncio/sentinel.py
@@ -1,7 +1,7 @@
 import asyncio
 import random
 import weakref
-from typing import AsyncIterator, Iterable, Mapping, Sequence, Tuple, Type
+from typing import AsyncIterator, Iterable, Mapping, Optional, Sequence, Tuple, Type
 
 from redis.asyncio.client import Redis
 from redis.asyncio.connection import (
@@ -44,7 +44,7 @@ class SentinelManagedConnection(Connection):
             if str_if_bytes(await self.read_response()) != "PONG":
                 raise ConnectionError("PING failed")
 
-    async def connect(self):
+    async def _connect_retry(self):
         if self._reader:
             return  # already connected
         if self.connection_pool.is_master:
@@ -57,9 +57,22 @@ class SentinelManagedConnection(Connection):
                     continue
             raise SlaveNotFoundError  # Never be here
 
-    async def read_response(self, disable_decoding: bool = False):
+    async def connect(self):
+        return await self.retry.call_with_retry(
+            self._connect_retry,
+            lambda error: asyncio.sleep(0),
+        )
+
+    async def read_response(
+        self,
+        disable_decoding: bool = False,
+        timeout: Optional[float] = None,
+    ):
         try:
-            return await super().read_response(disable_decoding=disable_decoding)
+            return await super().read_response(
+                disable_decoding=disable_decoding,
+                timeout=timeout,
+            )
         except ReadOnlyError:
             if self.connection_pool.is_master:
                 # When talking to a master, a ReadOnlyError when likely
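
In the sentinel changes above, `connect()` is now driven by the connection's `Retry` object and `read_response()` forwards the new `timeout` argument. Call sites are unchanged; a sketch of typical asyncio Sentinel usage, with the sentinel address and the service name "mymaster" as placeholders:

    import asyncio

    from redis.asyncio.sentinel import Sentinel

    async def main():
        sentinel = Sentinel([("localhost", 26379)], socket_timeout=0.5)
        master = sentinel.master_for("mymaster", socket_timeout=0.5)
        await master.set("key", "value")
        replica = sentinel.slave_for("mymaster", socket_timeout=0.5)
        print(await replica.get("key"))

    asyncio.run(main())
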
diff --git a/redis/backoff.py b/redis/backoff.py
index 5ccdb91..c62e760 100644
--- a/redis/backoff.py
+++ b/redis/backoff.py
@@ -1,6 +1,11 @@
 import random
 from abc import ABC, abstractmethod
 
+# Maximum backoff between each retry in seconds
+DEFAULT_CAP = 0.512
+# Minimum backoff between each retry in seconds
+DEFAULT_BASE = 0.008
+
 
 class AbstractBackoff(ABC):
     """Backoff interface"""
@@ -40,7 +45,7 @@ class NoBackoff(ConstantBackoff):
 class ExponentialBackoff(AbstractBackoff):
     """Exponential backoff upon failure"""
 
-    def __init__(self, cap, base):
+    def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
         """
         `cap`: maximum backoff time in seconds
         `base`: base backoff time in seconds
@@ -55,7 +60,7 @@ class ExponentialBackoff(AbstractBackoff):
 class FullJitterBackoff(AbstractBackoff):
     """Full jitter backoff upon failure"""
 
-    def __init__(self, cap, base):
+    def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
         """
         `cap`: maximum backoff time in seconds
         `base`: base backoff time in seconds
@@ -70,7 +75,7 @@ class FullJitterBackoff(AbstractBackoff):
 class EqualJitterBackoff(AbstractBackoff):
     """Equal jitter backoff upon failure"""
 
-    def __init__(self, cap, base):
+    def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
         """
         `cap`: maximum backoff time in seconds
         `base`: base backoff time in seconds
@@ -86,7 +91,7 @@ class EqualJitterBackoff(AbstractBackoff):
 class DecorrelatedJitterBackoff(AbstractBackoff):
     """Decorrelated jitter backoff upon failure"""
 
-    def __init__(self, cap, base):
+    def __init__(self, cap=DEFAULT_CAP, base=DEFAULT_BASE):
         """
         `cap`: maximum backoff time in seconds
         `base`: base backoff time in seconds
@@ -103,3 +108,7 @@ class DecorrelatedJitterBackoff(AbstractBackoff):
         temp = random.uniform(self._base, max_backoff)
         self._previous_backoff = min(self._cap, temp)
         return self._previous_backoff
+
+
+def default_backoff():
+    return EqualJitterBackoff()
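
With the backoff changes above, the jitter backoffs gain default cap/base values (0.512 s and 0.008 s) and `default_backoff()` returns an `EqualJitterBackoff` built from them. A hedged sketch of pairing a backoff with `Retry` on the sync client; the connection details are placeholders:

    from redis import Redis
    from redis.backoff import ExponentialBackoff
    from redis.exceptions import ConnectionError, TimeoutError
    from redis.retry import Retry

    # Three retries with exponentially growing, capped sleeps between attempts.
    retry = Retry(ExponentialBackoff(), retries=3)

    r = Redis(
        host="localhost",
        port=6379,
        retry=retry,
        retry_on_error=[ConnectionError, TimeoutError],
    )
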
diff --git a/redis/client.py b/redis/client.py
index c63fb13..1a9b96b 100755
--- a/redis/client.py
+++ b/redis/client.py
@@ -5,6 +5,7 @@ import threading
 import time
 import warnings
 from itertools import chain
+from typing import Optional
 
 from redis.commands import (
     CoreCommands,
@@ -13,6 +14,7 @@ from redis.commands import (
     list_or_args,
 )
 from redis.connection import ConnectionPool, SSLConnection, UnixDomainSocketConnection
+from redis.credentials import CredentialProvider
 from redis.exceptions import (
     ConnectionError,
     ExecAbortError,
@@ -24,6 +26,7 @@ from redis.exceptions import (
     WatchError,
 )
 from redis.lock import Lock
+from redis.retry import Retry
 from redis.utils import safe_str, str_if_bytes
 
 SYM_EMPTY = b""
@@ -850,6 +853,8 @@ class Redis(AbstractRedis, RedisModuleCommands, CoreCommands, SentinelCommands):
     the commands are sent and received to the Redis server. Based on
     configuration, an instance will either use a ConnectionPool, or
     Connection object to talk to redis.
+
+    It is not safe to pass PubSub or Pipeline objects between threads.
     """
 
     @classmethod
@@ -861,7 +866,7 @@ class Redis(AbstractRedis, RedisModuleCommands, CoreCommands, SentinelCommands):
 
             redis://[[username]:[password]]@localhost:6379/0
             rediss://[[username]:[password]]@localhost:6379/0
-            unix://[[username]:[password]]@/path/to/socket.sock?db=0
+            unix://[username@]/path/to/socket.sock?db=0[&password=password]
 
         Three URL schemes are supported:
 
@@ -936,6 +941,7 @@ class Redis(AbstractRedis, RedisModuleCommands, CoreCommands, SentinelCommands):
         username=None,
         retry=None,
         redis_connect_func=None,
+        credential_provider: Optional[CredentialProvider] = None,
     ):
         """
         Initialize a new Redis client.
@@ -943,6 +949,12 @@ class Redis(AbstractRedis, RedisModuleCommands, CoreCommands, SentinelCommands):
         `retry_on_error` to a list of the error/s to retry on, then set
         `retry` to a valid `Retry` object.
         To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
+
+        Args:
+
+        single_connection_client:
+            if `True`, connection pool is not used. In that case the `Redis`
+            instance is not thread safe.
         """
         if not connection_pool:
             if charset is not None:
@@ -977,6 +989,7 @@ class Redis(AbstractRedis, RedisModuleCommands, CoreCommands, SentinelCommands):
                 "health_check_interval": health_check_interval,
                 "client_name": client_name,
                 "redis_connect_func": redis_connect_func,
+                "credential_provider": credential_provider,
             }
             # based on input, setup appropriate connection args
             if unix_socket_path is not None:
@@ -1035,6 +1048,13 @@ class Redis(AbstractRedis, RedisModuleCommands, CoreCommands, SentinelCommands):
         """Get the connection's key-word arguments"""
         return self.connection_pool.connection_kwargs
 
+    def get_retry(self) -> Optional["Retry"]:
+        return self.get_connection_kwargs().get("retry")
+
+    def set_retry(self, retry: "Retry") -> None:
+        self.get_connection_kwargs().update({"retry": retry})
+        self.connection_pool.set_retry(retry)
+
     def set_response_callback(self, command, callback):
         """Set a custom Response Callback"""
         self.response_callbacks[command] = callback
@@ -1250,12 +1270,17 @@ class Redis(AbstractRedis, RedisModuleCommands, CoreCommands, SentinelCommands):
         try:
             if NEVER_DECODE in options:
                 response = connection.read_response(disable_decoding=True)
+                options.pop(NEVER_DECODE)
             else:
                 response = connection.read_response()
         except ResponseError:
             if EMPTY_RESPONSE in options:
                 return options[EMPTY_RESPONSE]
             raise
+
+        if EMPTY_RESPONSE in options:
+            options.pop(EMPTY_RESPONSE)
+
         if command_name in self.response_callbacks:
             return self.response_callbacks[command_name](response, **options)
         return response
@@ -1495,9 +1520,15 @@ class PubSub:
 
         self.check_health()
 
-        if not block and not self._execute(conn, conn.can_read, timeout=timeout):
-            return None
-        response = self._execute(conn, conn.read_response)
+        def try_read():
+            if not block:
+                if not conn.can_read(timeout=timeout):
+                    return None
+            else:
+                conn.connect()
+            return conn.read_response()
+
+        response = self._execute(conn, try_read)
 
         if self.is_health_check_response(response):
             # ignore the health check message as user might not expect it
@@ -1623,13 +1654,13 @@ class PubSub:
             if response is not None:
                 yield response
 
-    def get_message(self, ignore_subscribe_messages=False, timeout=0):
+    def get_message(self, ignore_subscribe_messages=False, timeout=0.0):
         """
         Get the next message if one is available, otherwise None.
 
         If timeout is specified, the system will wait for `timeout` seconds
         before returning. Timeout should be specified as a floating point
-        number.
+        number, or None, to wait indefinitely.
         """
         if not self.subscribed:
             # Wait for subscription
@@ -1645,7 +1676,7 @@ class PubSub:
                 # so no messages are available
                 return None
 
-        response = self.parse_response(block=False, timeout=timeout)
+        response = self.parse_response(block=(timeout is None), timeout=timeout)
         if response:
             return self.handle_message(response, ignore_subscribe_messages)
         return None
@@ -1850,7 +1881,7 @@ class Pipeline(Redis):
             raise RedisError("Cannot issue nested calls to MULTI")
         if self.command_stack:
             raise RedisError(
-                "Commands without an initial WATCH have already " "been issued"
+                "Commands without an initial WATCH have already been issued"
             )
         self.explicit_transaction = True
 
@@ -1873,7 +1904,7 @@ class Pipeline(Redis):
         if self.watching:
             self.reset()
             raise WatchError(
-                "A ConnectionError occurred on while " "watching one or more keys"
+                "A ConnectionError occurred on while watching one or more keys"
             )
         # if retry_on_timeout is not set, or the error is not
         # a TimeoutError, raise it
@@ -1966,7 +1997,7 @@ class Pipeline(Redis):
         if len(response) != len(commands):
             self.connection.disconnect()
             raise ResponseError(
-                "Wrong number of response items from " "pipeline execution"
+                "Wrong number of response items from pipeline execution"
             )
 
         # find any errors in the response and raise if necessary
@@ -2047,7 +2078,7 @@ class Pipeline(Redis):
         # indicates the user should retry this transaction.
         if self.watching:
             raise WatchError(
-                "A ConnectionError occurred on while " "watching one or more keys"
+                "A ConnectionError occurred on while watching one or more keys"
             )
         # if retry_on_timeout is not set, or the error is not
         # a TimeoutError, raise it
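
Among the client changes above, `PubSub.get_message()` now accepts `timeout=None` to block until a message arrives instead of polling. A minimal sketch; the connection details and channel name are placeholders, and the call will block until something is published:

    from redis import Redis

    r = Redis(host="localhost", port=6379)
    p = r.pubsub()
    p.subscribe("notifications")

    # timeout=None blocks until a message is available; a float still waits
    # at most that many seconds and may return None.
    message = p.get_message(ignore_subscribe_messages=True, timeout=None)
    print(message)
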
diff --git a/redis/cluster.py b/redis/cluster.py
index 6034e96..d6dc02d 100644
--- a/redis/cluster.py
+++ b/redis/cluster.py
@@ -1,13 +1,12 @@
-import copy
-import logging
 import random
 import socket
 import sys
 import threading
 import time
 from collections import OrderedDict
-from typing import Any, Callable, Dict, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
+from redis.backoff import default_backoff
 from redis.client import CaseInsensitiveDict, PubSub, Redis, parse_scan
 from redis.commands import READ_COMMANDS, CommandsParser, RedisClusterCommands
 from redis.connection import ConnectionPool, DefaultParser, Encoder, parse_url
@@ -15,7 +14,6 @@ from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
 from redis.exceptions import (
     AskError,
     AuthenticationError,
-    BusyLoadingError,
     ClusterCrossSlotError,
     ClusterDownError,
     ClusterError,
@@ -31,6 +29,7 @@ from redis.exceptions import (
     TryAgainError,
 )
 from redis.lock import Lock
+from redis.retry import Retry
 from redis.utils import (
     dict_merge,
     list_keys_to_dict,
@@ -39,10 +38,8 @@ from redis.utils import (
     str_if_bytes,
 )
 
-log = logging.getLogger(__name__)
 
-
-def get_node_name(host: str, port: int) -> str:
+def get_node_name(host: str, port: Union[str, int]) -> str:
     return f"{host}:{port}"
 
 
@@ -124,7 +121,9 @@ REDIS_ALLOWED_KEYS = (
     "charset",
     "connection_class",
     "connection_pool",
+    "connection_pool_class",
     "client_name",
+    "credential_provider",
     "db",
     "decode_responses",
     "encoding",
@@ -267,6 +266,9 @@ class AbstractRedisCluster:
                 "READWRITE",
                 "TIME",
                 "GRAPH.CONFIG",
+                "LATENCY HISTORY",
+                "LATENCY LATEST",
+                "LATENCY RESET",
             ],
             DEFAULT_NODE,
         ),
@@ -381,6 +383,30 @@ class AbstractRedisCluster:
 
     ERRORS_ALLOW_RETRY = (ConnectionError, TimeoutError, ClusterDownError)
 
+    def replace_default_node(self, target_node: "ClusterNode" = None) -> None:
+        """Replace the default cluster node.
+        A random cluster node will be chosen if target_node isn't passed, and primaries
+        will be prioritized. The default node will not be changed if there are no other
+        nodes in the cluster.
+
+        Args:
+            target_node (ClusterNode, optional): Target node to replace the default
+            node. Defaults to None.
+        """
+        if target_node:
+            self.nodes_manager.default_node = target_node
+        else:
+            curr_node = self.get_default_node()
+            primaries = [node for node in self.get_primaries() if node != curr_node]
+            if primaries:
+                # Choose a primary if the cluster contains different primaries
+                self.nodes_manager.default_node = random.choice(primaries)
+            else:
+                # Otherwise, choose a replica if the cluster contains replicas
+                replicas = [node for node in self.get_replicas() if node != curr_node]
+                if replicas:
+                    self.nodes_manager.default_node = random.choice(replicas)
+
 
 class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
     @classmethod
@@ -392,7 +418,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
 
             redis://[[username]:[password]]@localhost:6379/0
             rediss://[[username]:[password]]@localhost:6379/0
-            unix://[[username]:[password]]@/path/to/socket.sock?db=0
+            unix://[username@]/path/to/socket.sock?db=0[&password=password]
 
         Three URL schemes are supported:
 
@@ -429,27 +455,28 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
 
     def __init__(
         self,
-        host=None,
-        port=6379,
-        startup_nodes=None,
-        cluster_error_retry_attempts=3,
-        require_full_coverage=False,
-        reinitialize_steps=10,
-        read_from_replicas=False,
-        dynamic_startup_nodes=True,
-        url=None,
+        host: Optional[str] = None,
+        port: int = 6379,
+        startup_nodes: Optional[List["ClusterNode"]] = None,
+        cluster_error_retry_attempts: int = 3,
+        retry: Optional["Retry"] = None,
+        require_full_coverage: bool = False,
+        reinitialize_steps: int = 5,
+        read_from_replicas: bool = False,
+        dynamic_startup_nodes: bool = True,
+        url: Optional[str] = None,
         **kwargs,
     ):
         """
          Initialize a new RedisCluster client.
 
-         :startup_nodes: 'list[ClusterNode]'
+         :param startup_nodes:
              List of nodes from which initial bootstrapping can be done
-         :host: 'str'
+         :param host:
              Can be used to point to a startup node
-         :port: 'int'
+         :param port:
              Can be used to point to a startup node
-         :require_full_coverage: 'bool'
+         :param require_full_coverage:
             When set to False (default value): the client will not require a
             full coverage of the slots. However, if not all slots are covered,
             and at least one node has 'cluster-require-full-coverage' set to
@@ -459,12 +486,12 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             When set to True: all slots must be covered to construct the
             cluster client. If not all slots are covered, RedisClusterException
             will be thrown.
-        :read_from_replicas: 'bool'
+        :param read_from_replicas:
              Enable read from replicas in READONLY mode. You can read possibly
              stale data.
              When set to true, read commands will be assigned between the
              primary and its replications in a Round-Robin manner.
-         :dynamic_startup_nodes: 'bool'
+         :param dynamic_startup_nodes:
              Set the RedisCluster's startup nodes to all of the discovered nodes.
              If true (default value), the cluster's discovered nodes will be used to
              determine the cluster nodes-slots mapping in the next topology refresh.
@@ -472,10 +499,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
              listed in the CLUSTER SLOTS output.
              If you use dynamic DNS endpoints for startup nodes but CLUSTER SLOTS lists
              specific IP addresses, it is best to set it to false.
-        :cluster_error_retry_attempts: 'int'
-             Retry command execution attempts when encountering ClusterDownError
-             or ConnectionError
-        :reinitialize_steps: 'int'
+        :param cluster_error_retry_attempts:
+             Number of times to retry before raising an error when
+             :class:`~.TimeoutError` or :class:`~.ConnectionError` or
+             :class:`~.ClusterDownError` are encountered
+        :param reinitialize_steps:
             Specifies the number of MOVED errors that need to occur before
             reinitializing the whole cluster topology. If a MOVED error occurs
             and the cluster does not need to be reinitialized on this current
@@ -535,7 +563,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                 " RedisCluster(startup_nodes=[ClusterNode('localhost', 6379),"
                 " ClusterNode('localhost', 6378)])"
             )
-        log.debug(f"startup_nodes : {startup_nodes}")
         # Update the connection arguments
         # Whenever a new connection is established, RedisCluster's on_connect
         # method should be run
@@ -544,6 +571,11 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         self.user_on_connect_func = kwargs.pop("redis_connect_func", None)
         kwargs.update({"redis_connect_func": self.on_connect})
         kwargs = cleanup_kwargs(**kwargs)
+        if retry:
+            self.retry = retry
+            kwargs.update({"retry": self.retry})
+        else:
+            kwargs.update({"retry": Retry(default_backoff(), 0)})
 
         self.encoder = Encoder(
             kwargs.get("encoding", "utf-8"),
@@ -666,15 +698,18 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         :return True if the default node was set, else False
         """
         if node is None or self.get_node(node_name=node.name) is None:
-            log.info(
-                "The requested node does not exist in the cluster, so "
-                "the default node was not changed."
-            )
             return False
         self.nodes_manager.default_node = node
-        log.info(f"Changed the default cluster node to {node}")
         return True
 
+    def get_retry(self) -> Optional["Retry"]:
+        return self.retry
+
+    def set_retry(self, retry: "Retry") -> None:
+        self.retry = retry
+        for node in self.get_nodes():
+            node.redis_connection.set_retry(retry)
+
     def monitor(self, target_node=None):
         """
         Returns a Monitor object for the specified target node.
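
The cluster changes in this file add a `retry` constructor argument plus `get_retry()`/`set_retry()` on `RedisCluster`; without an explicit retry the client now defaults to `Retry(default_backoff(), 0)` for its node connections. A sketch of wiring it up; the startup node address is a placeholder:

    from redis.backoff import ExponentialBackoff
    from redis.cluster import RedisCluster
    from redis.retry import Retry

    rc = RedisCluster(
        host="localhost",
        port=16379,
        retry=Retry(ExponentialBackoff(), 3),
        cluster_error_retry_attempts=3,
    )
    print(rc.get_retry())
    # Swapping the policy at runtime pushes it to every node's connections.
    rc.set_retry(Retry(ExponentialBackoff(), 5))
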
@@ -804,7 +839,9 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         """Set a custom Response Callback"""
         self.cluster_response_callbacks[command] = callback
 
-    def _determine_nodes(self, *args, **kwargs):
+    def _determine_nodes(self, *args, **kwargs) -> List["ClusterNode"]:
+        # Determine which nodes the command should be executed on.
+        # Returns a list of target nodes.
         command = args[0].upper()
         if len(args) >= 2 and f"{args[0]} {args[1]}".upper() in self.command_flags:
             command = f"{args[0]} {args[1]}".upper()
@@ -816,8 +853,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         else:
             # get the nodes group for this command if it was predefined
             command_flag = self.command_flags.get(command)
-        if command_flag:
-            log.debug(f"Target node/s for {command}: {command_flag}")
         if command_flag == self.__class__.RANDOM:
             # return a random node
             return [self.get_random_node()]
@@ -841,7 +876,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             node = self.nodes_manager.get_node_from_slot(
                 slot, self.read_from_replicas and command in READ_COMMANDS
             )
-            log.debug(f"Target for {args}: slot {slot}")
             return [node]
 
     def _should_reinitialized(self):
@@ -986,6 +1020,7 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
             dict<Any, ClusterNode>
         """
         target_nodes_specified = False
+        is_default_node = False
         target_nodes = None
         passed_targets = kwargs.pop("target_nodes", None)
         if passed_targets is not None and not self._is_nodes_flag(passed_targets):
@@ -998,12 +1033,13 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         # nodes were passed to this function, we cannot retry the command
         # execution since the nodes may not be valid anymore after the tables
         # were reinitialized. So in case of passed target nodes,
-        # retry_attempts will be set to 1.
+        # retry_attempts will be set to 0.
         retry_attempts = (
-            1 if target_nodes_specified else self.cluster_error_retry_attempts
+            0 if target_nodes_specified else self.cluster_error_retry_attempts
         )
-        exception = None
-        for _ in range(0, retry_attempts):
+        # Add one for the first execution
+        execute_attempts = 1 + retry_attempts
+        for _ in range(execute_attempts):
             try:
                 res = {}
                 if not target_nodes_specified:
@@ -1015,23 +1051,28 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                         raise RedisClusterException(
                             f"No targets were found to execute {args} command on"
                         )
+                    if (
+                        len(target_nodes) == 1
+                        and target_nodes[0] == self.get_default_node()
+                    ):
+                        is_default_node = True
                 for node in target_nodes:
                     res[node.name] = self._execute_command(node, *args, **kwargs)
                 # Return the processed result
                 return self._process_result(args[0], res, **kwargs)
-            except BaseException as e:
-                if type(e) in self.__class__.ERRORS_ALLOW_RETRY:
+            except Exception as e:
+                if retry_attempts > 0 and type(e) in self.__class__.ERRORS_ALLOW_RETRY:
+                    if is_default_node:
+                        # Replace the default cluster node
+                        self.replace_default_node()
                     # The nodes and slots cache were reinitialized.
                     # Try again with the new cluster setup.
-                    exception = e
+                    retry_attempts -= 1
+                    continue
                 else:
-                    # All other errors should be raised.
+                    # raise the exception
                     raise e
 
-        # If it fails the configured number of times then raise exception back
-        # to caller of this method
-        raise exception
-
     def _execute_command(self, target_node, *args, **kwargs):
         """
         Send a command to a node in the cluster
@@ -1043,7 +1084,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
         asking = False
         moved = False
         ttl = int(self.RedisClusterRequestTTL)
-        connection_error_retry_counter = 0
 
         while ttl > 0:
             ttl -= 1
@@ -1059,10 +1099,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                     )
                     moved = False
 
-                log.debug(
-                    f"Executing command {command} on target node: "
-                    f"{target_node.server_type} {target_node.name}"
-                )
                 redis_node = self.get_redis_connection(target_node)
                 connection = get_connection(redis_node, *args, **kwargs)
                 if asking:
@@ -1077,31 +1113,24 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                         response, **kwargs
                     )
                 return response
-
-            except (RedisClusterException, BusyLoadingError, AuthenticationError) as e:
-                log.exception(type(e))
+            except AuthenticationError:
                 raise
             except (ConnectionError, TimeoutError) as e:
-                log.exception(type(e))
+                # Connection retries are being handled in the node's
+                # Retry object.
                 # ConnectionError can also be raised if we couldn't get a
                 # connection from the pool before timing out, so check that
                 # this is an actual connection before attempting to disconnect.
                 if connection is not None:
                     connection.disconnect()
-                connection_error_retry_counter += 1
-
-                # Give the node 0.25 seconds to get back up and retry again
-                # with same node and configuration. After 5 attempts then try
-                # to reinitialize the cluster and see if the nodes
-                # configuration has changed or not
-                if connection_error_retry_counter < 5:
-                    time.sleep(0.25)
-                else:
-                    # Hard force of reinitialize of the node/slots setup
-                    # and try again with the new setup
-                    target_node.redis_connection = None
-                    self.nodes_manager.initialize()
-                    raise
+
+                # Remove the failed node from the startup nodes before we try
+                # to reinitialize the cluster
+                self.nodes_manager.startup_nodes.pop(target_node.name, None)
+                # Reset the cluster node's connection
+                target_node.redis_connection = None
+                self.nodes_manager.initialize()
+                raise e
             except MovedError as e:
                 # First, we will try to patch the slots/nodes cache with the
                 # redirected node output and try again. If MovedError exceeds
@@ -1111,7 +1140,6 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                 # the same client object is shared between multiple threads. To
                 # reduce the frequency you can set this variable in the
                 # RedisCluster constructor.
-                log.exception("MovedError")
                 self.reinitialize_counter += 1
                 if self._should_reinitialized():
                     self.nodes_manager.initialize()
@@ -1121,29 +1149,21 @@ class RedisCluster(AbstractRedisCluster, RedisClusterCommands):
                     self.nodes_manager.update_moved_exception(e)
                 moved = True
             except TryAgainError:
-                log.exception("TryAgainError")
-
                 if ttl < self.RedisClusterRequestTTL / 2:
                     time.sleep(0.05)
             except AskError as e:
-                log.exception("AskError")
-
                 redirect_addr = get_node_name(host=e.host, port=e.port)
                 asking = True
             except ClusterDownError as e:
-                log.exception("ClusterDownError")
                 # ClusterDownError can occur during a failover and to get
                 # self-healed, we will try to reinitialize the cluster layout
                 # and retry executing the command
                 time.sleep(0.25)
                 self.nodes_manager.initialize()
                 raise e
-            except ResponseError as e:
-                message = e.__str__()
-                log.exception(f"ResponseError: {message}")
-                raise e
-            except BaseException as e:
-                log.exception("BaseException")
+            except ResponseError:
+                raise
+            except Exception as e:
                 if connection:
                     connection.disconnect()
                 raise e
@@ -1248,6 +1268,7 @@ class NodesManager:
         require_full_coverage=False,
         lock=None,
         dynamic_startup_nodes=True,
+        connection_pool_class=ConnectionPool,
         **kwargs,
     ):
         self.nodes_cache = {}
@@ -1258,6 +1279,7 @@ class NodesManager:
         self.from_url = from_url
         self._require_full_coverage = require_full_coverage
         self._dynamic_startup_nodes = dynamic_startup_nodes
+        self.connection_pool_class = connection_pool_class
         self._moved_exception = None
         self.connection_kwargs = kwargs
         self.read_load_balancer = LoadBalancer()
@@ -1280,11 +1302,6 @@ class NodesManager:
         elif node_name:
             return self.nodes_cache.get(node_name)
         else:
-            log.error(
-                "get_node requires one of the following: "
-                "1. node name "
-                "2. host and port"
-            )
             return None
 
     def update_moved_exception(self, exception):
@@ -1406,7 +1423,7 @@ class NodesManager:
             # Create a redis node with a costumed connection pool
             kwargs.update({"host": host})
             kwargs.update({"port": port})
-            r = Redis(connection_pool=ConnectionPool(**kwargs))
+            r = Redis(connection_pool=self.connection_pool_class(**kwargs))
         else:
             r = Redis(host=host, port=port, **kwargs)
         return r
@@ -1423,6 +1440,8 @@ class NodesManager:
             if target_node is None or target_node.redis_connection is None:
                 # create new cluster node for this cluster
                 target_node = ClusterNode(host, port, role)
+            if target_node.server_type != role:
+                target_node.server_type = role
 
         return target_node
 
@@ -1432,7 +1451,6 @@ class NodesManager:
         :startup_nodes:
             Responsible for discovering other nodes in the cluster
         """
-        log.debug("Initializing the nodes' topology of the cluster")
         self.reset()
         tmp_nodes_cache = {}
         tmp_slots = {}
@@ -1440,17 +1458,15 @@ class NodesManager:
         startup_nodes_reachable = False
         fully_covered = False
         kwargs = self.connection_kwargs
+        exception = None
         for startup_node in self.startup_nodes.values():
             try:
                 if startup_node.redis_connection:
                     r = startup_node.redis_connection
                 else:
-                    # Create a new Redis connection and let Redis decode the
-                    # responses so we won't need to handle that
-                    copy_kwargs = copy.deepcopy(kwargs)
-                    copy_kwargs.update({"decode_responses": True, "encoding": "utf-8"})
+                    # Create a new Redis connection
                     r = self.create_redis_node(
-                        startup_node.host, startup_node.port, **copy_kwargs
+                        startup_node.host, startup_node.port, **kwargs
                     )
                     self.startup_nodes[startup_node.name].redis_connection = r
                 # Make sure cluster mode is enabled on this node
@@ -1460,33 +1476,11 @@ class NodesManager:
                     )
                 cluster_slots = str_if_bytes(r.execute_command("CLUSTER SLOTS"))
                 startup_nodes_reachable = True
-            except (ConnectionError, TimeoutError) as e:
-                msg = e.__str__
-                log.exception(
-                    "An exception occurred while trying to"
-                    " initialize the cluster using the seed node"
-                    f" {startup_node.name}:\n{msg}"
-                )
-                continue
-            except ResponseError as e:
-                log.exception('ReseponseError sending "cluster slots" to redis server')
-
-                # Isn't a cluster connection, so it won't parse these
-                # exceptions automatically
-                message = e.__str__()
-                if "CLUSTERDOWN" in message or "MASTERDOWN" in message:
-                    continue
-                else:
-                    raise RedisClusterException(
-                        'ERROR sending "cluster slots" command to redis '
-                        f"server: {startup_node}. error: {message}"
-                    )
             except Exception as e:
-                message = e.__str__()
-                raise RedisClusterException(
-                    'ERROR sending "cluster slots" command to redis '
-                    f"server {startup_node.name}. error: {message}"
-                )
+                # Try the next startup node.
+                # The exception is saved and raised only if we have no more nodes.
+                exception = e
+                continue
 
             # CLUSTER SLOTS command results in the following output:
             # [[slot_section[from_slot,to_slot,master,replica1,...,replicaN]]]
@@ -1556,9 +1550,9 @@ class NodesManager:
 
         if not startup_nodes_reachable:
             raise RedisClusterException(
-                "Redis Cluster cannot be connected. Please provide at least "
-                "one reachable node. "
-            )
+                f"Redis Cluster cannot be connected. Please provide at least "
+                f"one reachable node: {str(exception)}"
+            ) from exception
 
         # Create Redis connections to all nodes
         self.create_redis_connections(list(tmp_nodes_cache.values()))
@@ -1659,7 +1653,7 @@ class ClusterPubSub(PubSub):
             pubsub_node = node
         elif any([host, port]) is True:
             # only 'host' or 'port' passed
-            raise DataError("Passing a host requires passing a port, " "and vice versa")
+            raise DataError("Passing a host requires passing a port, and vice versa")
         else:
             # nothing passed by the user. set node to None
             pubsub_node = None
@@ -1741,14 +1735,14 @@ class ClusterPipeline(RedisCluster):
 
     def __init__(
         self,
-        nodes_manager,
-        commands_parser,
-        result_callbacks=None,
-        cluster_response_callbacks=None,
-        startup_nodes=None,
-        read_from_replicas=False,
-        cluster_error_retry_attempts=5,
-        reinitialize_steps=10,
+        nodes_manager: "NodesManager",
+        commands_parser: "CommandsParser",
+        result_callbacks: Optional[Dict[str, Callable]] = None,
+        cluster_response_callbacks: Optional[Dict[str, Callable]] = None,
+        startup_nodes: Optional[List["ClusterNode"]] = None,
+        read_from_replicas: bool = False,
+        cluster_error_retry_attempts: int = 3,
+        reinitialize_steps: int = 5,
         lock=None,
         **kwargs,
     ):
@@ -1798,10 +1792,6 @@ class ClusterPipeline(RedisCluster):
         """ """
         return len(self.command_stack)
 
-    def __nonzero__(self):
-        "Pipeline instances should  always evaluate to True on Python 2.7"
-        return True
-
     def __bool__(self):
         "Pipeline instances should  always evaluate to True on Python 3+"
         return True
@@ -1904,22 +1894,22 @@ class ClusterPipeline(RedisCluster):
         """
         if not stack:
             return []
-
-        for _ in range(0, self.cluster_error_retry_attempts):
+        retry_attempts = self.cluster_error_retry_attempts
+        while True:
             try:
                 return self._send_cluster_commands(
                     stack,
                     raise_on_error=raise_on_error,
                     allow_redirections=allow_redirections,
                 )
-            except ClusterDownError:
-                # Try again with the new cluster setup. All other errors
-                # should be raised.
-                pass
-
-        # If it fails the configured number of times then raise
-        # exception back to caller of this method
-        raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
+            except (ClusterDownError, ConnectionError) as e:
+                if retry_attempts > 0:
+                    # Try again with the new cluster setup. All other errors
+                    # should be raised.
+                    retry_attempts -= 1
+                    pass
+                else:
+                    raise e
 
     def _send_cluster_commands(
         self, stack, raise_on_error=True, allow_redirections=True
@@ -1936,7 +1926,7 @@ class ClusterPipeline(RedisCluster):
         # if we have to run through it again, we only retry
         # the commands that failed.
         attempt = sorted(stack, key=lambda x: x.position)
-
+        is_default_node = False
         # build a list of node objects based on node names we need to
         nodes = {}
 
@@ -1944,34 +1934,53 @@ class ClusterPipeline(RedisCluster):
         # we figure out the slot number that command maps to, then from
         # the slot determine the node.
         for c in attempt:
-            # refer to our internal node -> slot table that
-            # tells us where a given
-            # command should route to.
-            passed_targets = c.options.pop("target_nodes", None)
-            if passed_targets and not self._is_nodes_flag(passed_targets):
-                target_nodes = self._parse_target_nodes(passed_targets)
-            else:
-                target_nodes = self._determine_nodes(*c.args, node_flag=passed_targets)
-                if not target_nodes:
+            while True:
+                # refer to our internal node -> slot table that
+                # tells us where a given command should route to.
+                # (it might be possible we have a cached node that no longer
+                # exists in the cluster, which is why we do this in a loop)
+                passed_targets = c.options.pop("target_nodes", None)
+                if passed_targets and not self._is_nodes_flag(passed_targets):
+                    target_nodes = self._parse_target_nodes(passed_targets)
+                else:
+                    target_nodes = self._determine_nodes(
+                        *c.args, node_flag=passed_targets
+                    )
+                    if not target_nodes:
+                        raise RedisClusterException(
+                            f"No targets were found to execute {c.args} command on"
+                        )
+                if len(target_nodes) > 1:
                     raise RedisClusterException(
-                        f"No targets were found to execute {c.args} command on"
+                        f"Too many targets for command {c.args}"
                     )
-            if len(target_nodes) > 1:
-                raise RedisClusterException(f"Too many targets for command {c.args}")
-
-            node = target_nodes[0]
-            # now that we know the name of the node
-            # ( it's just a string in the form of host:port )
-            # we can build a list of commands for each node.
-            node_name = node.name
-            if node_name not in nodes:
-                redis_node = self.get_redis_connection(node)
-                connection = get_connection(redis_node, c.args)
-                nodes[node_name] = NodeCommands(
-                    redis_node.parse_response, redis_node.connection_pool, connection
-                )
 
-            nodes[node_name].append(c)
+                node = target_nodes[0]
+                if node == self.get_default_node():
+                    is_default_node = True
+
+                # now that we know the name of the node
+                # ( it's just a string in the form of host:port )
+                # we can build a list of commands for each node.
+                node_name = node.name
+                if node_name not in nodes:
+                    redis_node = self.get_redis_connection(node)
+                    try:
+                        connection = get_connection(redis_node, c.args)
+                    except ConnectionError:
+                        # Connection retries are being handled in the node's
+                        # Retry object. Reinitialize the node -> slot table.
+                        self.nodes_manager.initialize()
+                        if is_default_node:
+                            self.replace_default_node()
+                        raise
+                    nodes[node_name] = NodeCommands(
+                        redis_node.parse_response,
+                        redis_node.connection_pool,
+                        connection,
+                    )
+                nodes[node_name].append(c)
+                break
 
         # send the commands in sequence.
         # we  write to all the open sockets for each node first,
@@ -2025,7 +2034,7 @@ class ClusterPipeline(RedisCluster):
         )
         if attempt and allow_redirections:
             # RETRY MAGIC HAPPENS HERE!
-            # send these remaing comamnds one at a time using `execute_command`
+            # send these remaining commands one at a time using `execute_command`
             # in the main client. This keeps our retry logic
             # in one place mostly,
             # and allows us to be more confident in correctness of behavior.
@@ -2042,15 +2051,11 @@ class ClusterPipeline(RedisCluster):
             # If a lot of commands have failed, we'll be setting the
             # flag to rebuild the slots table from scratch.
             # So MOVED errors should correct themselves fairly quickly.
-            log.exception(
-                f"An exception occurred during pipeline execution. "
-                f"args: {attempt[-1].args}, "
-                f"error: {type(attempt[-1].result).__name__} "
-                f"{str(attempt[-1].result)}"
-            )
             self.reinitialize_counter += 1
             if self._should_reinitialized():
                 self.nodes_manager.initialize()
+                if is_default_node:
+                    self.replace_default_node()
             for c in attempt:
                 try:
                     # send each command individually like we
@@ -2126,11 +2131,22 @@ class ClusterPipeline(RedisCluster):
         """
         if len(names) != 1:
             raise RedisClusterException(
-                "deleting multiple keys is not " "implemented in pipeline command"
+                "deleting multiple keys is not implemented in pipeline command"
             )
 
         return self.execute_command("DEL", names[0])
 
+    def unlink(self, *names):
+        """
+        "Unlink a key specified by ``names``"
+        """
+        if len(names) != 1:
+            raise RedisClusterException(
+                "unlinking multiple keys is not implemented in pipeline command"
+            )
+
+        return self.execute_command("UNLINK", names[0])
+
 
 def block_pipeline_command(name: str) -> Callable[..., Any]:
     """
diff --git a/redis/commands/bf/__init__.py b/redis/commands/bf/__init__.py
index d62d8a0..4da060e 100644
--- a/redis/commands/bf/__init__.py
+++ b/redis/commands/bf/__init__.py
@@ -165,11 +165,16 @@ class TDigestBloom(TDigestCommands, AbstractBloom):
             # TDIGEST_RESET: bool_ok,
             # TDIGEST_ADD: spaceHolder,
             # TDIGEST_MERGE: spaceHolder,
-            TDIGEST_CDF: float,
-            TDIGEST_QUANTILE: float,
+            TDIGEST_CDF: parse_to_list,
+            TDIGEST_QUANTILE: parse_to_list,
             TDIGEST_MIN: float,
             TDIGEST_MAX: float,
+            TDIGEST_TRIMMED_MEAN: float,
             TDIGEST_INFO: TDigestInfo,
+            TDIGEST_RANK: parse_to_list,
+            TDIGEST_REVRANK: parse_to_list,
+            TDIGEST_BYRANK: parse_to_list,
+            TDIGEST_BYREVRANK: parse_to_list,
         }
 
         self.client = client
@@ -193,6 +198,7 @@ class BFBloom(BFCommands, AbstractBloom):
             # BF_MEXISTS: spaceHolder,
             # BF_SCANDUMP: spaceHolder,
             # BF_LOADCHUNK: spaceHolder,
+            # BF_CARD: spaceHolder,
             BF_INFO: BFInfo,
         }
 
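
One practical consequence of the callback changes above: TDIGEST.QUANTILE and TDIGEST.CDF replies are now run through `parse_to_list`, so they return a list of floats (one per requested cutoff) instead of a single float. A hedged sketch, assuming a server with RedisBloom 2.4+ loaded:

```python
import redis

r = redis.Redis()                 # assumes RedisBloom >= 2.4 on the server
td = r.tdigest()

td.create("latencies")            # COMPRESSION now defaults to 100
td.add("latencies", [10.0, 12.5, 99.0])

# Lists are returned even for a single cutoff.
print(td.quantile("latencies", 0.5))         # e.g. [12.5]
print(td.quantile("latencies", 0.5, 0.99))   # e.g. [12.5, 99.0]
print(td.cdf("latencies", 50.0))             # e.g. [0.666...]
```
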
diff --git a/redis/commands/bf/commands.py b/redis/commands/bf/commands.py
index baf0130..c45523c 100644
--- a/redis/commands/bf/commands.py
+++ b/redis/commands/bf/commands.py
@@ -1,6 +1,6 @@
 from redis.client import NEVER_DECODE
 from redis.exceptions import ModuleError
-from redis.utils import HIREDIS_AVAILABLE
+from redis.utils import HIREDIS_AVAILABLE, deprecated_function
 
 BF_RESERVE = "BF.RESERVE"
 BF_ADD = "BF.ADD"
@@ -11,6 +11,7 @@ BF_MEXISTS = "BF.MEXISTS"
 BF_SCANDUMP = "BF.SCANDUMP"
 BF_LOADCHUNK = "BF.LOADCHUNK"
 BF_INFO = "BF.INFO"
+BF_CARD = "BF.CARD"
 
 CF_RESERVE = "CF.RESERVE"
 CF_ADD = "CF.ADD"
@@ -49,6 +50,11 @@ TDIGEST_QUANTILE = "TDIGEST.QUANTILE"
 TDIGEST_MIN = "TDIGEST.MIN"
 TDIGEST_MAX = "TDIGEST.MAX"
 TDIGEST_INFO = "TDIGEST.INFO"
+TDIGEST_TRIMMED_MEAN = "TDIGEST.TRIMMED_MEAN"
+TDIGEST_RANK = "TDIGEST.RANK"
+TDIGEST_REVRANK = "TDIGEST.REVRANK"
+TDIGEST_BYRANK = "TDIGEST.BYRANK"
+TDIGEST_BYREVRANK = "TDIGEST.BYREVRANK"
 
 
 class BFCommands:
@@ -67,6 +73,8 @@ class BFCommands:
         self.append_no_scale(params, noScale)
         return self.execute_command(BF_RESERVE, *params)
 
+    reserve = create
+
     def add(self, key, item):
         """
         Add to a Bloom Filter `key` an `item`.
@@ -158,6 +166,14 @@ class BFCommands:
         """  # noqa
         return self.execute_command(BF_INFO, key)
 
+    def card(self, key):
+        """
+        Returns the cardinality of a Bloom filter - number of items that were added to a Bloom filter and detected as unique
+        (items that caused at least one bit to be set in at least one sub-filter).
+        For more information see `BF.CARD <https://redis.io/commands/bf.card>`_.
+        """  # noqa
+        return self.execute_command(BF_CARD, key)
+
 
 class CFCommands:
     """Cuckoo Filter commands."""
@@ -176,6 +192,8 @@ class CFCommands:
         self.append_max_iterations(params, max_iterations)
         return self.execute_command(CF_RESERVE, *params)
 
+    reserve = create
+
     def add(self, key, item):
         """
         Add an `item` to a Cuckoo Filter `key`.
@@ -316,6 +334,7 @@ class TOPKCommands:
         """  # noqa
         return self.execute_command(TOPK_QUERY, key, *items)
 
+    @deprecated_function(version="4.4.0", reason="deprecated since redisbloom 2.4.0")
     def count(self, key, *items):
         """
         Return count for one `item` or more from `key`.
@@ -344,12 +363,12 @@ class TOPKCommands:
 
 
 class TDigestCommands:
-    def create(self, key, compression):
+    def create(self, key, compression=100):
         """
         Allocate the memory and initialize the t-digest.
         For more information see `TDIGEST.CREATE <https://redis.io/commands/tdigest.create>`_.
         """  # noqa
-        return self.execute_command(TDIGEST_CREATE, key, compression)
+        return self.execute_command(TDIGEST_CREATE, key, "COMPRESSION", compression)
 
     def reset(self, key):
         """
@@ -358,26 +377,30 @@ class TDigestCommands:
         """  # noqa
         return self.execute_command(TDIGEST_RESET, key)
 
-    def add(self, key, values, weights):
+    def add(self, key, values):
         """
-        Add one or more samples (value with weight) to a sketch `key`.
-        Both `values` and `weights` are lists.
-        For more information see `TDIGEST.ADD <https://redis.io/commands/tdigest.add>`_.
-
-        Example:
+        Adds one or more observations to a t-digest sketch `key`.
 
-        >>> tdigestadd('A', [1500.0], [1.0])
+        For more information see `TDIGEST.ADD <https://redis.io/commands/tdigest.add>`_.
         """  # noqa
-        params = [key]
-        self.append_values_and_weights(params, values, weights)
-        return self.execute_command(TDIGEST_ADD, *params)
+        return self.execute_command(TDIGEST_ADD, key, *values)
 
-    def merge(self, toKey, fromKey):
+    def merge(self, destination_key, num_keys, *keys, compression=None, override=False):
         """
-        Merge all of the values from 'fromKey' to 'toKey' sketch.
+        Merges all of the values from `keys` to 'destination-key' sketch.
+        It is mandatory to provide the `num_keys` before passing the input keys and
+        the other (optional) arguments.
+        If `destination_key` already exists its values are merged with the input keys.
+        If you wish to override the destination key contents use the `OVERRIDE` parameter.
+
         For more information see `TDIGEST.MERGE <https://redis.io/commands/tdigest.merge>`_.
         """  # noqa
-        return self.execute_command(TDIGEST_MERGE, toKey, fromKey)
+        params = [destination_key, num_keys, *keys]
+        if compression is not None:
+            params.extend(["COMPRESSION", compression])
+        if override:
+            params.append("OVERRIDE")
+        return self.execute_command(TDIGEST_MERGE, *params)
 
     def min(self, key):
         """
@@ -393,20 +416,21 @@ class TDigestCommands:
         """  # noqa
         return self.execute_command(TDIGEST_MAX, key)
 
-    def quantile(self, key, quantile):
+    def quantile(self, key, quantile, *quantiles):
         """
-        Return double value estimate of the cutoff such that a specified fraction of the data
-        added to this TDigest would be less than or equal to the cutoff.
+        Returns estimates of one or more cutoffs such that a specified fraction of the
+        observations added to this t-digest would be less than or equal to each of the
+        specified cutoffs. (Multiple quantiles can be returned with one call)
         For more information see `TDIGEST.QUANTILE <https://redis.io/commands/tdigest.quantile>`_.
         """  # noqa
-        return self.execute_command(TDIGEST_QUANTILE, key, quantile)
+        return self.execute_command(TDIGEST_QUANTILE, key, quantile, *quantiles)
 
-    def cdf(self, key, value):
+    def cdf(self, key, value, *values):
         """
         Return double fraction of all points added which are <= value.
         For more information see `TDIGEST.CDF <https://redis.io/commands/tdigest.cdf>`_.
         """  # noqa
-        return self.execute_command(TDIGEST_CDF, key, value)
+        return self.execute_command(TDIGEST_CDF, key, value, *values)
 
     def info(self, key):
         """
@@ -416,6 +440,50 @@ class TDigestCommands:
         """  # noqa
         return self.execute_command(TDIGEST_INFO, key)
 
+    def trimmed_mean(self, key, low_cut_quantile, high_cut_quantile):
+        """
+        Return mean value from the sketch, excluding observation values outside
+        the low and high cutoff quantiles.
+        For more information see `TDIGEST.TRIMMED_MEAN <https://redis.io/commands/tdigest.trimmed_mean>`_.
+        """  # noqa
+        return self.execute_command(
+            TDIGEST_TRIMMED_MEAN, key, low_cut_quantile, high_cut_quantile
+        )
+
+    def rank(self, key, value, *values):
+        """
+        Retrieve the estimated rank of value (the number of observations in the sketch
+        that are smaller than value + half the number of observations that are equal to value).
+
+        For more information see `TDIGEST.RANK <https://redis.io/commands/tdigest.rank>`_.
+        """  # noqa
+        return self.execute_command(TDIGEST_RANK, key, value, *values)
+
+    def revrank(self, key, value, *values):
+        """
+        Retrieve the estimated rank of value (the number of observations in the sketch
+        that are larger than value + half the number of observations that are equal to value).
+
+        For more information see `TDIGEST.REVRANK <https://redis.io/commands/tdigest.revrank>`_.
+        """  # noqa
+        return self.execute_command(TDIGEST_REVRANK, key, value, *values)
+
+    def byrank(self, key, rank, *ranks):
+        """
+        Retrieve an estimation of the value with the given rank.
+
+        For more information see `TDIGEST.BY_RANK <https://redis.io/commands/tdigest.by_rank>`_.
+        """  # noqa
+        return self.execute_command(TDIGEST_BYRANK, key, rank, *ranks)
+
+    def byrevrank(self, key, rank, *ranks):
+        """
+        Retrieve an estimation of the value with the given reverse rank.
+
+        For more information see `TDIGEST.BY_REVRANK <https://redis.io/commands/tdigest.by_revrank>`_.
+        """  # noqa
+        return self.execute_command(TDIGEST_BYREVRANK, key, rank, *ranks)
+
 
 class CMSCommands:
     """Count-Min Sketch Commands"""
diff --git a/redis/commands/bf/info.py b/redis/commands/bf/info.py
index 24c5419..c526e6c 100644
--- a/redis/commands/bf/info.py
+++ b/redis/commands/bf/info.py
@@ -68,18 +68,20 @@ class TopKInfo(object):
 class TDigestInfo(object):
     compression = None
     capacity = None
-    mergedNodes = None
-    unmergedNodes = None
-    mergedWeight = None
-    unmergedWeight = None
-    totalCompressions = None
+    merged_nodes = None
+    unmerged_nodes = None
+    merged_weight = None
+    unmerged_weight = None
+    total_compressions = None
+    memory_usage = None
 
     def __init__(self, args):
         response = dict(zip(map(nativestr, args[::2]), args[1::2]))
         self.compression = response["Compression"]
         self.capacity = response["Capacity"]
-        self.mergedNodes = response["Merged nodes"]
-        self.unmergedNodes = response["Unmerged nodes"]
-        self.mergedWeight = response["Merged weight"]
-        self.unmergedWeight = response["Unmerged weight"]
-        self.totalCompressions = response["Total compressions"]
+        self.merged_nodes = response["Merged nodes"]
+        self.unmerged_nodes = response["Unmerged nodes"]
+        self.merged_weight = response["Merged weight"]
+        self.unmerged_weight = response["Unmerged weight"]
+        self.total_compressions = response["Total compressions"]
+        self.memory_usage = response["Memory usage"]
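
Callers that read TDIGEST.INFO results by attribute need the renamed snake_case fields; `memory_usage` is new. A small sketch under the same RedisBloom 2.4+ assumption:

```python
import redis

r = redis.Redis()                 # assumes RedisBloom >= 2.4 on the server
td = r.tdigest()
td.create("t")
td.add("t", [1.0, 2.0, 3.0])

info = td.info("t")
# snake_case replaces the old camelCase names (mergedNodes, unmergedWeight, ...).
print(info.compression, info.capacity)
print(info.merged_nodes, info.unmerged_nodes)
print(info.memory_usage)          # new field in this release
```
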
diff --git a/redis/commands/cluster.py b/redis/commands/cluster.py
index a1060d2..a23a94a 100644
--- a/redis/commands/cluster.py
+++ b/redis/commands/cluster.py
@@ -52,6 +52,8 @@ READ_COMMANDS = frozenset(
     [
         "BITCOUNT",
         "BITPOS",
+        "EVAL_RO",
+        "EVALSHA_RO",
         "EXISTS",
         "GEODIST",
         "GEOHASH",
@@ -316,6 +318,25 @@ class AsyncClusterMultiKeyCommands(ClusterMultiKeyCommands):
         # Sum up the reply from each command
         return sum(await self._execute_pipeline_by_slot(command, slots_to_keys))
 
+    async def _execute_pipeline_by_slot(
+        self, command: str, slots_to_args: Mapping[int, Iterable[EncodableT]]
+    ) -> List[Any]:
+        if self._initialize:
+            await self.initialize()
+        read_from_replicas = self.read_from_replicas and command in READ_COMMANDS
+        pipe = self.pipeline()
+        [
+            pipe.execute_command(
+                command,
+                *slot_args,
+                target_nodes=[
+                    self.nodes_manager.get_node_from_slot(slot, read_from_replicas)
+                ],
+            )
+            for slot, slot_args in slots_to_args.items()
+        ]
+        return await pipe.execute()
+
 
 class ClusterManagementCommands(ManagementCommands):
     """
@@ -625,6 +646,16 @@ class ClusterManagementCommands(ManagementCommands):
         """
         return self.execute_command("CLUSTER LINKS", target_nodes=target_node)
 
+    def cluster_flushslots(self, target_nodes: Optional["TargetNodesT"] = None) -> None:
+        raise NotImplementedError(
+            "CLUSTER FLUSHSLOTS is intentionally not implemented in the client."
+        )
+
+    def cluster_bumpepoch(self, target_nodes: Optional["TargetNodesT"] = None) -> None:
+        raise NotImplementedError(
+            "CLUSTER BUMPEPOCH is intentionally not implemented in the client."
+        )
+
     def readonly(self, target_nodes: Optional["TargetNodesT"] = None) -> ResponseT:
         """
         Enables read queries.
@@ -673,7 +704,7 @@ class AsyncClusterManagementCommands(
         """
         return await asyncio.gather(
             *(
-                asyncio.ensure_future(self.execute_command("CLUSTER DELSLOTS", slot))
+                asyncio.create_task(self.execute_command("CLUSTER DELSLOTS", slot))
                 for slot in slots
             )
         )
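
Two behavioural notes on the cluster command changes above, shown as a hedged sketch (placeholder endpoint, running cluster assumed): EVAL_RO/EVALSHA_RO are now treated as read commands and may be load-balanced to replicas, while CLUSTER FLUSHSLOTS and CLUSTER BUMPEPOCH deliberately raise NotImplementedError.

```python
from redis.cluster import RedisCluster

rc = RedisCluster(host="localhost", port=7000, read_from_replicas=True)

# Read-only scripts can now be routed to replicas like any other READ command.
print(rc.eval_ro("return redis.call('GET', KEYS[1])", 1, "some-key"))

# Destructive slot-management commands are intentionally unavailable here.
try:
    rc.cluster_flushslots()
except NotImplementedError as exc:
    print(exc)
```
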
diff --git a/redis/commands/core.py b/redis/commands/core.py
index 6d67415..b07f12d 100644
--- a/redis/commands/core.py
+++ b/redis/commands/core.py
@@ -2,7 +2,6 @@
 
 import datetime
 import hashlib
-import time
 import warnings
 from typing import (
     TYPE_CHECKING,
@@ -17,6 +16,7 @@ from typing import (
     Mapping,
     Optional,
     Sequence,
+    Set,
     Tuple,
     Union,
 )
@@ -101,7 +101,7 @@ class ACLCommands(CommandsProtocol):
                     raise ValueError
             except ValueError:
                 raise DataError(
-                    "genpass optionally accepts a bits argument, " "between 0 and 4096."
+                    "genpass optionally accepts a bits argument, between 0 and 4096."
                 )
         return self.execute_command("ACL GENPASS", *pieces, **kwargs)
 
@@ -142,7 +142,7 @@ class ACLCommands(CommandsProtocol):
         args = []
         if count is not None:
             if not isinstance(count, int):
-                raise DataError("ACL LOG count must be an " "integer")
+                raise DataError("ACL LOG count must be an integer")
             args.append(count)
 
         return self.execute_command("ACL LOG", *args, **kwargs)
@@ -193,6 +193,7 @@ class ACLCommands(CommandsProtocol):
         selectors: Optional[Iterable[Tuple[str, KeyT]]] = None,
         reset: bool = False,
         reset_keys: bool = False,
+        reset_channels: bool = False,
         reset_passwords: bool = False,
         **kwargs,
     ) -> ResponseT:
@@ -248,6 +249,12 @@ class ACLCommands(CommandsProtocol):
         key permissions will be kept and any new specified key permissions
         will be applied on top.
 
+        ``reset_channels`` is a boolean indicating whether the user's channel
+        permissions should be reset prior to applying any new channel permissions
+        specified in ``channels``. If this is False, the user's existing
+        channel permissions will be kept and any new specified channel permissions
+        will be applied on top.
+
         ``reset_passwords`` is a boolean indicating whether to remove all
         existing passwords and the 'nopass' flag from the user prior to
         applying any new passwords specified in 'passwords' or
@@ -266,6 +273,9 @@ class ACLCommands(CommandsProtocol):
         if reset_keys:
             pieces.append(b"resetkeys")
 
+        if reset_channels:
+            pieces.append(b"resetchannels")
+
         if reset_passwords:
             pieces.append(b"resetpass")
 
@@ -276,7 +286,7 @@ class ACLCommands(CommandsProtocol):
 
         if (passwords or hashed_passwords) and nopass:
             raise DataError(
-                "Cannot set 'nopass' and supply " "'passwords' or 'hashed_passwords'"
+                "Cannot set 'nopass' and supply 'passwords' or 'hashed_passwords'"
             )
 
         if passwords:
@@ -980,6 +990,34 @@ class ManagementCommands(CommandsProtocol):
         """
         return self.execute_command("LASTSAVE", **kwargs)
 
+    def latency_doctor(self):
+        """Raise a NotImplementedError, as the client will not support LATENCY DOCTOR.
+        This function is best used within the redis-cli.
+
+        For more information see https://redis.io/commands/latency-doctor
+        """
+        raise NotImplementedError(
+            """
+            LATENCY DOCTOR is intentionally not implemented in the client.
+
+            For more information see https://redis.io/commands/latency-doctor
+            """
+        )
+
+    def latency_graph(self):
+        """Raise a NotImplementedError, as the client will not support LATENCY GRAPH.
+        This function is best used within the redis-cli.
+
+        For more information see https://redis.io/commands/latency-graph.
+        """
+        raise NotImplementedError(
+            """
+            LATENCY GRAPH is intentionally not implemented in the client.
+
+            For more information see https://redis.io/commands/latency-graph
+            """
+        )
+
     def lolwut(self, *version_numbers: Union[str, float], **kwargs) -> ResponseT:
         """
         Get the Redis version and a piece of generative computer art
@@ -1123,6 +1161,30 @@ class ManagementCommands(CommandsProtocol):
             "LATENCY HISTOGRAM is intentionally not implemented in the client."
         )
 
+    def latency_history(self, event: str) -> ResponseT:
+        """
+        Returns the raw data of the ``event``'s latency spikes time series.
+
+        For more information see https://redis.io/commands/latency-history
+        """
+        return self.execute_command("LATENCY HISTORY", event)
+
+    def latency_latest(self) -> ResponseT:
+        """
+        Reports the latest latency events logged.
+
+        For more information see https://redis.io/commands/latency-latest
+        """
+        return self.execute_command("LATENCY LATEST")
+
+    def latency_reset(self, *events: str) -> ResponseT:
+        """
+        Resets the latency spikes time series of all, or only some, events.
+
+        For more information see https://redis.io/commands/latency-reset
+        """
+        return self.execute_command("LATENCY RESET", *events)
+
     def ping(self, **kwargs) -> ResponseT:
         """
         Ping the Redis server
@@ -1504,6 +1566,29 @@ class BasicKeyCommands(CommandsProtocol):
         """
         return BitFieldOperation(self, key, default_overflow=default_overflow)
 
+    def bitfield_ro(
+        self: Union["Redis", "AsyncRedis"],
+        key: KeyT,
+        encoding: str,
+        offset: BitfieldOffsetT,
+        items: Optional[list] = None,
+    ) -> ResponseT:
+        """
+        Return an array of the specified bitfield values
+        where the first value is found using ``encoding`` and ``offset``
+        parameters and remaining values are result of corresponding
+        encoding/offset pairs in optional list ``items``
+        Read-only variant of the BITFIELD command.
+
+        For more information see https://redis.io/commands/bitfield_ro
+        """
+        params = [key, "GET", encoding, offset]
+
+        items = items or []
+        for encoding, offset in items:
+            params.extend(["GET", encoding, offset])
+        return self.execute_command("BITFIELD_RO", *params)
+
     def bitop(self, operation: str, dest: KeyT, *keys: KeyT) -> ResponseT:
         """
         Perform a bitwise operation using ``operation`` between ``keys`` and
@@ -1538,7 +1623,7 @@ class BasicKeyCommands(CommandsProtocol):
         if start is not None and end is not None:
             params.append(end)
         elif start is None and end is not None:
-            raise DataError("start argument is not set, " "when end is specified")
+            raise DataError("start argument is not set, when end is specified")
 
         if mode is not None:
             params.append(mode)
@@ -1673,7 +1758,7 @@ class BasicKeyCommands(CommandsProtocol):
         For more information see https://redis.io/commands/expireat
         """
         if isinstance(when, datetime.datetime):
-            when = int(time.mktime(when.timetuple()))
+            when = int(when.timestamp())
 
         exp_option = list()
         if nx:
@@ -1768,14 +1853,12 @@ class BasicKeyCommands(CommandsProtocol):
         if exat is not None:
             pieces.append("EXAT")
             if isinstance(exat, datetime.datetime):
-                s = int(exat.microsecond / 1000000)
-                exat = int(time.mktime(exat.timetuple())) + s
+                exat = int(exat.timestamp())
             pieces.append(exat)
         if pxat is not None:
             pieces.append("PXAT")
             if isinstance(pxat, datetime.datetime):
-                ms = int(pxat.microsecond / 1000)
-                pxat = int(time.mktime(pxat.timetuple())) * 1000 + ms
+                pxat = int(pxat.timestamp() * 1000)
             pieces.append(pxat)
         if persist:
             pieces.append("PERSIST")
@@ -1994,8 +2077,7 @@ class BasicKeyCommands(CommandsProtocol):
         For more information see https://redis.io/commands/pexpireat
         """
         if isinstance(when, datetime.datetime):
-            ms = int(when.microsecond / 1000)
-            when = int(time.mktime(when.timetuple())) * 1000 + ms
+            when = int(when.timestamp() * 1000)
         exp_option = list()
         if nx:
             exp_option.append("NX")
@@ -2183,6 +2265,8 @@ class BasicKeyCommands(CommandsProtocol):
                 pieces.append(int(ex.total_seconds()))
             elif isinstance(ex, int):
                 pieces.append(ex)
+            elif isinstance(ex, str) and ex.isdigit():
+                pieces.append(int(ex))
             else:
                 raise DataError("ex must be datetime.timedelta or int")
         if px is not None:
@@ -2196,14 +2280,12 @@ class BasicKeyCommands(CommandsProtocol):
         if exat is not None:
             pieces.append("EXAT")
             if isinstance(exat, datetime.datetime):
-                s = int(exat.microsecond / 1000000)
-                exat = int(time.mktime(exat.timetuple())) + s
+                exat = int(exat.timestamp())
             pieces.append(exat)
         if pxat is not None:
             pieces.append("PXAT")
             if isinstance(pxat, datetime.datetime):
-                ms = int(pxat.microsecond / 1000)
-                pxat = int(time.mktime(pxat.timetuple())) * 1000 + ms
+                pxat = int(pxat.timestamp() * 1000)
             pieces.append(pxat)
         if keepttl:
             pieces.append("KEEPTTL")
@@ -2535,7 +2617,7 @@ class ListCommands(CommandsProtocol):
         self,
         num_keys: int,
         *args: List[str],
-        direction: str = None,
+        direction: str,
         count: Optional[int] = 1,
     ) -> Union[Awaitable[list], list]:
         """
@@ -3257,7 +3339,7 @@ class SetCommands(CommandsProtocol):
         """
         return self.execute_command("SISMEMBER", name, value)
 
-    def smembers(self, name: str) -> Union[Awaitable[list], list]:
+    def smembers(self, name: str) -> Union[Awaitable[Set], Set]:
         """
         Return all members of the set ``name``
 
@@ -3387,9 +3469,7 @@ class StreamCommands(CommandsProtocol):
         """
         pieces: list[EncodableT] = []
         if maxlen is not None and minid is not None:
-            raise DataError(
-                "Only one of ```maxlen``` or ```minid``` " "may be specified"
-            )
+            raise DataError("Only one of ```maxlen``` or ```minid``` may be specified")
 
         if maxlen is not None:
             if not isinstance(maxlen, int) or maxlen < 1:
@@ -3420,7 +3500,7 @@ class StreamCommands(CommandsProtocol):
         groupname: GroupT,
         consumername: ConsumerT,
         min_idle_time: int,
-        start_id: int = 0,
+        start_id: StreamIdT = "0-0",
         count: Union[int, None] = None,
         justid: bool = False,
     ) -> ResponseT:
@@ -3445,7 +3525,7 @@ class StreamCommands(CommandsProtocol):
         try:
             if int(min_idle_time) < 0:
                 raise DataError(
-                    "XAUTOCLAIM min_idle_time must be a non" "negative integer"
+                    "XAUTOCLAIM min_idle_time must be a nonnegative integer"
                 )
         except TypeError:
             pass
@@ -3471,7 +3551,7 @@ class StreamCommands(CommandsProtocol):
         groupname: GroupT,
         consumername: ConsumerT,
         min_idle_time: int,
-        message_ids: [List[StreamIdT], Tuple[StreamIdT]],
+        message_ids: Union[List[StreamIdT], Tuple[StreamIdT]],
         idle: Union[int, None] = None,
         time: Union[int, None] = None,
         retrycount: Union[int, None] = None,
@@ -3503,7 +3583,7 @@ class StreamCommands(CommandsProtocol):
          For more information see https://redis.io/commands/xclaim
         """
         if not isinstance(min_idle_time, int) or min_idle_time < 0:
-            raise DataError("XCLAIM min_idle_time must be a non negative " "integer")
+            raise DataError("XCLAIM min_idle_time must be a non negative integer")
         if not isinstance(message_ids, (list, tuple)) or not message_ids:
             raise DataError(
                 "XCLAIM message_ids must be a non empty list or "
@@ -3836,7 +3916,7 @@ class StreamCommands(CommandsProtocol):
             pieces.append(str(count))
         if block is not None:
             if not isinstance(block, int) or block < 0:
-                raise DataError("XREADGROUP block must be a non-negative " "integer")
+                raise DataError("XREADGROUP block must be a non-negative integer")
             pieces.append(b"BLOCK")
             pieces.append(str(block))
         if noack:
@@ -3898,7 +3978,7 @@ class StreamCommands(CommandsProtocol):
         """
         pieces: list[EncodableT] = []
         if maxlen is not None and minid is not None:
-            raise DataError("Only one of ``maxlen`` or ``minid`` " "may be specified")
+            raise DataError("Only one of ``maxlen`` or ``minid`` may be specified")
 
         if maxlen is None and minid is None:
             raise DataError("One of ``maxlen`` or ``minid`` must be specified")
@@ -4272,14 +4352,12 @@ class SortedSetCommands(CommandsProtocol):
         num: Union[int, None] = None,
     ) -> ResponseT:
         if byscore and bylex:
-            raise DataError(
-                "``byscore`` and ``bylex`` can not be " "specified together."
-            )
+            raise DataError("``byscore`` and ``bylex`` can not be specified together.")
         if (offset is not None and num is None) or (num is not None and offset is None):
             raise DataError("``offset`` and ``num`` must both be specified.")
         if bylex and withscores:
             raise DataError(
-                "``withscores`` not supported in combination " "with ``bylex``."
+                "``withscores`` not supported in combination with ``bylex``."
             )
         pieces = [command]
         if dest:
@@ -4912,7 +4990,11 @@ class Script:
         if isinstance(script, str):
             # We need the encoding from the client in order to generate an
             # accurate byte representation of the script
-            encoder = registered_client.connection_pool.get_encoder()
+            try:
+                encoder = registered_client.connection_pool.get_encoder()
+            except AttributeError:
+                # Cluster
+                encoder = registered_client.get_encoder()
             script = encoder.encode(script)
         self.sha = hashlib.sha1(script).hexdigest()
 
@@ -4957,7 +5039,11 @@ class AsyncScript:
         if isinstance(script, str):
             # We need the encoding from the client in order to generate an
             # accurate byte representation of the script
-            encoder = registered_client.connection_pool.get_encoder()
+            try:
+                encoder = registered_client.connection_pool.get_encoder()
+            except AttributeError:
+                # Cluster
+                encoder = registered_client.get_encoder()
             script = encoder.encode(script)
         self.sha = hashlib.sha1(script).hexdigest()
 
@@ -5065,7 +5151,7 @@ class ScriptCommands(CommandsProtocol):
         """
         The read-only variant of the EVAL command
 
-        Execute the read-only Lue ``script`` specifying the ``numkeys`` the script
+        Execute the read-only Lua ``script`` specifying the ``numkeys`` the script
         will touch and the key names and argument values in ``keys_and_args``.
         Returns the result of the script.
 
@@ -5223,7 +5309,7 @@ class GeoCommands(CommandsProtocol):
         if nx and xx:
             raise DataError("GEOADD allows either 'nx' or 'xx', not both")
         if len(values) % 3 != 0:
-            raise DataError("GEOADD requires places with lon, lat and name" " values")
+            raise DataError("GEOADD requires places with lon, lat and name values")
         pieces = [name]
         if nx:
             pieces.append("NX")
@@ -5409,7 +5495,7 @@ class GeoCommands(CommandsProtocol):
                 raise DataError("GEORADIUS invalid sort")
 
         if kwargs["store"] and kwargs["store_dist"]:
-            raise DataError("GEORADIUS store and store_dist cant be set" " together")
+            raise DataError("GEORADIUS store and store_dist cant be set together")
 
         if kwargs["store"]:
             pieces.extend([b"STORE", kwargs["store"]])
@@ -5459,7 +5545,7 @@ class GeoCommands(CommandsProtocol):
         `m` for meters (the default value), `km` for kilometers,
         `mi` for miles and `ft` for feet.
         ``sort`` indicates to return the places in a sorted way,
-        ASC for nearest to farest and DESC for farest to nearest.
+        ASC for nearest to furthest and DESC for furthest to nearest.
         ``count`` limit the results to the first count matching items.
         ``any`` is set to True, the command will return as soon as
         enough matches are found. Can't be provided without ``count``
@@ -5546,22 +5632,20 @@ class GeoCommands(CommandsProtocol):
         # FROMMEMBER or FROMLONLAT
         if kwargs["member"] is None:
             if kwargs["longitude"] is None or kwargs["latitude"] is None:
-                raise DataError(
-                    "GEOSEARCH must have member or" " longitude and latitude"
-                )
+                raise DataError("GEOSEARCH must have member or longitude and latitude")
         if kwargs["member"]:
             if kwargs["longitude"] or kwargs["latitude"]:
                 raise DataError(
-                    "GEOSEARCH member and longitude or latitude" " cant be set together"
+                    "GEOSEARCH member and longitude or latitude cant be set together"
                 )
             pieces.extend([b"FROMMEMBER", kwargs["member"]])
-        if kwargs["longitude"] and kwargs["latitude"]:
+        if kwargs["longitude"] is not None and kwargs["latitude"] is not None:
             pieces.extend([b"FROMLONLAT", kwargs["longitude"], kwargs["latitude"]])
 
         # BYRADIUS or BYBOX
         if kwargs["radius"] is None:
             if kwargs["width"] is None or kwargs["height"] is None:
-                raise DataError("GEOSEARCH must have radius or" " width and height")
+                raise DataError("GEOSEARCH must have radius or width and height")
         if kwargs["unit"] is None:
             raise DataError("GEOSEARCH must have unit")
         if kwargs["unit"].lower() not in ("m", "km", "mi", "ft"):
@@ -5569,7 +5653,7 @@ class GeoCommands(CommandsProtocol):
         if kwargs["radius"]:
             if kwargs["width"] or kwargs["height"]:
                 raise DataError(
-                    "GEOSEARCH radius and width or height" " cant be set together"
+                    "GEOSEARCH radius and width or height cant be set together"
                 )
             pieces.extend([b"BYRADIUS", kwargs["radius"], kwargs["unit"]])
         if kwargs["width"] and kwargs["height"]:
@@ -5590,7 +5674,7 @@ class GeoCommands(CommandsProtocol):
             if kwargs["any"]:
                 pieces.append(b"ANY")
         elif kwargs["any"]:
-            raise DataError("GEOSEARCH ``any`` can't be provided " "without count")
+            raise DataError("GEOSEARCH ``any`` can't be provided without count")
 
         # other properties
         for arg_name, byte_repr in (
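
Several of the core-command changes above are easiest to demonstrate together: `ex` accepts digit strings, datetime expirations now go through `datetime.timestamp()` (so timezone-aware values are handled correctly), and BITFIELD_RO plus the LATENCY HISTORY/LATEST/RESET helpers are new. A sketch against a plain local server:

```python
import datetime
import redis

r = redis.Redis()                                  # assumes a local Redis server

r.set("counter", 7, ex="30")                       # digit strings are accepted for ex
# Timezone-aware datetimes are converted via .timestamp() instead of time.mktime().
deadline = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=1)
r.expireat("counter", deadline)

print(r.bitfield_ro("counter", "u8", 0))           # read-only BITFIELD variant

# LATENCY DOCTOR/GRAPH stay CLI-only and raise NotImplementedError,
# but the raw time-series commands are exposed:
r.config_set("latency-monitor-threshold", 100)
print(r.latency_latest())
print(r.latency_history("command"))
r.latency_reset()
```
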
diff --git a/redis/commands/graph/__init__.py b/redis/commands/graph/__init__.py
index 3736195..a882dd5 100644
--- a/redis/commands/graph/__init__.py
+++ b/redis/commands/graph/__init__.py
@@ -1,9 +1,13 @@
 from ..helpers import quote_string, random_string, stringify_param_value
-from .commands import GraphCommands
+from .commands import AsyncGraphCommands, GraphCommands
 from .edge import Edge  # noqa
 from .node import Node  # noqa
 from .path import Path  # noqa
 
+DB_LABELS = "DB.LABELS"
+DB_RAELATIONSHIPTYPES = "DB.RELATIONSHIPTYPES"
+DB_PROPERTYKEYS = "DB.PROPERTYKEYS"
+
 
 class Graph(GraphCommands):
     """
@@ -44,25 +48,19 @@ class Graph(GraphCommands):
         lbls = self.labels()
 
         # Unpack data.
-        self._labels = [None] * len(lbls)
-        for i, l in enumerate(lbls):
-            self._labels[i] = l[0]
+        self._labels = [l[0] for _, l in enumerate(lbls)]
 
     def _refresh_relations(self):
         rels = self.relationship_types()
 
         # Unpack data.
-        self._relationship_types = [None] * len(rels)
-        for i, r in enumerate(rels):
-            self._relationship_types[i] = r[0]
+        self._relationship_types = [r[0] for _, r in enumerate(rels)]
 
     def _refresh_attributes(self):
         props = self.property_keys()
 
         # Unpack data.
-        self._properties = [None] * len(props)
-        for i, p in enumerate(props):
-            self._properties[i] = p[0]
+        self._properties = [p[0] for _, p in enumerate(props)]
 
     def get_label(self, idx):
         """
@@ -108,12 +106,12 @@ class Graph(GraphCommands):
             The index of the property
         """
         try:
-            propertie = self._properties[idx]
+            p = self._properties[idx]
         except IndexError:
             # Refresh properties.
             self._refresh_attributes()
-            propertie = self._properties[idx]
-        return propertie
+            p = self._properties[idx]
+        return p
 
     def add_node(self, node):
         """
@@ -133,6 +131,8 @@ class Graph(GraphCommands):
         self.edges.append(edge)
 
     def _build_params_header(self, params):
+        if params is None:
+            return ""
         if not isinstance(params, dict):
             raise TypeError("'params' must be a dict")
         # Header starts with "CYPHER"
@@ -147,16 +147,109 @@ class Graph(GraphCommands):
         q = f"CALL {procedure}({','.join(args)})"
 
         y = kwagrs.get("y", None)
-        if y:
-            q += f" YIELD {','.join(y)}"
+        if y is not None:
+            q += f"YIELD {','.join(y)}"
 
         return self.query(q, read_only=read_only)
 
     def labels(self):
-        return self.call_procedure("db.labels", read_only=True).result_set
+        return self.call_procedure(DB_LABELS, read_only=True).result_set
 
     def relationship_types(self):
-        return self.call_procedure("db.relationshipTypes", read_only=True).result_set
+        return self.call_procedure(DB_RAELATIONSHIPTYPES, read_only=True).result_set
 
     def property_keys(self):
-        return self.call_procedure("db.propertyKeys", read_only=True).result_set
+        return self.call_procedure(DB_PROPERTYKEYS, read_only=True).result_set
+
+
+class AsyncGraph(Graph, AsyncGraphCommands):
+    """Async version for Graph"""
+
+    async def _refresh_labels(self):
+        lbls = await self.labels()
+
+        # Unpack data.
+        self._labels = [l[0] for _, l in enumerate(lbls)]
+
+    async def _refresh_attributes(self):
+        props = await self.property_keys()
+
+        # Unpack data.
+        self._properties = [p[0] for _, p in enumerate(props)]
+
+    async def _refresh_relations(self):
+        rels = await self.relationship_types()
+
+        # Unpack data.
+        self._relationship_types = [r[0] for _, r in enumerate(rels)]
+
+    async def get_label(self, idx):
+        """
+        Returns a label by it's index
+
+        Args:
+
+        idx:
+            The index of the label
+        """
+        try:
+            label = self._labels[idx]
+        except IndexError:
+            # Refresh labels.
+            await self._refresh_labels()
+            label = self._labels[idx]
+        return label
+
+    async def get_property(self, idx):
+        """
+        Returns a property by it's index
+
+        Args:
+
+        idx:
+            The index of the property
+        """
+        try:
+            p = self._properties[idx]
+        except IndexError:
+            # Refresh properties.
+            await self._refresh_attributes()
+            p = self._properties[idx]
+        return p
+
+    async def get_relation(self, idx):
+        """
+        Returns a relationship type by it's index
+
+        Args:
+
+        idx:
+            The index of the relation
+        """
+        try:
+            relationship_type = self._relationship_types[idx]
+        except IndexError:
+            # Refresh relationship types.
+            await self._refresh_relations()
+            relationship_type = self._relationship_types[idx]
+        return relationship_type
+
+    async def call_procedure(self, procedure, *args, read_only=False, **kwagrs):
+        args = [quote_string(arg) for arg in args]
+        q = f"CALL {procedure}({','.join(args)})"
+
+        y = kwagrs.get("y", None)
+        if y is not None:
+            f"YIELD {','.join(y)}"
+        return await self.query(q, read_only=read_only)
+
+    async def labels(self):
+        return ((await self.call_procedure(DB_LABELS, read_only=True))).result_set
+
+    async def property_keys(self):
+        return (await self.call_procedure(DB_PROPERTYKEYS, read_only=True)).result_set
+
+    async def relationship_types(self):
+        return (
+            await self.call_procedure(DB_RAELATIONSHIPTYPES, read_only=True)
+        ).result_set
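
The new AsyncGraph mirrors the synchronous Graph API with awaitable methods. A hedged sketch: it assumes RedisGraph is loaded and that the asyncio client exposes the same `.graph()` helper as the sync client; adjust the entry point if yours differs.

```python
import asyncio
import redis.asyncio as redis

async def main():
    r = redis.Redis()                    # placeholder connection settings
    g = r.graph("social")                # assumed async .graph() helper

    await g.query("CREATE (:Person {name: 'alice'})")
    result = await g.query("MATCH (p:Person) RETURN p.name", read_only=True)
    print(result.result_set)

    # Schema lookups are awaitable on AsyncGraph.
    print(await g.labels())
    await r.close()

asyncio.run(main())
```
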
diff --git a/redis/commands/graph/commands.py b/redis/commands/graph/commands.py
index fe4224b..762ab42 100644
--- a/redis/commands/graph/commands.py
+++ b/redis/commands/graph/commands.py
@@ -3,7 +3,16 @@ from redis.exceptions import ResponseError
 
 from .exceptions import VersionMismatchException
 from .execution_plan import ExecutionPlan
-from .query_result import QueryResult
+from .query_result import AsyncQueryResult, QueryResult
+
+PROFILE_CMD = "GRAPH.PROFILE"
+RO_QUERY_CMD = "GRAPH.RO_QUERY"
+QUERY_CMD = "GRAPH.QUERY"
+DELETE_CMD = "GRAPH.DELETE"
+SLOWLOG_CMD = "GRAPH.SLOWLOG"
+CONFIG_CMD = "GRAPH.CONFIG"
+LIST_CMD = "GRAPH.LIST"
+EXPLAIN_CMD = "GRAPH.EXPLAIN"
 
 
 class GraphCommands:
@@ -52,33 +61,28 @@ class GraphCommands:
         query = q
 
         # handle query parameters
-        if params is not None:
-            query = self._build_params_header(params) + query
+        query = self._build_params_header(params) + query
 
         # construct query command
         # ask for compact result-set format
         # specify known graph version
         if profile:
-            cmd = "GRAPH.PROFILE"
+            cmd = PROFILE_CMD
         else:
-            cmd = "GRAPH.RO_QUERY" if read_only else "GRAPH.QUERY"
+            cmd = RO_QUERY_CMD if read_only else QUERY_CMD
         command = [cmd, self.name, query, "--compact"]
 
         # include timeout is specified
-        if timeout:
-            if not isinstance(timeout, int):
-                raise Exception("Timeout argument must be a positive integer")
-            command += ["timeout", timeout]
+        if isinstance(timeout, int):
+            command.extend(["timeout", timeout])
+        elif timeout is not None:
+            raise Exception("Timeout argument must be a positive integer")
 
         # issue query
         try:
             response = self.execute_command(*command)
             return QueryResult(self, response, profile)
         except ResponseError as e:
-            if "wrong number of arguments" in str(e):
-                print(
-                    "Note: RedisGraph Python requires server version 2.2.8 or above"
-                )  # noqa
             if "unknown command" in str(e) and read_only:
                 # `GRAPH.RO_QUERY` is unavailable in older versions.
                 return self.query(q, params, timeout, read_only=False)
@@ -106,7 +110,7 @@ class GraphCommands:
         For more information see `DELETE <https://redis.io/commands/graph.delete>`_. # noqa
         """
         self._clear_schema()
-        return self.execute_command("GRAPH.DELETE", self.name)
+        return self.execute_command(DELETE_CMD, self.name)
 
     # declared here, to override the built in redis.db.flush()
     def flush(self):
@@ -146,7 +150,7 @@ class GraphCommands:
         3. The issued query.
         4. The amount of time needed for its execution, in milliseconds.
         """
-        return self.execute_command("GRAPH.SLOWLOG", self.name)
+        return self.execute_command(SLOWLOG_CMD, self.name)
 
     def config(self, name, value=None, set=False):
         """
@@ -170,14 +174,14 @@ class GraphCommands:
                 raise DataError(
                     "``value`` can be provided only when ``set`` is True"
                 )  # noqa
-        return self.execute_command("GRAPH.CONFIG", *params)
+        return self.execute_command(CONFIG_CMD, *params)
 
     def list_keys(self):
         """
         Lists all graph keys in the keyspace.
         For more information see `GRAPH.LIST <https://redis.io/commands/graph.list>`_. # noqa
         """
-        return self.execute_command("GRAPH.LIST")
+        return self.execute_command(LIST_CMD)
 
     def execution_plan(self, query, params=None):
         """
@@ -188,10 +192,9 @@ class GraphCommands:
             query: the query that will be executed
             params: query parameters
         """
-        if params is not None:
-            query = self._build_params_header(params) + query
+        query = self._build_params_header(params) + query
 
-        plan = self.execute_command("GRAPH.EXPLAIN", self.name, query)
+        plan = self.execute_command(EXPLAIN_CMD, self.name, query)
         if isinstance(plan[0], bytes):
             plan = [b.decode() for b in plan]
         return "\n".join(plan)
@@ -206,8 +209,105 @@ class GraphCommands:
             query: the query that will be executed
             params: query parameters
         """
-        if params is not None:
-            query = self._build_params_header(params) + query
+        query = self._build_params_header(params) + query
+
+        plan = self.execute_command(EXPLAIN_CMD, self.name, query)
+        return ExecutionPlan(plan)
+
+
+class AsyncGraphCommands(GraphCommands):
+    async def query(self, q, params=None, timeout=None, read_only=False, profile=False):
+        """
+        Executes a query against the graph.
+        For more information see `GRAPH.QUERY <https://oss.redis.com/redisgraph/master/commands/#graphquery>`_. # noqa
+
+        Args:
+
+        q : str
+            The query.
+        params : dict
+            Query parameters.
+        timeout : int
+            Maximum runtime for read queries in milliseconds.
+        read_only : bool
+            Executes a readonly query if set to True.
+        profile : bool
+            Return details on results produced by and time
+            spent in each operation.
+        """
+
+        # maintain original 'q'
+        query = q
+
+        # handle query parameters
+        query = self._build_params_header(params) + query
+
+        # construct query command
+        # ask for compact result-set format
+        # specify known graph version
+        if profile:
+            cmd = PROFILE_CMD
+        else:
+            cmd = RO_QUERY_CMD if read_only else QUERY_CMD
+        command = [cmd, self.name, query, "--compact"]
+
+        # include timeout if specified
+        if isinstance(timeout, int):
+            command.extend(["timeout", timeout])
+        elif timeout is not None:
+            raise Exception("Timeout argument must be a positive integer")
+
+        # issue query
+        try:
+            response = await self.execute_command(*command)
+            return await AsyncQueryResult().initialize(self, response, profile)
+        except ResponseError as e:
+            if "unknown command" in str(e) and read_only:
+                # `GRAPH.RO_QUERY` is unavailable in older versions.
+                return await self.query(q, params, timeout, read_only=False)
+            raise e
+        except VersionMismatchException as e:
+            # client view over the graph schema is out of sync
+            # set client version and refresh local schema
+            self.version = e.version
+            self._refresh_schema()
+            # re-issue query
+            return await self.query(q, params, timeout, read_only)
+
+    async def execution_plan(self, query, params=None):
+        """
+        Get the execution plan for given query,
+        GRAPH.EXPLAIN returns an array of operations.
+
+        Args:
+            query: the query that will be executed
+            params: query parameters
+        """
+        query = self._build_params_header(params) + query
 
-        plan = self.execute_command("GRAPH.EXPLAIN", self.name, query)
+        plan = await self.execute_command(EXPLAIN_CMD, self.name, query)
+        if isinstance(plan[0], bytes):
+            plan = [b.decode() for b in plan]
+        return "\n".join(plan)
+
+    async def explain(self, query, params=None):
+        """
+        Get the execution plan for given query,
+        GRAPH.EXPLAIN returns ExecutionPlan object.
+
+        Args:
+            query: the query that will be executed
+            params: query parameters
+        """
+        query = self._build_params_header(params) + query
+
+        plan = await self.execute_command(EXPLAIN_CMD, self.name, query)
         return ExecutionPlan(plan)
+
+    async def flush(self):
+        """
+        Commit the graph and reset the edges and the nodes to zero length.
+        """
+        await self.commit()
+        self.nodes = {}
+        self.edges = []
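
The query() refactor above (in both GraphCommands and the new AsyncGraphCommands) now validates timeout explicitly: an int is appended as "timeout <n>", while any other non-None value raises. A hedged sketch of the resulting call style, assuming g is a Graph handle with data in it:

# Sketch only; g and the graph contents are assumptions.
res = g.query("MATCH (n) RETURN count(n)", timeout=1000)  # int -> "timeout 1000" appended
print(res.result_set)
# g.query("MATCH (n) RETURN n", timeout="1000")  # non-int, non-None timeout now raises
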
diff --git a/redis/commands/graph/edge.py b/redis/commands/graph/edge.py
index b0141d9..6ee195f 100644
--- a/redis/commands/graph/edge.py
+++ b/redis/commands/graph/edge.py
@@ -61,6 +61,10 @@ class Edge:
         return res
 
     def __eq__(self, rhs):
+        # Type checking
+        if not isinstance(rhs, Edge):
+            return False
+
         # Quick positive check, if both IDs are set.
         if self.id is not None and rhs.id is not None and self.id == rhs.id:
             return True
diff --git a/redis/commands/graph/node.py b/redis/commands/graph/node.py
index c5f8429..4546a39 100644
--- a/redis/commands/graph/node.py
+++ b/redis/commands/graph/node.py
@@ -32,7 +32,7 @@ class Node:
             self.labels = label
         else:
             raise AssertionError(
-                "label should be either None, " "string or a list of strings"
+                "label should be either None, string or a list of strings"
             )
 
         self.properties = properties or {}
@@ -65,6 +65,10 @@ class Node:
         return res
 
     def __eq__(self, rhs):
+        # Type checking
+        if not isinstance(rhs, Node):
+            return False
+
         # Quick positive check, if both IDs are set.
         if self.id is not None and rhs.id is not None and self.id != rhs.id:
             return False
diff --git a/redis/commands/graph/path.py b/redis/commands/graph/path.py
index 6f2214a..ee22dc8 100644
--- a/redis/commands/graph/path.py
+++ b/redis/commands/graph/path.py
@@ -54,6 +54,10 @@ class Path:
         return self
 
     def __eq__(self, other):
+        # Type checking
+        if not isinstance(other, Path):
+            return False
+
         return self.nodes() == other.nodes() and self.edges() == other.edges()
 
     def __str__(self):
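
The __eq__ changes in edge.py, node.py and path.py above make comparisons against foreign types return False instead of failing on a missing attribute. A small sketch using the public Node and Edge constructors:

from redis.commands.graph.node import Node
from redis.commands.graph.edge import Edge

n = Node(node_id=1, label="person")
e = Edge(n, "knows", Node(node_id=2))
print(n == "not a node")  # False rather than an AttributeError
print(e == 42)            # False
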
diff --git a/redis/commands/graph/query_result.py b/redis/commands/graph/query_result.py
index 644ac5a..7c7f58b 100644
--- a/redis/commands/graph/query_result.py
+++ b/redis/commands/graph/query_result.py
@@ -1,4 +1,6 @@
+import sys
 from collections import OrderedDict
+from distutils.util import strtobool
 
 # from prettytable import PrettyTable
 from redis import ResponseError
@@ -9,10 +11,12 @@ from .node import Node
 from .path import Path
 
 LABELS_ADDED = "Labels added"
+LABELS_REMOVED = "Labels removed"
 NODES_CREATED = "Nodes created"
 NODES_DELETED = "Nodes deleted"
 RELATIONSHIPS_DELETED = "Relationships deleted"
 PROPERTIES_SET = "Properties set"
+PROPERTIES_REMOVED = "Properties removed"
 RELATIONSHIPS_CREATED = "Relationships created"
 INDICES_CREATED = "Indices created"
 INDICES_DELETED = "Indices deleted"
@@ -21,8 +25,10 @@ INTERNAL_EXECUTION_TIME = "internal execution time"
 
 STATS = [
     LABELS_ADDED,
+    LABELS_REMOVED,
     NODES_CREATED,
     PROPERTIES_SET,
+    PROPERTIES_REMOVED,
     RELATIONSHIPS_CREATED,
     NODES_DELETED,
     RELATIONSHIPS_DELETED,
@@ -86,6 +92,9 @@ class QueryResult:
             self.parse_results(response)
 
     def _check_for_errors(self, response):
+        """
+        Check if the response contains an error.
+        """
         if isinstance(response[0], ResponseError):
             error = response[0]
             if str(error) == "version mismatch":
@@ -99,6 +108,9 @@ class QueryResult:
             raise response[-1]
 
     def parse_results(self, raw_result_set):
+        """
+        Parse the query execution result returned from the server.
+        """
         self.header = self.parse_header(raw_result_set)
 
         # Empty header.
@@ -108,6 +120,9 @@ class QueryResult:
         self.result_set = self.parse_records(raw_result_set)
 
     def parse_statistics(self, raw_statistics):
+        """
+        Parse the statistics returned in the response.
+        """
         self.statistics = {}
 
         # decode statistics
@@ -121,31 +136,31 @@ class QueryResult:
                 self.statistics[s] = v
 
     def parse_header(self, raw_result_set):
+        """
+        Parse the header of the result.
+        """
         # An array of column name/column type pairs.
         header = raw_result_set[0]
         return header
 
     def parse_records(self, raw_result_set):
-        records = []
-        result_set = raw_result_set[1]
-        for row in result_set:
-            record = []
-            for idx, cell in enumerate(row):
-                if self.header[idx][0] == ResultSetColumnTypes.COLUMN_SCALAR:  # noqa
-                    record.append(self.parse_scalar(cell))
-                elif self.header[idx][0] == ResultSetColumnTypes.COLUMN_NODE:  # noqa
-                    record.append(self.parse_node(cell))
-                elif (
-                    self.header[idx][0] == ResultSetColumnTypes.COLUMN_RELATION
-                ):  # noqa
-                    record.append(self.parse_edge(cell))
-                else:
-                    print("Unknown column type.\n")
-            records.append(record)
+        """
+        Parses the result set and returns a list of records.
+        """
+        records = [
+            [
+                self.parse_record_types[self.header[idx][0]](cell)
+                for idx, cell in enumerate(row)
+            ]
+            for row in raw_result_set[1]
+        ]
 
         return records
 
     def parse_entity_properties(self, props):
+        """
+        Parse node / edge properties.
+        """
         # [[name, value type, value] X N]
         properties = {}
         for prop in props:
@@ -156,6 +171,9 @@ class QueryResult:
         return properties
 
     def parse_string(self, cell):
+        """
+        Parse the cell as a string.
+        """
         if isinstance(cell, bytes):
             return cell.decode()
         elif not isinstance(cell, str):
@@ -164,6 +182,9 @@ class QueryResult:
             return cell
 
     def parse_node(self, cell):
+        """
+        Parse the cell to a node.
+        """
         # Node ID (integer),
         # [label string offset (integer)],
         # [[name, value type, value] X N]
@@ -178,6 +199,9 @@ class QueryResult:
         return Node(node_id=node_id, label=labels, properties=properties)
 
     def parse_edge(self, cell):
+        """
+        Parse the cell to an edge.
+        """
         # Edge ID (integer),
         # reltype string offset (integer),
         # src node ID offset (integer),
@@ -194,11 +218,17 @@ class QueryResult:
         )
 
     def parse_path(self, cell):
+        """
+        Parse the cell to a path.
+        """
         nodes = self.parse_scalar(cell[0])
         edges = self.parse_scalar(cell[1])
         return Path(nodes, edges)
 
     def parse_map(self, cell):
+        """
+        Parse the cell as a map.
+        """
         m = OrderedDict()
         n_entries = len(cell)
 
@@ -212,6 +242,9 @@ class QueryResult:
         return m
 
     def parse_point(self, cell):
+        """
+        Parse the cell to point.
+        """
         p = {}
         # A point is received an array of the form: [latitude, longitude]
         # It is returned as a map of the form: {"latitude": latitude, "longitude": longitude} # noqa
@@ -219,94 +252,63 @@ class QueryResult:
         p["longitude"] = float(cell[1])
         return p
 
-    def parse_scalar(self, cell):
-        scalar_type = int(cell[0])
-        value = cell[1]
-        scalar = None
-
-        if scalar_type == ResultSetScalarTypes.VALUE_NULL:
-            scalar = None
-
-        elif scalar_type == ResultSetScalarTypes.VALUE_STRING:
-            scalar = self.parse_string(value)
-
-        elif scalar_type == ResultSetScalarTypes.VALUE_INTEGER:
-            scalar = int(value)
-
-        elif scalar_type == ResultSetScalarTypes.VALUE_BOOLEAN:
-            value = value.decode() if isinstance(value, bytes) else value
-            if value == "true":
-                scalar = True
-            elif value == "false":
-                scalar = False
-            else:
-                print("Unknown boolean type\n")
-
-        elif scalar_type == ResultSetScalarTypes.VALUE_DOUBLE:
-            scalar = float(value)
-
-        elif scalar_type == ResultSetScalarTypes.VALUE_ARRAY:
-            # array variable is introduced only for readability
-            scalar = array = value
-            for i in range(len(array)):
-                scalar[i] = self.parse_scalar(array[i])
+    def parse_null(self, cell):
+        """
+        Parse a null value.
+        """
+        return None
 
-        elif scalar_type == ResultSetScalarTypes.VALUE_NODE:
-            scalar = self.parse_node(value)
+    def parse_integer(self, cell):
+        """
+        Parse the integer value from the cell.
+        """
+        return int(cell)
 
-        elif scalar_type == ResultSetScalarTypes.VALUE_EDGE:
-            scalar = self.parse_edge(value)
+    def parse_boolean(self, value):
+        """
+        Parse the cell value as a boolean.
+        """
+        value = value.decode() if isinstance(value, bytes) else value
+        try:
+            scalar = True if strtobool(value) else False
+        except ValueError:
+            sys.stderr.write("unknown boolean type\n")
+            scalar = None
+        return scalar
 
-        elif scalar_type == ResultSetScalarTypes.VALUE_PATH:
-            scalar = self.parse_path(value)
+    def parse_double(self, cell):
+        """
+        Parse the cell as a double.
+        """
+        return float(cell)
 
-        elif scalar_type == ResultSetScalarTypes.VALUE_MAP:
-            scalar = self.parse_map(value)
+    def parse_array(self, value):
+        """
+        Parse an array of values.
+        """
+        scalar = [self.parse_scalar(value[i]) for i in range(len(value))]
+        return scalar
 
-        elif scalar_type == ResultSetScalarTypes.VALUE_POINT:
-            scalar = self.parse_point(value)
+    def parse_unknown(self, cell):
+        """
+        Parse a cell of unknown type.
+        """
+        sys.stderr.write("Unknown type\n")
+        return None
 
-        elif scalar_type == ResultSetScalarTypes.VALUE_UNKNOWN:
-            print("Unknown scalar type\n")
+    def parse_scalar(self, cell):
+        """
+        Parse a scalar value from a cell in the result set.
+        """
+        scalar_type = int(cell[0])
+        value = cell[1]
+        scalar = self.parse_scalar_types[scalar_type](value)
 
         return scalar
 
     def parse_profile(self, response):
         self.result_set = [x[0 : x.index(",")].strip() for x in response]
 
-    # """Prints the data from the query response:
-    #    1. First row result_set contains the columns names.
-    #       Thus the first row in PrettyTable will contain the
-    #       columns.
-    #    2. The row after that will contain the data returned,
-    #       or 'No Data returned' if there is none.
-    #    3. Prints the statistics of the query.
-    # """
-
-    # def pretty_print(self):
-    #     if not self.is_empty():
-    #         header = [col[1] for col in self.header]
-    #         tbl = PrettyTable(header)
-
-    #         for row in self.result_set:
-    #             record = []
-    #             for idx, cell in enumerate(row):
-    #                 if type(cell) is Node:
-    #                     record.append(cell.to_string())
-    #                 elif type(cell) is Edge:
-    #                     record.append(cell.to_string())
-    #                 else:
-    #                     record.append(cell)
-    #             tbl.add_row(record)
-
-    #         if len(self.result_set) == 0:
-    #             tbl.add_row(['No data returned.'])
-
-    #         print(str(tbl) + '\n')
-
-    #     for stat in self.statistics:
-    #         print("%s %s" % (stat, self.statistics[stat]))
-
     def is_empty(self):
         return len(self.result_set) == 0
 
@@ -323,40 +325,249 @@ class QueryResult:
 
     @property
     def labels_added(self):
+        """Returns the number of labels added in the query"""
         return self._get_stat(LABELS_ADDED)
 
+    @property
+    def labels_removed(self):
+        """Returns the number of labels removed in the query"""
+        return self._get_stat(LABELS_REMOVED)
+
     @property
     def nodes_created(self):
+        """Returns the number of nodes created in the query"""
         return self._get_stat(NODES_CREATED)
 
     @property
     def nodes_deleted(self):
+        """Returns the number of nodes deleted in the query"""
         return self._get_stat(NODES_DELETED)
 
     @property
     def properties_set(self):
+        """Returns the number of properties set in the query"""
         return self._get_stat(PROPERTIES_SET)
 
+    @property
+    def properties_removed(self):
+        """Returns the number of properties removed in the query"""
+        return self._get_stat(PROPERTIES_REMOVED)
+
     @property
     def relationships_created(self):
+        """Returns the number of relationships created in the query"""
         return self._get_stat(RELATIONSHIPS_CREATED)
 
     @property
     def relationships_deleted(self):
+        """Returns the number of relationships deleted in the query"""
         return self._get_stat(RELATIONSHIPS_DELETED)
 
     @property
     def indices_created(self):
+        """Returns the number of indices created in the query"""
         return self._get_stat(INDICES_CREATED)
 
     @property
     def indices_deleted(self):
+        """Returns the number of indices deleted in the query"""
         return self._get_stat(INDICES_DELETED)
 
     @property
     def cached_execution(self):
+        """Returns whether or not the query execution plan was cached"""
         return self._get_stat(CACHED_EXECUTION) == 1
 
     @property
     def run_time_ms(self):
+        """Returns the server execution time of the query"""
         return self._get_stat(INTERNAL_EXECUTION_TIME)
+
+    @property
+    def parse_scalar_types(self):
+        return {
+            ResultSetScalarTypes.VALUE_NULL: self.parse_null,
+            ResultSetScalarTypes.VALUE_STRING: self.parse_string,
+            ResultSetScalarTypes.VALUE_INTEGER: self.parse_integer,
+            ResultSetScalarTypes.VALUE_BOOLEAN: self.parse_boolean,
+            ResultSetScalarTypes.VALUE_DOUBLE: self.parse_double,
+            ResultSetScalarTypes.VALUE_ARRAY: self.parse_array,
+            ResultSetScalarTypes.VALUE_NODE: self.parse_node,
+            ResultSetScalarTypes.VALUE_EDGE: self.parse_edge,
+            ResultSetScalarTypes.VALUE_PATH: self.parse_path,
+            ResultSetScalarTypes.VALUE_MAP: self.parse_map,
+            ResultSetScalarTypes.VALUE_POINT: self.parse_point,
+            ResultSetScalarTypes.VALUE_UNKNOWN: self.parse_unknown,
+        }
+
+    @property
+    def parse_record_types(self):
+        return {
+            ResultSetColumnTypes.COLUMN_SCALAR: self.parse_scalar,
+            ResultSetColumnTypes.COLUMN_NODE: self.parse_node,
+            ResultSetColumnTypes.COLUMN_RELATION: self.parse_edge,
+            ResultSetColumnTypes.COLUMN_UNKNOWN: self.parse_unknown,
+        }
+
+
+class AsyncQueryResult(QueryResult):
+    """
+    Async version for the QueryResult class - a class that
+    represents a result of the query operation.
+    """
+
+    def __init__(self):
+        """
+        To init the class you must call self.initialize()
+        """
+        pass
+
+    async def initialize(self, graph, response, profile=False):
+        """
+        Initializes the class.
+        Args:
+
+        graph:
+            The graph on which the query was executed.
+        response:
+            The response from the server.
+        profile:
+            A boolean indicating if the query command was "GRAPH.PROFILE"
+        """
+        self.graph = graph
+        self.header = []
+        self.result_set = []
+
+        # in case of an error an exception will be raised
+        self._check_for_errors(response)
+
+        if len(response) == 1:
+            self.parse_statistics(response[0])
+        elif profile:
+            self.parse_profile(response)
+        else:
+            # start by parsing statistics, matches the one we have
+            self.parse_statistics(response[-1])  # Last element.
+            await self.parse_results(response)
+
+        return self
+
+    async def parse_node(self, cell):
+        """
+        Parses a node from the cell.
+        """
+        # Node ID (integer),
+        # [label string offset (integer)],
+        # [[name, value type, value] X N]
+
+        labels = None
+        if len(cell[1]) > 0:
+            labels = []
+            for inner_label in cell[1]:
+                labels.append(await self.graph.get_label(inner_label))
+        properties = await self.parse_entity_properties(cell[2])
+        node_id = int(cell[0])
+        return Node(node_id=node_id, label=labels, properties=properties)
+
+    async def parse_scalar(self, cell):
+        """
+        Parses a scalar value from the server response.
+        """
+        scalar_type = int(cell[0])
+        value = cell[1]
+        try:
+            scalar = await self.parse_scalar_types[scalar_type](value)
+        except TypeError:
+            # Not all of the functions are async
+            scalar = self.parse_scalar_types[scalar_type](value)
+
+        return scalar
+
+    async def parse_records(self, raw_result_set):
+        """
+        Parses the result set and returns a list of records.
+        """
+        records = []
+        for row in raw_result_set[1]:
+            record = [
+                await self.parse_record_types[self.header[idx][0]](cell)
+                for idx, cell in enumerate(row)
+            ]
+            records.append(record)
+
+        return records
+
+    async def parse_results(self, raw_result_set):
+        """
+        Parse the query execution result returned from the server.
+        """
+        self.header = self.parse_header(raw_result_set)
+
+        # Empty header.
+        if len(self.header) == 0:
+            return
+
+        self.result_set = await self.parse_records(raw_result_set)
+
+    async def parse_entity_properties(self, props):
+        """
+        Parse node / edge properties.
+        """
+        # [[name, value type, value] X N]
+        properties = {}
+        for prop in props:
+            prop_name = await self.graph.get_property(prop[0])
+            prop_value = await self.parse_scalar(prop[1:])
+            properties[prop_name] = prop_value
+
+        return properties
+
+    async def parse_edge(self, cell):
+        """
+        Parse the cell to an edge.
+        """
+        # Edge ID (integer),
+        # reltype string offset (integer),
+        # src node ID offset (integer),
+        # dest node ID offset (integer),
+        # [[name, value, value type] X N]
+
+        edge_id = int(cell[0])
+        relation = await self.graph.get_relation(cell[1])
+        src_node_id = int(cell[2])
+        dest_node_id = int(cell[3])
+        properties = await self.parse_entity_properties(cell[4])
+        return Edge(
+            src_node_id, relation, dest_node_id, edge_id=edge_id, properties=properties
+        )
+
+    async def parse_path(self, cell):
+        """
+        Parse the cell to a path.
+        """
+        nodes = await self.parse_scalar(cell[0])
+        edges = await self.parse_scalar(cell[1])
+        return Path(nodes, edges)
+
+    async def parse_map(self, cell):
+        """
+        Parse the cell to a map.
+        """
+        m = OrderedDict()
+        n_entries = len(cell)
+
+        # A map is an array of key value pairs.
+        # 1. key (string)
+        # 2. array: (value type, value)
+        for i in range(0, n_entries, 2):
+            key = self.parse_string(cell[i])
+            m[key] = await self.parse_scalar(cell[i + 1])
+
+        return m
+
+    async def parse_array(self, value):
+        """
+        Parse array value.
+        """
+        scalar = [await self.parse_scalar(value[i]) for i in range(len(value))]
+        return scalar
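
query_result.py above switches to table-driven parsing and adds "Labels removed" / "Properties removed" counters. A sketch of reading the new statistics, assuming g is a Graph handle and the query matches existing data:

# Illustrative; the :Temp label and scratch property are assumptions.
res = g.query("MATCH (n:Temp) REMOVE n:Temp, n.scratch")
print(res.labels_removed)      # "Labels removed" statistic
print(res.properties_removed)  # "Properties removed" statistic
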
diff --git a/redis/commands/helpers.py b/redis/commands/helpers.py
index 6989ab5..b65cd1a 100644
--- a/redis/commands/helpers.py
+++ b/redis/commands/helpers.py
@@ -115,6 +115,7 @@ def quote_string(v):
     if len(v) == 0:
         return '""'
 
+    v = v.replace("\\", "\\\\")
     v = v.replace('"', '\\"')
 
     return f'"{v}"'
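
The helpers.py change escapes backslashes before double quotes, so values containing either character survive being embedded in a generated query string. For illustration only:

from redis.commands.helpers import quote_string

print(quote_string('say "hi"'))  # double quotes are escaped
print(quote_string("C:\\temp"))  # backslashes are now doubled as well
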
diff --git a/redis/commands/json/commands.py b/redis/commands/json/commands.py
index 9391c2a..7fd4039 100644
--- a/redis/commands/json/commands.py
+++ b/redis/commands/json/commands.py
@@ -2,9 +2,8 @@ import os
 from json import JSONDecodeError, loads
 from typing import Dict, List, Optional, Union
 
-from deprecated import deprecated
-
 from redis.exceptions import DataError
+from redis.utils import deprecated_function
 
 from ._util import JsonType
 from .decoders import decode_dict_keys
@@ -137,7 +136,7 @@ class JSONCommands:
             "JSON.NUMINCRBY", name, str(path), self._encode(number)
         )
 
-    @deprecated(version="4.0.0", reason="deprecated since redisjson 1.0.0")
+    @deprecated_function(version="4.0.0", reason="deprecated since redisjson 1.0.0")
     def nummultby(self, name: str, path: str, number: int) -> str:
         """Multiply the numeric (integer or floating point) JSON value under
         ``path`` at key ``name`` with the provided ``number``.
@@ -368,19 +367,19 @@ class JSONCommands:
             pieces.append(str(path))
         return self.execute_command("JSON.DEBUG", *pieces)
 
-    @deprecated(
+    @deprecated_function(
         version="4.0.0", reason="redisjson-py supported this, call get directly."
     )
     def jsonget(self, *args, **kwargs):
         return self.get(*args, **kwargs)
 
-    @deprecated(
+    @deprecated_function(
         version="4.0.0", reason="redisjson-py supported this, call get directly."
     )
     def jsonmget(self, *args, **kwargs):
         return self.mget(*args, **kwargs)
 
-    @deprecated(
+    @deprecated_function(
         version="4.0.0", reason="redisjson-py supported this, call get directly."
     )
     def jsonset(self, *args, **kwargs):
diff --git a/redis/commands/redismodules.py b/redis/commands/redismodules.py
index 875f3fc..7e2045a 100644
--- a/redis/commands/redismodules.py
+++ b/redis/commands/redismodules.py
@@ -73,8 +73,8 @@ class RedisModuleCommands:
         return tdigest
 
     def graph(self, index_name="idx"):
-        """Access the timeseries namespace, providing support for
-        redis timeseries data.
+        """Access the graph namespace, providing support for
+        redis graph data.
         """
 
         from .graph import Graph
@@ -91,3 +91,13 @@ class AsyncRedisModuleCommands(RedisModuleCommands):
 
         s = AsyncSearch(client=self, index_name=index_name)
         return s
+
+    def graph(self, index_name="idx"):
+        """Access the graph namespace, providing support for
+        redis graph data.
+        """
+
+        from .graph import AsyncGraph
+
+        g = AsyncGraph(client=self, name=index_name)
+        return g
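
With the redismodules.py change above, the asyncio client also exposes the graph namespace, returning the new AsyncGraph. A minimal sketch; the connection defaults and the "social" graph key are assumptions:

import asyncio
from redis.asyncio import Redis

async def main():
    g = Redis().graph("social")  # AsyncGraph on the asyncio client
    res = await g.query("MATCH (n) RETURN n LIMIT 1")
    print(res.result_set)

asyncio.run(main())
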
diff --git a/redis/commands/search/__init__.py b/redis/commands/search/__init__.py
index 923711b..70e9c27 100644
--- a/redis/commands/search/__init__.py
+++ b/redis/commands/search/__init__.py
@@ -167,5 +167,5 @@ class Pipeline(SearchCommands, redis.client.Pipeline):
     """Pipeline for the module."""
 
 
-class AsyncPipeline(AsyncSearchCommands, AsyncioPipeline):
+class AsyncPipeline(AsyncSearchCommands, AsyncioPipeline, Pipeline):
     """AsyncPipeline for the module."""
diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py
index 061e69c..93a3d92 100644
--- a/redis/commands/search/aggregation.py
+++ b/redis/commands/search/aggregation.py
@@ -104,11 +104,11 @@ class AggregateRequest:
         self._aggregateplan = []
         self._loadfields = []
         self._loadall = False
-        self._limit = Limit()
         self._max = 0
         self._with_schema = False
         self._verbatim = False
         self._cursor = []
+        self._dialect = None
 
     def load(self, *fields):
         """
@@ -211,7 +211,8 @@ class AggregateRequest:
         `sort_by()` instead.
 
         """
-        self._limit = Limit(offset, num)
+        _limit = Limit(offset, num)
+        self._aggregateplan.extend(_limit.build_args())
         return self
 
     def sort_by(self, *fields, **kwargs):
@@ -321,12 +322,22 @@ class AggregateRequest:
             ret.append(str(len(self._loadfields)))
             ret.extend(self._loadfields)
 
-        ret.extend(self._aggregateplan)
+        if self._dialect:
+            ret.extend(["DIALECT", self._dialect])
 
-        ret += self._limit.build_args()
+        ret.extend(self._aggregateplan)
 
         return ret
 
+    def dialect(self, dialect):
+        """
+        Add a dialect field to the aggregate command.
+
+        - **dialect** - dialect version to execute the query under
+        """
+        self._dialect = dialect
+        return self
+
 
 class Cursor:
     def __init__(self, cid):
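
aggregation.py above replaces the single trailing LIMIT with in-plan emission and adds a DIALECT option. A short sketch of building a request with both; the field name is an assumption:

from redis.commands.search.aggregation import AggregateRequest, Desc

req = (
    AggregateRequest("*")
    .sort_by(Desc("@price"))
    .limit(0, 10)  # now appended to the aggregate plan in call order
    .dialect(2)    # emitted as DIALECT 2 ahead of the plan
)
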
diff --git a/redis/commands/search/commands.py b/redis/commands/search/commands.py
index 0121436..3bd7d47 100644
--- a/redis/commands/search/commands.py
+++ b/redis/commands/search/commands.py
@@ -3,6 +3,7 @@ import time
 from typing import Dict, Optional, Union
 
 from redis.client import Pipeline
+from redis.utils import deprecated_function
 
 from ..helpers import parse_to_dict
 from ._util import to_string
@@ -20,6 +21,7 @@ SEARCH_CMD = "FT.SEARCH"
 ADD_CMD = "FT.ADD"
 ADDHASH_CMD = "FT.ADDHASH"
 DROP_CMD = "FT.DROP"
+DROPINDEX_CMD = "FT.DROPINDEX"
 EXPLAIN_CMD = "FT.EXPLAIN"
 EXPLAINCLI_CMD = "FT.EXPLAINCLI"
 DEL_CMD = "FT.DEL"
@@ -170,8 +172,8 @@ class SearchCommands:
 
         For more information see `FT.DROPINDEX <https://redis.io/commands/ft.dropindex>`_.
         """  # noqa
-        keep_str = "" if delete_documents else "KEEPDOCS"
-        return self.execute_command(DROP_CMD, self.index_name, keep_str)
+        delete_str = "DD" if delete_documents else ""
+        return self.execute_command(DROPINDEX_CMD, self.index_name, delete_str)
 
     def _add_document(
         self,
@@ -235,6 +237,9 @@ class SearchCommands:
 
         return self.execute_command(*args)
 
+    @deprecated_function(
+        version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead"
+    )
     def add_document(
         self,
         doc_id,
@@ -288,6 +293,9 @@ class SearchCommands:
             **fields,
         )
 
+    @deprecated_function(
+        version="2.0.0", reason="deprecated since redisearch 2.0, call hset instead"
+    )
     def add_document_hash(self, doc_id, score=1.0, language=None, replace=False):
         """
         Add a hash document to the index.
@@ -519,7 +527,7 @@ class SearchCommands:
             cmd += query.get_args()
             cmd += self.get_params_args(query_params)
         else:
-            raise ValueError("Must provide AggregateRequest object or " "Query object.")
+            raise ValueError("Must provide AggregateRequest object or Query object.")
 
         res = self.execute_command(*cmd)
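
The dropindex change above switches from FT.DROP to FT.DROPINDEX and passes DD when documents should be deleted too. A sketch, assuming ft = r.ft("idx") and the existing delete_documents flag:

ft.dropindex(delete_documents=True)  # FT.DROPINDEX idx DD
ft.dropindex()                       # FT.DROPINDEX idx (documents are kept)
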
 
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
index 89ed973..6f31ce1 100644
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -64,6 +64,7 @@ class TextField(Field):
         weight: float = 1.0,
         no_stem: bool = False,
         phonetic_matcher: str = None,
+        withsuffixtrie: bool = False,
         **kwargs,
     ):
         Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
@@ -78,6 +79,8 @@ class TextField(Field):
         ]:
             Field.append_arg(self, self.PHONETIC)
             Field.append_arg(self, phonetic_matcher)
+        if withsuffixtrie:
+            Field.append_arg(self, "WITHSUFFIXTRIE")
 
 
 class NumericField(Field):
@@ -108,11 +111,18 @@ class TagField(Field):
     CASESENSITIVE = "CASESENSITIVE"
 
     def __init__(
-        self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
+        self,
+        name: str,
+        separator: str = ",",
+        case_sensitive: bool = False,
+        withsuffixtrie: bool = False,
+        **kwargs,
     ):
         args = [Field.TAG, self.SEPARATOR, separator]
         if case_sensitive:
             args.append(self.CASESENSITIVE)
+        if withsuffixtrie:
+            args.append("WITHSUFFIXTRIE")
 
         Field.__init__(self, name, args=args, **kwargs)
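
field.py above adds an optional suffix-trie flag to text and tag fields. A short sketch of declaring a schema with it; the field names are illustrative:

from redis.commands.search.field import TagField, TextField

schema = (
    TextField("title", withsuffixtrie=True),   # appends WITHSUFFIXTRIE to the field args
    TagField("category", withsuffixtrie=True),
)
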
 
diff --git a/redis/commands/search/query.py b/redis/commands/search/query.py
index e51918f..5071cfa 100644
--- a/redis/commands/search/query.py
+++ b/redis/commands/search/query.py
@@ -28,6 +28,7 @@ class Query:
         self._filters = list()
         self._ids = None
         self._slop = -1
+        self._timeout = None
         self._in_order = False
         self._sortby = None
         self._return_fields = []
@@ -131,6 +132,11 @@ class Query:
         self._slop = slop
         return self
 
+    def timeout(self, timeout):
+        """overrides the timeout parameter of the module"""
+        self._timeout = timeout
+        return self
+
     def in_order(self):
         """
         Match only documents where the query terms appear in
@@ -188,6 +194,8 @@ class Query:
             args += self._ids
         if self._slop >= 0:
             args += ["SLOP", self._slop]
+        if self._timeout:
+            args += ["TIMEOUT", self._timeout]
         if self._in_order:
             args.append("INORDER")
         if self._return_fields:
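
query.py above lets a search query carry its own timeout. A minimal sketch:

from redis.commands.search.query import Query

q = Query("hello world").timeout(100)  # appends TIMEOUT 100 to the FT.SEARCH arguments
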
diff --git a/redis/commands/search/querystring.py b/redis/commands/search/querystring.py
index 1da0387..3ff1320 100644
--- a/redis/commands/search/querystring.py
+++ b/redis/commands/search/querystring.py
@@ -132,6 +132,9 @@ class GeoValue(Value):
         self.radius = radius
         self.unit = unit
 
+    def to_string(self):
+        return f"[{self.lon} {self.lat} {self.radius} {self.unit}]"
+
 
 class Node:
     def __init__(self, *children, **kwparams):
diff --git a/redis/commands/search/result.py b/redis/commands/search/result.py
index 5f4aca6..451bf89 100644
--- a/redis/commands/search/result.py
+++ b/redis/commands/search/result.py
@@ -38,7 +38,7 @@ class Result:
             score = float(res[i + 1]) if with_scores else None
 
             fields = {}
-            if hascontent:
+            if hascontent and res[i + fields_offset] is not None:
                 fields = (
                     dict(
                         dict(
diff --git a/redis/commands/timeseries/commands.py b/redis/commands/timeseries/commands.py
index 3a30c24..13e3cdf 100644
--- a/redis/commands/timeseries/commands.py
+++ b/redis/commands/timeseries/commands.py
@@ -1,4 +1,7 @@
+from typing import Dict, List, Optional, Tuple, Union
+
 from redis.exceptions import DataError
+from redis.typing import KeyT, Number
 
 ADD_CMD = "TS.ADD"
 ALTER_CMD = "TS.ALTER"
@@ -22,7 +25,15 @@ REVRANGE_CMD = "TS.REVRANGE"
 class TimeSeriesCommands:
     """RedisTimeSeries Commands."""
 
-    def create(self, key, **kwargs):
+    def create(
+        self,
+        key: KeyT,
+        retention_msecs: Optional[int] = None,
+        uncompressed: Optional[bool] = False,
+        labels: Optional[Dict[str, str]] = None,
+        chunk_size: Optional[int] = None,
+        duplicate_policy: Optional[str] = None,
+    ):
         """
         Create a new time-series.
 
@@ -31,40 +42,26 @@ class TimeSeriesCommands:
         key:
             time-series key
         retention_msecs:
-            Maximum age for samples compared to last event time (in milliseconds).
+            Maximum age for samples compared to highest reported timestamp (in milliseconds).
             If None or 0 is passed then  the series is not trimmed at all.
         uncompressed:
-            Since RedisTimeSeries v1.2, both timestamps and values are
-            compressed by default.
-            Adding this flag will keep data in an uncompressed form.
-            Compression not only saves
-            memory but usually improve performance due to lower number
-            of memory accesses.
+            Changes data storage from compressed (by default) to uncompressed
         labels:
             Set of label-value pairs that represent metadata labels of the key.
         chunk_size:
-            Each time-serie uses chunks of memory of fixed size for
-            time series samples.
-            You can alter the default TSDB chunk size by passing the
-            chunk_size argument (in Bytes).
+            Memory size, in bytes, allocated for each data chunk.
+            Must be a multiple of 8 in the range [128 .. 1048576].
         duplicate_policy:
-            Since RedisTimeSeries v1.4 you can specify the duplicate sample policy
-            ( Configure what to do on duplicate sample. )
+            Policy for handling multiple samples with identical timestamps.
             Can be one of:
             - 'block': an error will occur for any out of order sample.
             - 'first': ignore the new value.
             - 'last': override with latest value.
             - 'min': only override if the value is lower than the existing value.
             - 'max': only override if the value is higher than the existing value.
-            When this is not set, the server-wide default will be used.
 
-        For more information: https://oss.redis.com/redistimeseries/commands/#tscreate
+        For more information: https://redis.io/commands/ts.create/
         """  # noqa
-        retention_msecs = kwargs.get("retention_msecs", None)
-        uncompressed = kwargs.get("uncompressed", False)
-        labels = kwargs.get("labels", {})
-        chunk_size = kwargs.get("chunk_size", None)
-        duplicate_policy = kwargs.get("duplicate_policy", None)
         params = [key]
         self._append_retention(params, retention_msecs)
         self._append_uncompressed(params, uncompressed)
@@ -74,29 +71,62 @@ class TimeSeriesCommands:
 
         return self.execute_command(CREATE_CMD, *params)
 
-    def alter(self, key, **kwargs):
+    def alter(
+        self,
+        key: KeyT,
+        retention_msecs: Optional[int] = None,
+        labels: Optional[Dict[str, str]] = None,
+        chunk_size: Optional[int] = None,
+        duplicate_policy: Optional[str] = None,
+    ):
         """
-        Update the retention, labels of an existing key.
-        For more information see
+        Update the retention, chunk size, duplicate policy, and labels of an existing
+        time series.
+
+        Args:
 
-        The parameters are the same as TS.CREATE.
+        key:
+            time-series key
+        retention_msecs:
+            Maximum retention period, compared to maximal existing timestamp (in milliseconds).
+            If None or 0 is passed then  the series is not trimmed at all.
+        labels:
+            Set of label-value pairs that represent metadata labels of the key.
+        chunk_size:
+            Memory size, in bytes, allocated for each data chunk.
+            Must be a multiple of 8 in the range [128 .. 1048576].
+        duplicate_policy:
+            Policy for handling multiple samples with identical timestamps.
+            Can be one of:
+            - 'block': an error will occur for any out of order sample.
+            - 'first': ignore the new value.
+            - 'last': override with latest value.
+            - 'min': only override if the value is lower than the existing value.
+            - 'max': only override if the value is higher than the existing value.
 
-        For more information: https://oss.redis.com/redistimeseries/commands/#tsalter
+        For more information: https://redis.io/commands/ts.alter/
         """  # noqa
-        retention_msecs = kwargs.get("retention_msecs", None)
-        labels = kwargs.get("labels", {})
-        duplicate_policy = kwargs.get("duplicate_policy", None)
         params = [key]
         self._append_retention(params, retention_msecs)
+        self._append_chunk_size(params, chunk_size)
         self._append_duplicate_policy(params, ALTER_CMD, duplicate_policy)
         self._append_labels(params, labels)
 
         return self.execute_command(ALTER_CMD, *params)
 
-    def add(self, key, timestamp, value, **kwargs):
+    def add(
+        self,
+        key: KeyT,
+        timestamp: Union[int, str],
+        value: Number,
+        retention_msecs: Optional[int] = None,
+        uncompressed: Optional[bool] = False,
+        labels: Optional[Dict[str, str]] = None,
+        chunk_size: Optional[int] = None,
+        duplicate_policy: Optional[str] = None,
+    ):
         """
-        Append (or create and append) a new sample to the series.
-        For more information see
+        Append (or create and append) a new sample to a time series.
 
         Args:
 
@@ -107,35 +137,26 @@ class TimeSeriesCommands:
         value:
             Numeric data value of the sample
         retention_msecs:
-            Maximum age for samples compared to last event time (in milliseconds).
+            Maximum retention period, compared to maximal existing timestamp (in milliseconds).
             If None or 0 is passed then  the series is not trimmed at all.
         uncompressed:
-            Since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
-            Adding this flag will keep data in an uncompressed form. Compression not only saves
-            memory but usually improve performance due to lower number of memory accesses.
+            Changes data storage from compressed (by default) to uncompressed
         labels:
             Set of label-value pairs that represent metadata labels of the key.
         chunk_size:
-            Each time-serie uses chunks of memory of fixed size for time series samples.
-            You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
+            Memory size, in bytes, allocated for each data chunk.
+            Must be a multiple of 8 in the range [128 .. 1048576].
         duplicate_policy:
-            Since RedisTimeSeries v1.4 you can specify the duplicate sample policy
-            (Configure what to do on duplicate sample).
+            Policy for handling multiple samples with identical timestamps.
             Can be one of:
             - 'block': an error will occur for any out of order sample.
             - 'first': ignore the new value.
             - 'last': override with latest value.
             - 'min': only override if the value is lower than the existing value.
             - 'max': only override if the value is higher than the existing value.
-            When this is not set, the server-wide default will be used.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsadd
+        For more information: https://redis.io/commands/ts.add/
         """  # noqa
-        retention_msecs = kwargs.get("retention_msecs", None)
-        uncompressed = kwargs.get("uncompressed", False)
-        labels = kwargs.get("labels", {})
-        chunk_size = kwargs.get("chunk_size", None)
-        duplicate_policy = kwargs.get("duplicate_policy", None)
         params = [key, timestamp, value]
         self._append_retention(params, retention_msecs)
         self._append_uncompressed(params, uncompressed)
@@ -145,28 +166,34 @@ class TimeSeriesCommands:
 
         return self.execute_command(ADD_CMD, *params)
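
The timeseries create()/alter()/add() methods above move from **kwargs to explicit, typed keyword arguments. A sketch of the resulting call style, assuming ts = r.ts(); the key, labels and values are illustrative:

ts.create("sensor:1", retention_msecs=86400000, labels={"room": "kitchen"}, duplicate_policy="last")
ts.add("sensor:1", "*", 21.5, chunk_size=128)
ts.alter("sensor:1", chunk_size=256)  # chunk_size can now be altered as well
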
 
-    def madd(self, ktv_tuples):
+    def madd(self, ktv_tuples: List[Tuple[KeyT, Union[int, str], Number]]):
         """
         Append (or create and append) a new `value` to series
         `key` with `timestamp`.
         Expects a list of `tuples` as (`key`,`timestamp`, `value`).
         Return value is an array with timestamps of insertions.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmadd
+        For more information: https://redis.io/commands/ts.madd/
         """  # noqa
         params = []
         for ktv in ktv_tuples:
-            for item in ktv:
-                params.append(item)
+            params.extend(ktv)
 
         return self.execute_command(MADD_CMD, *params)
 
-    def incrby(self, key, value, **kwargs):
+    def incrby(
+        self,
+        key: KeyT,
+        value: Number,
+        timestamp: Optional[Union[int, str]] = None,
+        retention_msecs: Optional[int] = None,
+        uncompressed: Optional[bool] = False,
+        labels: Optional[Dict[str, str]] = None,
+        chunk_size: Optional[int] = None,
+    ):
         """
-        Increment (or create an time-series and increment) the latest
-        sample's of a series.
-        This command can be used as a counter or gauge that automatically gets
-        history as a time series.
+        Increment (or create a time series and increment) the latest sample of a series.
+        This command can be used as a counter or gauge that automatically gets history as a time series.
 
         Args:
 
@@ -175,27 +202,19 @@ class TimeSeriesCommands:
         value:
             Numeric data value of the sample
         timestamp:
-            Timestamp of the sample. None can be used for automatic timestamp (using the system clock).
+            Timestamp of the sample. * can be used for automatic timestamp (using the system clock).
         retention_msecs:
             Maximum age for samples compared to last event time (in milliseconds).
             If None or 0 is passed then  the series is not trimmed at all.
         uncompressed:
-            Since RedisTimeSeries v1.2, both timestamps and values are compressed by default.
-            Adding this flag will keep data in an uncompressed form. Compression not only saves
-            memory but usually improve performance due to lower number of memory accesses.
+            Changes data storage from compressed (by default) to uncompressed
         labels:
             Set of label-value pairs that represent metadata labels of the key.
         chunk_size:
-            Each time-series uses chunks of memory of fixed size for time series samples.
-            You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
+            Memory size, in bytes, allocated for each data chunk.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby
+        For more information: https://redis.io/commands/ts.incrby/
         """  # noqa
-        timestamp = kwargs.get("timestamp", None)
-        retention_msecs = kwargs.get("retention_msecs", None)
-        uncompressed = kwargs.get("uncompressed", False)
-        labels = kwargs.get("labels", {})
-        chunk_size = kwargs.get("chunk_size", None)
         params = [key, value]
         self._append_timestamp(params, timestamp)
         self._append_retention(params, retention_msecs)
@@ -205,12 +224,19 @@ class TimeSeriesCommands:
 
         return self.execute_command(INCRBY_CMD, *params)
 
-    def decrby(self, key, value, **kwargs):
+    def decrby(
+        self,
+        key: KeyT,
+        value: Number,
+        timestamp: Optional[Union[int, str]] = None,
+        retention_msecs: Optional[int] = None,
+        uncompressed: Optional[bool] = False,
+        labels: Optional[Dict[str, str]] = None,
+        chunk_size: Optional[int] = None,
+    ):
         """
-        Decrement (or create an time-series and decrement) the
-        latest sample's of a series.
-        This command can be used as a counter or gauge that
-        automatically gets history as a time series.
+        Decrement (or create a time series and decrement) the latest sample of a series.
+        This command can be used as a counter or gauge that automatically gets history as a time series.
 
         Args:
 
@@ -219,31 +245,19 @@ class TimeSeriesCommands:
         value:
             Numeric data value of the sample
         timestamp:
-            Timestamp of the sample. None can be used for automatic
-            timestamp (using the system clock).
+            Timestamp of the sample. * can be used for automatic timestamp (using the system clock).
         retention_msecs:
             Maximum age for samples compared to last event time (in milliseconds).
             If None or 0 is passed then  the series is not trimmed at all.
         uncompressed:
-            Since RedisTimeSeries v1.2, both timestamps and values are
-            compressed by default.
-            Adding this flag will keep data in an uncompressed form.
-            Compression not only saves
-            memory but usually improve performance due to lower number
-            of memory accesses.
+            Changes data storage from compressed (by default) to uncompressed
         labels:
             Set of label-value pairs that represent metadata labels of the key.
         chunk_size:
-            Each time-series uses chunks of memory of fixed size for time series samples.
-            You can alter the default TSDB chunk size by passing the chunk_size argument (in Bytes).
+            Memory size, in bytes, allocated for each data chunk.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsincrbytsdecrby
+        For more information: https://redis.io/commands/ts.decrby/
         """  # noqa
-        timestamp = kwargs.get("timestamp", None)
-        retention_msecs = kwargs.get("retention_msecs", None)
-        uncompressed = kwargs.get("uncompressed", False)
-        labels = kwargs.get("labels", {})
-        chunk_size = kwargs.get("chunk_size", None)
         params = [key, value]
         self._append_timestamp(params, timestamp)
         self._append_retention(params, retention_msecs)
@@ -253,14 +267,9 @@ class TimeSeriesCommands:
 
         return self.execute_command(DECRBY_CMD, *params)
 
-    def delete(self, key, from_time, to_time):
+    def delete(self, key: KeyT, from_time: int, to_time: int):
         """
-        Delete data points for a given timeseries and interval range
-        in the form of start and end delete timestamps.
-        The given timestamp interval is closed (inclusive), meaning start
-        and end data points will also be deleted.
-        Return the count for deleted items.
-        For more information see
+        Delete all samples between two timestamps for a given time series.
 
         Args:
 
@@ -271,68 +280,98 @@ class TimeSeriesCommands:
         to_time:
             End timestamp for the range deletion.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsdel
+        For more information: https://redis.io/commands/ts.del/
         """  # noqa
         return self.execute_command(DEL_CMD, key, from_time, to_time)
 
-    def createrule(self, source_key, dest_key, aggregation_type, bucket_size_msec):
+    def createrule(
+        self,
+        source_key: KeyT,
+        dest_key: KeyT,
+        aggregation_type: str,
+        bucket_size_msec: int,
+        align_timestamp: Optional[int] = None,
+    ):
         """
         Create a compaction rule from values added to `source_key` into `dest_key`.
-        Aggregating for `bucket_size_msec` where an `aggregation_type` can be
-        [`avg`, `sum`, `min`, `max`, `range`, `count`, `first`, `last`,
-        `std.p`, `std.s`, `var.p`, `var.s`]
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tscreaterule
+        Args:
+
+        source_key:
+            Key name for source time series
+        dest_key:
+            Key name for destination (compacted) time series
+        aggregation_type:
+            Aggregation type: One of the following:
+            [`avg`, `sum`, `min`, `max`, `range`, `count`, `first`, `last`, `std.p`,
+            `std.s`, `var.p`, `var.s`, `twa`]
+        bucket_size_msec:
+            Duration of each bucket, in milliseconds
+        align_timestamp:
+            Assure that there is a bucket that starts at exactly align_timestamp and
+            align all other buckets accordingly.
+
+        For more information: https://redis.io/commands/ts.createrule/
         """  # noqa
         params = [source_key, dest_key]
         self._append_aggregation(params, aggregation_type, bucket_size_msec)
+        if align_timestamp is not None:
+            params.append(align_timestamp)
 
         return self.execute_command(CREATERULE_CMD, *params)
 
-    def deleterule(self, source_key, dest_key):
+    def deleterule(self, source_key: KeyT, dest_key: KeyT):
         """
-        Delete a compaction rule.
-        For more information see
+        Delete a compaction rule from `source_key` to `dest_key`.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsdeleterule
+        For more information: https://redis.io/commands/ts.deleterule/
         """  # noqa
         return self.execute_command(DELETERULE_CMD, source_key, dest_key)
 
     def __range_params(
         self,
-        key,
-        from_time,
-        to_time,
-        count,
-        aggregation_type,
-        bucket_size_msec,
-        filter_by_ts,
-        filter_by_min_value,
-        filter_by_max_value,
-        align,
+        key: KeyT,
+        from_time: Union[int, str],
+        to_time: Union[int, str],
+        count: Optional[int],
+        aggregation_type: Optional[str],
+        bucket_size_msec: Optional[int],
+        filter_by_ts: Optional[List[int]],
+        filter_by_min_value: Optional[int],
+        filter_by_max_value: Optional[int],
+        align: Optional[Union[int, str]],
+        latest: Optional[bool],
+        bucket_timestamp: Optional[str],
+        empty: Optional[bool],
     ):
         """Create TS.RANGE and TS.REVRANGE arguments."""
         params = [key, from_time, to_time]
+        self._append_latest(params, latest)
         self._append_filer_by_ts(params, filter_by_ts)
         self._append_filer_by_value(params, filter_by_min_value, filter_by_max_value)
         self._append_count(params, count)
         self._append_align(params, align)
         self._append_aggregation(params, aggregation_type, bucket_size_msec)
+        self._append_bucket_timestamp(params, bucket_timestamp)
+        self._append_empty(params, empty)
 
         return params
 
     def range(
         self,
-        key,
-        from_time,
-        to_time,
-        count=None,
-        aggregation_type=None,
-        bucket_size_msec=0,
-        filter_by_ts=None,
-        filter_by_min_value=None,
-        filter_by_max_value=None,
-        align=None,
+        key: KeyT,
+        from_time: Union[int, str],
+        to_time: Union[int, str],
+        count: Optional[int] = None,
+        aggregation_type: Optional[str] = None,
+        bucket_size_msec: Optional[int] = 0,
+        filter_by_ts: Optional[List[int]] = None,
+        filter_by_min_value: Optional[int] = None,
+        filter_by_max_value: Optional[int] = None,
+        align: Optional[Union[int, str]] = None,
+        latest: Optional[bool] = False,
+        bucket_timestamp: Optional[str] = None,
+        empty: Optional[bool] = False,
     ):
         """
         Query a range in forward direction for a specific time-series.
@@ -342,31 +381,34 @@ class TimeSeriesCommands:
         key:
             Key name for timeseries.
         from_time:
-            Start timestamp for the range query. - can be used to express
-            the minimum possible timestamp (0).
+            Start timestamp for the range query. `-` can be used to express the minimum possible timestamp (0).
         to_time:
-            End timestamp for range query, + can be used to express the
-            maximum possible timestamp.
+            End timestamp for range query, `+` can be used to express the maximum possible timestamp.
         count:
-            Optional maximum number of returned results.
+            Limits the number of returned samples.
         aggregation_type:
-            Optional aggregation type. Can be one of
-            [`avg`, `sum`, `min`, `max`, `range`, `count`,
-            `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`]
+            Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+            `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
         bucket_size_msec:
             Time bucket for aggregation in milliseconds.
         filter_by_ts:
             List of timestamps to filter the result by specific timestamps.
         filter_by_min_value:
-            Filter result by minimum value (must mention also filter
-            by_max_value).
+            Filter result by minimum value (must mention also filter_by_max_value).
         filter_by_max_value:
-            Filter result by maximum value (must mention also filter
-            by_min_value).
+            Filter result by maximum value (must mention also filter_by_min_value).
         align:
             Timestamp for alignment control for aggregation.
-
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange
+        latest:
+            Used when a time series is a compaction, reports the compacted value of the
+            latest (possibly partial) bucket.
+        bucket_timestamp:
+            Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
+            `high`, `~`, `mid`].
+        empty:
+            Reports aggregations for empty buckets.
+
+        For more information: https://redis.io/commands/ts.range/
         """  # noqa
         params = self.__range_params(
             key,
@@ -379,21 +421,27 @@ class TimeSeriesCommands:
             filter_by_min_value,
             filter_by_max_value,
             align,
+            latest,
+            bucket_timestamp,
+            empty,
         )
         return self.execute_command(RANGE_CMD, *params)
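
As an illustration of the new latest, bucket_timestamp and empty keywords of range() (the key name and values are made up, not taken from the upstream tests):

    import redis

    r = redis.Redis()
    # Hourly maxima over the whole series, including the still-open
    # bucket (LATEST), empty buckets (EMPTY), and mid-bucket timestamps.
    samples = r.ts().range(
        "sensor:hourly",
        "-",
        "+",
        aggregation_type="max",
        bucket_size_msec=3_600_000,
        latest=True,
        bucket_timestamp="mid",
        empty=True,
    )
    for timestamp, value in samples:
        print(timestamp, value)
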
 
     def revrange(
         self,
-        key,
-        from_time,
-        to_time,
-        count=None,
-        aggregation_type=None,
-        bucket_size_msec=0,
-        filter_by_ts=None,
-        filter_by_min_value=None,
-        filter_by_max_value=None,
-        align=None,
+        key: KeyT,
+        from_time: Union[int, str],
+        to_time: Union[int, str],
+        count: Optional[int] = None,
+        aggregation_type: Optional[str] = None,
+        bucket_size_msec: Optional[int] = 0,
+        filter_by_ts: Optional[List[int]] = None,
+        filter_by_min_value: Optional[int] = None,
+        filter_by_max_value: Optional[int] = None,
+        align: Optional[Union[int, str]] = None,
+        latest: Optional[bool] = False,
+        bucket_timestamp: Optional[str] = None,
+        empty: Optional[bool] = False,
     ):
         """
         Query a range in reverse direction for a specific time-series.
@@ -409,10 +457,10 @@ class TimeSeriesCommands:
         to_time:
             End timestamp for range query, + can be used to express the maximum possible timestamp.
         count:
-            Optional maximum number of returned results.
+            Limits the number of returned samples.
         aggregation_type:
-            Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`, `range`, `count`,
-            `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`]
+            Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+            `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
         bucket_size_msec:
             Time bucket for aggregation in milliseconds.
         filter_by_ts:
@@ -423,8 +471,16 @@ class TimeSeriesCommands:
             Filter result by maximum value (must mention also filter_by_min_value).
         align:
             Timestamp for alignment control for aggregation.
-
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsrangetsrevrange
+        latest:
+            Used when a time series is a compaction, reports the compacted value of the
+            latest (possibly partial) bucket.
+        bucket_timestamp:
+            Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
+            `high`, `~`, `mid`].
+        empty:
+            Reports aggregations for empty buckets.
+
+        For more information: https://redis.io/commands/ts.revrange/
         """  # noqa
         params = self.__range_params(
             key,
@@ -437,34 +493,43 @@ class TimeSeriesCommands:
             filter_by_min_value,
             filter_by_max_value,
             align,
+            latest,
+            bucket_timestamp,
+            empty,
         )
         return self.execute_command(REVRANGE_CMD, *params)
 
     def __mrange_params(
         self,
-        aggregation_type,
-        bucket_size_msec,
-        count,
-        filters,
-        from_time,
-        to_time,
-        with_labels,
-        filter_by_ts,
-        filter_by_min_value,
-        filter_by_max_value,
-        groupby,
-        reduce,
-        select_labels,
-        align,
+        aggregation_type: Optional[str],
+        bucket_size_msec: Optional[int],
+        count: Optional[int],
+        filters: List[str],
+        from_time: Union[int, str],
+        to_time: Union[int, str],
+        with_labels: Optional[bool],
+        filter_by_ts: Optional[List[int]],
+        filter_by_min_value: Optional[int],
+        filter_by_max_value: Optional[int],
+        groupby: Optional[str],
+        reduce: Optional[str],
+        select_labels: Optional[List[str]],
+        align: Optional[Union[int, str]],
+        latest: Optional[bool],
+        bucket_timestamp: Optional[str],
+        empty: Optional[bool],
     ):
         """Create TS.MRANGE and TS.MREVRANGE arguments."""
         params = [from_time, to_time]
+        self._append_latest(params, latest)
         self._append_filer_by_ts(params, filter_by_ts)
         self._append_filer_by_value(params, filter_by_min_value, filter_by_max_value)
+        self._append_with_labels(params, with_labels, select_labels)
         self._append_count(params, count)
         self._append_align(params, align)
         self._append_aggregation(params, aggregation_type, bucket_size_msec)
-        self._append_with_labels(params, with_labels, select_labels)
+        self._append_bucket_timestamp(params, bucket_timestamp)
+        self._append_empty(params, empty)
         params.extend(["FILTER"])
         params += filters
         self._append_groupby_reduce(params, groupby, reduce)
@@ -472,20 +537,23 @@ class TimeSeriesCommands:
 
     def mrange(
         self,
-        from_time,
-        to_time,
-        filters,
-        count=None,
-        aggregation_type=None,
-        bucket_size_msec=0,
-        with_labels=False,
-        filter_by_ts=None,
-        filter_by_min_value=None,
-        filter_by_max_value=None,
-        groupby=None,
-        reduce=None,
-        select_labels=None,
-        align=None,
+        from_time: Union[int, str],
+        to_time: Union[int, str],
+        filters: List[str],
+        count: Optional[int] = None,
+        aggregation_type: Optional[str] = None,
+        bucket_size_msec: Optional[int] = 0,
+        with_labels: Optional[bool] = False,
+        filter_by_ts: Optional[List[int]] = None,
+        filter_by_min_value: Optional[int] = None,
+        filter_by_max_value: Optional[int] = None,
+        groupby: Optional[str] = None,
+        reduce: Optional[str] = None,
+        select_labels: Optional[List[str]] = None,
+        align: Optional[Union[int, str]] = None,
+        latest: Optional[bool] = False,
+        bucket_timestamp: Optional[str] = None,
+        empty: Optional[bool] = False,
     ):
         """
         Query a range across multiple time-series by filters in forward direction.
@@ -493,46 +561,45 @@ class TimeSeriesCommands:
         Args:
 
         from_time:
-            Start timestamp for the range query. `-` can be used to
-            express the minimum possible timestamp (0).
+            Start timestamp for the range query. `-` can be used to express the minimum possible timestamp (0).
         to_time:
-            End timestamp for range query, `+` can be used to express
-            the maximum possible timestamp.
+            End timestamp for range query, `+` can be used to express the maximum possible timestamp.
         filters:
             Filter to match the time-series labels.
         count:
-            Optional maximum number of returned results.
+            Limits the number of returned samples.
         aggregation_type:
-            Optional aggregation type. Can be one of
-            [`avg`, `sum`, `min`, `max`, `range`, `count`,
-            `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`]
+            Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+            `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
         bucket_size_msec:
             Time bucket for aggregation in milliseconds.
         with_labels:
-            Include in the reply the label-value pairs that represent metadata
-            labels of the time-series.
-            If this argument is not set, by default, an empty Array will be
-            replied on the labels array position.
+            Include in the reply all label-value pairs representing metadata labels of the time series.
         filter_by_ts:
             List of timestamps to filter the result by specific timestamps.
         filter_by_min_value:
-            Filter result by minimum value (must mention also
-            filter_by_max_value).
+            Filter result by minimum value (must mention also filter_by_max_value).
         filter_by_max_value:
-            Filter result by maximum value (must mention also
-            filter_by_min_value).
+            Filter result by maximum value (must mention also filter_by_min_value).
         groupby:
             Grouping the results by fields (must also specify reduce).
         reduce:
-            Applying reducer functions on each group. Can be one
-            of [`sum`, `min`, `max`].
+            Applying reducer functions on each group. Can be one of [`avg`, `sum`, `min`,
+            `max`, `range`, `count`, `std.p`, `std.s`, `var.p`, `var.s`].
         select_labels:
-            Include in the reply only a subset of the key-value
-            pair labels of a series.
+            Include in the reply only a subset of the key-value pair labels of a series.
         align:
             Timestamp for alignment control for aggregation.
-
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange
+        latest:
+            Used when a time series is a compaction, reports the compacted
+            value of the latest (possibly partial) bucket.
+        bucket_timestamp:
+            Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
+            `high`, `~`, `mid`].
+        empty:
+            Reports aggregations for empty buckets.
+
+        For more information: https://redis.io/commands/ts.mrange/
         """  # noqa
         params = self.__mrange_params(
             aggregation_type,
@@ -549,26 +616,32 @@ class TimeSeriesCommands:
             reduce,
             select_labels,
             align,
+            latest,
+            bucket_timestamp,
+            empty,
         )
 
         return self.execute_command(MRANGE_CMD, *params)
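
A hedged sketch of an mrange() call exercising the filter/groupby/reduce path together with the new latest flag (labels and key contents are hypothetical):

    import redis

    r = redis.Redis()
    # Per-minute averages across every series labelled area=us,
    # grouped by the "sensor" label and reduced with max.
    reply = r.ts().mrange(
        "-",
        "+",
        filters=["area=us"],
        aggregation_type="avg",
        bucket_size_msec=60_000,
        groupby="sensor",
        reduce="max",
        latest=True,
    )
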
 
     def mrevrange(
         self,
-        from_time,
-        to_time,
-        filters,
-        count=None,
-        aggregation_type=None,
-        bucket_size_msec=0,
-        with_labels=False,
-        filter_by_ts=None,
-        filter_by_min_value=None,
-        filter_by_max_value=None,
-        groupby=None,
-        reduce=None,
-        select_labels=None,
-        align=None,
+        from_time: Union[int, str],
+        to_time: Union[int, str],
+        filters: List[str],
+        count: Optional[int] = None,
+        aggregation_type: Optional[str] = None,
+        bucket_size_msec: Optional[int] = 0,
+        with_labels: Optional[bool] = False,
+        filter_by_ts: Optional[List[int]] = None,
+        filter_by_min_value: Optional[int] = None,
+        filter_by_max_value: Optional[int] = None,
+        groupby: Optional[str] = None,
+        reduce: Optional[str] = None,
+        select_labels: Optional[List[str]] = None,
+        align: Optional[Union[int, str]] = None,
+        latest: Optional[bool] = False,
+        bucket_timestamp: Optional[str] = None,
+        empty: Optional[bool] = False,
     ):
         """
         Query a range across multiple time-series by filters in reverse direction.
@@ -576,48 +649,45 @@ class TimeSeriesCommands:
         Args:
 
         from_time:
-            Start timestamp for the range query. - can be used to express
-            the minimum possible timestamp (0).
+            Start timestamp for the range query. `-` can be used to express the minimum possible timestamp (0).
         to_time:
-            End timestamp for range query, + can be used to express
-            the maximum possible timestamp.
+            End timestamp for range query, `+` can be used to express the maximum possible timestamp.
         filters:
             Filter to match the time-series labels.
         count:
-            Optional maximum number of returned results.
+            Limits the number of returned samples.
         aggregation_type:
-            Optional aggregation type. Can be one of
-            [`avg`, `sum`, `min`, `max`, `range`, `count`,
-            `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`]
+            Optional aggregation type. Can be one of [`avg`, `sum`, `min`, `max`,
+            `range`, `count`, `first`, `last`, `std.p`, `std.s`, `var.p`, `var.s`, `twa`]
         bucket_size_msec:
             Time bucket for aggregation in milliseconds.
         with_labels:
-            Include in the reply the label-value pairs that represent
-            metadata labels
-            of the time-series.
-            If this argument is not set, by default, an empty Array
-            will be replied
-            on the labels array position.
+            Include in the reply all label-value pairs representing metadata labels of the time series.
         filter_by_ts:
             List of timestamps to filter the result by specific timestamps.
         filter_by_min_value:
-            Filter result by minimum value (must mention also filter
-            by_max_value).
+            Filter result by minimum value (must mention also filter_by_max_value).
         filter_by_max_value:
-            Filter result by maximum value (must mention also filter
-            by_min_value).
+            Filter result by maximum value (must mention also filter_by_min_value).
         groupby:
             Grouping the results by fields (must also specify reduce).
         reduce:
-            Applying reducer functions on each group. Can be one
-            of [`sum`, `min`, `max`].
+            Applying reducer functions on each group. Can be one of [`avg`, `sum`, `min`,
+            `max`, `range`, `count`, `std.p`, `std.s`, `var.p`, `var.s`].
         select_labels:
-            Include in the reply only a subset of the key-value pair
-            labels of a series.
+            Include in the reply only a subset of the key-value pair labels of a series.
         align:
             Timestamp for alignment control for aggregation.
-
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmrangetsmrevrange
+        latest:
+            Used when a time series is a compaction, reports the compacted
+            value of the latest (possibly partial) bucket.
+        bucket_timestamp:
+            Controls how bucket timestamps are reported. Can be one of [`-`, `low`, `+`,
+            `high`, `~`, `mid`].
+        empty:
+            Reports aggregations for empty buckets.
+
+        For more information: https://redis.io/commands/ts.mrevrange/
         """  # noqa
         params = self.__mrange_params(
             aggregation_type,
@@ -634,54 +704,85 @@ class TimeSeriesCommands:
             reduce,
             select_labels,
             align,
+            latest,
+            bucket_timestamp,
+            empty,
         )
 
         return self.execute_command(MREVRANGE_CMD, *params)
 
-    def get(self, key):
+    def get(self, key: KeyT, latest: Optional[bool] = False):
         """# noqa
         Get the last sample of `key`.
+        `latest` used when a time series is a compaction, reports the compacted
+        value of the latest (possibly partial) bucket
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsget
+        For more information: https://redis.io/commands/ts.get/
         """  # noqa
-        return self.execute_command(GET_CMD, key)
+        params = [key]
+        self._append_latest(params, latest)
+        return self.execute_command(GET_CMD, *params)
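
For example (assuming "sensor:hourly" is a compaction created as sketched earlier), the latest flag lets get() return the bucket that is still being filled:

    import redis

    r = redis.Redis()
    # Returns a (timestamp, value) pair (None when the series is empty).
    last = r.ts().get("sensor:hourly", latest=True)
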
 
-    def mget(self, filters, with_labels=False):
+    def mget(
+        self,
+        filters: List[str],
+        with_labels: Optional[bool] = False,
+        select_labels: Optional[List[str]] = None,
+        latest: Optional[bool] = False,
+    ):
         """# noqa
         Get the last samples matching the specific `filter`.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsmget
+        Args:
+
+        filters:
+            Filter to match the time-series labels.
+        with_labels:
+            Include in the reply all label-value pairs representing metadata
+            labels of the time series.
+        select_labels:
+            Include in the reply only a subset of the key-value pair labels of a series.
+        latest:
+            Used when a time series is a compaction, reports the compacted
+            value of the latest (possibly partial) bucket.
+
+        For more information: https://redis.io/commands/ts.mget/
         """  # noqa
         params = []
-        self._append_with_labels(params, with_labels)
+        self._append_latest(params, latest)
+        self._append_with_labels(params, with_labels, select_labels)
         params.extend(["FILTER"])
         params += filters
         return self.execute_command(MGET_CMD, *params)
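
A small usage sketch for the new select_labels and latest arguments of mget() (the filter expression and label name are illustrative):

    import redis

    r = redis.Redis()
    # Last sample of every series labelled area=us, returning only
    # the "unit" label with each entry.
    reply = r.ts().mget(["area=us"], select_labels=["unit"], latest=True)
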
 
-    def info(self, key):
+    def info(self, key: KeyT):
         """# noqa
         Get information of `key`.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsinfo
+        For more information: https://redis.io/commands/ts.info/
         """  # noqa
         return self.execute_command(INFO_CMD, key)
 
-    def queryindex(self, filters):
+    def queryindex(self, filters: List[str]):
         """# noqa
-        Get all the keys matching the `filter` list.
+        Get all time series keys matching the `filter` list.
 
-        For more information: https://oss.redis.com/redistimeseries/master/commands/#tsqueryindex
+        For more information: https://redis.io/commands/ts.queryindex/
         """  # noq
         return self.execute_command(QUERYINDEX_CMD, *filters)
 
     @staticmethod
-    def _append_uncompressed(params, uncompressed):
+    def _append_uncompressed(params: List[str], uncompressed: Optional[bool]):
         """Append UNCOMPRESSED tag to params."""
         if uncompressed:
             params.extend(["UNCOMPRESSED"])
 
     @staticmethod
-    def _append_with_labels(params, with_labels, select_labels=None):
+    def _append_with_labels(
+        params: List[str],
+        with_labels: Optional[bool],
+        select_labels: Optional[List[str]],
+    ):
         """Append labels behavior to params."""
         if with_labels and select_labels:
             raise DataError(
@@ -694,19 +795,21 @@ class TimeSeriesCommands:
             params.extend(["SELECTED_LABELS", *select_labels])
 
     @staticmethod
-    def _append_groupby_reduce(params, groupby, reduce):
+    def _append_groupby_reduce(
+        params: List[str], groupby: Optional[str], reduce: Optional[str]
+    ):
         """Append GROUPBY REDUCE property to params."""
         if groupby is not None and reduce is not None:
             params.extend(["GROUPBY", groupby, "REDUCE", reduce.upper()])
 
     @staticmethod
-    def _append_retention(params, retention):
+    def _append_retention(params: List[str], retention: Optional[int]):
         """Append RETENTION property to params."""
         if retention is not None:
             params.extend(["RETENTION", retention])
 
     @staticmethod
-    def _append_labels(params, labels):
+    def _append_labels(params: List[str], labels: Optional[List[str]]):
         """Append LABELS property to params."""
         if labels:
             params.append("LABELS")
@@ -714,38 +817,43 @@ class TimeSeriesCommands:
                 params.extend([k, v])
 
     @staticmethod
-    def _append_count(params, count):
+    def _append_count(params: List[str], count: Optional[int]):
         """Append COUNT property to params."""
         if count is not None:
             params.extend(["COUNT", count])
 
     @staticmethod
-    def _append_timestamp(params, timestamp):
+    def _append_timestamp(params: List[str], timestamp: Optional[int]):
         """Append TIMESTAMP property to params."""
         if timestamp is not None:
             params.extend(["TIMESTAMP", timestamp])
 
     @staticmethod
-    def _append_align(params, align):
+    def _append_align(params: List[str], align: Optional[Union[int, str]]):
         """Append ALIGN property to params."""
         if align is not None:
             params.extend(["ALIGN", align])
 
     @staticmethod
-    def _append_aggregation(params, aggregation_type, bucket_size_msec):
+    def _append_aggregation(
+        params: List[str],
+        aggregation_type: Optional[str],
+        bucket_size_msec: Optional[int],
+    ):
         """Append AGGREGATION property to params."""
         if aggregation_type is not None:
-            params.append("AGGREGATION")
-            params.extend([aggregation_type, bucket_size_msec])
+            params.extend(["AGGREGATION", aggregation_type, bucket_size_msec])
 
     @staticmethod
-    def _append_chunk_size(params, chunk_size):
+    def _append_chunk_size(params: List[str], chunk_size: Optional[int]):
         """Append CHUNK_SIZE property to params."""
         if chunk_size is not None:
             params.extend(["CHUNK_SIZE", chunk_size])
 
     @staticmethod
-    def _append_duplicate_policy(params, command, duplicate_policy):
+    def _append_duplicate_policy(
+        params: List[str], command: Optional[str], duplicate_policy: Optional[str]
+    ):
         """Append DUPLICATE_POLICY property to params on CREATE
         and ON_DUPLICATE on ADD.
         """
@@ -756,13 +864,33 @@ class TimeSeriesCommands:
                 params.extend(["DUPLICATE_POLICY", duplicate_policy])
 
     @staticmethod
-    def _append_filer_by_ts(params, ts_list):
+    def _append_filer_by_ts(params: List[str], ts_list: Optional[List[int]]):
         """Append FILTER_BY_TS property to params."""
         if ts_list is not None:
             params.extend(["FILTER_BY_TS", *ts_list])
 
     @staticmethod
-    def _append_filer_by_value(params, min_value, max_value):
+    def _append_filer_by_value(
+        params: List[str], min_value: Optional[int], max_value: Optional[int]
+    ):
         """Append FILTER_BY_VALUE property to params."""
         if min_value is not None and max_value is not None:
             params.extend(["FILTER_BY_VALUE", min_value, max_value])
+
+    @staticmethod
+    def _append_latest(params: List[str], latest: Optional[bool]):
+        """Append LATEST property to params."""
+        if latest:
+            params.append("LATEST")
+
+    @staticmethod
+    def _append_bucket_timestamp(params: List[str], bucket_timestamp: Optional[str]):
+        """Append BUCKET_TIMESTAMP property to params."""
+        if bucket_timestamp is not None:
+            params.extend(["BUCKETTIMESTAMP", bucket_timestamp])
+
+    @staticmethod
+    def _append_empty(params: List[str], empty: Optional[bool]):
+        """Append EMPTY property to params."""
+        if empty:
+            params.append("EMPTY")
diff --git a/redis/commands/timeseries/info.py b/redis/commands/timeseries/info.py
index fba7f09..65f3baa 100644
--- a/redis/commands/timeseries/info.py
+++ b/redis/commands/timeseries/info.py
@@ -60,15 +60,15 @@ class TSInfo:
         https://oss.redis.com/redistimeseries/configuration/#duplicate_policy
         """
         response = dict(zip(map(nativestr, args[::2]), args[1::2]))
-        self.rules = response["rules"]
-        self.source_key = response["sourceKey"]
-        self.chunk_count = response["chunkCount"]
-        self.memory_usage = response["memoryUsage"]
-        self.total_samples = response["totalSamples"]
-        self.labels = list_to_dict(response["labels"])
-        self.retention_msecs = response["retentionTime"]
-        self.lastTimeStamp = response["lastTimestamp"]
-        self.first_time_stamp = response["firstTimestamp"]
+        self.rules = response.get("rules")
+        self.source_key = response.get("sourceKey")
+        self.chunk_count = response.get("chunkCount")
+        self.memory_usage = response.get("memoryUsage")
+        self.total_samples = response.get("totalSamples")
+        self.labels = list_to_dict(response.get("labels"))
+        self.retention_msecs = response.get("retentionTime")
+        self.last_timestamp = response.get("lastTimestamp")
+        self.first_timestamp = response.get("firstTimestamp")
         if "maxSamplesPerChunk" in response:
             self.max_samples_per_chunk = response["maxSamplesPerChunk"]
             self.chunk_size = (
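
With the switch to dict.get(), TSInfo attributes that are missing from the TS.INFO reply come back as None instead of raising KeyError, and the timestamp attributes now use snake_case (first_timestamp, last_timestamp). A hedged usage sketch:

    import redis

    r = redis.Redis()
    info = r.ts().info("sensor:raw")
    # Attributes default to None when the server reply omits them.
    print(info.total_samples, info.first_timestamp, info.last_timestamp)
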
diff --git a/redis/connection.py b/redis/connection.py
old mode 100755
new mode 100644
index 3438baf..d35980c
--- a/redis/connection.py
+++ b/redis/connection.py
@@ -3,16 +3,18 @@ import errno
 import io
 import os
 import socket
+import sys
 import threading
 import weakref
+from io import SEEK_END
 from itertools import chain
 from queue import Empty, Full, LifoQueue
 from time import time
+from typing import Optional, Union
 from urllib.parse import parse_qs, unquote, urlparse
 
-from packaging.version import Version
-
 from redis.backoff import NoBackoff
+from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
 from redis.exceptions import (
     AuthenticationError,
     AuthenticationWrongNumberOfArgsError,
@@ -31,7 +33,12 @@ from redis.exceptions import (
     TimeoutError,
 )
 from redis.retry import Retry
-from redis.utils import CRYPTOGRAPHY_AVAILABLE, HIREDIS_AVAILABLE, str_if_bytes
+from redis.utils import (
+    CRYPTOGRAPHY_AVAILABLE,
+    HIREDIS_AVAILABLE,
+    HIREDIS_PACK_AVAILABLE,
+    str_if_bytes,
+)
 
 try:
     import ssl
@@ -54,16 +61,6 @@ NONBLOCKING_EXCEPTIONS = tuple(NONBLOCKING_EXCEPTION_ERROR_NUMBERS.keys())
 if HIREDIS_AVAILABLE:
     import hiredis
 
-    hiredis_version = Version(hiredis.__version__)
-    HIREDIS_SUPPORTS_CALLABLE_ERRORS = hiredis_version >= Version("0.1.3")
-    HIREDIS_SUPPORTS_BYTE_BUFFER = hiredis_version >= Version("0.1.4")
-    HIREDIS_SUPPORTS_ENCODING_ERRORS = hiredis_version >= Version("1.0.0")
-
-    HIREDIS_USE_BYTE_BUFFER = True
-    # only use byte buffer if hiredis supports it
-    if not HIREDIS_SUPPORTS_BYTE_BUFFER:
-        HIREDIS_USE_BYTE_BUFFER = False
-
 SYM_STAR = b"*"
 SYM_DOLLAR = b"$"
 SYM_CRLF = b"\r\n"
@@ -72,14 +69,23 @@ SYM_EMPTY = b""
 SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
 
 SENTINEL = object()
-MODULE_LOAD_ERROR = "Error loading the extension. " "Please check the server logs."
+MODULE_LOAD_ERROR = "Error loading the extension. Please check the server logs."
 NO_SUCH_MODULE_ERROR = "Error unloading module: no such module with that name"
-MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not " "possible."
+MODULE_UNLOAD_NOT_POSSIBLE_ERROR = "Error unloading module: operation not possible."
 MODULE_EXPORTS_DATA_TYPES_ERROR = (
     "Error unloading module: the module "
     "exports one or more module-side data "
     "types, can't unload"
 )
+# user sends an AUTH cmd to a server without authorization configured
+NO_AUTH_SET_ERROR = {
+    # Redis >= 6.0
+    "AUTH <password> called without any password "
+    "configured for the default user. Are you sure "
+    "your configuration is correct?": AuthenticationError,
+    # Redis < 6.0
+    "Client sent AUTH, but no password is set": AuthenticationError,
+}
 
 
 class Encoder:
@@ -127,7 +133,6 @@ class BaseParser:
     EXCEPTION_CLASSES = {
         "ERR": {
             "max number of clients reached": ConnectionError,
-            "Client sent AUTH, but no password is set": AuthenticationError,
             "invalid password": AuthenticationError,
             # some Redis server versions report invalid command syntax
             # in lowercase
@@ -141,7 +146,9 @@ class BaseParser:
             MODULE_EXPORTS_DATA_TYPES_ERROR: ModuleError,
             NO_SUCH_MODULE_ERROR: ModuleError,
             MODULE_UNLOAD_NOT_POSSIBLE_ERROR: ModuleError,
+            **NO_AUTH_SET_ERROR,
         },
+        "WRONGPASS": AuthenticationError,
         "EXECABORT": ExecAbortError,
         "LOADING": BusyLoadingError,
         "NOSCRIPT": NoScriptError,
@@ -163,31 +170,40 @@ class BaseParser:
 
 
 class SocketBuffer:
-    def __init__(self, socket, socket_read_size, socket_timeout):
+    def __init__(
+        self, socket: socket.socket, socket_read_size: int, socket_timeout: float
+    ):
         self._sock = socket
         self.socket_read_size = socket_read_size
         self.socket_timeout = socket_timeout
         self._buffer = io.BytesIO()
-        # number of bytes written to the buffer from the socket
-        self.bytes_written = 0
-        # number of bytes read from the buffer
-        self.bytes_read = 0
 
-    @property
-    def length(self):
-        return self.bytes_written - self.bytes_read
+    def unread_bytes(self) -> int:
+        """
+        Remaining unread length of buffer
+        """
+        pos = self._buffer.tell()
+        end = self._buffer.seek(0, SEEK_END)
+        self._buffer.seek(pos)
+        return end - pos
 
-    def _read_from_socket(self, length=None, timeout=SENTINEL, raise_on_timeout=True):
+    def _read_from_socket(
+        self,
+        length: Optional[int] = None,
+        timeout: Union[float, object] = SENTINEL,
+        raise_on_timeout: Optional[bool] = True,
+    ) -> bool:
         sock = self._sock
         socket_read_size = self.socket_read_size
-        buf = self._buffer
-        buf.seek(self.bytes_written)
         marker = 0
         custom_timeout = timeout is not SENTINEL
 
+        buf = self._buffer
+        current_pos = buf.tell()
+        buf.seek(0, SEEK_END)
+        if custom_timeout:
+            sock.settimeout(timeout)
         try:
-            if custom_timeout:
-                sock.settimeout(timeout)
             while True:
                 data = self._sock.recv(socket_read_size)
                 # an empty string indicates the server shutdown the socket
@@ -195,7 +211,6 @@ class SocketBuffer:
                     raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
                 buf.write(data)
                 data_length = len(data)
-                self.bytes_written += data_length
                 marker += data_length
 
                 if length is not None and length > marker:
@@ -215,59 +230,69 @@ class SocketBuffer:
                 return False
             raise ConnectionError(f"Error while reading from socket: {ex.args}")
         finally:
+            buf.seek(current_pos)
             if custom_timeout:
                 sock.settimeout(self.socket_timeout)
 
-    def can_read(self, timeout):
-        return bool(self.length) or self._read_from_socket(
+    def can_read(self, timeout: float) -> bool:
+        return bool(self.unread_bytes()) or self._read_from_socket(
             timeout=timeout, raise_on_timeout=False
         )
 
-    def read(self, length):
+    def read(self, length: int) -> bytes:
         length = length + 2  # make sure to read the \r\n terminator
-        # make sure we've read enough data from the socket
-        if length > self.length:
-            self._read_from_socket(length - self.length)
-
-        self._buffer.seek(self.bytes_read)
+        # BufferIO will return less than requested if buffer is short
         data = self._buffer.read(length)
-        self.bytes_read += len(data)
-
-        # purge the buffer when we've consumed it all so it doesn't
-        # grow forever
-        if self.bytes_read == self.bytes_written:
-            self.purge()
-
+        missing = length - len(data)
+        if missing:
+            # fill up the buffer and read the remainder
+            self._read_from_socket(missing)
+            data += self._buffer.read(missing)
         return data[:-2]
 
-    def readline(self):
+    def readline(self) -> bytes:
         buf = self._buffer
-        buf.seek(self.bytes_read)
         data = buf.readline()
         while not data.endswith(SYM_CRLF):
             # there's more data in the socket that we need
             self._read_from_socket()
-            buf.seek(self.bytes_read)
-            data = buf.readline()
+            data += buf.readline()
 
-        self.bytes_read += len(data)
+        return data[:-2]
 
-        # purge the buffer when we've consumed it all so it doesn't
-        # grow forever
-        if self.bytes_read == self.bytes_written:
-            self.purge()
+    def get_pos(self) -> int:
+        """
+        Get current read position
+        """
+        return self._buffer.tell()
 
-        return data[:-2]
+    def rewind(self, pos: int) -> None:
+        """
+        Rewind the buffer to a specific position, to re-start reading
+        """
+        self._buffer.seek(pos)
+
+    def purge(self) -> None:
+        """
+        After a successful read, purge the read part of buffer
+        """
+        unread = self.unread_bytes()
+
+        # Only if we have read all of the buffer do we truncate, to
+        # reduce the amount of memory thrashing.  This heuristic
+        # can be changed or removed later.
+        if unread > 0:
+            return
 
-    def purge(self):
+        if unread > 0:
+            # move unread data to the front
+            view = self._buffer.getbuffer()
+            view[:unread] = view[-unread:]
+        self._buffer.truncate(unread)
         self._buffer.seek(0)
-        self._buffer.truncate()
-        self.bytes_written = 0
-        self.bytes_read = 0
 
-    def close(self):
+    def close(self) -> None:
         try:
-            self.purge()
             self._buffer.close()
         except Exception:
             # issue #633 suggests the purge/close somehow raised a
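
The SocketBuffer rewrite drops the manual bytes_read/bytes_written counters and leans on the BytesIO stream position instead: tell() is the read cursor, and the unread length is the distance from tell() to the end. A stand-alone sketch of that bookkeeping (not the class itself):

    from io import SEEK_END, BytesIO

    buf = BytesIO(b"+OK\r\n+QUEUED\r\n")

    def unread_bytes(buffer: BytesIO) -> int:
        pos = buffer.tell()
        end = buffer.seek(0, SEEK_END)
        buffer.seek(pos)
        return end - pos

    print(unread_bytes(buf))  # 14: nothing consumed yet
    print(buf.readline())     # b'+OK\r\n'
    print(unread_bytes(buf))  # 9: the second reply is still unread
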
@@ -315,6 +340,18 @@ class PythonParser(BaseParser):
         return self._buffer and self._buffer.can_read(timeout)
 
     def read_response(self, disable_decoding=False):
+        pos = self._buffer.get_pos() if self._buffer else None
+        try:
+            result = self._read_response(disable_decoding=disable_decoding)
+        except BaseException:
+            if self._buffer:
+                self._buffer.rewind(pos)
+            raise
+        else:
+            self._buffer.purge()
+            return result
+
+    def _read_response(self, disable_decoding=False):
         raw = self._buffer.readline()
         if not raw:
             raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
@@ -355,7 +392,7 @@ class PythonParser(BaseParser):
             if length == -1:
                 return None
             response = [
-                self.read_response(disable_decoding=disable_decoding)
+                self._read_response(disable_decoding=disable_decoding)
                 for i in range(length)
             ]
         if isinstance(response, bytes) and disable_decoding is False:
@@ -370,9 +407,7 @@ class HiredisParser(BaseParser):
         if not HIREDIS_AVAILABLE:
             raise RedisError("Hiredis is not installed")
         self.socket_read_size = socket_read_size
-
-        if HIREDIS_USE_BYTE_BUFFER:
-            self._buffer = bytearray(socket_read_size)
+        self._buffer = bytearray(socket_read_size)
 
     def __del__(self):
         try:
@@ -383,16 +418,14 @@ class HiredisParser(BaseParser):
     def on_connect(self, connection, **kwargs):
         self._sock = connection._sock
         self._socket_timeout = connection.socket_timeout
-        kwargs = {"protocolError": InvalidResponse, "replyError": self.parse_error}
-
-        # hiredis < 0.1.3 doesn't support functions that create exceptions
-        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
-            kwargs["replyError"] = ResponseError
+        kwargs = {
+            "protocolError": InvalidResponse,
+            "replyError": self.parse_error,
+            "errors": connection.encoder.encoding_errors,
+        }
 
         if connection.encoder.decode_responses:
             kwargs["encoding"] = connection.encoder.encoding
-        if HIREDIS_SUPPORTS_ENCODING_ERRORS:
-            kwargs["errors"] = connection.encoder.encoding_errors
         self._reader = hiredis.Reader(**kwargs)
         self._next_response = False
 
@@ -417,17 +450,10 @@ class HiredisParser(BaseParser):
         try:
             if custom_timeout:
                 sock.settimeout(timeout)
-            if HIREDIS_USE_BYTE_BUFFER:
-                bufflen = self._sock.recv_into(self._buffer)
-                if bufflen == 0:
-                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
-                self._reader.feed(self._buffer, 0, bufflen)
-            else:
-                buffer = self._sock.recv(self.socket_read_size)
-                # an empty string indicates the server shutdown the socket
-                if not isinstance(buffer, bytes) or len(buffer) == 0:
-                    raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
-                self._reader.feed(buffer)
+            bufflen = self._sock.recv_into(self._buffer)
+            if bufflen == 0:
+                raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
+            self._reader.feed(self._buffer, 0, bufflen)
             # data was read from the socket and added to the buffer.
             # return True to indicate that data was read.
             return True
@@ -469,17 +495,6 @@ class HiredisParser(BaseParser):
                 response = self._reader.gets(False)
             else:
                 response = self._reader.gets()
-        # if an older version of hiredis is installed, we need to attempt
-        # to convert ResponseErrors to their appropriate types.
-        if not HIREDIS_SUPPORTS_CALLABLE_ERRORS:
-            if isinstance(response, ResponseError):
-                response = self.parse_error(response.args[0])
-            elif (
-                isinstance(response, list)
-                and response
-                and isinstance(response[0], ResponseError)
-            ):
-                response[0] = self.parse_error(response[0].args[0])
         # if the response is a ConnectionError or the response is a list and
         # the first item is a ConnectionError, raise it as something bad
         # happened
@@ -494,12 +509,82 @@ class HiredisParser(BaseParser):
         return response
 
 
+DefaultParser: BaseParser
 if HIREDIS_AVAILABLE:
     DefaultParser = HiredisParser
 else:
     DefaultParser = PythonParser
 
 
+class HiredisRespSerializer:
+    def pack(self, *args):
+        """Pack a series of arguments into the Redis protocol"""
+        output = []
+
+        if isinstance(args[0], str):
+            args = tuple(args[0].encode().split()) + args[1:]
+        elif b" " in args[0]:
+            args = tuple(args[0].split()) + args[1:]
+        try:
+            output.append(hiredis.pack_command(args))
+        except TypeError:
+            _, value, traceback = sys.exc_info()
+            raise DataError(value).with_traceback(traceback)
+
+        return output
+
+
+class PythonRespSerializer:
+    def __init__(self, buffer_cutoff, encode) -> None:
+        self._buffer_cutoff = buffer_cutoff
+        self.encode = encode
+
+    def pack(self, *args):
+        """Pack a series of arguments into the Redis protocol"""
+        output = []
+        # the client might have included 1 or more literal arguments in
+        # the command name, e.g., 'CONFIG GET'. The Redis server expects these
+        # arguments to be sent separately, so split the first argument
+        # manually. These arguments should be bytestrings so that they are
+        # not encoded.
+        if isinstance(args[0], str):
+            args = tuple(args[0].encode().split()) + args[1:]
+        elif b" " in args[0]:
+            args = tuple(args[0].split()) + args[1:]
+
+        buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
+
+        buffer_cutoff = self._buffer_cutoff
+        for arg in map(self.encode, args):
+            # to avoid large string mallocs, chunk the command into the
+            # output list if we're sending large values or memoryviews
+            arg_length = len(arg)
+            if (
+                len(buff) > buffer_cutoff
+                or arg_length > buffer_cutoff
+                or isinstance(arg, memoryview)
+            ):
+                buff = SYM_EMPTY.join(
+                    (buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)
+                )
+                output.append(buff)
+                output.append(arg)
+                buff = SYM_CRLF
+            else:
+                buff = SYM_EMPTY.join(
+                    (
+                        buff,
+                        SYM_DOLLAR,
+                        str(arg_length).encode(),
+                        SYM_CRLF,
+                        arg,
+                        SYM_CRLF,
+                    )
+                )
+        output.append(buff)
+        return output
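
Both serializers produce the same wire format; the Python version just chunks large arguments into separate output pieces. For a simple command the encoding looks like this (a plain sketch of the RESP framing, not library code):

    # SET foo bar -> *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n
    args = (b"SET", b"foo", b"bar")
    packed = b"*%d\r\n" % len(args) + b"".join(
        b"$%d\r\n%s\r\n" % (len(a), a) for a in args
    )
    print(packed)
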
+
+
 class Connection:
     "Manages TCP communication to and from a Redis server"
 
@@ -526,6 +611,8 @@ class Connection:
         username=None,
         retry=None,
         redis_connect_func=None,
+        credential_provider: Optional[CredentialProvider] = None,
+        command_packer=None,
     ):
         """
         Initialize a new Connection.
@@ -534,13 +621,21 @@ class Connection:
         `retry` to a valid `Retry` object.
         To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
         """
+        if (username or password) and credential_provider is not None:
+            raise DataError(
+                "'username' and 'password' cannot be passed along with 'credential_"
+                "provider'. Please provide only one of the following arguments: \n"
+                "1. 'password' and (optional) 'username'\n"
+                "2. 'credential_provider'"
+            )
         self.pid = os.getpid()
         self.host = host
         self.port = int(port)
         self.db = db
-        self.username = username
         self.client_name = client_name
+        self.credential_provider = credential_provider
         self.password = password
+        self.username = username
         self.socket_timeout = socket_timeout
         self.socket_connect_timeout = socket_connect_timeout or socket_timeout
         self.socket_keepalive = socket_keepalive
@@ -553,7 +648,7 @@ class Connection:
             # Add TimeoutError to the errors list to retry on
             retry_on_error.append(TimeoutError)
         self.retry_on_error = retry_on_error
-        if retry_on_error:
+        if retry or retry_on_error:
             if retry is None:
                 self.retry = Retry(NoBackoff(), 1)
             else:
@@ -572,6 +667,7 @@ class Connection:
         self.set_parser(parser_class)
         self._connect_callbacks = []
         self._buffer_cutoff = 6000
+        self._command_packer = self._construct_command_packer(command_packer)
 
     def __repr__(self):
         repr_args = ",".join([f"{k}={v}" for k, v in self.repr_pieces()])
@@ -589,6 +685,14 @@ class Connection:
         except Exception:
             pass
 
+    def _construct_command_packer(self, packer):
+        if packer is not None:
+            return packer
+        elif HIREDIS_PACK_AVAILABLE:
+            return HiredisRespSerializer()
+        else:
+            return PythonRespSerializer(self._buffer_cutoff, self.encoder.encode)
+
     def register_connect_callback(self, callback):
         self._connect_callbacks.append(weakref.WeakMethod(callback))
 
@@ -677,12 +781,23 @@ class Connection:
             raise err
         raise OSError("socket.getaddrinfo returned an empty list")
 
+    def _host_error(self):
+        try:
+            host_error = f"{self.host}:{self.port}"
+        except AttributeError:
+            host_error = "connection"
+
+        return host_error
+
     def _error_message(self, exception):
         # args for socket.error can either be (errno, "message")
         # or just "message"
+
+        host_error = self._host_error()
+
         if len(exception.args) == 1:
             try:
-                return f"Error connecting to {self.host}:{self.port}. \
+                return f"Error connecting to {host_error}. \
                         {exception.args[0]}."
             except AttributeError:
                 return f"Connection Error: {exception.args[0]}"
@@ -690,7 +805,7 @@ class Connection:
             try:
                 return (
                     f"Error {exception.args[0]} connecting to "
-                    f"{self.host}:{self.port}. {exception.args[1]}."
+                    f"{host_error}. {exception.args[1]}."
                 )
             except AttributeError:
                 return f"Connection Error: {exception.args[0]}"
@@ -699,12 +814,13 @@ class Connection:
         "Initialize the connection, authenticate and select a database"
         self._parser.on_connect(self)
 
-        # if username and/or password are set, authenticate
-        if self.username or self.password:
-            if self.username:
-                auth_args = (self.username, self.password or "")
-            else:
-                auth_args = (self.password,)
+        # if credential provider or username and/or password are set, authenticate
+        if self.credential_provider or (self.username or self.password):
+            cred_provider = (
+                self.credential_provider
+                or UsernamePasswordCredentialProvider(self.username, self.password)
+            )
+            auth_args = cred_provider.get_credentials()
             # avoid checking health here -- PING will fail if we try
             # to check the health prior to the AUTH
             self.send_command("AUTH", *auth_args, check_health=False)
@@ -716,7 +832,7 @@ class Connection:
                 # server seems to be < 6.0.0 which expects a single password
                 # arg. retry auth with just the password.
                 # https://github.com/andymccurdy/redis-py/issues/1274
-                self.send_command("AUTH", self.password, check_health=False)
+                self.send_command("AUTH", auth_args[-1], check_health=False)
                 auth_response = self.read_response()
 
             if str_if_bytes(auth_response) != "OK":
@@ -790,14 +906,15 @@ class Connection:
                 errno = e.args[0]
                 errmsg = e.args[1]
             raise ConnectionError(f"Error {errno} while writing to socket. {errmsg}.")
-        except BaseException:
+        except Exception:
             self.disconnect()
             raise
 
     def send_command(self, *args, **kwargs):
         """Pack and send a command to the Redis server"""
         self.send_packed_command(
-            self.pack_command(*args), check_health=kwargs.get("check_health", True)
+            self._command_packer.pack(*args),
+            check_health=kwargs.get("check_health", True),
         )
 
     def can_read(self, timeout=0):
@@ -805,30 +922,31 @@ class Connection:
         sock = self._sock
         if not sock:
             self.connect()
+
+        host_error = self._host_error()
+
         try:
             return self._parser.can_read(timeout)
         except OSError as e:
             self.disconnect()
-            raise ConnectionError(
-                f"Error while reading from {self.host}:{self.port}: {e.args}"
-            )
+            raise ConnectionError(f"Error while reading from {host_error}: {e.args}")
 
     def read_response(self, disable_decoding=False):
         """Read the response from a previously sent command"""
-        try:
-            hosterr = f"{self.host}:{self.port}"
-        except AttributeError:
-            hosterr = "connection"
+
+        host_error = self._host_error()
 
         try:
             response = self._parser.read_response(disable_decoding=disable_decoding)
         except socket.timeout:
             self.disconnect()
-            raise TimeoutError(f"Timeout reading from {hosterr}")
+            raise TimeoutError(f"Timeout reading from {host_error}")
         except OSError as e:
             self.disconnect()
-            raise ConnectionError(f"Error while reading from {hosterr}" f" : {e.args}")
-        except BaseException:
+            raise ConnectionError(
+                f"Error while reading from {host_error}" f" : {e.args}"
+            )
+        except Exception:
             self.disconnect()
             raise
 
@@ -841,48 +959,7 @@ class Connection:
 
     def pack_command(self, *args):
         """Pack a series of arguments into the Redis protocol"""
-        output = []
-        # the client might have included 1 or more literal arguments in
-        # the command name, e.g., 'CONFIG GET'. The Redis server expects these
-        # arguments to be sent separately, so split the first argument
-        # manually. These arguments should be bytestrings so that they are
-        # not encoded.
-        if isinstance(args[0], str):
-            args = tuple(args[0].encode().split()) + args[1:]
-        elif b" " in args[0]:
-            args = tuple(args[0].split()) + args[1:]
-
-        buff = SYM_EMPTY.join((SYM_STAR, str(len(args)).encode(), SYM_CRLF))
-
-        buffer_cutoff = self._buffer_cutoff
-        for arg in map(self.encoder.encode, args):
-            # to avoid large string mallocs, chunk the command into the
-            # output list if we're sending large values or memoryviews
-            arg_length = len(arg)
-            if (
-                len(buff) > buffer_cutoff
-                or arg_length > buffer_cutoff
-                or isinstance(arg, memoryview)
-            ):
-                buff = SYM_EMPTY.join(
-                    (buff, SYM_DOLLAR, str(arg_length).encode(), SYM_CRLF)
-                )
-                output.append(buff)
-                output.append(arg)
-                buff = SYM_CRLF
-            else:
-                buff = SYM_EMPTY.join(
-                    (
-                        buff,
-                        SYM_DOLLAR,
-                        str(arg_length).encode(),
-                        SYM_CRLF,
-                        arg,
-                        SYM_CRLF,
-                    )
-                )
-        output.append(buff)
-        return output
+        return self._command_packer.pack(*args)
 
     def pack_commands(self, commands):
         """Pack multiple commands into the Redis protocol"""
@@ -892,14 +969,15 @@ class Connection:
         buffer_cutoff = self._buffer_cutoff
 
         for cmd in commands:
-            for chunk in self.pack_command(*cmd):
+            for chunk in self._command_packer.pack(*cmd):
                 chunklen = len(chunk)
                 if (
                     buffer_length > buffer_cutoff
                     or chunklen > buffer_cutoff
                     or isinstance(chunk, memoryview)
                 ):
-                    output.append(SYM_EMPTY.join(pieces))
+                    if pieces:
+                        output.append(SYM_EMPTY.join(pieces))
                     buffer_length = 0
                     pieces = []
 
@@ -1074,6 +1152,8 @@ class UnixDomainSocketConnection(Connection):
         client_name=None,
         retry=None,
         redis_connect_func=None,
+        credential_provider: Optional[CredentialProvider] = None,
+        command_packer=None,
     ):
         """
         Initialize a new UnixDomainSocketConnection.
@@ -1082,12 +1162,20 @@ class UnixDomainSocketConnection(Connection):
         `retry` to a valid `Retry` object.
         To retry on TimeoutError, `retry_on_timeout` can also be set to `True`.
         """
+        if (username or password) and credential_provider is not None:
+            raise DataError(
+                "'username' and 'password' cannot be passed along with 'credential_"
+                "provider'. Please provide only one of the following arguments: \n"
+                "1. 'password' and (optional) 'username'\n"
+                "2. 'credential_provider'"
+            )
         self.pid = os.getpid()
         self.path = path
         self.db = db
-        self.username = username
         self.client_name = client_name
+        self.credential_provider = credential_provider
         self.password = password
+        self.username = username
         self.socket_timeout = socket_timeout
         self.retry_on_timeout = retry_on_timeout
         if retry_on_error is SENTINEL:
@@ -1115,6 +1203,7 @@ class UnixDomainSocketConnection(Connection):
         self.set_parser(parser_class)
         self._connect_callbacks = []
         self._buffer_cutoff = 6000
+        self._command_packer = self._construct_command_packer(command_packer)
 
     def repr_pieces(self):
         pieces = [("path", self.path), ("db", self.db)]
@@ -1166,6 +1255,16 @@ URL_QUERY_ARGUMENT_PARSERS = {
 
 
 def parse_url(url):
+    if not (
+        url.startswith("redis://")
+        or url.startswith("rediss://")
+        or url.startswith("unix://")
+    ):
+        raise ValueError(
+            "Redis URL must specify one of the following "
+            "schemes (redis://, rediss://, unix://)"
+        )
+
     url = urlparse(url)
     kwargs = {}
 
@@ -1192,7 +1291,7 @@ def parse_url(url):
             kwargs["path"] = unquote(url.path)
         kwargs["connection_class"] = UnixDomainSocketConnection
 
-    elif url.scheme in ("redis", "rediss"):
+    else:  # implied:  url.scheme in ("redis", "rediss"):
         if url.hostname:
             kwargs["host"] = unquote(url.hostname)
         if url.port:
@@ -1208,11 +1307,6 @@ def parse_url(url):
 
         if url.scheme == "rediss":
             kwargs["connection_class"] = SSLConnection
-    else:
-        raise ValueError(
-            "Redis URL must specify one of the following "
-            "schemes (redis://, rediss://, unix://)"
-        )
 
     return kwargs
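
The scheme check now runs before urlparse(), so an unsupported scheme fails fast with a ValueError. For instance:

    from redis.connection import parse_url

    print(parse_url("redis://user:secret@localhost:6379/0"))

    try:
        parse_url("http://localhost:6379")
    except ValueError as exc:
        print(exc)  # Redis URL must specify one of the following schemes ...
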
 
@@ -1240,7 +1334,7 @@ class ConnectionPool:
 
             redis://[[username]:[password]]@localhost:6379/0
             rediss://[[username]:[password]]@localhost:6379/0
-            unix://[[username]:[password]]@/path/to/socket.sock?db=0
+            unix://[username@]/path/to/socket.sock?db=0[&password=password]
 
         Three URL schemes are supported:
 
@@ -1465,6 +1559,13 @@ class ConnectionPool:
             for connection in connections:
                 connection.disconnect()
 
+    def set_retry(self, retry: "Retry") -> None:
+        self.connection_kwargs.update({"retry": retry})
+        for conn in self._available_connections:
+            conn.retry = retry
+        for conn in self._in_use_connections:
+            conn.retry = retry
+
 
 class BlockingConnectionPool(ConnectionPool):
     """
diff --git a/redis/credentials.py b/redis/credentials.py
new file mode 100644
index 0000000..7ba26dc
--- /dev/null
+++ b/redis/credentials.py
@@ -0,0 +1,26 @@
+from typing import Optional, Tuple, Union
+
+
+class CredentialProvider:
+    """
+    Credentials Provider.
+    """
+
+    def get_credentials(self) -> Union[Tuple[str], Tuple[str, str]]:
+        raise NotImplementedError("get_credentials must be implemented")
+
+
+class UsernamePasswordCredentialProvider(CredentialProvider):
+    """
+    Simple implementation of CredentialProvider that just wraps static
+    username and password.
+    """
+
+    def __init__(self, username: Optional[str] = None, password: Optional[str] = None):
+        self.username = username or ""
+        self.password = password or ""
+
+    def get_credentials(self):
+        if self.username:
+            return self.username, self.password
+        return (self.password,)
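The new redis/credentials.py module defines the provider interface that the connection classes above accept through the credential_provider keyword. A short sketch of a custom provider, assuming the keyword is forwarded from the redis.Redis constructor down to its connections; the vault lookup and the VaultCredentialProvider name are purely illustrative:

    import redis
    from redis.credentials import CredentialProvider

    class VaultCredentialProvider(CredentialProvider):
        """Hypothetical provider that fetches short-lived credentials."""

        def __init__(self, fetch_secret):
            # fetch_secret is any callable returning (username, password)
            self._fetch_secret = fetch_secret

        def get_credentials(self):
            username, password = self._fetch_secret()
            return (username, password) if username else (password,)

    creds = VaultCredentialProvider(lambda: ("app-user", "s3cr3t"))
    r = redis.Redis(host="localhost", port=6379, credential_provider=creds)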
diff --git a/redis/exceptions.py b/redis/exceptions.py
index d18b354..8a8bf42 100644
--- a/redis/exceptions.py
+++ b/redis/exceptions.py
@@ -199,3 +199,7 @@ class SlotNotCoveredError(RedisClusterException):
     """
 
     pass
+
+
+class MaxConnectionsError(ConnectionError):
+    ...
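Because MaxConnectionsError subclasses ConnectionError, existing broad handlers keep working while callers that care about pool exhaustion can match it specifically. A small illustrative sketch (the cluster code in this release raises it when a node's pool is exhausted; whether other code paths do is not shown here):

    from redis.exceptions import ConnectionError, MaxConnectionsError

    def classify(exc: Exception) -> str:
        # MaxConnectionsError is a ConnectionError, so the broad branch
        # still catches it; the narrow branch lets callers back off
        # instead of blindly reconnecting.
        if isinstance(exc, MaxConnectionsError):
            return "pool exhausted, back off"
        if isinstance(exc, ConnectionError):
            return "reconnect"
        return "unknown"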
diff --git a/redis/lock.py b/redis/lock.py
index 912ff57..4cca102 100644
--- a/redis/lock.py
+++ b/redis/lock.py
@@ -256,7 +256,7 @@ class Lock:
         if not bool(
             self.lua_release(keys=[self.name], args=[expected_token], client=self.redis)
         ):
-            raise LockNotOwnedError("Cannot release a lock" " that's no longer owned")
+            raise LockNotOwnedError("Cannot release a lock that's no longer owned")
 
     def extend(self, additional_time: int, replace_ttl: bool = False) -> bool:
         """
diff --git a/redis/ocsp.py b/redis/ocsp.py
index 4753434..ab8a35a 100644
--- a/redis/ocsp.py
+++ b/redis/ocsp.py
@@ -189,8 +189,8 @@ class OCSPVerifier:
         return cert
 
     def components_from_socket(self):
-        """This function returns the certificate, primary issuer, and primary ocsp server
-        in the chain for a socket already wrapped with ssl.
+        """This function returns the certificate, primary issuer, and primary ocsp
+        server in the chain for a socket already wrapped with ssl.
         """
 
         # convert the binary certificate to text
@@ -292,7 +292,7 @@ class OCSPVerifier:
         This first retrieves for validate the certificate, issuer_url,
         and ocsp_server for certificate validate. Then retrieves the
         issuer certificate from the issuer_url, and finally checks
-        the valididy of OCSP revocation status.
+        the validity of OCSP revocation status.
         """
 
         # validate the certificate
diff --git a/redis/sentinel.py b/redis/sentinel.py
index d35abaf..d70b714 100644
--- a/redis/sentinel.py
+++ b/redis/sentinel.py
@@ -200,10 +200,10 @@ class Sentinel(SentinelCommands):
             kwargs.pop("once")
 
         if once:
+            random.choice(self.sentinels).execute_command(*args, **kwargs)
+        else:
             for sentinel in self.sentinels:
                 sentinel.execute_command(*args, **kwargs)
-        else:
-            random.choice(self.sentinels).execute_command(*args, **kwargs)
         return True
 
     def __repr__(self):
diff --git a/redis/typing.py b/redis/typing.py
index b572b0c..8504c7d 100644
--- a/redis/typing.py
+++ b/redis/typing.py
@@ -16,7 +16,7 @@ EncodedT = Union[bytes, memoryview]
 DecodedT = Union[str, int, float]
 EncodableT = Union[EncodedT, DecodedT]
 AbsExpiryT = Union[int, datetime]
-ExpiryT = Union[float, timedelta]
+ExpiryT = Union[int, timedelta]
 ZScoreBoundT = Union[float, str]  # str allows for the [ or ( prefix
 BitfieldOffsetT = Union[int, str]  # str allows for #x syntax
 _StringLikeT = Union[bytes, str, memoryview]
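ExpiryT now spells out that expirations are whole seconds as an int or a timedelta, which matches how the ex/px style arguments behave. A tiny illustration against a local server, assuming the usual set signature:

    from datetime import timedelta
    import redis

    r = redis.Redis()
    r.set("session:42", "payload", ex=30)                     # int seconds
    r.set("session:42", "payload", ex=timedelta(seconds=30))  # timedelta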
diff --git a/redis/utils.py b/redis/utils.py
index 0c34e1e..d95e62c 100644
--- a/redis/utils.py
+++ b/redis/utils.py
@@ -1,12 +1,16 @@
 from contextlib import contextmanager
+from functools import wraps
 from typing import Any, Dict, Mapping, Union
 
 try:
     import hiredis  # noqa
 
-    HIREDIS_AVAILABLE = True
+    # Only support Hiredis >= 1.0:
+    HIREDIS_AVAILABLE = not hiredis.__version__.startswith("0.")
+    HIREDIS_PACK_AVAILABLE = hasattr(hiredis, "pack_command")
 except ImportError:
     HIREDIS_AVAILABLE = False
+    HIREDIS_PACK_AVAILABLE = False
 
 try:
     import cryptography  # noqa
@@ -79,3 +83,30 @@ def merge_result(command, res):
             result.add(value)
 
     return list(result)
+
+
+def warn_deprecated(name, reason="", version="", stacklevel=2):
+    import warnings
+
+    msg = f"Call to deprecated {name}."
+    if reason:
+        msg += f" ({reason})"
+    if version:
+        msg += f" -- Deprecated since version {version}."
+    warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)
+
+
+def deprecated_function(reason="", version="", name=None):
+    """
+    Decorator to mark a function as deprecated.
+    """
+
+    def decorator(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            warn_deprecated(name or func.__name__, reason, version, stacklevel=3)
+            return func(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
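With the third-party "deprecated" package dropped from the requirements, the in-house deprecated_function decorator added above covers the same need. A minimal usage sketch built only from those helpers:

    import warnings
    from redis.utils import deprecated_function

    @deprecated_function(reason="use new_api() instead", version="4.5.1")
    def old_api():
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old_api()

    assert issubclass(caught[0].category, DeprecationWarning)
    print(caught[0].message)
    # Call to deprecated old_api. (use new_api() instead) -- Deprecated since version 4.5.1.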
diff --git a/requirements.txt b/requirements.txt
index c40eca7..82c46c9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,2 @@
 async-timeout>=4.0.2
-deprecated>=1.2.3
-packaging>=20.4
 typing-extensions; python_version<"3.8"
diff --git a/setup.py b/setup.py
index 87649c5..060e9da 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@ setup(
     long_description_content_type="text/markdown",
     keywords=["Redis", "key-value store", "database"],
     license="MIT",
-    version="4.3.4",
+    version="4.5.1",
     packages=find_packages(
         include=[
             "redis",
@@ -30,10 +30,8 @@ setup(
     },
     author="Redis Inc.",
     author_email="oss@redis.com",
-    python_requires=">=3.6",
+    python_requires=">=3.7",
     install_requires=[
-        "deprecated>=1.2.3",
-        "packaging>=20.4",
         'importlib-metadata >= 1.0; python_version < "3.8"',
         'typing-extensions; python_version<"3.8"',
         "async-timeout>=4.0.2",
@@ -47,7 +45,6 @@ setup(
         "Programming Language :: Python",
         "Programming Language :: Python :: 3",
         "Programming Language :: Python :: 3 :: Only",
-        "Programming Language :: Python :: 3.6",
         "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
diff --git a/tests/conftest.py b/tests/conftest.py
index 8a907cc..27dcc74 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -77,7 +77,7 @@ def pytest_addoption(parser):
         "--redis-url",
         default=default_redis_url,
         action="store",
-        help="Redis connection string," " defaults to `%(default)s`",
+        help="Redis connection string, defaults to `%(default)s`",
     )
 
     parser.addoption(
@@ -93,7 +93,7 @@ def pytest_addoption(parser):
         "--redis-ssl-url",
         default=default_redis_ssl_url,
         action="store",
-        help="Redis SSL connection string," " defaults to `%(default)s`",
+        help="Redis SSL connection string, defaults to `%(default)s`",
     )
 
     parser.addoption(
@@ -130,15 +130,25 @@ def _get_info(redis_url):
 
 
 def pytest_sessionstart(session):
+    # during test discovery, e.g. with VS Code, we may not
+    # have a server running.
     redis_url = session.config.getoption("--redis-url")
-    info = _get_info(redis_url)
-    version = info["redis_version"]
-    arch_bits = info["arch_bits"]
-    cluster_enabled = info["cluster_enabled"]
+    try:
+        info = _get_info(redis_url)
+        version = info["redis_version"]
+        arch_bits = info["arch_bits"]
+        cluster_enabled = info["cluster_enabled"]
+        enterprise = info["enterprise"]
+    except redis.ConnectionError:
+        # provide optimistic defaults
+        version = "10.0.0"
+        arch_bits = 64
+        cluster_enabled = False
+        enterprise = False
     REDIS_INFO["version"] = version
     REDIS_INFO["arch_bits"] = arch_bits
     REDIS_INFO["cluster_enabled"] = cluster_enabled
-    REDIS_INFO["enterprise"] = info["enterprise"]
+    REDIS_INFO["enterprise"] = enterprise
     # store REDIS_INFO in config so that it is available from "condition strings"
     session.config.REDIS_INFO = REDIS_INFO
 
diff --git a/tests/mocks.py b/tests/mocks.py
new file mode 100644
index 0000000..d7d450e
--- /dev/null
+++ b/tests/mocks.py
@@ -0,0 +1,41 @@
+# Various mocks for testing
+
+
+class MockSocket:
+    """
+    A class simulating a readable socket, optionally raising a
+    special exception every other read.
+    """
+
+    class TestError(BaseException):
+        pass
+
+    def __init__(self, data, interrupt_every=0):
+        self.data = data
+        self.counter = 0
+        self.pos = 0
+        self.interrupt_every = interrupt_every
+
+    def tick(self):
+        self.counter += 1
+        if not self.interrupt_every:
+            return
+        if (self.counter % self.interrupt_every) == 0:
+            raise self.TestError()
+
+    def recv(self, bufsize):
+        self.tick()
+        bufsize = min(5, bufsize)  # truncate the read size
+        result = self.data[self.pos : self.pos + bufsize]
+        self.pos += len(result)
+        return result
+
+    def recv_into(self, buffer, nbytes=0, flags=0):
+        self.tick()
+        if nbytes == 0:
+            nbytes = len(buffer)
+        nbytes = min(5, nbytes)  # truncate the read size
+        result = self.data[self.pos : self.pos + nbytes]
+        self.pos += len(result)
+        buffer[: len(result)] = result
+        return len(result)
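MockSocket simulates a readable socket for the tests: reads are truncated to 5 bytes and, when interrupt_every is set, every n-th read raises MockSocket.TestError. A quick standalone sketch of that behaviour:

    from tests.mocks import MockSocket

    sock = MockSocket(b"+PONG\r\n", interrupt_every=3)
    assert sock.recv(4096) == b"+PONG"   # reads are capped at 5 bytes
    assert sock.recv(4096) == b"\r\n"
    try:
        sock.recv(4096)                  # third read raises the test error
    except MockSocket.TestError:
        pass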
diff --git a/tests/test_asyncio/compat.py b/tests/test_asyncio/compat.py
index ced4974..5edcd4a 100644
--- a/tests/test_asyncio/compat.py
+++ b/tests/test_asyncio/compat.py
@@ -1,6 +1,11 @@
+import asyncio
 from unittest import mock
 
 try:
     mock.AsyncMock
 except AttributeError:
     import mock
+
+
+def create_task(coroutine):
+    return asyncio.create_task(coroutine)
diff --git a/tests/test_asyncio/conftest.py b/tests/test_asyncio/conftest.py
index 8166588..6982cc8 100644
--- a/tests/test_asyncio/conftest.py
+++ b/tests/test_asyncio/conftest.py
@@ -1,15 +1,10 @@
-import asyncio
 import random
-import sys
+from contextlib import asynccontextmanager as _asynccontextmanager
 from typing import Union
 from urllib.parse import urlparse
 
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
-
 import pytest
+import pytest_asyncio
 from packaging.version import Version
 
 import redis.asyncio as redis
@@ -69,11 +64,13 @@ async def _get_info(redis_url):
         "pool-hiredis",
     ],
 )
-def create_redis(request, event_loop: asyncio.BaseEventLoop):
+async def create_redis(request):
     """Wrapper around redis.create_redis."""
     single_connection, parser_cls = request.param
 
-    async def f(
+    teardown_clients = []
+
+    async def client_factory(
         url: str = request.config.getoption("--redis-url"),
         cls=redis.Redis,
         flushdb=True,
@@ -95,56 +92,50 @@ def create_redis(request, event_loop: asyncio.BaseEventLoop):
             client = client.client()
             await client.initialize()
 
-        def teardown():
-            async def ateardown():
-                if not cluster_mode:
-                    if "username" in kwargs:
-                        return
-                    if flushdb:
-                        try:
-                            await client.flushdb()
-                        except redis.ConnectionError:
-                            # handle cases where a test disconnected a client
-                            # just manually retry the flushdb
-                            await client.flushdb()
-                    await client.close()
-                    await client.connection_pool.disconnect()
-                else:
-                    if flushdb:
-                        try:
-                            await client.flushdb(target_nodes="primaries")
-                        except redis.ConnectionError:
-                            # handle cases where a test disconnected a client
-                            # just manually retry the flushdb
-                            await client.flushdb(target_nodes="primaries")
-                    await client.close()
-
-            if event_loop.is_running():
-                event_loop.create_task(ateardown())
+        async def teardown():
+            if not cluster_mode:
+                if flushdb and "username" not in kwargs:
+                    try:
+                        await client.flushdb()
+                    except redis.ConnectionError:
+                        # handle cases where a test disconnected a client
+                        # just manually retry the flushdb
+                        await client.flushdb()
+                await client.close()
+                await client.connection_pool.disconnect()
             else:
-                event_loop.run_until_complete(ateardown())
-
-        request.addfinalizer(teardown)
-
+                if flushdb:
+                    try:
+                        await client.flushdb(target_nodes="primaries")
+                    except redis.ConnectionError:
+                        # handle cases where a test disconnected a client
+                        # just manually retry the flushdb
+                        await client.flushdb(target_nodes="primaries")
+                await client.close()
+
+        teardown_clients.append(teardown)
         return client
 
-    return f
+    yield client_factory
+
+    for teardown in teardown_clients:
+        await teardown()
 
 
 @pytest_asyncio.fixture()
-async def r(request, create_redis):
-    yield await create_redis()
+async def r(create_redis):
+    return await create_redis()
 
 
 @pytest_asyncio.fixture()
 async def r2(create_redis):
     """A second client for tests that need multiple"""
-    yield await create_redis()
+    return await create_redis()
 
 
 @pytest_asyncio.fixture()
 async def modclient(request, create_redis):
-    yield await create_redis(
+    return await create_redis(
         url=request.config.getoption("--redismod-url"), decode_responses=True
     )
 
@@ -222,7 +213,7 @@ async def mock_cluster_resp_slaves(create_redis, **kwargs):
 def master_host(request):
     url = request.config.getoption("--redis-url")
     parts = urlparse(url)
-    yield parts.hostname
+    return parts.hostname
 
 
 async def wait_for_command(
@@ -246,3 +237,29 @@ async def wait_for_command(
             return monitor_response
         if key in monitor_response["command"]:
             return None
+
+
+# python 3.6 doesn't have the asynccontextmanager decorator.  Provide it here.
+class AsyncContextManager:
+    def __init__(self, async_generator):
+        self.gen = async_generator
+
+    async def __aenter__(self):
+        try:
+            return await self.gen.__anext__()
+        except StopAsyncIteration as err:
+            raise RuntimeError("Pickles") from err
+
+    async def __aexit__(self, exc_type, exc_inst, tb):
+        if exc_type:
+            await self.gen.athrow(exc_type, exc_inst, tb)
+            return True
+        try:
+            await self.gen.__anext__()
+        except StopAsyncIteration:
+            return
+        raise RuntimeError("More pickles")
+
+
+def asynccontextmanager(func):
+    return _asynccontextmanager(func)
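The rewritten create_redis fixture replaces the old addfinalizer and event-loop juggling with a plain async generator: the factory appends one teardown coroutine per client and everything runs after the yield, on the same loop as the test. The same pattern in a stripped-down form, with make_client and build as placeholder names:

    import pytest_asyncio
    import redis.asyncio as redis

    @pytest_asyncio.fixture()
    async def make_client():
        teardowns = []

        async def build(url="redis://localhost:6379/0", **kwargs):
            client = redis.Redis.from_url(url, **kwargs)

            async def teardown():
                await client.flushdb()
                await client.close()
                await client.connection_pool.disconnect()

            teardowns.append(teardown)
            return client

        yield build

        # runs once the test is done, on the test's event loop
        for teardown in teardowns:
            await teardown()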
diff --git a/tests/test_asyncio/mocks.py b/tests/test_asyncio/mocks.py
new file mode 100644
index 0000000..89bd9c0
--- /dev/null
+++ b/tests/test_asyncio/mocks.py
@@ -0,0 +1,51 @@
+import asyncio
+
+# Helper Mocking classes for the tests.
+
+
+class MockStream:
+    """
+    A class simulating an asyncio input buffer, optionally raising a
+    special exception every other read.
+    """
+
+    class TestError(BaseException):
+        pass
+
+    def __init__(self, data, interrupt_every=0):
+        self.data = data
+        self.counter = 0
+        self.pos = 0
+        self.interrupt_every = interrupt_every
+
+    def tick(self):
+        self.counter += 1
+        if not self.interrupt_every:
+            return
+        if (self.counter % self.interrupt_every) == 0:
+            raise self.TestError()
+
+    async def read(self, want):
+        self.tick()
+        want = 5
+        result = self.data[self.pos : self.pos + want]
+        self.pos += len(result)
+        return result
+
+    async def readline(self):
+        self.tick()
+        find = self.data.find(b"\n", self.pos)
+        if find >= 0:
+            result = self.data[self.pos : find + 1]
+        else:
+            result = self.data[self.pos :]
+        self.pos += len(result)
+        return result
+
+    async def readexactly(self, length):
+        self.tick()
+        result = self.data[self.pos : self.pos + length]
+        if len(result) < length:
+            raise asyncio.IncompleteReadError(result, None)
+        self.pos += len(result)
+        return result
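MockStream mirrors MockSocket for the asyncio reader interface (read, readline, readexactly). A small sketch of it standing in for a stream, run under asyncio.run:

    import asyncio
    from tests.test_asyncio.mocks import MockStream

    async def demo():
        stream = MockStream(b"$3\r\nfoo\r\n")
        assert await stream.readline() == b"$3\r\n"
        assert await stream.readexactly(3) == b"foo"
        assert await stream.read(100) == b"\r\n"  # read() returns at most 5 bytes

    asyncio.run(demo())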
diff --git a/tests/test_asyncio/test_bloom.py b/tests/test_asyncio/test_bloom.py
index feb98cc..9f4a805 100644
--- a/tests/test_asyncio/test_bloom.py
+++ b/tests/test_asyncio/test_bloom.py
@@ -1,10 +1,11 @@
+from math import inf
+
 import pytest
 
 import redis.asyncio as redis
 from redis.exceptions import ModuleError, RedisError
 from redis.utils import HIREDIS_AVAILABLE
-
-pytestmark = pytest.mark.asyncio
+from tests.conftest import skip_ifmodversion_lt
 
 
 def intlist(obj):
@@ -91,7 +92,7 @@ async def test_bf_scandump_and_loadchunk(modclient: redis.Redis):
             res += rv == x
         assert res < 5
 
-    do_verify()
+    await do_verify()
     cmds = []
     if HIREDIS_AVAILABLE:
         with pytest.raises(ModuleError):
@@ -120,7 +121,7 @@ async def test_bf_scandump_and_loadchunk(modclient: redis.Redis):
 
     cur_info = await modclient.bf().execute_command("bf.debug", "myBloom")
     assert prev_info == cur_info
-    do_verify()
+    await do_verify()
 
     await modclient.bf().client.delete("myBloom")
     await modclient.bf().create("myBloom", "0.0001", "10000000")
@@ -148,6 +149,21 @@ async def test_bf_info(modclient: redis.Redis):
         assert True
 
 
+@pytest.mark.redismod
+async def test_bf_card(modclient: redis.Redis):
+    # return 0 if the key does not exist
+    assert await modclient.bf().card("not_exist") == 0
+
+    # Store a filter
+    assert await modclient.bf().add("bf1", "item_foo") == 1
+    assert await modclient.bf().card("bf1") == 1
+
+    # Error when key is of a type other than Bloom filter.
+    with pytest.raises(redis.ResponseError):
+        await modclient.set("setKey", "value")
+        await modclient.bf().card("setKey")
+
+
 # region Test Cuckoo Filter
 @pytest.mark.redismod
 async def test_cf_add_and_insert(modclient: redis.Redis):
@@ -264,9 +280,10 @@ async def test_topk(modclient: redis.Redis):
     assert [1, 1, 0, 0, 1, 0, 0] == await modclient.topk().query(
         "topk", "A", "B", "C", "D", "E", "F", "G"
     )
-    assert [4, 3, 2, 3, 3, 0, 1] == await modclient.topk().count(
-        "topk", "A", "B", "C", "D", "E", "F", "G"
-    )
+    with pytest.deprecated_call():
+        assert [4, 3, 2, 3, 3, 0, 1] == await modclient.topk().count(
+            "topk", "A", "B", "C", "D", "E", "F", "G"
+        )
 
     # test full list
     assert await modclient.topk().reserve("topklist", 3, 50, 3, 0.9)
@@ -308,9 +325,10 @@ async def test_topk_incrby(modclient: redis.Redis):
     )
     res = await modclient.topk().incrby("topk", ["42", "xyzzy"], [8, 4])
     assert [None, "bar"] == res
-    assert [3, 6, 10, 4, 0] == await modclient.topk().count(
-        "topk", "bar", "baz", "42", "xyzzy", 4
-    )
+    with pytest.deprecated_call():
+        assert [3, 6, 10, 4, 0] == await modclient.topk().count(
+            "topk", "bar", "baz", "42", "xyzzy", 4
+        )
 
 
 # region Test T-Digest
@@ -321,11 +339,11 @@ async def test_tdigest_reset(modclient: redis.Redis):
     # reset on empty histogram
     assert await modclient.tdigest().reset("tDigest")
     # insert data-points into sketch
-    assert await modclient.tdigest().add("tDigest", list(range(10)), [1.0] * 10)
+    assert await modclient.tdigest().add("tDigest", list(range(10)))
 
     assert await modclient.tdigest().reset("tDigest")
     # assert we have 0 unmerged nodes
-    assert 0 == (await modclient.tdigest().info("tDigest")).unmergedNodes
+    assert 0 == (await modclient.tdigest().info("tDigest")).unmerged_nodes
 
 
 @pytest.mark.redismod
@@ -334,14 +352,24 @@ async def test_tdigest_merge(modclient: redis.Redis):
     assert await modclient.tdigest().create("to-tDigest", 10)
     assert await modclient.tdigest().create("from-tDigest", 10)
     # insert data-points into sketch
-    assert await modclient.tdigest().add("from-tDigest", [1.0] * 10, [1.0] * 10)
-    assert await modclient.tdigest().add("to-tDigest", [2.0] * 10, [10.0] * 10)
+    assert await modclient.tdigest().add("from-tDigest", [1.0] * 10)
+    assert await modclient.tdigest().add("to-tDigest", [2.0] * 10)
     # merge from-tdigest into to-tdigest
-    assert await modclient.tdigest().merge("to-tDigest", "from-tDigest")
+    assert await modclient.tdigest().merge("to-tDigest", 1, "from-tDigest")
     # we should now have 110 weight on to-histogram
     info = await modclient.tdigest().info("to-tDigest")
-    total_weight_to = float(info.mergedWeight) + float(info.unmergedWeight)
-    assert 110 == total_weight_to
+    total_weight_to = float(info.merged_weight) + float(info.unmerged_weight)
+    assert 20.0 == total_weight_to
+    # test override
+    assert await modclient.tdigest().create("from-override", 10)
+    assert await modclient.tdigest().create("from-override-2", 10)
+    assert await modclient.tdigest().add("from-override", [3.0] * 10)
+    assert await modclient.tdigest().add("from-override-2", [4.0] * 10)
+    assert await modclient.tdigest().merge(
+        "to-tDigest", 2, "from-override", "from-override-2", override=True
+    )
+    assert 3.0 == await modclient.tdigest().min("to-tDigest")
+    assert 4.0 == await modclient.tdigest().max("to-tDigest")
 
 
 @pytest.mark.redismod
@@ -349,7 +377,7 @@ async def test_tdigest_merge(modclient: redis.Redis):
 async def test_tdigest_min_and_max(modclient: redis.Redis):
     assert await modclient.tdigest().create("tDigest", 100)
     # insert data-points into sketch
-    assert await modclient.tdigest().add("tDigest", [1, 2, 3], [1.0] * 3)
+    assert await modclient.tdigest().add("tDigest", [1, 2, 3])
     # min/max
     assert 3 == await modclient.tdigest().max("tDigest")
     assert 1 == await modclient.tdigest().min("tDigest")
@@ -357,22 +385,31 @@ async def test_tdigest_min_and_max(modclient: redis.Redis):
 
 @pytest.mark.redismod
 @pytest.mark.experimental
+@skip_ifmodversion_lt("2.4.0", "bf")
 async def test_tdigest_quantile(modclient: redis.Redis):
     assert await modclient.tdigest().create("tDigest", 500)
     # insert data-points into sketch
     assert await modclient.tdigest().add(
-        "tDigest", list([x * 0.01 for x in range(1, 10000)]), [1.0] * 10000
+        "tDigest", list([x * 0.01 for x in range(1, 10000)])
     )
     # assert min min/max have same result as quantile 0 and 1
-    assert await modclient.tdigest().max(
-        "tDigest"
-    ) == await modclient.tdigest().quantile("tDigest", 1.0)
-    assert await modclient.tdigest().min(
-        "tDigest"
-    ) == await modclient.tdigest().quantile("tDigest", 0.0)
+    assert (
+        await modclient.tdigest().max("tDigest")
+        == (await modclient.tdigest().quantile("tDigest", 1))[0]
+    )
+    assert (
+        await modclient.tdigest().min("tDigest")
+        == (await modclient.tdigest().quantile("tDigest", 0.0))[0]
+    )
+
+    assert 1.0 == round((await modclient.tdigest().quantile("tDigest", 0.01))[0], 2)
+    assert 99.0 == round((await modclient.tdigest().quantile("tDigest", 0.99))[0], 2)
 
-    assert 1.0 == round(await modclient.tdigest().quantile("tDigest", 0.01), 2)
-    assert 99.0 == round(await modclient.tdigest().quantile("tDigest", 0.99), 2)
+    # test multiple quantiles
+    assert await modclient.tdigest().create("t-digest", 100)
+    assert await modclient.tdigest().add("t-digest", [1, 2, 3, 4, 5])
+    res = await modclient.tdigest().quantile("t-digest", 0.5, 0.8)
+    assert [3.0, 5.0] == res
 
 
 @pytest.mark.redismod
@@ -380,9 +417,67 @@ async def test_tdigest_quantile(modclient: redis.Redis):
 async def test_tdigest_cdf(modclient: redis.Redis):
     assert await modclient.tdigest().create("tDigest", 100)
     # insert data-points into sketch
-    assert await modclient.tdigest().add("tDigest", list(range(1, 10)), [1.0] * 10)
-    assert 0.1 == round(await modclient.tdigest().cdf("tDigest", 1.0), 1)
-    assert 0.9 == round(await modclient.tdigest().cdf("tDigest", 9.0), 1)
+    assert await modclient.tdigest().add("tDigest", list(range(1, 10)))
+    assert 0.1 == round((await modclient.tdigest().cdf("tDigest", 1.0))[0], 1)
+    assert 0.9 == round((await modclient.tdigest().cdf("tDigest", 9.0))[0], 1)
+    res = await modclient.tdigest().cdf("tDigest", 1.0, 9.0)
+    assert [0.1, 0.9] == [round(x, 1) for x in res]
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+@skip_ifmodversion_lt("2.4.0", "bf")
+async def test_tdigest_trimmed_mean(modclient: redis.Redis):
+    assert await modclient.tdigest().create("tDigest", 100)
+    # insert data-points into sketch
+    assert await modclient.tdigest().add("tDigest", list(range(1, 10)))
+    assert 5 == await modclient.tdigest().trimmed_mean("tDigest", 0.1, 0.9)
+    assert 4.5 == await modclient.tdigest().trimmed_mean("tDigest", 0.4, 0.5)
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+async def test_tdigest_rank(modclient: redis.Redis):
+    assert await modclient.tdigest().create("t-digest", 500)
+    assert await modclient.tdigest().add("t-digest", list(range(0, 20)))
+    assert -1 == (await modclient.tdigest().rank("t-digest", -1))[0]
+    assert 0 == (await modclient.tdigest().rank("t-digest", 0))[0]
+    assert 10 == (await modclient.tdigest().rank("t-digest", 10))[0]
+    assert [-1, 20, 9] == await modclient.tdigest().rank("t-digest", -20, 20, 9)
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+async def test_tdigest_revrank(modclient: redis.Redis):
+    assert await modclient.tdigest().create("t-digest", 500)
+    assert await modclient.tdigest().add("t-digest", list(range(0, 20)))
+    assert -1 == (await modclient.tdigest().revrank("t-digest", 20))[0]
+    assert 19 == (await modclient.tdigest().revrank("t-digest", 0))[0]
+    assert [-1, 19, 9] == await modclient.tdigest().revrank("t-digest", 21, 0, 10)
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+async def test_tdigest_byrank(modclient: redis.Redis):
+    assert await modclient.tdigest().create("t-digest", 500)
+    assert await modclient.tdigest().add("t-digest", list(range(1, 11)))
+    assert 1 == (await modclient.tdigest().byrank("t-digest", 0))[0]
+    assert 10 == (await modclient.tdigest().byrank("t-digest", 9))[0]
+    assert (await modclient.tdigest().byrank("t-digest", 100))[0] == inf
+    with pytest.raises(redis.ResponseError):
+        (await modclient.tdigest().byrank("t-digest", -1))[0]
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+async def test_tdigest_byrevrank(modclient: redis.Redis):
+    assert await modclient.tdigest().create("t-digest", 500)
+    assert await modclient.tdigest().add("t-digest", list(range(1, 11)))
+    assert 10 == (await modclient.tdigest().byrevrank("t-digest", 0))[0]
+    assert 1 == (await modclient.tdigest().byrevrank("t-digest", 9))[0]
+    assert (await modclient.tdigest().byrevrank("t-digest", 100))[0] == -inf
+    with pytest.raises(redis.ResponseError):
+        (await modclient.tdigest().byrevrank("t-digest", -1))[0]
 
 
 # @pytest.mark.redismod
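The updated bloom tests track RedisBloom 2.4's T-Digest surface: add() no longer takes weights, merge() takes the number of source keys plus an override flag, and quantile()/cdf() return one value per requested cut-off. A condensed sketch of the same calls with the synchronous client, assuming a module-enabled server on localhost; the key names are illustrative:

    import redis

    r = redis.Redis(host="localhost", port=6379, decode_responses=True)

    r.tdigest().create("latency", 100)
    r.tdigest().add("latency", [1, 2, 3, 4, 5])            # values only, no weights
    p50, p90 = r.tdigest().quantile("latency", 0.5, 0.9)   # one result per quantile

    r.tdigest().create("latency:old", 100)
    r.tdigest().add("latency:old", [10, 20, 30])
    # merge(dest, numkeys, *sources, override=...) replaces the old two-argument form
    r.tdigest().merge("latency", 1, "latency:old")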
diff --git a/tests/test_asyncio/test_cluster.py b/tests/test_asyncio/test_cluster.py
index f4ea5cd..13e5e26 100644
--- a/tests/test_asyncio/test_cluster.py
+++ b/tests/test_asyncio/test_cluster.py
@@ -1,24 +1,20 @@
 import asyncio
 import binascii
 import datetime
-import sys
+import os
 import warnings
-from typing import Any, Callable, Dict, List, Optional, Type, Union
+from typing import Any, Awaitable, Callable, Dict, List, Optional, Type, Union
+from urllib.parse import urlparse
 
 import pytest
+import pytest_asyncio
+from _pytest.fixtures import FixtureRequest
 
-from .compat import mock
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
-
-from _pytest.fixtures import FixtureRequest, SubRequest
-
-from redis.asyncio import Connection, RedisCluster
-from redis.asyncio.cluster import ClusterNode, NodesManager
+from redis.asyncio.cluster import ClusterNode, NodesManager, RedisCluster
+from redis.asyncio.connection import Connection, SSLConnection
 from redis.asyncio.parser import CommandsParser
+from redis.asyncio.retry import Retry
+from redis.backoff import ExponentialBackoff, NoBackoff, default_backoff
 from redis.cluster import PIPELINE_BLOCKED_COMMANDS, PRIMARY, REPLICA, get_node_name
 from redis.crc import REDIS_CLUSTER_HASH_SLOTS, key_slot
 from redis.exceptions import (
@@ -26,6 +22,7 @@ from redis.exceptions import (
     ClusterDownError,
     ConnectionError,
     DataError,
+    MaxConnectionsError,
     MovedError,
     NoPermissionError,
     RedisClusterException,
@@ -39,7 +36,10 @@ from tests.conftest import (
     skip_unless_arch_bits,
 )
 
-pytestmark = pytest.mark.asyncio
+from .compat import mock
+
+pytestmark = pytest.mark.onlycluster
+
 
 default_host = "127.0.0.1"
 default_port = 7000
@@ -50,7 +50,7 @@ default_cluster_slots = [
 
 
 @pytest_asyncio.fixture()
-async def slowlog(request: SubRequest, r: RedisCluster) -> None:
+async def slowlog(r: RedisCluster) -> None:
     """
     Set the slowlog threshold to 0, and the
     max length to 128. This will force every
@@ -122,7 +122,7 @@ async def get_mocked_redis_client(*args, **kwargs) -> RedisCluster:
 def mock_node_resp(node: ClusterNode, response: Any) -> ClusterNode:
     connection = mock.AsyncMock()
     connection.is_connected = True
-    connection.read_response_without_lock.return_value = response
+    connection.read_response.return_value = response
     while node._free:
         node._free.pop()
     node._free.append(connection)
@@ -132,7 +132,7 @@ def mock_node_resp(node: ClusterNode, response: Any) -> ClusterNode:
 def mock_node_resp_exc(node: ClusterNode, exc: Exception) -> ClusterNode:
     connection = mock.AsyncMock()
     connection.is_connected = True
-    connection.read_response_without_lock.side_effect = exc
+    connection.read_response.side_effect = exc
     while node._free:
         node._free.pop()
     node._free.append(connection)
@@ -146,7 +146,7 @@ def mock_all_nodes_resp(rc: RedisCluster, response: Any) -> RedisCluster:
 
 
 async def moved_redirection_helper(
-    request: FixtureRequest, create_redis: Callable, failover: bool = False
+    create_redis: Callable[..., RedisCluster], failover: bool = False
 ) -> None:
     """
     Test that the client handles MOVED response after a failover.
@@ -171,7 +171,7 @@ async def moved_redirection_helper(
     prev_primary = rc.nodes_manager.get_node_from_slot(slot)
     if failover:
         if len(rc.nodes_manager.slots_cache[slot]) < 2:
-            warnings.warn("Skipping this test since it requires to have a " "replica")
+            warnings.warn("Skipping this test since it requires to have a replica")
             return
         redirect_node = rc.nodes_manager.slots_cache[slot][1]
     else:
@@ -202,7 +202,6 @@ async def moved_redirection_helper(
             assert prev_primary.server_type == REPLICA
 
 
-@pytest.mark.onlycluster
 class TestRedisClusterObj:
     """
     Tests for the RedisCluster class
@@ -237,10 +236,88 @@ class TestRedisClusterObj:
 
         await cluster.close()
 
-        startup_nodes = [ClusterNode("127.0.0.1", 16379)]
-        async with RedisCluster(startup_nodes=startup_nodes) as rc:
+        startup_node = ClusterNode("127.0.0.1", 16379)
+        async with RedisCluster(startup_nodes=[startup_node], client_name="test") as rc:
             assert await rc.set("A", 1)
             assert await rc.get("A") == b"1"
+            assert all(
+                [
+                    name == "test"
+                    for name in (
+                        await rc.client_getname(target_nodes=rc.ALL_NODES)
+                    ).values()
+                ]
+            )
+
+    async def test_cluster_set_get_retry_object(self, request: FixtureRequest):
+        retry = Retry(NoBackoff(), 2)
+        url = request.config.getoption("--redis-url")
+        async with RedisCluster.from_url(url, retry=retry) as r:
+            assert r.get_retry()._retries == retry._retries
+            assert isinstance(r.get_retry()._backoff, NoBackoff)
+            for node in r.get_nodes():
+                n_retry = node.connection_kwargs.get("retry")
+                assert n_retry is not None
+                assert n_retry._retries == retry._retries
+                assert isinstance(n_retry._backoff, NoBackoff)
+            rand_cluster_node = r.get_random_node()
+            existing_conn = rand_cluster_node.acquire_connection()
+            # Change retry policy
+            new_retry = Retry(ExponentialBackoff(), 3)
+            r.set_retry(new_retry)
+            assert r.get_retry()._retries == new_retry._retries
+            assert isinstance(r.get_retry()._backoff, ExponentialBackoff)
+            for node in r.get_nodes():
+                n_retry = node.connection_kwargs.get("retry")
+                assert n_retry is not None
+                assert n_retry._retries == new_retry._retries
+                assert isinstance(n_retry._backoff, ExponentialBackoff)
+            assert existing_conn.retry._retries == new_retry._retries
+            new_conn = rand_cluster_node.acquire_connection()
+            assert new_conn.retry._retries == new_retry._retries
+
+    async def test_cluster_retry_object(self, request: FixtureRequest) -> None:
+        url = request.config.getoption("--redis-url")
+        async with RedisCluster.from_url(url) as rc_default:
+            # Test default retry
+            retry = rc_default.connection_kwargs.get("retry")
+            assert isinstance(retry, Retry)
+            assert retry._retries == 3
+            assert isinstance(retry._backoff, type(default_backoff()))
+            assert rc_default.get_node("127.0.0.1", 16379).connection_kwargs.get(
+                "retry"
+            ) == rc_default.get_node("127.0.0.1", 16380).connection_kwargs.get("retry")
+
+        retry = Retry(ExponentialBackoff(10, 5), 5)
+        async with RedisCluster.from_url(url, retry=retry) as rc_custom_retry:
+            # Test custom retry
+            assert (
+                rc_custom_retry.get_node("127.0.0.1", 16379).connection_kwargs.get(
+                    "retry"
+                )
+                == retry
+            )
+
+        async with RedisCluster.from_url(
+            url, connection_error_retry_attempts=0
+        ) as rc_no_retries:
+            # Test no connection retries
+            assert (
+                rc_no_retries.get_node("127.0.0.1", 16379).connection_kwargs.get(
+                    "retry"
+                )
+                is None
+            )
+
+        async with RedisCluster.from_url(
+            url, retry=Retry(NoBackoff(), 0)
+        ) as rc_no_retries:
+            assert (
+                rc_no_retries.get_node("127.0.0.1", 16379)
+                .connection_kwargs.get("retry")
+                ._retries
+                == 0
+            )
 
     async def test_empty_startup_nodes(self) -> None:
         """
@@ -250,21 +327,42 @@ class TestRedisClusterObj:
             RedisCluster(startup_nodes=[])
 
         assert str(ex.value).startswith(
-            "RedisCluster requires at least one node to discover the " "cluster"
+            "RedisCluster requires at least one node to discover the cluster"
         ), str_if_bytes(ex.value)
 
-    async def test_from_url(self, r: RedisCluster) -> None:
-        redis_url = f"redis://{default_host}:{default_port}/0"
-        with mock.patch.object(RedisCluster, "from_url") as from_url:
+    async def test_from_url(self, request: FixtureRequest) -> None:
+        url = request.config.getoption("--redis-url")
 
-            async def from_url_mocked(_url, **_kwargs):
-                return await get_mocked_redis_client(url=_url, **_kwargs)
+        async with RedisCluster.from_url(url) as rc:
+            await rc.set("a", 1)
+            await rc.get("a") == 1
 
-            from_url.side_effect = from_url_mocked
-            cluster = await RedisCluster.from_url(redis_url)
-        assert cluster.get_node(host=default_host, port=default_port) is not None
+        rc = RedisCluster.from_url("rediss://localhost:16379")
+        assert rc.connection_kwargs["connection_class"] is SSLConnection
 
-        await cluster.close()
+    async def test_max_connections(
+        self, create_redis: Callable[..., RedisCluster]
+    ) -> None:
+        rc = await create_redis(cls=RedisCluster, max_connections=10)
+        for node in rc.get_nodes():
+            assert node.max_connections == 10
+
+        with mock.patch.object(Connection, "read_response") as read_response:
+
+            async def read_response_mocked(*args: Any, **kwargs: Any) -> None:
+                await asyncio.sleep(10)
+
+            read_response.side_effect = read_response_mocked
+
+            with pytest.raises(MaxConnectionsError):
+                await asyncio.gather(
+                    *(
+                        rc.ping(target_nodes=RedisCluster.DEFAULT_NODE)
+                        for _ in range(11)
+                    )
+                )
+
+        await rc.close()
 
     async def test_execute_command_errors(self, r: RedisCluster) -> None:
         """
@@ -273,7 +371,7 @@ class TestRedisClusterObj:
         with pytest.raises(RedisClusterException) as ex:
             await r.execute_command("GET")
         assert str(ex.value).startswith(
-            "No way to dispatch this command to " "Redis Cluster. Missing key."
+            "No way to dispatch this command to Redis Cluster. Missing key."
         )
 
     async def test_execute_command_node_flag_primaries(self, r: RedisCluster) -> None:
@@ -286,10 +384,10 @@ class TestRedisClusterObj:
         assert await r.ping(target_nodes=RedisCluster.PRIMARIES) is True
         for primary in primaries:
             conn = primary._free.pop()
-            assert conn.read_response_without_lock.called is True
+            assert conn.read_response.called is True
         for replica in replicas:
             conn = replica._free.pop()
-            assert conn.read_response_without_lock.called is not True
+            assert conn.read_response.called is not True
 
     async def test_execute_command_node_flag_replicas(self, r: RedisCluster) -> None:
         """
@@ -303,10 +401,10 @@ class TestRedisClusterObj:
         assert await r.ping(target_nodes=RedisCluster.REPLICAS) is True
         for replica in replicas:
             conn = replica._free.pop()
-            assert conn.read_response_without_lock.called is True
+            assert conn.read_response.called is True
         for primary in primaries:
             conn = primary._free.pop()
-            assert conn.read_response_without_lock.called is not True
+            assert conn.read_response.called is not True
 
         await r.close()
 
@@ -318,7 +416,7 @@ class TestRedisClusterObj:
         assert await r.ping(target_nodes=RedisCluster.ALL_NODES) is True
         for node in r.get_nodes():
             conn = node._free.pop()
-            assert conn.read_response_without_lock.called is True
+            assert conn.read_response.called is True
 
     async def test_execute_command_node_flag_random(self, r: RedisCluster) -> None:
         """
@@ -329,7 +427,7 @@ class TestRedisClusterObj:
         called_count = 0
         for node in r.get_nodes():
             conn = node._free.pop()
-            if conn.read_response_without_lock.called is True:
+            if conn.read_response.called is True:
                 called_count += 1
         assert called_count == 1
 
@@ -342,7 +440,7 @@ class TestRedisClusterObj:
         mock_node_resp(def_node, "PONG")
         assert await r.ping() is True
         conn = def_node._free.pop()
-        assert conn.read_response_without_lock.called
+        assert conn.read_response.called
 
     async def test_ask_redirection(self, r: RedisCluster) -> None:
         """
@@ -373,23 +471,23 @@ class TestRedisClusterObj:
             assert await r.execute_command("SET", "foo", "bar") == "MOCK_OK"
 
     async def test_moved_redirection(
-        self, request: FixtureRequest, create_redis: Callable
+        self, create_redis: Callable[..., RedisCluster]
     ) -> None:
         """
         Test that the client handles MOVED response.
         """
-        await moved_redirection_helper(request, create_redis, failover=False)
+        await moved_redirection_helper(create_redis, failover=False)
 
     async def test_moved_redirection_after_failover(
-        self, request: FixtureRequest, create_redis: Callable
+        self, create_redis: Callable[..., RedisCluster]
     ) -> None:
         """
         Test that the client handles MOVED response after a failover.
         """
-        await moved_redirection_helper(request, create_redis, failover=True)
+        await moved_redirection_helper(create_redis, failover=True)
 
     async def test_refresh_using_specific_nodes(
-        self, request: FixtureRequest, create_redis: Callable
+        self, create_redis: Callable[..., RedisCluster]
     ) -> None:
         """
         Test making calls on specific nodes when the cluster has failed over to
@@ -407,7 +505,7 @@ class TestRedisClusterObj:
                     Connection,
                     send_packed_command=mock.DEFAULT,
                     connect=mock.DEFAULT,
-                    can_read=mock.DEFAULT,
+                    can_read_destructive=mock.DEFAULT,
                 ) as mocks:
                     # simulate 7006 as a failed node
                     def execute_command_mock(self, *args, **options):
@@ -447,7 +545,7 @@ class TestRedisClusterObj:
                     execute_command.successful_calls = 0
                     execute_command.failed_calls = 0
                     initialize.side_effect = initialize_mock
-                    mocks["can_read"].return_value = False
+                    mocks["can_read_destructive"].return_value = False
                     mocks["send_packed_command"].return_value = "MOCK_OK"
                     mocks["connect"].return_value = None
                     with mock.patch.object(
@@ -486,9 +584,9 @@ class TestRedisClusterObj:
         with mock.patch.multiple(
             Connection,
             send_command=mock.DEFAULT,
-            read_response_without_lock=mock.DEFAULT,
+            read_response=mock.DEFAULT,
             _connect=mock.DEFAULT,
-            can_read=mock.DEFAULT,
+            can_read_destructive=mock.DEFAULT,
             on_connect=mock.DEFAULT,
         ) as mocks:
             with mock.patch.object(
@@ -518,9 +616,9 @@ class TestRedisClusterObj:
                 # so we'll mock some of the Connection's functions to allow it
                 execute_command.side_effect = execute_command_mock_first
                 mocks["send_command"].return_value = True
-                mocks["read_response_without_lock"].return_value = "OK"
+                mocks["read_response"].return_value = "OK"
                 mocks["_connect"].return_value = True
-                mocks["can_read"].return_value = False
+                mocks["can_read_destructive"].return_value = False
                 mocks["on_connect"].return_value = True
 
                 # Create a cluster with reading from replications
@@ -690,8 +788,28 @@ class TestRedisClusterObj:
         )
         await rc.close()
 
+    def test_replace_cluster_node(self, r: RedisCluster) -> None:
+        prev_default_node = r.get_default_node()
+        r.replace_default_node()
+        assert r.get_default_node() != prev_default_node
+        r.replace_default_node(prev_default_node)
+        assert r.get_default_node() == prev_default_node
+
+    async def test_default_node_is_replaced_after_exception(self, r):
+        curr_default_node = r.get_default_node()
+        # CLUSTER NODES command is being executed on the default node
+        nodes = await r.cluster_nodes()
+        assert "myself" in nodes.get(curr_default_node.name).get("flags")
+        # Mock connection error for the default node
+        mock_node_resp_exc(curr_default_node, ConnectionError("error"))
+        # Test that the command succeed from a different node
+        nodes = await r.cluster_nodes()
+        assert "myself" not in nodes.get(curr_default_node.name).get("flags")
+        assert r.get_default_node() != curr_default_node
+        # Rollback to the old default node
+        r.replace_default_node(curr_default_node)
+
 
-@pytest.mark.onlycluster
 class TestClusterRedisCommands:
     """
     Tests for RedisCluster unique commands
@@ -777,6 +895,15 @@ class TestClusterRedisCommands:
         await asyncio.sleep(0.1)
         assert await r.unlink(*d.keys()) == 0
 
+    async def test_initialize_before_execute_multi_key_command(
+        self, request: FixtureRequest
+    ) -> None:
+        # Test for issue https://github.com/redis/redis-py/issues/2437
+        url = request.config.getoption("--redis-url")
+        r = RedisCluster.from_url(url)
+        assert 0 == await r.exists("a", "b", "c")
+        await r.close()
+
     @skip_if_redis_enterprise()
     async def test_cluster_myid(self, r: RedisCluster) -> None:
         node = r.get_random_node()
@@ -828,8 +955,8 @@ class TestClusterRedisCommands:
         node0 = r.get_node(default_host, 7000)
         node1 = r.get_node(default_host, 7001)
         assert await r.cluster_delslots(0, 8192) == [True, True]
-        assert node0._free.pop().read_response_without_lock.called
-        assert node1._free.pop().read_response_without_lock.called
+        assert node0._free.pop().read_response.called
+        assert node1._free.pop().read_response.called
 
         await r.close()
 
@@ -998,7 +1125,7 @@ class TestClusterRedisCommands:
         node = r.nodes_manager.get_node_from_slot(12182)
         mock_node_resp(node, "OK")
         assert await r.cluster_setslot_stable(12182) is True
-        assert node._free.pop().read_response_without_lock.called
+        assert node._free.pop().read_response.called
 
     @skip_if_redis_enterprise()
     async def test_cluster_replicas(self, r: RedisCluster) -> None:
@@ -1040,7 +1167,7 @@ class TestClusterRedisCommands:
         for res in all_replicas_results.values():
             assert res is True
         for replica in r.get_replicas():
-            assert replica._free.pop().read_response_without_lock.called
+            assert replica._free.pop().read_response.called
 
         await r.close()
 
@@ -1053,7 +1180,7 @@ class TestClusterRedisCommands:
         for res in all_replicas_results.values():
             assert res is True
         for replica in r.get_replicas():
-            assert replica._free.pop().read_response_without_lock.called
+            assert replica._free.pop().read_response.called
 
         await r.close()
 
@@ -1255,8 +1382,11 @@ class TestClusterRedisCommands:
         assert "addr" in info
 
     @skip_if_server_version_lt("2.6.9")
-    async def test_client_kill(self, r: RedisCluster, r2: RedisCluster) -> None:
+    async def test_client_kill(
+        self, r: RedisCluster, create_redis: Callable[..., RedisCluster]
+    ) -> None:
         node = r.get_primaries()[0]
+        r2 = await create_redis(cls=RedisCluster, flushdb=False)
         await r.client_setname("redis-py-c1", target_nodes="all")
         await r2.client_setname("redis-py-c2", target_nodes="all")
         clients = [
@@ -1277,6 +1407,7 @@ class TestClusterRedisCommands:
         ]
         assert len(clients) == 1
         assert clients[0].get("name") == "redis-py-c1"
+        await r2.close()
 
     @skip_if_server_version_lt("2.6.0")
     async def test_cluster_bitop_not_empty_string(self, r: RedisCluster) -> None:
@@ -1918,7 +2049,7 @@ class TestClusterRedisCommands:
     @skip_if_server_version_lt("6.0.0")
     @skip_if_redis_enterprise()
     async def test_acl_log(
-        self, r: RedisCluster, request: FixtureRequest, create_redis: Callable
+        self, r: RedisCluster, create_redis: Callable[..., RedisCluster]
     ) -> None:
         key = "{cache}:"
         node = r.get_node_from_key(key)
@@ -1963,7 +2094,6 @@ class TestClusterRedisCommands:
         await user_client.close()
 
 
-@pytest.mark.onlycluster
 class TestNodesManager:
     """
     Tests for the NodesManager class
@@ -2095,7 +2225,7 @@ class TestNodesManager:
         specified
         """
         with pytest.raises(RedisClusterException):
-            await NodesManager([]).initialize()
+            await NodesManager([], False, {}).initialize()
 
     async def test_wrong_startup_nodes_type(self) -> None:
         """
@@ -2103,11 +2233,9 @@ class TestNodesManager:
         fail
         """
         with pytest.raises(RedisClusterException):
-            await NodesManager({}).initialize()
+            await NodesManager({}, False, {}).initialize()
 
-    async def test_init_slots_cache_slots_collision(
-        self, request: FixtureRequest
-    ) -> None:
+    async def test_init_slots_cache_slots_collision(self) -> None:
         """
         Test that if 2 nodes do not agree on the same slots setup it should
         raise an error. In this test both nodes will say that the first
@@ -2236,7 +2364,6 @@ class TestNodesManager:
                     assert rc.get_node(host=default_host, port=7002) is not None
 
 
-@pytest.mark.onlycluster
 class TestClusterPipeline:
     """Tests for the ClusterPipeline class."""
 
@@ -2416,8 +2543,8 @@ class TestClusterPipeline:
             mock_node_resp_exc(first_node, AskError(ask_msg))
             mock_node_resp(ask_node, "MOCK_OK")
             res = await pipe.get(key).execute()
-            assert first_node._free.pop().read_response_without_lock.await_count
-            assert ask_node._free.pop().read_response_without_lock.await_count
+            assert first_node._free.pop().read_response.await_count
+            assert ask_node._free.pop().read_response.await_count
             assert res == ["MOCK_OK"]
 
     async def test_moved_redirection_on_slave_with_default(
@@ -2472,7 +2599,147 @@ class TestClusterPipeline:
             executed_on_replica = False
             for node in slot_nodes:
                 if node.server_type == REPLICA:
-                    if node._free.pop().read_response_without_lock.await_count:
+                    if node._free.pop().read_response.await_count:
                         executed_on_replica = True
                         break
             assert executed_on_replica
+
+    async def test_can_run_concurrent_pipelines(self, r: RedisCluster) -> None:
+        """Test that the pipeline can be used concurrently."""
+        await asyncio.gather(
+            *(self.test_redis_cluster_pipeline(r) for i in range(100)),
+            *(self.test_multi_key_operation_with_a_single_slot(r) for i in range(100)),
+            *(self.test_multi_key_operation_with_multi_slots(r) for i in range(100)),
+        )
+
+    @pytest.mark.onlycluster
+    async def test_pipeline_with_default_node_error_command(self, create_redis):
+        """
+        Test that the default node is being replaced when it raises a relevant exception
+        """
+        r = await create_redis(cls=RedisCluster, flushdb=False)
+        curr_default_node = r.get_default_node()
+        err = ConnectionError("error")
+        cmd_count = await r.command_count()
+        mock_node_resp_exc(curr_default_node, err)
+        async with r.pipeline(transaction=False) as pipe:
+            pipe.command_count()
+            result = await pipe.execute(raise_on_error=False)
+            assert result[0] == err
+            assert r.get_default_node() != curr_default_node
+            pipe.command_count()
+            result = await pipe.execute(raise_on_error=False)
+            assert result[0] == cmd_count
+
+
+@pytest.mark.ssl
+class TestSSL:
+    """
+    Tests for SSL connections.
+
+    This relies on the --redis-ssl-url for building the client and connecting to the
+    appropriate port.
+    """
+
+    ROOT = os.path.join(os.path.dirname(__file__), "../..")
+    CERT_DIR = os.path.abspath(os.path.join(ROOT, "docker", "stunnel", "keys"))
+    if not os.path.isdir(CERT_DIR):  # github actions package validation case
+        CERT_DIR = os.path.abspath(
+            os.path.join(ROOT, "..", "docker", "stunnel", "keys")
+        )
+        if not os.path.isdir(CERT_DIR):
+            raise IOError(f"No SSL certificates found. They should be in {CERT_DIR}")
+
+    SERVER_CERT = os.path.join(CERT_DIR, "server-cert.pem")
+    SERVER_KEY = os.path.join(CERT_DIR, "server-key.pem")
+
+    @pytest_asyncio.fixture()
+    def create_client(self, request: FixtureRequest) -> Callable[..., RedisCluster]:
+        ssl_url = request.config.option.redis_ssl_url
+        ssl_host, ssl_port = urlparse(ssl_url)[1].split(":")
+
+        async def _create_client(mocked: bool = True, **kwargs: Any) -> RedisCluster:
+            if mocked:
+                with mock.patch.object(
+                    ClusterNode, "execute_command", autospec=True
+                ) as execute_command_mock:
+
+                    async def execute_command(self, *args, **kwargs):
+                        if args[0] == "INFO":
+                            return {"cluster_enabled": True}
+                        if args[0] == "CLUSTER SLOTS":
+                            return [[0, 16383, [ssl_host, ssl_port, "ssl_node"]]]
+                        if args[0] == "COMMAND":
+                            return {
+                                "ping": {
+                                    "name": "ping",
+                                    "arity": -1,
+                                    "flags": ["stale", "fast"],
+                                    "first_key_pos": 0,
+                                    "last_key_pos": 0,
+                                    "step_count": 0,
+                                }
+                            }
+                        raise NotImplementedError()
+
+                    execute_command_mock.side_effect = execute_command
+
+                    rc = await RedisCluster(host=ssl_host, port=ssl_port, **kwargs)
+
+                assert len(rc.get_nodes()) == 1
+                node = rc.get_default_node()
+                assert node.port == int(ssl_port)
+                return rc
+
+            return await RedisCluster(host=ssl_host, port=ssl_port, **kwargs)
+
+        return _create_client
+
+    async def test_ssl_connection_without_ssl(
+        self, create_client: Callable[..., Awaitable[RedisCluster]]
+    ) -> None:
+        with pytest.raises(RedisClusterException) as e:
+            await create_client(mocked=False, ssl=False)
+        e = e.value.__cause__
+        assert "Connection closed by server" in str(e)
+
+    async def test_ssl_with_invalid_cert(
+        self, create_client: Callable[..., Awaitable[RedisCluster]]
+    ) -> None:
+        with pytest.raises(RedisClusterException) as e:
+            await create_client(mocked=False, ssl=True)
+        e = e.value.__cause__.__context__
+        assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)
+
+    async def test_ssl_connection(
+        self, create_client: Callable[..., Awaitable[RedisCluster]]
+    ) -> None:
+        async with await create_client(ssl=True, ssl_cert_reqs="none") as rc:
+            assert await rc.ping()
+
+    async def test_validating_self_signed_certificate(
+        self, create_client: Callable[..., Awaitable[RedisCluster]]
+    ) -> None:
+        async with await create_client(
+            ssl=True,
+            ssl_ca_certs=self.SERVER_CERT,
+            ssl_cert_reqs="required",
+            ssl_certfile=self.SERVER_CERT,
+            ssl_keyfile=self.SERVER_KEY,
+        ) as rc:
+            assert await rc.ping()
+
+    async def test_validating_self_signed_string_certificate(
+        self, create_client: Callable[..., Awaitable[RedisCluster]]
+    ) -> None:
+        with open(self.SERVER_CERT) as f:
+            cert_data = f.read()
+
+        async with await create_client(
+            ssl=True,
+            ssl_ca_data=cert_data,
+            ssl_cert_reqs="required",
+            ssl_certfile=self.SERVER_CERT,
+            ssl_keyfile=self.SERVER_KEY,
+        ) as rc:
+            assert await rc.ping()
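
For orientation before the next file's changes: a minimal sketch of the client construction these SSL tests exercise. The host and port below are placeholders standing in for the values the tests derive from --redis-ssl-url, and a TLS-terminating proxy (such as the stunnel container) is assumed in front of a cluster node.

import asyncio

from redis.asyncio.cluster import RedisCluster


async def main():
    # placeholder endpoint; the tests read it from --redis-ssl-url
    async with await RedisCluster(
        host="localhost", port=27379, ssl=True, ssl_cert_reqs="none"
    ) as rc:
        print(await rc.ping())


asyncio.run(main())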
diff --git a/tests/test_asyncio/test_commands.py b/tests/test_asyncio/test_commands.py
index e128ac4..7c6fd45 100644
--- a/tests/test_asyncio/test_commands.py
+++ b/tests/test_asyncio/test_commands.py
@@ -4,20 +4,14 @@ Tests async overrides of commands from their mixins
 import binascii
 import datetime
 import re
-import sys
-import time
 from string import ascii_letters
 
 import pytest
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
+import pytest_asyncio
 
 import redis
 from redis import exceptions
-from redis.client import parse_info
+from redis.client import EMPTY_RESPONSE, NEVER_DECODE, parse_info
 from tests.conftest import (
     skip_if_server_version_gte,
     skip_if_server_version_lt,
@@ -27,11 +21,24 @@ from tests.conftest import (
 REDIS_6_VERSION = "5.9.0"
 
 
-pytestmark = pytest.mark.asyncio
+@pytest_asyncio.fixture()
+async def r_teardown(r: redis.Redis):
+    """
+    A special fixture which removes the provided names from the database after use
+    """
+    usernames = []
+
+    def factory(username):
+        usernames.append(username)
+        return r
+
+    yield factory
+    for username in usernames:
+        await r.acl_deluser(username)
 
 
 @pytest_asyncio.fixture()
-async def slowlog(r: redis.Redis, event_loop):
+async def slowlog(r: redis.Redis):
     current_config = await r.config_get()
     old_slower_than_value = current_config["slowlog-log-slower-than"]
     old_max_legnth_value = current_config["slowlog-max-len"]
@@ -94,17 +101,9 @@ class TestRedisCommands:
         assert "get" in commands
 
     @skip_if_server_version_lt(REDIS_6_VERSION)
-    async def test_acl_deluser(self, r: redis.Redis, request, event_loop):
+    async def test_acl_deluser(self, r_teardown):
         username = "redis-py-user"
-
-        def teardown():
-            coro = r.acl_deluser(username)
-            if event_loop.is_running():
-                event_loop.create_task(coro)
-            else:
-                event_loop.run_until_complete(coro)
-
-        request.addfinalizer(teardown)
+        r = r_teardown(username)
 
         assert await r.acl_deluser(username) == 0
         assert await r.acl_setuser(username, enabled=False, reset=True)
@@ -117,18 +116,9 @@ class TestRedisCommands:
 
     @skip_if_server_version_lt(REDIS_6_VERSION)
     @skip_if_server_version_gte("7.0.0")
-    async def test_acl_getuser_setuser(self, r: redis.Redis, request, event_loop):
+    async def test_acl_getuser_setuser(self, r_teardown):
         username = "redis-py-user"
-
-        def teardown():
-            coro = r.acl_deluser(username)
-            if event_loop.is_running():
-                event_loop.create_task(coro)
-            else:
-                event_loop.run_until_complete(coro)
-
-        request.addfinalizer(teardown)
-
+        r = r_teardown(username)
         # test enabled=False
         assert await r.acl_setuser(username, enabled=False, reset=True)
         assert await r.acl_getuser(username) == {
@@ -209,7 +199,7 @@ class TestRedisCommands:
 
         # Resets and tests that hashed passwords are set properly.
         hashed_password = (
-            "5e884898da28047151d0e56f8dc629" "2773603d0d6aabbdd62a11ef721d1542d8"
+            "5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8"
         )
         assert await r.acl_setuser(
             username, enabled=True, reset=True, hashed_passwords=["+" + hashed_password]
@@ -233,17 +223,9 @@ class TestRedisCommands:
 
     @skip_if_server_version_lt(REDIS_6_VERSION)
     @skip_if_server_version_gte("7.0.0")
-    async def test_acl_list(self, r: redis.Redis, request, event_loop):
+    async def test_acl_list(self, r_teardown):
         username = "redis-py-user"
-
-        def teardown():
-            coro = r.acl_deluser(username)
-            if event_loop.is_running():
-                event_loop.create_task(coro)
-            else:
-                event_loop.run_until_complete(coro)
-
-        request.addfinalizer(teardown)
+        r = r_teardown(username)
 
         assert await r.acl_setuser(username, enabled=False, reset=True)
         users = await r.acl_list()
@@ -251,17 +233,9 @@ class TestRedisCommands:
 
     @skip_if_server_version_lt(REDIS_6_VERSION)
     @pytest.mark.onlynoncluster
-    async def test_acl_log(self, r: redis.Redis, request, event_loop, create_redis):
+    async def test_acl_log(self, r_teardown, create_redis):
         username = "redis-py-user"
-
-        def teardown():
-            coro = r.acl_deluser(username)
-            if event_loop.is_running():
-                event_loop.create_task(coro)
-            else:
-                event_loop.run_until_complete(coro)
-
-        request.addfinalizer(teardown)
+        r = r_teardown(username)
         await r.acl_setuser(
             username,
             enabled=True,
@@ -294,55 +268,25 @@ class TestRedisCommands:
         assert await r.acl_log_reset()
 
     @skip_if_server_version_lt(REDIS_6_VERSION)
-    async def test_acl_setuser_categories_without_prefix_fails(
-        self, r: redis.Redis, request, event_loop
-    ):
+    async def test_acl_setuser_categories_without_prefix_fails(self, r_teardown):
         username = "redis-py-user"
-
-        def teardown():
-            coro = r.acl_deluser(username)
-            if event_loop.is_running():
-                event_loop.create_task(coro)
-            else:
-                event_loop.run_until_complete(coro)
-
-        request.addfinalizer(teardown)
+        r = r_teardown(username)
 
         with pytest.raises(exceptions.DataError):
             await r.acl_setuser(username, categories=["list"])
 
     @skip_if_server_version_lt(REDIS_6_VERSION)
-    async def test_acl_setuser_commands_without_prefix_fails(
-        self, r: redis.Redis, request, event_loop
-    ):
+    async def test_acl_setuser_commands_without_prefix_fails(self, r_teardown):
         username = "redis-py-user"
-
-        def teardown():
-            coro = r.acl_deluser(username)
-            if event_loop.is_running():
-                event_loop.create_task(coro)
-            else:
-                event_loop.run_until_complete(coro)
-
-        request.addfinalizer(teardown)
+        r = r_teardown(username)
 
         with pytest.raises(exceptions.DataError):
             await r.acl_setuser(username, commands=["get"])
 
     @skip_if_server_version_lt(REDIS_6_VERSION)
-    async def test_acl_setuser_add_passwords_and_nopass_fails(
-        self, r: redis.Redis, request, event_loop
-    ):
+    async def test_acl_setuser_add_passwords_and_nopass_fails(self, r_teardown):
         username = "redis-py-user"
-
-        def teardown():
-            coro = r.acl_deluser(username)
-            if event_loop.is_running():
-                event_loop.create_task(coro)
-            else:
-                event_loop.run_until_complete(coro)
-
-        request.addfinalizer(teardown)
+        r = r_teardown(username)
 
         with pytest.raises(exceptions.DataError):
             await r.acl_setuser(username, passwords="+mypass", nopass=True)
@@ -598,6 +542,16 @@ class TestRedisCommands:
         assert isinstance(t[0], int)
         assert isinstance(t[1], int)
 
+    async def test_never_decode_option(self, r: redis.Redis):
+        opts = {NEVER_DECODE: []}
+        await r.delete("a")
+        assert await r.execute_command("EXISTS", "a", **opts) == 0
+
+    async def test_empty_response_option(self, r: redis.Redis):
+        opts = {EMPTY_RESPONSE: []}
+        await r.delete("a")
+        assert await r.execute_command("EXISTS", "a", **opts) == 0
+
     # BASIC KEY COMMANDS
     async def test_append(self, r: redis.Redis):
         assert await r.append("a", "a1") == 2
@@ -805,7 +759,7 @@ class TestRedisCommands:
     async def test_expireat_unixtime(self, r: redis.Redis):
         expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1)
         await r.set("a", "foo")
-        expire_at_seconds = int(time.mktime(expire_at.timetuple()))
+        expire_at_seconds = int(expire_at.timestamp())
         assert await r.expireat("a", expire_at_seconds)
         assert 0 < await r.ttl("a") <= 61
 
@@ -930,8 +884,8 @@ class TestRedisCommands:
     async def test_pexpireat_unixtime(self, r: redis.Redis):
         expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1)
         await r.set("a", "foo")
-        expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000
-        assert await r.pexpireat("a", expire_at_seconds)
+        expire_at_milliseconds = int(expire_at.timestamp() * 1000)
+        assert await r.pexpireat("a", expire_at_milliseconds)
         assert 0 < await r.pttl("a") <= 61000
 
     @skip_if_server_version_lt("2.6.0")
@@ -3009,6 +2963,19 @@ class TestRedisCommands:
         )
         assert resp == [0, None, 255]
 
+    @skip_if_server_version_lt("6.0.0")
+    async def test_bitfield_ro(self, r: redis.Redis):
+        bf = r.bitfield("a")
+        resp = await bf.set("u8", 8, 255).execute()
+        assert resp == [0]
+
+        resp = await r.bitfield_ro("a", "u8", 0)
+        assert resp == [0]
+
+        items = [("u4", 8), ("u4", 12), ("u4", 13)]
+        resp = await r.bitfield_ro("a", "u8", 0, items)
+        assert resp == [0, 15, 15, 14]
+
     @skip_if_server_version_lt("4.0.0")
     async def test_memory_stats(self, r: redis.Redis):
         # put a key into the current db to make sure that "db.<current-db>"
@@ -3029,7 +2996,8 @@ class TestRedisCommands:
     @pytest.mark.onlynoncluster
     async def test_module_list(self, r: redis.Redis):
         assert isinstance(await r.module_list(), list)
-        assert not await r.module_list()
+        for x in await r.module_list():
+            assert isinstance(x, dict)
 
 
 @pytest.mark.onlynoncluster
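
As a quick illustration of the two per-call options covered by the new tests above (a local server on the default port is assumed; the exact semantics of each option live in redis.client):

import asyncio

import redis.asyncio as redis
from redis.client import EMPTY_RESPONSE, NEVER_DECODE


async def main():
    r = redis.Redis()
    await r.delete("a")
    # per-call options recognised by execute_command, mirroring the tests above;
    # NEVER_DECODE keeps the reply as raw bytes even with decode_responses set
    assert await r.execute_command("EXISTS", "a", **{NEVER_DECODE: []}) == 0
    assert await r.execute_command("EXISTS", "a", **{EMPTY_RESPONSE: []}) == 0
    await r.close()


asyncio.run(main())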
diff --git a/tests/test_asyncio/test_connection.py b/tests/test_asyncio/test_connection.py
index f6259ad..1851ca9 100644
--- a/tests/test_asyncio/test_connection.py
+++ b/tests/test_asyncio/test_connection.py
@@ -1,31 +1,91 @@
 import asyncio
+import socket
 import types
+from unittest.mock import patch
 
 import pytest
 
-from redis.asyncio.connection import PythonParser, UnixDomainSocketConnection
-from redis.exceptions import InvalidResponse
+import redis
+from redis.asyncio import Redis
+from redis.asyncio.connection import (
+    BaseParser,
+    Connection,
+    HiredisParser,
+    PythonParser,
+    UnixDomainSocketConnection,
+)
+from redis.asyncio.retry import Retry
+from redis.backoff import NoBackoff
+from redis.exceptions import ConnectionError, InvalidResponse, TimeoutError
 from redis.utils import HIREDIS_AVAILABLE
 from tests.conftest import skip_if_server_version_lt
 
 from .compat import mock
-
-pytestmark = pytest.mark.asyncio
+from .mocks import MockStream
 
 
 @pytest.mark.onlynoncluster
-@pytest.mark.skipif(HIREDIS_AVAILABLE, reason="PythonParser only")
 async def test_invalid_response(create_redis):
     r = await create_redis(single_connection_client=True)
 
     raw = b"x"
-    readline_mock = mock.AsyncMock(return_value=raw)
+    fake_stream = MockStream(raw + b"\r\n")
 
-    parser: "PythonParser" = r.connection._parser
-    with mock.patch.object(parser._buffer, "readline", readline_mock):
+    parser: BaseParser = r.connection._parser
+    with mock.patch.object(parser, "_stream", fake_stream):
         with pytest.raises(InvalidResponse) as cm:
             await parser.read_response()
-    assert str(cm.value) == f"Protocol Error: {raw!r}"
+    if isinstance(parser, PythonParser):
+        assert str(cm.value) == f"Protocol Error: {raw!r}"
+    else:
+        assert (
+            str(cm.value) == f'Protocol error, got "{raw.decode()}" as reply type byte'
+        )
+    await r.connection.disconnect()
+
+
+@pytest.mark.onlynoncluster
+async def test_single_connection():
+    """Test that concurrent requests on a single client are synchronised."""
+    r = Redis(single_connection_client=True)
+
+    init_call_count = 0
+    command_call_count = 0
+    in_use = False
+
+    class Retry_:
+        async def call_with_retry(self, _, __):
+            # If we remove the single-client lock, this error gets raised because
+            # two coroutines end up vying for the `in_use` flag due to the two
+            # asymmetric sleep calls
+            nonlocal command_call_count
+            nonlocal in_use
+            if in_use is True:
+                raise ValueError("Commands should be executed one at a time.")
+            in_use = True
+            await asyncio.sleep(0.01)
+            command_call_count += 1
+            await asyncio.sleep(0.03)
+            in_use = False
+            return "foo"
+
+    mock_conn = mock.MagicMock()
+    mock_conn.retry = Retry_()
+
+    async def get_conn(_):
+        # Validate only one client is created in single-client mode when
+        # concurrent requests are made
+        nonlocal init_call_count
+        await asyncio.sleep(0.01)
+        init_call_count += 1
+        return mock_conn
+
+    with mock.patch.object(r.connection_pool, "get_connection", get_conn):
+        with mock.patch.object(r.connection_pool, "release"):
+            await asyncio.gather(r.set("a", "b"), r.set("c", "d"))
+
+    assert init_call_count == 1
+    assert command_call_count == 2
 
 
 @skip_if_server_version_lt("4.0.0")
@@ -58,5 +118,158 @@ async def test_socket_param_regression(r):
 
 
 async def test_can_run_concurrent_commands(r):
+    if getattr(r, "connection", None) is not None:
+        # Concurrent commands are only supported on pooled or cluster connections
+        # since there is no synchronization on a single connection.
+        pytest.skip("pool only")
     assert await r.ping() is True
     assert all(await asyncio.gather(*(r.ping() for _ in range(10))))
+
+
+async def test_connect_retry_on_timeout_error():
+    """Test that the _connect function is retried in case of a timeout"""
+    conn = Connection(retry_on_timeout=True, retry=Retry(NoBackoff(), 3))
+    origin_connect = conn._connect
+    conn._connect = mock.AsyncMock()
+
+    async def mock_connect():
+        # connect only on the last retry
+        if conn._connect.call_count <= 2:
+            raise socket.timeout
+        else:
+            return await origin_connect()
+
+    conn._connect.side_effect = mock_connect
+    await conn.connect()
+    assert conn._connect.call_count == 3
+
+
+async def test_connect_without_retry_on_os_error():
+    """Test that the _connect function is not being retried in case of a OSError"""
+    with patch.object(Connection, "_connect") as _connect:
+        _connect.side_effect = OSError("")
+        conn = Connection(retry_on_timeout=True, retry=Retry(NoBackoff(), 2))
+        with pytest.raises(ConnectionError):
+            await conn.connect()
+        assert _connect.call_count == 1
+
+
+async def test_connect_timeout_error_without_retry():
+    """Test that the _connect function is not being retried if retry_on_timeout is
+    set to False"""
+    conn = Connection(retry_on_timeout=False)
+    conn._connect = mock.AsyncMock()
+    conn._connect.side_effect = socket.timeout
+
+    with pytest.raises(TimeoutError) as e:
+        await conn.connect()
+    assert conn._connect.call_count == 1
+    assert str(e.value) == "Timeout connecting to server"
+
+
+@pytest.mark.onlynoncluster
+async def test_connection_parse_response_resume(r: redis.Redis):
+    """
+    This test verifies that the Connection parser,
+    be it PythonParser or HiredisParser,
+    can be interrupted at IO time and then resume parsing.
+    """
+    conn = Connection(**r.connection_pool.connection_kwargs)
+    await conn.connect()
+    message = (
+        b"*3\r\n$7\r\nmessage\r\n$8\r\nchannel1\r\n"
+        b"$25\r\nhi\r\nthere\r\n+how\r\nare\r\nyou\r\n"
+    )
+
+    conn._parser._stream = MockStream(message, interrupt_every=2)
+    for i in range(100):
+        try:
+            response = await conn.read_response()
+            break
+        except MockStream.TestError:
+            pass
+
+    else:
+        pytest.fail("didn't receive a response")
+    assert response
+    assert i > 0
+
+
+@pytest.mark.onlynoncluster
+@pytest.mark.parametrize(
+    "parser_class", [PythonParser, HiredisParser], ids=["PythonParser", "HiredisParser"]
+)
+async def test_connection_disconect_race(parser_class):
+    """
+    This test reproduces the case in issue #2349
+    where a connection is closed while the parser is reading to feed the
+    internal buffer. The stream `read()` will succeed, but when it returns,
+    another task has already called `disconnect()` and is waiting for the
+    close to finish. When we attempt to feed the buffer, we will fail
+    since the buffer is no longer there.
+
+    This test verifies that a read in progress can finish even
+    if the `disconnect()` method is called.
+    """
+    if parser_class == PythonParser:
+        pytest.xfail("doesn't work yet with PythonParser")
+    if parser_class == HiredisParser and not HIREDIS_AVAILABLE:
+        pytest.skip("Hiredis not available")
+
+    args = {}
+    args["parser_class"] = parser_class
+
+    conn = Connection(**args)
+
+    cond = asyncio.Condition()
+    # 0 == initial
+    # 1 == reader is reading
+    # 2 == closer has closed and is waiting for close to finish
+    state = 0
+
+    # Mock read function, which waits for a close to happen before returning.
+    # It can be invoked either as two `read()` calls (HiredisParser)
+    # or as a `readline()` followed by `readexactly()` (PythonParser).
+    chunks = [b"$13\r\n", b"Hello, World!\r\n"]
+
+    async def read(_=None):
+        nonlocal state
+        async with cond:
+            if state == 0:
+                state = 1  # we are reading
+                cond.notify()
+                # wait until the closing task is done
+                await cond.wait_for(lambda: state == 2)
+        return chunks.pop(0)
+
+    # this function closes the connection while the reader is still blocked reading
+    async def do_close():
+        nonlocal state
+        async with cond:
+            await cond.wait_for(lambda: state == 1)
+            state = 2
+            cond.notify()
+        await conn.disconnect()
+
+    async def do_read():
+        return await conn.read_response()
+
+    reader = mock.AsyncMock()
+    writer = mock.AsyncMock()
+    writer.transport = mock.Mock()
+    writer.transport.get_extra_info.side_effect = None
+
+    # for HiredisParser
+    reader.read.side_effect = read
+    # for PythonParser
+    reader.readline.side_effect = read
+    reader.readexactly.side_effect = read
+
+    async def open_connection(*args, **kwargs):
+        return reader, writer
+
+    with patch.object(asyncio, "open_connection", open_connection):
+        await conn.connect()
+
+    vals = await asyncio.gather(do_read(), do_close())
+    assert vals == [b"Hello, World!", None]
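
A standalone sketch of the retry configuration exercised by the new connection tests above; the endpoint is a placeholder for whatever server is actually available.

import asyncio

from redis.asyncio.connection import Connection
from redis.asyncio.retry import Retry
from redis.backoff import NoBackoff


async def main():
    # retry the initial connect up to three times on timeout, with no backoff
    conn = Connection(
        host="localhost",
        port=6379,
        retry_on_timeout=True,
        retry=Retry(NoBackoff(), 3),
    )
    await conn.connect()
    await conn.disconnect()


asyncio.run(main())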
diff --git a/tests/test_asyncio/test_connection_pool.py b/tests/test_asyncio/test_connection_pool.py
index 6c56558..d1e52bd 100644
--- a/tests/test_asyncio/test_connection_pool.py
+++ b/tests/test_asyncio/test_connection_pool.py
@@ -1,24 +1,18 @@
 import asyncio
 import os
 import re
-import sys
 
 import pytest
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
+import pytest_asyncio
 
 import redis.asyncio as redis
 from redis.asyncio.connection import Connection, to_bool
 from tests.conftest import skip_if_redis_enterprise, skip_if_server_version_lt
 
 from .compat import mock
+from .conftest import asynccontextmanager
 from .test_pubsub import wait_for_message
 
-pytestmark = pytest.mark.asyncio
-
 
 @pytest.mark.onlynoncluster
 class TestRedisAutoReleaseConnectionPool:
@@ -109,12 +103,13 @@ class DummyConnection(Connection):
     async def disconnect(self):
         pass
 
-    async def can_read(self, timeout: float = 0):
+    async def can_read_destructive(self, timeout: float = 0):
         return False
 
 
 class TestConnectionPool:
-    def get_pool(
+    @asynccontextmanager
+    async def get_pool(
         self,
         connection_kwargs=None,
         max_connections=None,
@@ -126,71 +121,77 @@ class TestConnectionPool:
             max_connections=max_connections,
             **connection_kwargs,
         )
-        return pool
+        try:
+            yield pool
+        finally:
+            await pool.disconnect(inuse_connections=True)
 
     async def test_connection_creation(self):
         connection_kwargs = {"foo": "bar", "biz": "baz"}
-        pool = self.get_pool(
+        async with self.get_pool(
             connection_kwargs=connection_kwargs, connection_class=DummyConnection
-        )
-        connection = await pool.get_connection("_")
-        assert isinstance(connection, DummyConnection)
-        assert connection.kwargs == connection_kwargs
+        ) as pool:
+            connection = await pool.get_connection("_")
+            assert isinstance(connection, DummyConnection)
+            assert connection.kwargs == connection_kwargs
 
     async def test_multiple_connections(self, master_host):
         connection_kwargs = {"host": master_host}
-        pool = self.get_pool(connection_kwargs=connection_kwargs)
-        c1 = await pool.get_connection("_")
-        c2 = await pool.get_connection("_")
-        assert c1 != c2
+        async with self.get_pool(connection_kwargs=connection_kwargs) as pool:
+            c1 = await pool.get_connection("_")
+            c2 = await pool.get_connection("_")
+            assert c1 != c2
 
     async def test_max_connections(self, master_host):
         connection_kwargs = {"host": master_host}
-        pool = self.get_pool(max_connections=2, connection_kwargs=connection_kwargs)
-        await pool.get_connection("_")
-        await pool.get_connection("_")
-        with pytest.raises(redis.ConnectionError):
+        async with self.get_pool(
+            max_connections=2, connection_kwargs=connection_kwargs
+        ) as pool:
+            await pool.get_connection("_")
             await pool.get_connection("_")
+            with pytest.raises(redis.ConnectionError):
+                await pool.get_connection("_")
 
     async def test_reuse_previously_released_connection(self, master_host):
         connection_kwargs = {"host": master_host}
-        pool = self.get_pool(connection_kwargs=connection_kwargs)
-        c1 = await pool.get_connection("_")
-        await pool.release(c1)
-        c2 = await pool.get_connection("_")
-        assert c1 == c2
+        async with self.get_pool(connection_kwargs=connection_kwargs) as pool:
+            c1 = await pool.get_connection("_")
+            await pool.release(c1)
+            c2 = await pool.get_connection("_")
+            assert c1 == c2
 
-    def test_repr_contains_db_info_tcp(self):
+    async def test_repr_contains_db_info_tcp(self):
         connection_kwargs = {
             "host": "localhost",
             "port": 6379,
             "db": 1,
             "client_name": "test-client",
         }
-        pool = self.get_pool(
+        async with self.get_pool(
             connection_kwargs=connection_kwargs, connection_class=redis.Connection
-        )
-        expected = (
-            "ConnectionPool<Connection<"
-            "host=localhost,port=6379,db=1,client_name=test-client>>"
-        )
-        assert repr(pool) == expected
+        ) as pool:
+            expected = (
+                "ConnectionPool<Connection<"
+                "host=localhost,port=6379,db=1,client_name=test-client>>"
+            )
+            assert repr(pool) == expected
 
-    def test_repr_contains_db_info_unix(self):
+    async def test_repr_contains_db_info_unix(self):
         connection_kwargs = {"path": "/abc", "db": 1, "client_name": "test-client"}
-        pool = self.get_pool(
+        async with self.get_pool(
             connection_kwargs=connection_kwargs,
             connection_class=redis.UnixDomainSocketConnection,
-        )
-        expected = (
-            "ConnectionPool<UnixDomainSocketConnection<"
-            "path=/abc,db=1,client_name=test-client>>"
-        )
-        assert repr(pool) == expected
+        ) as pool:
+            expected = (
+                "ConnectionPool<UnixDomainSocketConnection<"
+                "path=/abc,db=1,client_name=test-client>>"
+            )
+            assert repr(pool) == expected
 
 
 class TestBlockingConnectionPool:
-    def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
+    @asynccontextmanager
+    async def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
         connection_kwargs = connection_kwargs or {}
         pool = redis.BlockingConnectionPool(
             connection_class=DummyConnection,
@@ -198,7 +199,10 @@ class TestBlockingConnectionPool:
             timeout=timeout,
             **connection_kwargs,
         )
-        return pool
+        try:
+            yield pool
+        finally:
+            await pool.disconnect(inuse_connections=True)
 
     async def test_connection_creation(self, master_host):
         connection_kwargs = {
@@ -207,10 +211,10 @@ class TestBlockingConnectionPool:
             "host": master_host[0],
             "port": master_host[1],
         }
-        pool = self.get_pool(connection_kwargs=connection_kwargs)
-        connection = await pool.get_connection("_")
-        assert isinstance(connection, DummyConnection)
-        assert connection.kwargs == connection_kwargs
+        async with self.get_pool(connection_kwargs=connection_kwargs) as pool:
+            connection = await pool.get_connection("_")
+            assert isinstance(connection, DummyConnection)
+            assert connection.kwargs == connection_kwargs
 
     async def test_disconnect(self, master_host):
         """A regression test for #1047"""
@@ -220,30 +224,31 @@ class TestBlockingConnectionPool:
             "host": master_host[0],
             "port": master_host[1],
         }
-        pool = self.get_pool(connection_kwargs=connection_kwargs)
-        await pool.get_connection("_")
-        await pool.disconnect()
+        async with self.get_pool(connection_kwargs=connection_kwargs) as pool:
+            await pool.get_connection("_")
+            await pool.disconnect()
 
     async def test_multiple_connections(self, master_host):
         connection_kwargs = {"host": master_host[0], "port": master_host[1]}
-        pool = self.get_pool(connection_kwargs=connection_kwargs)
-        c1 = await pool.get_connection("_")
-        c2 = await pool.get_connection("_")
-        assert c1 != c2
+        async with self.get_pool(connection_kwargs=connection_kwargs) as pool:
+            c1 = await pool.get_connection("_")
+            c2 = await pool.get_connection("_")
+            assert c1 != c2
 
     async def test_connection_pool_blocks_until_timeout(self, master_host):
         """When out of connections, block for timeout seconds, then raise"""
         connection_kwargs = {"host": master_host}
-        pool = self.get_pool(
+        async with self.get_pool(
             max_connections=1, timeout=0.1, connection_kwargs=connection_kwargs
-        )
-        await pool.get_connection("_")
+        ) as pool:
+            c1 = await pool.get_connection("_")
 
-        start = asyncio.get_event_loop().time()
-        with pytest.raises(redis.ConnectionError):
-            await pool.get_connection("_")
-        # we should have waited at least 0.1 seconds
-        assert asyncio.get_event_loop().time() - start >= 0.1
+            start = asyncio.get_running_loop().time()
+            with pytest.raises(redis.ConnectionError):
+                await pool.get_connection("_")
+            # we should have waited at least 0.1 seconds
+            assert asyncio.get_running_loop().time() - start >= 0.1
+            await c1.disconnect()
 
     async def test_connection_pool_blocks_until_conn_available(self, master_host):
         """
@@ -251,26 +256,26 @@ class TestBlockingConnectionPool:
         to the pool
         """
         connection_kwargs = {"host": master_host[0], "port": master_host[1]}
-        pool = self.get_pool(
+        async with self.get_pool(
             max_connections=1, timeout=2, connection_kwargs=connection_kwargs
-        )
-        c1 = await pool.get_connection("_")
+        ) as pool:
+            c1 = await pool.get_connection("_")
 
-        async def target():
-            await asyncio.sleep(0.1)
-            await pool.release(c1)
+            async def target():
+                await asyncio.sleep(0.1)
+                await pool.release(c1)
 
-        start = asyncio.get_event_loop().time()
-        await asyncio.gather(target(), pool.get_connection("_"))
-        assert asyncio.get_event_loop().time() - start >= 0.1
+            start = asyncio.get_running_loop().time()
+            await asyncio.gather(target(), pool.get_connection("_"))
+            assert asyncio.get_running_loop().time() - start >= 0.1
 
     async def test_reuse_previously_released_connection(self, master_host):
         connection_kwargs = {"host": master_host}
-        pool = self.get_pool(connection_kwargs=connection_kwargs)
-        c1 = await pool.get_connection("_")
-        await pool.release(c1)
-        c2 = await pool.get_connection("_")
-        assert c1 == c2
+        async with self.get_pool(connection_kwargs=connection_kwargs) as pool:
+            c1 = await pool.get_connection("_")
+            await pool.release(c1)
+            c2 = await pool.get_connection("_")
+            assert c1 == c2
 
     def test_repr_contains_db_info_tcp(self):
         pool = redis.ConnectionPool(
@@ -411,7 +416,7 @@ class TestConnectionPoolURLParsing:
     def test_invalid_extra_typed_querystring_options(self):
         with pytest.raises(ValueError):
             redis.ConnectionPool.from_url(
-                "redis://localhost/2?socket_timeout=_&" "socket_connect_timeout=abc"
+                "redis://localhost/2?socket_timeout=_&socket_connect_timeout=abc"
             )
 
     def test_extra_querystring_options(self):
@@ -663,12 +668,12 @@ class TestHealthCheck:
         await redis.flushall()
 
     def assert_interval_advanced(self, connection):
-        diff = connection.next_health_check - asyncio.get_event_loop().time()
+        diff = connection.next_health_check - asyncio.get_running_loop().time()
         assert self.interval >= diff > (self.interval - 1)
 
     async def test_health_check_runs(self, r):
         if r.connection:
-            r.connection.next_health_check = asyncio.get_event_loop().time() - 1
+            r.connection.next_health_check = asyncio.get_running_loop().time() - 1
             await r.connection.check_health()
             self.assert_interval_advanced(r.connection)
 
@@ -676,7 +681,7 @@ class TestHealthCheck:
         # invoke a command to make sure the connection is entirely setup
         if r.connection:
             await r.get("foo")
-            r.connection.next_health_check = asyncio.get_event_loop().time()
+            r.connection.next_health_check = asyncio.get_running_loop().time()
             with mock.patch.object(
                 r.connection, "send_command", wraps=r.connection.send_command
             ) as m:
@@ -689,6 +694,8 @@ class TestHealthCheck:
         if r.connection:
             await r.get("foo")
             next_health_check = r.connection.next_health_check
+            # ensure that the event loop's `time()` advances a bit
+            await asyncio.sleep(0.001)
             await r.get("foo")
             assert next_health_check < r.connection.next_health_check
 
diff --git a/tests/test_asyncio/test_credentials.py b/tests/test_asyncio/test_credentials.py
new file mode 100644
index 0000000..8e213cd
--- /dev/null
+++ b/tests/test_asyncio/test_credentials.py
@@ -0,0 +1,284 @@
+import functools
+import random
+import string
+from typing import Optional, Tuple, Union
+
+import pytest
+import pytest_asyncio
+
+import redis
+from redis import AuthenticationError, DataError, ResponseError
+from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
+from redis.utils import str_if_bytes
+from tests.conftest import skip_if_redis_enterprise
+
+
+@pytest_asyncio.fixture()
+async def r_acl_teardown(r: redis.Redis):
+    """
+    A special fixture which removes the provided names from the database after use
+    """
+    usernames = []
+
+    def factory(username):
+        usernames.append(username)
+        return r
+
+    yield factory
+    for username in usernames:
+        await r.acl_deluser(username)
+
+
+@pytest_asyncio.fixture()
+async def r_required_pass_teardown(r: redis.Redis):
+    """
+    A special fixture which removes the provided password from the database after use
+    """
+    passwords = []
+
+    def factory(username):
+        passwords.append(username)
+        return r
+
+    yield factory
+    for password in passwords:
+        try:
+            await r.auth(password)
+        except (ResponseError, AuthenticationError):
+            await r.auth("default", "")
+        await r.config_set("requirepass", "")
+
+
+class NoPassCredProvider(CredentialProvider):
+    def get_credentials(self) -> Union[Tuple[str], Tuple[str, str]]:
+        return "username", ""
+
+
+class AsyncRandomAuthCredProvider(CredentialProvider):
+    def __init__(self, user: Optional[str], endpoint: str):
+        self.user = user
+        self.endpoint = endpoint
+
+    @functools.lru_cache(maxsize=10)
+    def get_credentials(self) -> Union[Tuple[str, str], Tuple[str]]:
+        def get_random_string(length):
+            letters = string.ascii_lowercase
+            result_str = "".join(random.choice(letters) for i in range(length))
+            return result_str
+
+        if self.user:
+            auth_token: str = get_random_string(5) + self.user + "_" + self.endpoint
+            return self.user, auth_token
+        else:
+            auth_token: str = get_random_string(5) + self.endpoint
+            return (auth_token,)
+
+
+async def init_acl_user(r, username, password):
+    # reset the user
+    await r.acl_deluser(username)
+    if password:
+        assert (
+            await r.acl_setuser(
+                username,
+                enabled=True,
+                passwords=["+" + password],
+                keys="~*",
+                commands=[
+                    "+ping",
+                    "+command",
+                    "+info",
+                    "+select",
+                    "+flushdb",
+                    "+cluster",
+                ],
+            )
+            is True
+        )
+    else:
+        assert (
+            await r.acl_setuser(
+                username,
+                enabled=True,
+                keys="~*",
+                commands=[
+                    "+ping",
+                    "+command",
+                    "+info",
+                    "+select",
+                    "+flushdb",
+                    "+cluster",
+                ],
+                nopass=True,
+            )
+            is True
+        )
+
+
+async def init_required_pass(r, password):
+    await r.config_set("requirepass", password)
+
+
+@pytest.mark.asyncio
+class TestCredentialsProvider:
+    @skip_if_redis_enterprise()
+    async def test_only_pass_without_creds_provider(
+        self, r_required_pass_teardown, create_redis
+    ):
+        # test for default user (`username` is supposed to be optional)
+        password = "password"
+        r = r_required_pass_teardown(password)
+        await init_required_pass(r, password)
+        assert await r.auth(password) is True
+
+        r2 = await create_redis(flushdb=False, password=password)
+
+        assert await r2.ping() is True
+
+    @skip_if_redis_enterprise()
+    async def test_user_and_pass_without_creds_provider(
+        self, r_acl_teardown, create_redis
+    ):
+        """
+        Test backward compatibility with username and password
+        """
+        # test for other users
+        username = "username"
+        password = "password"
+        r = r_acl_teardown(username)
+        await init_acl_user(r, username, password)
+        r2 = await create_redis(flushdb=False, username=username, password=password)
+
+        assert await r2.ping() is True
+
+    @pytest.mark.parametrize("username", ["username", None])
+    @skip_if_redis_enterprise()
+    @pytest.mark.onlynoncluster
+    async def test_credential_provider_with_supplier(
+        self, r_acl_teardown, r_required_pass_teardown, create_redis, username
+    ):
+        creds_provider = AsyncRandomAuthCredProvider(
+            user=username,
+            endpoint="localhost",
+        )
+
+        auth_args = creds_provider.get_credentials()
+        password = auth_args[-1]
+
+        if username:
+            r = r_acl_teardown(username)
+            await init_acl_user(r, username, password)
+        else:
+            r = r_required_pass_teardown(password)
+            await init_required_pass(r, password)
+
+        r2 = await create_redis(flushdb=False, credential_provider=creds_provider)
+
+        assert await r2.ping() is True
+
+    async def test_async_credential_provider_no_password_success(
+        self, r_acl_teardown, create_redis
+    ):
+        username = "username"
+        r = r_acl_teardown(username)
+        await init_acl_user(r, username, "")
+        r2 = await create_redis(
+            flushdb=False,
+            credential_provider=NoPassCredProvider(),
+        )
+        assert await r2.ping() is True
+
+    @pytest.mark.onlynoncluster
+    async def test_credential_provider_no_password_error(
+        self, r_acl_teardown, create_redis
+    ):
+        username = "username"
+        r = r_acl_teardown(username)
+        await init_acl_user(r, username, "password")
+        with pytest.raises(AuthenticationError) as e:
+            await create_redis(
+                flushdb=False,
+                credential_provider=NoPassCredProvider(),
+                single_connection_client=True,
+            )
+        assert e.match("invalid username-password")
+        assert await r.acl_deluser(username)
+
+    @pytest.mark.onlynoncluster
+    async def test_password_and_username_together_with_cred_provider_raise_error(
+        self, r_acl_teardown, create_redis
+    ):
+        username = "username"
+        r = r_acl_teardown(username)
+        await init_acl_user(r, username, "password")
+        cred_provider = UsernamePasswordCredentialProvider(
+            username="username", password="password"
+        )
+        with pytest.raises(DataError) as e:
+            await create_redis(
+                flushdb=False,
+                username="username",
+                password="password",
+                credential_provider=cred_provider,
+                single_connection_client=True,
+            )
+        assert e.match(
+            "'username' and 'password' cannot be passed along with "
+            "'credential_provider'."
+        )
+
+    @pytest.mark.onlynoncluster
+    async def test_change_username_password_on_existing_connection(
+        self, r_acl_teardown, create_redis
+    ):
+        username = "origin_username"
+        password = "origin_password"
+        new_username = "new_username"
+        new_password = "new_password"
+        r = r_acl_teardown(username)
+        await init_acl_user(r, username, password)
+        r2 = await create_redis(flushdb=False, username=username, password=password)
+        assert await r2.ping() is True
+        conn = await r2.connection_pool.get_connection("_")
+        await conn.send_command("PING")
+        assert str_if_bytes(await conn.read_response()) == "PONG"
+        assert conn.username == username
+        assert conn.password == password
+        await init_acl_user(r, new_username, new_password)
+        conn.password = new_password
+        conn.username = new_username
+        await conn.send_command("PING")
+        assert str_if_bytes(await conn.read_response()) == "PONG"
+
+
+@pytest.mark.asyncio
+class TestUsernamePasswordCredentialProvider:
+    async def test_user_pass_credential_provider_acl_user_and_pass(
+        self, r_acl_teardown, create_redis
+    ):
+        username = "username"
+        password = "password"
+        r = r_acl_teardown(username)
+        provider = UsernamePasswordCredentialProvider(username, password)
+        assert provider.username == username
+        assert provider.password == password
+        assert provider.get_credentials() == (username, password)
+        await init_acl_user(r, provider.username, provider.password)
+        r2 = await create_redis(flushdb=False, credential_provider=provider)
+        assert await r2.ping() is True
+
+    async def test_user_pass_provider_only_password(
+        self, r_required_pass_teardown, create_redis
+    ):
+        password = "password"
+        provider = UsernamePasswordCredentialProvider(password=password)
+        r = r_required_pass_teardown(password)
+        assert provider.username == ""
+        assert provider.password == password
+        assert provider.get_credentials() == (password,)
+
+        await init_required_pass(r, password)
+
+        r2 = await create_redis(flushdb=False, credential_provider=provider)
+        assert await r2.auth(provider.password) is True
+        assert await r2.ping() is True
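
A compact sketch of the credential-provider API these tests target; the username, password and server below are placeholders.

import asyncio

import redis.asyncio as redis
from redis.credentials import UsernamePasswordCredentialProvider


async def main():
    provider = UsernamePasswordCredentialProvider("someuser", "somepass")
    # the provider hands back (username, password), or (password,) when no username is set
    assert provider.get_credentials() == ("someuser", "somepass")
    r = redis.Redis(credential_provider=provider)
    print(await r.ping())
    await r.close()


asyncio.run(main())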
diff --git a/tests/test_asyncio/test_encoding.py b/tests/test_asyncio/test_encoding.py
index 133ea37..3efcf69 100644
--- a/tests/test_asyncio/test_encoding.py
+++ b/tests/test_asyncio/test_encoding.py
@@ -1,17 +1,9 @@
-import sys
-
 import pytest
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
+import pytest_asyncio
 
 import redis.asyncio as redis
 from redis.exceptions import DataError
 
-pytestmark = pytest.mark.asyncio
-
 
 @pytest.mark.onlynoncluster
 class TestEncoding:
diff --git a/tests/test_asyncio/test_graph.py b/tests/test_asyncio/test_graph.py
new file mode 100644
index 0000000..7e70baa
--- /dev/null
+++ b/tests/test_asyncio/test_graph.py
@@ -0,0 +1,501 @@
+import pytest
+
+import redis.asyncio as redis
+from redis.commands.graph import Edge, Node, Path
+from redis.commands.graph.execution_plan import Operation
+from redis.exceptions import ResponseError
+from tests.conftest import skip_if_redis_enterprise
+
+
+@pytest.mark.redismod
+async def test_bulk(modclient):
+    with pytest.raises(NotImplementedError):
+        await modclient.graph().bulk()
+        await modclient.graph().bulk(foo="bar!")
+
+
+@pytest.mark.redismod
+async def test_graph_creation(modclient: redis.Redis):
+    graph = modclient.graph()
+
+    john = Node(
+        label="person",
+        properties={
+            "name": "John Doe",
+            "age": 33,
+            "gender": "male",
+            "status": "single",
+        },
+    )
+    graph.add_node(john)
+    japan = Node(label="country", properties={"name": "Japan"})
+
+    graph.add_node(japan)
+    edge = Edge(john, "visited", japan, properties={"purpose": "pleasure"})
+    graph.add_edge(edge)
+
+    await graph.commit()
+
+    query = (
+        'MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country) '
+        "RETURN p, v, c"
+    )
+
+    result = await graph.query(query)
+
+    person = result.result_set[0][0]
+    visit = result.result_set[0][1]
+    country = result.result_set[0][2]
+
+    assert person == john
+    assert visit.properties == edge.properties
+    assert country == japan
+
+    query = """RETURN [1, 2.3, "4", true, false, null]"""
+    result = await graph.query(query)
+    assert [1, 2.3, "4", True, False, None] == result.result_set[0][0]
+
+    # All done, remove graph.
+    await graph.delete()
+
+
+@pytest.mark.redismod
+async def test_array_functions(modclient: redis.Redis):
+    graph = modclient.graph()
+
+    query = """CREATE (p:person{name:'a',age:32, array:[0,1,2]})"""
+    await graph.query(query)
+
+    query = """WITH [0,1,2] as x return x"""
+    result = await graph.query(query)
+    assert [0, 1, 2] == result.result_set[0][0]
+
+    query = """MATCH(n) return collect(n)"""
+    result = await graph.query(query)
+
+    a = Node(
+        node_id=0,
+        label="person",
+        properties={"name": "a", "age": 32, "array": [0, 1, 2]},
+    )
+
+    assert [a] == result.result_set[0][0]
+
+
+@pytest.mark.redismod
+async def test_path(modclient: redis.Redis):
+    node0 = Node(node_id=0, label="L1")
+    node1 = Node(node_id=1, label="L1")
+    edge01 = Edge(node0, "R1", node1, edge_id=0, properties={"value": 1})
+
+    graph = modclient.graph()
+    graph.add_node(node0)
+    graph.add_node(node1)
+    graph.add_edge(edge01)
+    await graph.flush()
+
+    path01 = Path.new_empty_path().add_node(node0).add_edge(edge01).add_node(node1)
+    expected_results = [[path01]]
+
+    query = "MATCH p=(:L1)-[:R1]->(:L1) RETURN p ORDER BY p"
+    result = await graph.query(query)
+    assert expected_results == result.result_set
+
+
+@pytest.mark.redismod
+async def test_param(modclient: redis.Redis):
+    params = [1, 2.3, "str", True, False, None, [0, 1, 2]]
+    query = "RETURN $param"
+    for param in params:
+        result = await modclient.graph().query(query, {"param": param})
+        expected_results = [[param]]
+        assert expected_results == result.result_set
+
+
+@pytest.mark.redismod
+async def test_map(modclient: redis.Redis):
+    query = "RETURN {a:1, b:'str', c:NULL, d:[1,2,3], e:True, f:{x:1, y:2}}"
+
+    actual = (await modclient.graph().query(query)).result_set[0][0]
+    expected = {
+        "a": 1,
+        "b": "str",
+        "c": None,
+        "d": [1, 2, 3],
+        "e": True,
+        "f": {"x": 1, "y": 2},
+    }
+
+    assert actual == expected
+
+
+@pytest.mark.redismod
+async def test_point(modclient: redis.Redis):
+    query = "RETURN point({latitude: 32.070794860, longitude: 34.820751118})"
+    expected_lat = 32.070794860
+    expected_lon = 34.820751118
+    actual = (await modclient.graph().query(query)).result_set[0][0]
+    assert abs(actual["latitude"] - expected_lat) < 0.001
+    assert abs(actual["longitude"] - expected_lon) < 0.001
+
+    query = "RETURN point({latitude: 32, longitude: 34.0})"
+    expected_lat = 32
+    expected_lon = 34
+    actual = (await modclient.graph().query(query)).result_set[0][0]
+    assert abs(actual["latitude"] - expected_lat) < 0.001
+    assert abs(actual["longitude"] - expected_lon) < 0.001
+
+
+@pytest.mark.redismod
+async def test_index_response(modclient: redis.Redis):
+    result_set = await modclient.graph().query("CREATE INDEX ON :person(age)")
+    assert 1 == result_set.indices_created
+
+    result_set = await modclient.graph().query("CREATE INDEX ON :person(age)")
+    assert 0 == result_set.indices_created
+
+    result_set = await modclient.graph().query("DROP INDEX ON :person(age)")
+    assert 1 == result_set.indices_deleted
+
+    with pytest.raises(ResponseError):
+        await modclient.graph().query("DROP INDEX ON :person(age)")
+
+
+@pytest.mark.redismod
+async def test_stringify_query_result(modclient: redis.Redis):
+    graph = modclient.graph()
+
+    john = Node(
+        alias="a",
+        label="person",
+        properties={
+            "name": "John Doe",
+            "age": 33,
+            "gender": "male",
+            "status": "single",
+        },
+    )
+    graph.add_node(john)
+
+    japan = Node(alias="b", label="country", properties={"name": "Japan"})
+    graph.add_node(japan)
+
+    edge = Edge(john, "visited", japan, properties={"purpose": "pleasure"})
+    graph.add_edge(edge)
+
+    assert (
+        str(john)
+        == """(a:person{age:33,gender:"male",name:"John Doe",status:"single"})"""  # noqa
+    )
+    assert (
+        str(edge)
+        == """(a:person{age:33,gender:"male",name:"John Doe",status:"single"})"""  # noqa
+        + """-[:visited{purpose:"pleasure"}]->"""
+        + """(b:country{name:"Japan"})"""
+    )
+    assert str(japan) == """(b:country{name:"Japan"})"""
+
+    await graph.commit()
+
+    query = """MATCH (p:person)-[v:visited {purpose:"pleasure"}]->(c:country)
+            RETURN p, v, c"""
+
+    result = await graph.query(query)
+    person = result.result_set[0][0]
+    visit = result.result_set[0][1]
+    country = result.result_set[0][2]
+
+    assert (
+        str(person)
+        == """(:person{age:33,gender:"male",name:"John Doe",status:"single"})"""  # noqa
+    )
+    assert str(visit) == """()-[:visited{purpose:"pleasure"}]->()"""
+    assert str(country) == """(:country{name:"Japan"})"""
+
+    await graph.delete()
+
+
+@pytest.mark.redismod
+async def test_optional_match(modclient: redis.Redis):
+    # Build a graph of form (a)-[R]->(b)
+    node0 = Node(node_id=0, label="L1", properties={"value": "a"})
+    node1 = Node(node_id=1, label="L1", properties={"value": "b"})
+
+    edge01 = Edge(node0, "R", node1, edge_id=0)
+
+    graph = modclient.graph()
+    graph.add_node(node0)
+    graph.add_node(node1)
+    graph.add_edge(edge01)
+    await graph.flush()
+
+    # Issue a query that collects all outgoing edges from both nodes
+    # (the second has none)
+    query = """MATCH (a) OPTIONAL MATCH (a)-[e]->(b) RETURN a, e, b ORDER BY a.value"""  # noqa
+    expected_results = [[node0, edge01, node1], [node1, None, None]]
+
+    result = await graph.query(query)
+    assert expected_results == result.result_set
+
+    await graph.delete()
+
+
+@pytest.mark.redismod
+async def test_cached_execution(modclient: redis.Redis):
+    await modclient.graph().query("CREATE ()")
+
+    uncached_result = await modclient.graph().query(
+        "MATCH (n) RETURN n, $param", {"param": [0]}
+    )
+    assert uncached_result.cached_execution is False
+
+    # loop to make sure the query is cached on each thread on the server
+    for x in range(0, 64):
+        cached_result = await modclient.graph().query(
+            "MATCH (n) RETURN n, $param", {"param": [0]}
+        )
+        assert uncached_result.result_set == cached_result.result_set
+
+    # should be cached on all threads by now
+    assert cached_result.cached_execution
+
+
+@pytest.mark.redismod
+async def test_slowlog(modclient: redis.Redis):
+    create_query = """CREATE
+    (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
+    (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
+    (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
+    await modclient.graph().query(create_query)
+
+    results = await modclient.graph().slowlog()
+    assert results[0][1] == "GRAPH.QUERY"
+    assert results[0][2] == create_query
+
+
+@pytest.mark.redismod
+async def test_query_timeout(modclient: redis.Redis):
+    # Build a sample graph with 1000 nodes.
+    await modclient.graph().query("UNWIND range(0,1000) as val CREATE ({v: val})")
+    # Issue a long-running query with a 1-millisecond timeout.
+    with pytest.raises(ResponseError):
+        await modclient.graph().query("MATCH (a), (b), (c), (d) RETURN *", timeout=1)
+        assert False is False
+
+    with pytest.raises(Exception):
+        await modclient.graph().query("RETURN 1", timeout="str")
+        assert False is False
+
+
+@pytest.mark.redismod
+async def test_read_only_query(modclient: redis.Redis):
+    with pytest.raises(Exception):
+        # Issue a write query while specifying read_only=True;
+        # this call should fail.
+        await modclient.graph().query("CREATE (p:person {name:'a'})", read_only=True)
+        assert False is False
+
+
+@pytest.mark.redismod
+async def test_profile(modclient: redis.Redis):
+    q = """UNWIND range(1, 3) AS x CREATE (p:Person {v:x})"""
+    profile = (await modclient.graph().profile(q)).result_set
+    assert "Create | Records produced: 3" in profile
+    assert "Unwind | Records produced: 3" in profile
+
+    q = "MATCH (p:Person) WHERE p.v > 1 RETURN p"
+    profile = (await modclient.graph().profile(q)).result_set
+    assert "Results | Records produced: 2" in profile
+    assert "Project | Records produced: 2" in profile
+    assert "Filter | Records produced: 2" in profile
+    assert "Node By Label Scan | (p:Person) | Records produced: 3" in profile
+
+
+@pytest.mark.redismod
+@skip_if_redis_enterprise()
+async def test_config(modclient: redis.Redis):
+    config_name = "RESULTSET_SIZE"
+    config_value = 3
+
+    # Set configuration
+    response = await modclient.graph().config(config_name, config_value, set=True)
+    assert response == "OK"
+
+    # Make sure the config has been updated.
+    response = await modclient.graph().config(config_name, set=False)
+    expected_response = [config_name, config_value]
+    assert response == expected_response
+
+    config_name = "QUERY_MEM_CAPACITY"
+    config_value = 1 << 20  # 1MB
+
+    # Set configuration
+    response = await modclient.graph().config(config_name, config_value, set=True)
+    assert response == "OK"
+
+    # Make sure the config has been updated.
+    response = await modclient.graph().config(config_name, set=False)
+    expected_response = [config_name, config_value]
+    assert response == expected_response
+
+    # reset to default
+    await modclient.graph().config("QUERY_MEM_CAPACITY", 0, set=True)
+    await modclient.graph().config("RESULTSET_SIZE", -100, set=True)
+
+
+@pytest.mark.redismod
+@pytest.mark.onlynoncluster
+async def test_list_keys(modclient: redis.Redis):
+    result = await modclient.graph().list_keys()
+    assert result == []
+
+    await modclient.graph("G").query("CREATE (n)")
+    result = await modclient.graph().list_keys()
+    assert result == ["G"]
+
+    await modclient.graph("X").query("CREATE (m)")
+    result = await modclient.graph().list_keys()
+    assert result == ["G", "X"]
+
+    await modclient.delete("G")
+    await modclient.rename("X", "Z")
+    result = await modclient.graph().list_keys()
+    assert result == ["Z"]
+
+    await modclient.delete("Z")
+    result = await modclient.graph().list_keys()
+    assert result == []
+
+
+@pytest.mark.redismod
+async def test_multi_label(modclient: redis.Redis):
+    redis_graph = modclient.graph("g")
+
+    node = Node(label=["l", "ll"])
+    redis_graph.add_node(node)
+    await redis_graph.commit()
+
+    query = "MATCH (n) RETURN n"
+    result = await redis_graph.query(query)
+    result_node = result.result_set[0][0]
+    assert result_node == node
+
+    try:
+        Node(label=1)
+        assert False
+    except AssertionError:
+        assert True
+
+    try:
+        Node(label=["l", 1])
+        assert False
+    except AssertionError:
+        assert True
+
+
+@pytest.mark.redismod
+async def test_execution_plan(modclient: redis.Redis):
+    redis_graph = modclient.graph("execution_plan")
+    create_query = """CREATE
+    (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
+    (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
+    (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
+    await redis_graph.query(create_query)
+
+    result = await redis_graph.execution_plan(
+        "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name, $params",  # noqa
+        {"name": "Yehuda"},
+    )
+    expected = "Results\n    Project\n        Conditional Traverse | (t)->(r:Rider)\n            Filter\n                Node By Label Scan | (t:Team)"  # noqa
+    assert result == expected
+
+    await redis_graph.delete()
+
+
+@pytest.mark.redismod
+async def test_explain(modclient: redis.Redis):
+    redis_graph = modclient.graph("execution_plan")
+    # graph creation / population
+    create_query = """CREATE
+(:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
+(:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
+(:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
+    await redis_graph.query(create_query)
+
+    result = await redis_graph.explain(
+        """MATCH (r:Rider)-[:rides]->(t:Team)
+WHERE t.name = $name
+RETURN r.name, t.name
+UNION
+MATCH (r:Rider)-[:rides]->(t:Team)
+WHERE t.name = $name
+RETURN r.name, t.name""",
+        {"name": "Yamaha"},
+    )
+    expected = """\
+Results
+Distinct
+    Join
+        Project
+            Conditional Traverse | (t)->(r:Rider)
+                Filter
+                    Node By Label Scan | (t:Team)
+        Project
+            Conditional Traverse | (t)->(r:Rider)
+                Filter
+                    Node By Label Scan | (t:Team)"""
+    assert str(result).replace(" ", "").replace("\n", "") == expected.replace(
+        " ", ""
+    ).replace("\n", "")
+
+    expected = Operation("Results").append_child(
+        Operation("Distinct").append_child(
+            Operation("Join")
+            .append_child(
+                Operation("Project").append_child(
+                    Operation("Conditional Traverse", "(t)->(r:Rider)").append_child(
+                        Operation("Filter").append_child(
+                            Operation("Node By Label Scan", "(t:Team)")
+                        )
+                    )
+                )
+            )
+            .append_child(
+                Operation("Project").append_child(
+                    Operation("Conditional Traverse", "(t)->(r:Rider)").append_child(
+                        Operation("Filter").append_child(
+                            Operation("Node By Label Scan", "(t:Team)")
+                        )
+                    )
+                )
+            )
+        )
+    )
+
+    assert result.structured_plan == expected
+
+    result = await redis_graph.explain(
+        """MATCH (r:Rider), (t:Team)
+                                    RETURN r.name, t.name"""
+    )
+    expected = """\
+Results
+Project
+    Cartesian Product
+        Node By Label Scan | (r:Rider)
+        Node By Label Scan | (t:Team)"""
+    assert str(result).replace(" ", "").replace("\n", "") == expected.replace(
+        " ", ""
+    ).replace("\n", "")
+
+    expected = Operation("Results").append_child(
+        Operation("Project").append_child(
+            Operation("Cartesian Product")
+            .append_child(Operation("Node By Label Scan"))
+            .append_child(Operation("Node By Label Scan"))
+        )
+    )
+
+    assert result.structured_plan == expected
+
+    await redis_graph.delete()
diff --git a/tests/test_asyncio/test_json.py b/tests/test_asyncio/test_json.py
index a045dd7..b8854d2 100644
--- a/tests/test_asyncio/test_json.py
+++ b/tests/test_asyncio/test_json.py
@@ -5,8 +5,6 @@ from redis import exceptions
 from redis.commands.json.path import Path
 from tests.conftest import skip_ifmodversion_lt
 
-pytestmark = pytest.mark.asyncio
-
 
 @pytest.mark.redismod
 async def test_json_setbinarykey(modclient: redis.Redis):
@@ -819,7 +817,7 @@ async def test_objlen_dollar(modclient: redis.Redis):
         },
     )
     # Test multi
-    assert await modclient.json().objlen("doc1", "$..a") == [2, None, 1]
+    assert await modclient.json().objlen("doc1", "$..a") == [None, 2, 1]
     # Test single
     assert await modclient.json().objlen("doc1", "$.nested1.a") == [2]
 
diff --git a/tests/test_asyncio/test_lock.py b/tests/test_asyncio/test_lock.py
index 8ceb3bc..d78f741 100644
--- a/tests/test_asyncio/test_lock.py
+++ b/tests/test_asyncio/test_lock.py
@@ -1,20 +1,12 @@
 import asyncio
-import sys
 
 import pytest
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
+import pytest_asyncio
 
 from redis.asyncio.lock import Lock
 from redis.exceptions import LockError, LockNotOwnedError
 
-pytestmark = pytest.mark.asyncio
-
 
-@pytest.mark.onlynoncluster
 class TestLock:
     @pytest_asyncio.fixture()
     async def r_decoded(self, create_redis):
@@ -105,6 +97,14 @@ class TestLock:
         assert 8 < (await r.pttl("foo")) <= 9500
         await lock.release()
 
+    async def test_blocking(self, r):
+        blocking = False
+        lock = self.get_lock(r, "foo", blocking=blocking)
+        assert not lock.blocking
+
+        lock_2 = self.get_lock(r, "foo")
+        assert lock_2.blocking
+
     async def test_blocking_timeout(self, r, event_loop):
         lock1 = self.get_lock(r, "foo")
         assert await lock1.acquire(blocking=False)
@@ -136,11 +136,11 @@ class TestLock:
         sleep = 60
         bt = 1
         lock2 = self.get_lock(r, "foo", sleep=sleep, blocking_timeout=bt)
-        start = asyncio.get_event_loop().time()
+        start = asyncio.get_running_loop().time()
         assert not await lock2.acquire()
         # the elapsed time is less than the blocking_timeout as the lock is
         # unattainable given the sleep/blocking_timeout configuration
-        assert bt > (asyncio.get_event_loop().time() - start)
+        assert bt > (asyncio.get_running_loop().time() - start)
         await lock1.release()
 
     async def test_releasing_unlocked_lock_raises_error(self, r):
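
The test_blocking addition above exercises the new `blocking` flag on redis.asyncio.lock.Lock. A minimal sketch of how that flag might be used outside the test suite, assuming the redis-py 4.5 asyncio API; the key name and client setup are illustrative only:

# Illustrative sketch, not part of the diff: with blocking=False, acquire()
# fails fast instead of polling until blocking_timeout.
import asyncio

import redis.asyncio as redis
from redis.asyncio.lock import Lock

async def main():
    r = redis.Redis()
    lock = Lock(r, "resource", blocking=False)  # hypothetical key name
    if await lock.acquire():
        try:
            ...  # critical section
        finally:
            await lock.release()
    else:
        print("resource is busy, not waiting")

asyncio.run(main())
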
diff --git a/tests/test_asyncio/test_monitor.py b/tests/test_asyncio/test_monitor.py
index 783ba26..3551579 100644
--- a/tests/test_asyncio/test_monitor.py
+++ b/tests/test_asyncio/test_monitor.py
@@ -4,8 +4,6 @@ from tests.conftest import skip_if_redis_enterprise, skip_ifnot_redis_enterprise
 
 from .conftest import wait_for_command
 
-pytestmark = pytest.mark.asyncio
-
 
 @pytest.mark.onlynoncluster
 class TestMonitor:
diff --git a/tests/test_asyncio/test_pipeline.py b/tests/test_asyncio/test_pipeline.py
index dfeb664..3df57eb 100644
--- a/tests/test_asyncio/test_pipeline.py
+++ b/tests/test_asyncio/test_pipeline.py
@@ -5,8 +5,6 @@ from tests.conftest import skip_if_server_version_lt
 
 from .conftest import wait_for_command
 
-pytestmark = pytest.mark.asyncio
-
 
 class TestPipeline:
     @pytest.mark.onlynoncluster
@@ -126,7 +124,7 @@ class TestPipeline:
             with pytest.raises(redis.ResponseError) as ex:
                 await pipe.execute()
             assert str(ex.value).startswith(
-                "Command # 3 (LPUSH c 3) of " "pipeline caused error: "
+                "Command # 3 (LPUSH c 3) of pipeline caused error: "
             )
 
             # make sure the pipe was restored to a working state
@@ -171,7 +169,7 @@ class TestPipeline:
                 await pipe.execute()
 
             assert str(ex.value).startswith(
-                "Command # 2 (ZREM b) of " "pipeline caused error: "
+                "Command # 2 (ZREM b) of pipeline caused error: "
             )
 
             # make sure the pipe was restored to a working state
@@ -188,7 +186,7 @@ class TestPipeline:
                 await pipe.execute()
 
             assert str(ex.value).startswith(
-                "Command # 2 (ZREM b) of " "pipeline caused error: "
+                "Command # 2 (ZREM b) of pipeline caused error: "
             )
 
             # make sure the pipe was restored to a working state
@@ -335,7 +333,7 @@ class TestPipeline:
                 await pipe.execute()
 
             assert str(ex.value).startswith(
-                "Command # 1 (LLEN a) of " "pipeline caused error: "
+                "Command # 1 (LLEN a) of pipeline caused error: "
             )
 
         assert await r.get("a") == b"1"
diff --git a/tests/test_asyncio/test_pubsub.py b/tests/test_asyncio/test_pubsub.py
index 6c76bf3..c2a9130 100644
--- a/tests/test_asyncio/test_pubsub.py
+++ b/tests/test_asyncio/test_pubsub.py
@@ -1,24 +1,20 @@
 import asyncio
 import functools
+import socket
 import sys
 from typing import Optional
+from unittest.mock import patch
 
 import async_timeout
 import pytest
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
+import pytest_asyncio
 
 import redis.asyncio as redis
 from redis.exceptions import ConnectionError
 from redis.typing import EncodableT
 from tests.conftest import skip_if_server_version_lt
 
-from .compat import mock
-
-pytestmark = pytest.mark.asyncio(forbid_global_loop=True)
+from .compat import create_task, mock
 
 
 def with_timeout(t):
@@ -33,8 +29,8 @@ def with_timeout(t):
     return wrapper
 
 
-async def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False):
-    now = asyncio.get_event_loop().time()
+async def wait_for_message(pubsub, timeout=0.2, ignore_subscribe_messages=False):
+    now = asyncio.get_running_loop().time()
     timeout = now + timeout
     while now < timeout:
         message = await pubsub.get_message(
@@ -43,7 +39,7 @@ async def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False)
         if message is not None:
             return message
         await asyncio.sleep(0.01)
-        now = asyncio.get_event_loop().time()
+        now = asyncio.get_running_loop().time()
     return None
 
 
@@ -80,6 +76,13 @@ def make_subscribe_test_data(pubsub, type):
     assert False, f"invalid subscribe type: {type}"
 
 
+@pytest_asyncio.fixture()
+async def pubsub(r: redis.Redis):
+    p = r.pubsub()
+    yield p
+    await p.close()
+
+
 @pytest.mark.onlynoncluster
 class TestPubSubSubscribeUnsubscribe:
     async def _test_subscribe_unsubscribe(
@@ -101,12 +104,12 @@ class TestPubSubSubscribeUnsubscribe:
             i = len(keys) - 1 - i
             assert await wait_for_message(p) == make_message(unsub_type, key, i)
 
-    async def test_channel_subscribe_unsubscribe(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "channel")
+    async def test_channel_subscribe_unsubscribe(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "channel")
         await self._test_subscribe_unsubscribe(**kwargs)
 
-    async def test_pattern_subscribe_unsubscribe(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
+    async def test_pattern_subscribe_unsubscribe(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "pattern")
         await self._test_subscribe_unsubscribe(**kwargs)
 
     @pytest.mark.onlynoncluster
@@ -144,12 +147,12 @@ class TestPubSubSubscribeUnsubscribe:
         for channel in unique_channels:
             assert channel in keys
 
-    async def test_resubscribe_to_channels_on_reconnection(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "channel")
+    async def test_resubscribe_to_channels_on_reconnection(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "channel")
         await self._test_resubscribe_on_reconnection(**kwargs)
 
-    async def test_resubscribe_to_patterns_on_reconnection(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
+    async def test_resubscribe_to_patterns_on_reconnection(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "pattern")
         await self._test_resubscribe_on_reconnection(**kwargs)
 
     async def _test_subscribed_property(
@@ -199,13 +202,13 @@ class TestPubSubSubscribeUnsubscribe:
         # now we're finally unsubscribed
         assert p.subscribed is False
 
-    async def test_subscribe_property_with_channels(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "channel")
+    async def test_subscribe_property_with_channels(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "channel")
         await self._test_subscribed_property(**kwargs)
 
     @pytest.mark.onlynoncluster
-    async def test_subscribe_property_with_patterns(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
+    async def test_subscribe_property_with_patterns(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "pattern")
         await self._test_subscribed_property(**kwargs)
 
     async def test_ignore_all_subscribe_messages(self, r: redis.Redis):
@@ -224,9 +227,10 @@ class TestPubSubSubscribeUnsubscribe:
             assert p.subscribed is True
             assert await wait_for_message(p) is None
         assert p.subscribed is False
+        await p.close()
 
-    async def test_ignore_individual_subscribe_messages(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_ignore_individual_subscribe_messages(self, pubsub):
+        p = pubsub
 
         checks = (
             (p.subscribe, "foo"),
@@ -243,13 +247,13 @@ class TestPubSubSubscribeUnsubscribe:
             assert message is None
         assert p.subscribed is False
 
-    async def test_sub_unsub_resub_channels(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "channel")
+    async def test_sub_unsub_resub_channels(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "channel")
         await self._test_sub_unsub_resub(**kwargs)
 
     @pytest.mark.onlynoncluster
-    async def test_sub_unsub_resub_patterns(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
+    async def test_sub_unsub_resub_patterns(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "pattern")
         await self._test_sub_unsub_resub(**kwargs)
 
     async def _test_sub_unsub_resub(
@@ -266,12 +270,12 @@ class TestPubSubSubscribeUnsubscribe:
         assert await wait_for_message(p) == make_message(sub_type, key, 1)
         assert p.subscribed is True
 
-    async def test_sub_unsub_all_resub_channels(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "channel")
+    async def test_sub_unsub_all_resub_channels(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "channel")
         await self._test_sub_unsub_all_resub(**kwargs)
 
-    async def test_sub_unsub_all_resub_patterns(self, r: redis.Redis):
-        kwargs = make_subscribe_test_data(r.pubsub(), "pattern")
+    async def test_sub_unsub_all_resub_patterns(self, pubsub):
+        kwargs = make_subscribe_test_data(pubsub, "pattern")
         await self._test_sub_unsub_all_resub(**kwargs)
 
     async def _test_sub_unsub_all_resub(
@@ -300,8 +304,8 @@ class TestPubSubMessages:
     async def async_message_handler(self, message):
         self.async_message = message
 
-    async def test_published_message_to_channel(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_published_message_to_channel(self, r: redis.Redis, pubsub):
+        p = pubsub
         await p.subscribe("foo")
         assert await wait_for_message(p) == make_message("subscribe", "foo", 1)
         assert await r.publish("foo", "test message") == 1
@@ -310,8 +314,8 @@ class TestPubSubMessages:
         assert isinstance(message, dict)
         assert message == make_message("message", "foo", "test message")
 
-    async def test_published_message_to_pattern(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_published_message_to_pattern(self, r: redis.Redis, pubsub):
+        p = pubsub
         await p.subscribe("foo")
         await p.psubscribe("f*")
         assert await wait_for_message(p) == make_message("subscribe", "foo", 1)
@@ -340,6 +344,7 @@ class TestPubSubMessages:
         assert await r.publish("foo", "test message") == 1
         assert await wait_for_message(p) is None
         assert self.message == make_message("message", "foo", "test message")
+        await p.close()
 
     async def test_channel_async_message_handler(self, r):
         p = r.pubsub(ignore_subscribe_messages=True)
@@ -348,6 +353,7 @@ class TestPubSubMessages:
         assert await r.publish("foo", "test message") == 1
         assert await wait_for_message(p) is None
         assert self.async_message == make_message("message", "foo", "test message")
+        await p.close()
 
     async def test_channel_sync_async_message_handler(self, r):
         p = r.pubsub(ignore_subscribe_messages=True)
@@ -359,6 +365,7 @@ class TestPubSubMessages:
         assert await wait_for_message(p) is None
         assert self.message == make_message("message", "foo", "test message")
         assert self.async_message == make_message("message", "bar", "test message 2")
+        await p.close()
 
     @pytest.mark.onlynoncluster
     async def test_pattern_message_handler(self, r: redis.Redis):
@@ -370,6 +377,7 @@ class TestPubSubMessages:
         assert self.message == make_message(
             "pmessage", "foo", "test message", pattern="f*"
         )
+        await p.close()
 
     async def test_unicode_channel_message_handler(self, r: redis.Redis):
         p = r.pubsub(ignore_subscribe_messages=True)
@@ -380,6 +388,7 @@ class TestPubSubMessages:
         assert await r.publish(channel, "test message") == 1
         assert await wait_for_message(p) is None
         assert self.message == make_message("message", channel, "test message")
+        await p.close()
 
     @pytest.mark.onlynoncluster
     # see: https://redis-py-cluster.readthedocs.io/en/stable/pubsub.html
@@ -395,13 +404,14 @@ class TestPubSubMessages:
         assert self.message == make_message(
             "pmessage", channel, "test message", pattern=pattern
         )
+        await p.close()
 
-    async def test_get_message_without_subscribe(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_get_message_without_subscribe(self, r: redis.Redis, pubsub):
+        p = pubsub
         with pytest.raises(RuntimeError) as info:
             await p.get_message()
         expect = (
-            "connection not set: " "did you forget to call subscribe() or psubscribe()?"
+            "connection not set: did you forget to call subscribe() or psubscribe()?"
         )
         assert expect in info.exconly()
 
@@ -427,8 +437,8 @@ class TestPubSubAutoDecoding:
     async def r(self, create_redis):
         return await create_redis(decode_responses=True)
 
-    async def test_channel_subscribe_unsubscribe(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_channel_subscribe_unsubscribe(self, pubsub):
+        p = pubsub
         await p.subscribe(self.channel)
         assert await wait_for_message(p) == self.make_message(
             "subscribe", self.channel, 1
@@ -439,8 +449,8 @@ class TestPubSubAutoDecoding:
             "unsubscribe", self.channel, 0
         )
 
-    async def test_pattern_subscribe_unsubscribe(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_pattern_subscribe_unsubscribe(self, pubsub):
+        p = pubsub
         await p.psubscribe(self.pattern)
         assert await wait_for_message(p) == self.make_message(
             "psubscribe", self.pattern, 1
@@ -451,8 +461,8 @@ class TestPubSubAutoDecoding:
             "punsubscribe", self.pattern, 0
         )
 
-    async def test_channel_publish(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_channel_publish(self, r: redis.Redis, pubsub):
+        p = pubsub
         await p.subscribe(self.channel)
         assert await wait_for_message(p) == self.make_message(
             "subscribe", self.channel, 1
@@ -463,8 +473,8 @@ class TestPubSubAutoDecoding:
         )
 
     @pytest.mark.onlynoncluster
-    async def test_pattern_publish(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_pattern_publish(self, r: redis.Redis, pubsub):
+        p = pubsub
         await p.psubscribe(self.pattern)
         assert await wait_for_message(p) == self.make_message(
             "psubscribe", self.pattern, 1
@@ -490,6 +500,7 @@ class TestPubSubAutoDecoding:
         await r.publish(self.channel, new_data)
         assert await wait_for_message(p) is None
         assert self.message == self.make_message("message", self.channel, new_data)
+        await p.close()
 
     async def test_pattern_message_handler(self, r: redis.Redis):
         p = r.pubsub(ignore_subscribe_messages=True)
@@ -511,6 +522,7 @@ class TestPubSubAutoDecoding:
         assert self.message == self.make_message(
             "pmessage", self.channel, new_data, pattern=self.pattern
         )
+        await p.close()
 
     async def test_context_manager(self, r: redis.Redis):
         async with r.pubsub() as pubsub:
@@ -520,6 +532,7 @@ class TestPubSubAutoDecoding:
         assert pubsub.connection is None
         assert pubsub.channels == {}
         assert pubsub.patterns == {}
+        await pubsub.close()
 
 
 @pytest.mark.onlynoncluster
@@ -535,8 +548,8 @@ class TestPubSubRedisDown:
 class TestPubSubSubcommands:
     @pytest.mark.onlynoncluster
     @skip_if_server_version_lt("2.8.0")
-    async def test_pubsub_channels(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_pubsub_channels(self, r: redis.Redis, pubsub):
+        p = pubsub
         await p.subscribe("foo", "bar", "baz", "quux")
         for i in range(4):
             assert (await wait_for_message(p))["type"] == "subscribe"
@@ -560,6 +573,9 @@ class TestPubSubSubcommands:
 
         channels = [(b"foo", 1), (b"bar", 2), (b"baz", 3)]
         assert await r.pubsub_numsub("foo", "bar", "baz") == channels
+        await p1.close()
+        await p2.close()
+        await p3.close()
 
     @skip_if_server_version_lt("2.8.0")
     async def test_pubsub_numpat(self, r: redis.Redis):
@@ -568,6 +584,7 @@ class TestPubSubSubcommands:
         for i in range(3):
             assert (await wait_for_message(p))["type"] == "psubscribe"
         assert await r.pubsub_numpat() == 3
+        await p.close()
 
 
 @pytest.mark.onlynoncluster
@@ -580,6 +597,7 @@ class TestPubSubPings:
         assert await wait_for_message(p) == make_message(
             type="pong", channel=None, data="", pattern=None
         )
+        await p.close()
 
     @skip_if_server_version_lt("3.0.0")
     async def test_send_pubsub_ping_message(self, r: redis.Redis):
@@ -589,13 +607,16 @@ class TestPubSubPings:
         assert await wait_for_message(p) == make_message(
             type="pong", channel=None, data="hello world", pattern=None
         )
+        await p.close()
 
 
 @pytest.mark.onlynoncluster
 class TestPubSubConnectionKilled:
     @skip_if_server_version_lt("3.0.0")
-    async def test_connection_error_raised_when_connection_dies(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_connection_error_raised_when_connection_dies(
+        self, r: redis.Redis, pubsub
+    ):
+        p = pubsub
         await p.subscribe("foo")
         assert await wait_for_message(p) == make_message("subscribe", "foo", 1)
         for client in await r.client_list():
@@ -607,8 +628,8 @@ class TestPubSubConnectionKilled:
 
 @pytest.mark.onlynoncluster
 class TestPubSubTimeouts:
-    async def test_get_message_with_timeout_returns_none(self, r: redis.Redis):
-        p = r.pubsub()
+    async def test_get_message_with_timeout_returns_none(self, pubsub):
+        p = pubsub
         await p.subscribe("foo")
         assert await wait_for_message(p) == make_message("subscribe", "foo", 1)
         assert await p.get_message(timeout=0.01) is None
@@ -616,15 +637,13 @@ class TestPubSubTimeouts:
 
 @pytest.mark.onlynoncluster
 class TestPubSubReconnect:
-    # @pytest.mark.xfail
     @with_timeout(2)
-    async def test_reconnect_listen(self, r: redis.Redis):
+    async def test_reconnect_listen(self, r: redis.Redis, pubsub):
         """
         Test that a loop processing PubSub messages can survive
         a disconnect, by issuing a connect() call.
         """
         messages = asyncio.Queue()
-        pubsub = r.pubsub()
         interrupt = False
 
         async def loop():
@@ -656,7 +675,7 @@ class TestPubSubReconnect:
                 await messages.put(message)
                 break
 
-        task = asyncio.get_event_loop().create_task(loop())
+        task = asyncio.get_running_loop().create_task(loop())
         # get the initial connect message
         async with async_timeout.timeout(1):
             message = await messages.get()
@@ -698,14 +717,14 @@ class TestPubSubRun:
             ):
                 return
 
-    async def test_callbacks(self, r: redis.Redis):
+    async def test_callbacks(self, r: redis.Redis, pubsub):
         def callback(message):
             messages.put_nowait(message)
 
         messages = asyncio.Queue()
-        p = r.pubsub()
+        p = pubsub
         await self._subscribe(p, foo=callback)
-        task = asyncio.get_event_loop().create_task(p.run())
+        task = asyncio.get_running_loop().create_task(p.run())
         await r.publish("foo", "bar")
         message = await messages.get()
         task.cancel()
@@ -720,16 +739,16 @@ class TestPubSubRun:
             "type": "message",
         }
 
-    async def test_exception_handler(self, r: redis.Redis):
+    async def test_exception_handler(self, r: redis.Redis, pubsub):
         def exception_handler_callback(e, pubsub) -> None:
             assert pubsub == p
             exceptions.put_nowait(e)
 
         exceptions = asyncio.Queue()
-        p = r.pubsub()
+        p = pubsub
         await self._subscribe(p, foo=lambda x: None)
         with mock.patch.object(p, "get_message", side_effect=Exception("error")):
-            task = asyncio.get_event_loop().create_task(
+            task = asyncio.get_running_loop().create_task(
                 p.run(exception_handler=exception_handler_callback)
             )
             e = await exceptions.get()
@@ -740,26 +759,25 @@ class TestPubSubRun:
                 pass
         assert str(e) == "error"
 
-    async def test_late_subscribe(self, r: redis.Redis):
+    async def test_late_subscribe(self, r: redis.Redis, pubsub):
         def callback(message):
             messages.put_nowait(message)
 
         messages = asyncio.Queue()
-        p = r.pubsub()
-        task = asyncio.get_event_loop().create_task(p.run())
+        p = pubsub
+        task = asyncio.get_running_loop().create_task(p.run())
         # wait until loop gets settled.  Add a subscription
         await asyncio.sleep(0.1)
         await p.subscribe(foo=callback)
         # wait for the subscribe to finish.  Cannot use _subscribe() because
         # p.run() is already accepting messages
-        await asyncio.sleep(0.1)
-        await r.publish("foo", "bar")
-        message = None
-        try:
-            async with async_timeout.timeout(0.1):
-                message = await messages.get()
-        except asyncio.TimeoutError:
-            pass
+        while True:
+            n = await r.publish("foo", "bar")
+            if n == 1:
+                break
+            await asyncio.sleep(0.1)
+        async with async_timeout.timeout(0.1):
+            message = await messages.get()
         task.cancel()
         # we expect a cancelled error, not the Runtime error
         # ("did you forget to call subscribe()"")
@@ -771,3 +789,215 @@ class TestPubSubRun:
             "pattern": None,
             "type": "message",
         }
+
+
+# @pytest.mark.xfail
+@pytest.mark.parametrize("method", ["get_message", "listen"])
+@pytest.mark.onlynoncluster
+class TestPubSubAutoReconnect:
+    timeout = 2
+
+    async def mysetup(self, r, method):
+        self.messages = asyncio.Queue()
+        self.pubsub = r.pubsub()
+        # State: 0 = initial, 1 = after disconnect, 2 = ConnectionError seen,
+        # 3 = successfully reconnected, 4 = exit
+        self.state = 0
+        self.cond = asyncio.Condition()
+        if method == "get_message":
+            self.get_message = self.loop_step_get_message
+        else:
+            self.get_message = self.loop_step_listen
+
+        self.task = create_task(self.loop())
+        # get the initial connect message
+        message = await self.messages.get()
+        assert message == {
+            "channel": b"foo",
+            "data": 1,
+            "pattern": None,
+            "type": "subscribe",
+        }
+
+    async def myfinish(self):
+        message = await self.messages.get()
+        assert message == {
+            "channel": b"foo",
+            "data": 1,
+            "pattern": None,
+            "type": "subscribe",
+        }
+
+    async def mykill(self):
+        # stop the reader task
+        async with self.cond:
+            self.state = 4  # quit
+        await self.task
+
+    async def test_reconnect_socket_error(self, r: redis.Redis, method):
+        """
+        Test that a socket error will cause reconnect
+        """
+        try:
+            async with async_timeout.timeout(self.timeout):
+                await self.mysetup(r, method)
+                # now, disconnect the connection, and wait for it to be re-established
+                async with self.cond:
+                    assert self.state == 0
+                    self.state = 1
+                    with mock.patch.object(self.pubsub.connection, "_parser") as m:
+                        m.read_response.side_effect = socket.error
+                        m.can_read_destructive.side_effect = socket.error
+                        # wait until the task notices the disconnect before we
+                        # undo the patch
+                        await self.cond.wait_for(lambda: self.state >= 2)
+                        assert not self.pubsub.connection.is_connected
+                        # it is in a disconnected state
+                    # wait for reconnect
+                    await self.cond.wait_for(
+                        lambda: self.pubsub.connection.is_connected
+                    )
+                    assert self.state == 3
+
+                await self.myfinish()
+        finally:
+            await self.mykill()
+
+    async def test_reconnect_disconnect(self, r: redis.Redis, method):
+        """
+        Test that a manual disconnect() will cause reconnect
+        """
+        try:
+            async with async_timeout.timeout(self.timeout):
+                await self.mysetup(r, method)
+                # now, disconnect the connection, and wait for it to be re-established
+                async with self.cond:
+                    self.state = 1
+                    await self.pubsub.connection.disconnect()
+                    assert not self.pubsub.connection.is_connected
+                    # wait for reconnect
+                    await self.cond.wait_for(
+                        lambda: self.pubsub.connection.is_connected
+                    )
+                    assert self.state == 3
+
+                await self.myfinish()
+        finally:
+            await self.mykill()
+
+    async def loop(self):
+        # reader loop, performing state transitions as it
+        # discovers disconnects and reconnects
+        await self.pubsub.subscribe("foo")
+        while True:
+            await asyncio.sleep(0.01)  # give the main coroutine a chance to grab the lock
+            async with self.cond:
+                old_state = self.state
+                try:
+                    if self.state == 4:
+                        break
+                    # print("state a ", self.state)
+                    got_msg = await self.get_message()
+                    assert got_msg
+                    if self.state in (1, 2):
+                        self.state = 3  # successful reconnect
+                except redis.ConnectionError:
+                    assert self.state in (1, 2)
+                    self.state = 2  # signal that we noticed the disconnect
+                finally:
+                    self.cond.notify()
+                # make sure that we did notice the connection error
+                # or reconnected without any error
+                if old_state == 1:
+                    assert self.state in (2, 3)
+
+    async def loop_step_get_message(self):
+        # get a single message via get_message
+        message = await self.pubsub.get_message(timeout=0.1)
+        # print(message)
+        if message is not None:
+            await self.messages.put(message)
+            return True
+        return False
+
+    async def loop_step_listen(self):
+        # get a single message via listen()
+        try:
+            async with async_timeout.timeout(0.1):
+                async for message in self.pubsub.listen():
+                    await self.messages.put(message)
+                    return True
+        except asyncio.TimeoutError:
+            return False
+
+
+@pytest.mark.onlynoncluster
+class TestBaseException:
+    @pytest.mark.skipif(
+        sys.version_info < (3, 8), reason="requires python 3.8 or higher"
+    )
+    async def test_outer_timeout(self, r: redis.Redis):
+        """
+        Using async_timeout manually outside the inner method's timeouts works.
+        This works on Python 3.8 and greater, where asyncio.CancelledError
+        became a BaseException instead of an Exception.
+        """
+        pubsub = r.pubsub()
+        await pubsub.subscribe("foo")
+        assert pubsub.connection.is_connected
+
+        async def get_msg_or_timeout(timeout=0.1):
+            async with async_timeout.timeout(timeout):
+                # blocking method to return messages
+                while True:
+                    response = await pubsub.parse_response(block=True)
+                    message = await pubsub.handle_message(
+                        response, ignore_subscribe_messages=False
+                    )
+                    if message is not None:
+                        return message
+
+        # get subscribe message
+        msg = await get_msg_or_timeout(10)
+        assert msg is not None
+        # timeout waiting for another message which never arrives
+        assert pubsub.connection.is_connected
+        with pytest.raises(asyncio.TimeoutError):
+            await get_msg_or_timeout()
+        # the timeout on the read should not cause disconnect
+        assert pubsub.connection.is_connected
+
+    async def test_base_exception(self, r: redis.Redis):
+        """
+        Manually trigger a BaseException inside the parser's .read_response method
+        and verify that it isn't caught
+        """
+        pubsub = r.pubsub()
+        await pubsub.subscribe("foo")
+        assert pubsub.connection.is_connected
+
+        async def get_msg():
+            # blocking method to return messages
+            while True:
+                response = await pubsub.parse_response(block=True)
+                message = await pubsub.handle_message(
+                    response, ignore_subscribe_messages=False
+                )
+                if message is not None:
+                    return message
+
+        # get subscribe message
+        msg = await get_msg()
+        assert msg is not None
+        # the connection should still be up before we inject the error
+        assert pubsub.connection.is_connected
+        with patch("redis.asyncio.connection.PythonParser.read_response") as mock1:
+            mock1.side_effect = BaseException("boom")
+            with patch("redis.asyncio.connection.HiredisParser.read_response") as mock2:
+                mock2.side_effect = BaseException("boom")
+
+                with pytest.raises(BaseException):
+                    await get_msg()
+
+        # the BaseException raised during the read should not cause a disconnect
+        assert pubsub.connection.is_connected
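
Most of the pub/sub changes above route tests through a shared `pubsub` fixture and add explicit close() calls. A minimal sketch of the underlying pattern, assuming the redis-py asyncio API; the channel and function names are illustrative:

# Illustrative sketch, not part of the diff: always close an asyncio PubSub
# so its connection is released, mirroring the fixture/close() pattern.
import redis.asyncio as redis

async def read_one_message(r: redis.Redis):
    p = r.pubsub()
    try:
        await p.subscribe("foo")
        # skip the subscribe confirmation, wait briefly for a real message
        return await p.get_message(ignore_subscribe_messages=True, timeout=1.0)
    finally:
        await p.close()
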
diff --git a/tests/test_asyncio/test_retry.py b/tests/test_asyncio/test_retry.py
index 38e353b..86e6ddf 100644
--- a/tests/test_asyncio/test_retry.py
+++ b/tests/test_asyncio/test_retry.py
@@ -1,8 +1,9 @@
 import pytest
 
+from redis.asyncio import Redis
 from redis.asyncio.connection import Connection, UnixDomainSocketConnection
 from redis.asyncio.retry import Retry
-from redis.backoff import AbstractBackoff, NoBackoff
+from redis.backoff import AbstractBackoff, ExponentialBackoff, NoBackoff
 from redis.exceptions import ConnectionError, TimeoutError
 
 
@@ -114,3 +115,22 @@ class TestRetry:
 
         assert self.actual_attempts == 5
         assert self.actual_failures == 5
+
+
+class TestRedisClientRetry:
+    "Test the Redis client behavior with retries"
+
+    async def test_get_set_retry_object(self, request):
+        retry = Retry(NoBackoff(), 2)
+        url = request.config.getoption("--redis-url")
+        r = await Redis.from_url(url, retry_on_timeout=True, retry=retry)
+        assert r.get_retry()._retries == retry._retries
+        assert isinstance(r.get_retry()._backoff, NoBackoff)
+        new_retry_policy = Retry(ExponentialBackoff(), 3)
+        exiting_conn = await r.connection_pool.get_connection("_")
+        r.set_retry(new_retry_policy)
+        assert r.get_retry()._retries == new_retry_policy._retries
+        assert isinstance(r.get_retry()._backoff, ExponentialBackoff)
+        assert exiting_conn.retry._retries == new_retry_policy._retries
+        new_conn = await r.connection_pool.get_connection("_")
+        assert new_conn.retry._retries == new_retry_policy._retries
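
TestRedisClientRetry above checks that a retry policy set on the client propagates to existing and new connections. A minimal sketch of configuring such a policy, assuming the redis-py 4.5 asyncio API; host and port are placeholders:

# Illustrative sketch, not part of the diff.
from redis.asyncio import Redis
from redis.asyncio.retry import Retry
from redis.backoff import ExponentialBackoff

r = Redis(
    host="localhost",
    port=6379,
    retry=Retry(ExponentialBackoff(), 3),  # up to 3 attempts with exponential backoff
    retry_on_timeout=True,
)
# The policy can be swapped at runtime; pooled connections pick it up, as the test verifies.
# r.set_retry(Retry(ExponentialBackoff(), 5))
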
diff --git a/tests/test_asyncio/test_scripting.py b/tests/test_asyncio/test_scripting.py
index 764525f..3776d12 100644
--- a/tests/test_asyncio/test_scripting.py
+++ b/tests/test_asyncio/test_scripting.py
@@ -1,11 +1,5 @@
-import sys
-
 import pytest
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
+import pytest_asyncio
 
 from redis import exceptions
 from tests.conftest import skip_if_server_version_lt
diff --git a/tests/test_asyncio/test_search.py b/tests/test_asyncio/test_search.py
index 5aaa56f..8707cdf 100644
--- a/tests/test_asyncio/test_search.py
+++ b/tests/test_asyncio/test_search.py
@@ -16,10 +16,7 @@ from redis.commands.search.indexDefinition import IndexDefinition
 from redis.commands.search.query import GeoFilter, NumericFilter, Query
 from redis.commands.search.result import Result
 from redis.commands.search.suggestion import Suggestion
-from tests.conftest import skip_ifmodversion_lt
-
-pytestmark = pytest.mark.asyncio
-
+from tests.conftest import skip_if_redis_enterprise, skip_ifmodversion_lt
 
 WILL_PLAY_TEXT = os.path.abspath(
     os.path.join(os.path.dirname(__file__), "testdata", "will_play_text.csv.bz2")
@@ -88,7 +85,7 @@ async def createIndex(modclient, num_docs=100, definition=None):
     assert 50 == indexer.chunk_size
 
     for key, doc in chapters.items():
-        await indexer.add_document(key, **doc)
+        await indexer.client.client.hset(key, mapping=doc)
     await indexer.commit()
 
 
@@ -192,7 +189,7 @@ async def test_client(modclient: redis.Redis):
     assert 167 == (await modclient.ft().search(Query("henry king").slop(100))).total
 
     # test delete document
-    await modclient.ft().add_document("doc-5ghs2", play="Death of a Salesman")
+    await modclient.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
     res = await modclient.ft().search(Query("death of a salesman"))
     assert 1 == res.total
 
@@ -201,36 +198,19 @@ async def test_client(modclient: redis.Redis):
     assert 0 == res.total
     assert 0 == await modclient.ft().delete_document("doc-5ghs2")
 
-    await modclient.ft().add_document("doc-5ghs2", play="Death of a Salesman")
+    await modclient.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
     res = await modclient.ft().search(Query("death of a salesman"))
     assert 1 == res.total
     await modclient.ft().delete_document("doc-5ghs2")
 
 
-@pytest.mark.redismod
-@skip_ifmodversion_lt("2.2.0", "search")
-async def test_payloads(modclient: redis.Redis):
-    await modclient.ft().create_index((TextField("txt"),))
-
-    await modclient.ft().add_document("doc1", payload="foo baz", txt="foo bar")
-    await modclient.ft().add_document("doc2", txt="foo bar")
-
-    q = Query("foo bar").with_payloads()
-    res = await modclient.ft().search(q)
-    assert 2 == res.total
-    assert "doc1" == res.docs[0].id
-    assert "doc2" == res.docs[1].id
-    assert "foo baz" == res.docs[0].payload
-    assert res.docs[1].payload is None
-
-
 @pytest.mark.redismod
 @pytest.mark.onlynoncluster
 async def test_scores(modclient: redis.Redis):
     await modclient.ft().create_index((TextField("txt"),))
 
-    await modclient.ft().add_document("doc1", txt="foo baz")
-    await modclient.ft().add_document("doc2", txt="foo bar")
+    await modclient.hset("doc1", mapping={"txt": "foo baz"})
+    await modclient.hset("doc2", mapping={"txt": "foo bar"})
 
     q = Query("foo ~bar").with_scores()
     res = await modclient.ft().search(q)
@@ -242,35 +222,12 @@ async def test_scores(modclient: redis.Redis):
     # self.assertEqual(0.2, res.docs[1].score)
 
 
-@pytest.mark.redismod
-async def test_replace(modclient: redis.Redis):
-    await modclient.ft().create_index((TextField("txt"),))
-
-    await modclient.ft().add_document("doc1", txt="foo bar")
-    await modclient.ft().add_document("doc2", txt="foo bar")
-    await waitForIndex(modclient, "idx")
-
-    res = await modclient.ft().search("foo bar")
-    assert 2 == res.total
-    await (
-        modclient.ft().add_document("doc1", replace=True, txt="this is a replaced doc")
-    )
-
-    res = await modclient.ft().search("foo bar")
-    assert 1 == res.total
-    assert "doc2" == res.docs[0].id
-
-    res = await modclient.ft().search("replaced doc")
-    assert 1 == res.total
-    assert "doc1" == res.docs[0].id
-
-
 @pytest.mark.redismod
 async def test_stopwords(modclient: redis.Redis):
     stopwords = ["foo", "bar", "baz"]
     await modclient.ft().create_index((TextField("txt"),), stopwords=stopwords)
-    await modclient.ft().add_document("doc1", txt="foo bar")
-    await modclient.ft().add_document("doc2", txt="hello world")
+    await modclient.hset("doc1", mapping={"txt": "foo bar"})
+    await modclient.hset("doc2", mapping={"txt": "hello world"})
     await waitForIndex(modclient, "idx")
 
     q1 = Query("foo bar").no_content()
@@ -288,11 +245,13 @@ async def test_filters(modclient: redis.Redis):
         )
     )
     await (
-        modclient.ft().add_document(
-            "doc1", txt="foo bar", num=3.141, loc="-0.441,51.458"
+        modclient.hset(
+            "doc1", mapping={"txt": "foo bar", "num": 3.141, "loc": "-0.441,51.458"}
         )
     )
-    await modclient.ft().add_document("doc2", txt="foo baz", num=2, loc="-0.1,51.2")
+    await (
+        modclient.hset("doc2", mapping={"txt": "foo baz", "num": 2, "loc": "-0.1,51.2"})
+    )
 
     await waitForIndex(modclient, "idx")
     # Test numerical filter
@@ -324,17 +283,6 @@ async def test_filters(modclient: redis.Redis):
     assert ["doc1", "doc2"] == res
 
 
-@pytest.mark.redismod
-async def test_payloads_with_no_content(modclient: redis.Redis):
-    await modclient.ft().create_index((TextField("txt"),))
-    await modclient.ft().add_document("doc1", payload="foo baz", txt="foo bar")
-    await modclient.ft().add_document("doc2", payload="foo baz2", txt="foo bar")
-
-    q = Query("foo bar").with_payloads().no_content()
-    res = await modclient.ft().search(q)
-    assert 2 == len(res.docs)
-
-
 @pytest.mark.redismod
 async def test_sort_by(modclient: redis.Redis):
     await (
@@ -342,9 +290,9 @@ async def test_sort_by(modclient: redis.Redis):
             (TextField("txt"), NumericField("num", sortable=True))
         )
     )
-    await modclient.ft().add_document("doc1", txt="foo bar", num=1)
-    await modclient.ft().add_document("doc2", txt="foo baz", num=2)
-    await modclient.ft().add_document("doc3", txt="foo qux", num=3)
+    await modclient.hset("doc1", mapping={"txt": "foo bar", "num": 1})
+    await modclient.hset("doc2", mapping={"txt": "foo baz", "num": 2})
+    await modclient.hset("doc3", mapping={"txt": "foo qux", "num": 3})
 
     # Test sort
     q1 = Query("foo").sort_by("num", asc=True).no_content()
@@ -388,10 +336,12 @@ async def test_example(modclient: redis.Redis):
     )
 
     # Indexing a document
-    await modclient.ft().add_document(
+    await modclient.hset(
         "doc1",
-        title="RediSearch",
-        body="Redisearch impements a search engine on top of redis",
+        mapping={
+            "title": "RediSearch",
+            "body": "Redisearch impements a search engine on top of redis",
+        },
     )
 
     # Searching with complex parameters:
@@ -464,11 +414,13 @@ async def test_no_index(modclient: redis.Redis):
         )
     )
 
-    await modclient.ft().add_document(
-        "doc1", field="aaa", text="1", numeric="1", geo="1,1", tag="1"
+    await modclient.hset(
+        "doc1",
+        mapping={"field": "aaa", "text": "1", "numeric": "1", "geo": "1,1", "tag": "1"},
     )
-    await modclient.ft().add_document(
-        "doc2", field="aab", text="2", numeric="2", geo="2,2", tag="2"
+    await modclient.hset(
+        "doc2",
+        mapping={"field": "aab", "text": "2", "numeric": "2", "geo": "2,2", "tag": "2"},
     )
     await waitForIndex(modclient, "idx")
 
@@ -505,53 +457,6 @@ async def test_no_index(modclient: redis.Redis):
         TagField("name", no_index=True, sortable=False)
 
 
-@pytest.mark.redismod
-async def test_partial(modclient: redis.Redis):
-    await (
-        modclient.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
-    )
-    await modclient.ft().add_document("doc1", f1="f1_val", f2="f2_val")
-    await modclient.ft().add_document("doc2", f1="f1_val", f2="f2_val")
-    await modclient.ft().add_document("doc1", f3="f3_val", partial=True)
-    await modclient.ft().add_document("doc2", f3="f3_val", replace=True)
-    await waitForIndex(modclient, "idx")
-
-    # Search for f3 value. All documents should have it
-    res = await modclient.ft().search("@f3:f3_val")
-    assert 2 == res.total
-
-    # Only the document updated with PARTIAL should still have f1 and f2 values
-    res = await modclient.ft().search("@f3:f3_val @f2:f2_val @f1:f1_val")
-    assert 1 == res.total
-
-
-@pytest.mark.redismod
-async def test_no_create(modclient: redis.Redis):
-    await (
-        modclient.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
-    )
-    await modclient.ft().add_document("doc1", f1="f1_val", f2="f2_val")
-    await modclient.ft().add_document("doc2", f1="f1_val", f2="f2_val")
-    await modclient.ft().add_document("doc1", f3="f3_val", no_create=True)
-    await modclient.ft().add_document("doc2", f3="f3_val", no_create=True, partial=True)
-    await waitForIndex(modclient, "idx")
-
-    # Search for f3 value. All documents should have it
-    res = await modclient.ft().search("@f3:f3_val")
-    assert 2 == res.total
-
-    # Only the document updated with PARTIAL should still have f1 and f2 values
-    res = await modclient.ft().search("@f3:f3_val @f2:f2_val @f1:f1_val")
-    assert 1 == res.total
-
-    with pytest.raises(redis.ResponseError):
-        await (
-            modclient.ft().add_document(
-                "doc3", f2="f2_val", f3="f3_val", no_create=True
-            )
-        )
-
-
 @pytest.mark.redismod
 async def test_explain(modclient: redis.Redis):
     await (
@@ -643,11 +548,11 @@ async def test_alias_basic(modclient: redis.Redis):
     index1 = getClient(modclient).ft("testAlias")
 
     await index1.create_index((TextField("txt"),))
-    await index1.add_document("doc1", txt="text goes here")
+    await index1.client.hset("doc1", mapping={"txt": "text goes here"})
 
     index2 = getClient(modclient).ft("testAlias2")
     await index2.create_index((TextField("txt"),))
-    await index2.add_document("doc2", txt="text goes here")
+    await index2.client.hset("doc2", mapping={"txt": "text goes here"})
 
     # add the actual alias and check
     await index1.aliasadd("myalias")
@@ -677,8 +582,8 @@ async def test_tags(modclient: redis.Redis):
     tags = "foo,foo bar,hello;world"
     tags2 = "soba,ramen"
 
-    await modclient.ft().add_document("doc1", txt="fooz barz", tags=tags)
-    await modclient.ft().add_document("doc2", txt="noodles", tags=tags2)
+    await modclient.hset("doc1", mapping={"txt": "fooz barz", "tags": tags})
+    await modclient.hset("doc2", mapping={"txt": "noodles", "tags": tags2})
     await waitForIndex(modclient, "idx")
 
     q = Query("@tags:{foo}")
@@ -721,8 +626,8 @@ async def test_alter_schema_add(modclient: redis.Redis):
     await modclient.ft().alter_schema_add(TextField("body"))
 
     # Indexing a document
-    await modclient.ft().add_document(
-        "doc1", title="MyTitle", body="Some content only in the body"
+    await modclient.hset(
+        "doc1", mapping={"title": "MyTitle", "body": "Some content only in the body"}
     )
 
     # Searching with parameter only in the body (the added field)
@@ -738,11 +643,11 @@ async def test_spell_check(modclient: redis.Redis):
     await modclient.ft().create_index((TextField("f1"), TextField("f2")))
 
     await (
-        modclient.ft().add_document(
-            "doc1", f1="some valid content", f2="this is sample text"
+        modclient.hset(
+            "doc1", mapping={"f1": "some valid content", "f2": "this is sample text"}
         )
     )
-    await modclient.ft().add_document("doc2", f1="very important", f2="lorem ipsum")
+    await modclient.hset("doc2", mapping={"f1": "very important", "f2": "lorem ipsum"})
     await waitForIndex(modclient, "idx")
 
     # test spellcheck
@@ -796,8 +701,8 @@ async def test_dict_operations(modclient: redis.Redis):
 @pytest.mark.redismod
 async def test_phonetic_matcher(modclient: redis.Redis):
     await modclient.ft().create_index((TextField("name"),))
-    await modclient.ft().add_document("doc1", name="Jon")
-    await modclient.ft().add_document("doc2", name="John")
+    await modclient.hset("doc1", mapping={"name": "Jon"})
+    await modclient.hset("doc2", mapping={"name": "John"})
 
     res = await modclient.ft().search(Query("Jon"))
     assert 1 == len(res.docs)
@@ -807,8 +712,8 @@ async def test_phonetic_matcher(modclient: redis.Redis):
     await modclient.flushdb()
 
     await modclient.ft().create_index((TextField("name", phonetic_matcher="dm:en"),))
-    await modclient.ft().add_document("doc1", name="Jon")
-    await modclient.ft().add_document("doc2", name="John")
+    await modclient.hset("doc1", mapping={"name": "Jon"})
+    await modclient.hset("doc2", mapping={"name": "John"})
 
     res = await modclient.ft().search(Query("Jon"))
     assert 2 == len(res.docs)
@@ -820,12 +725,14 @@ async def test_phonetic_matcher(modclient: redis.Redis):
 async def test_scorer(modclient: redis.Redis):
     await modclient.ft().create_index((TextField("description"),))
 
-    await modclient.ft().add_document(
-        "doc1", description="The quick brown fox jumps over the lazy dog"
+    await modclient.hset(
+        "doc1", mapping={"description": "The quick brown fox jumps over the lazy dog"}
     )
-    await modclient.ft().add_document(
+    await modclient.hset(
         "doc2",
-        description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.",  # noqa
+        mapping={
+            "description": "Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do."  # noqa
+        },
     )
 
     # default scorer is TFIDF
@@ -854,19 +761,19 @@ async def test_get(modclient: redis.Redis):
     assert [None] == await modclient.ft().get("doc1")
     assert [None, None] == await modclient.ft().get("doc2", "doc1")
 
-    await modclient.ft().add_document(
-        "doc1", f1="some valid content dd1", f2="this is sample text ff1"
+    await modclient.hset(
+        "doc1", mapping={"f1": "some valid content dd1", "f2": "this is sample text f1"}
     )
-    await modclient.ft().add_document(
-        "doc2", f1="some valid content dd2", f2="this is sample text ff2"
+    await modclient.hset(
+        "doc2", mapping={"f1": "some valid content dd2", "f2": "this is sample text f2"}
     )
 
     assert [
-        ["f1", "some valid content dd2", "f2", "this is sample text ff2"]
+        ["f1", "some valid content dd2", "f2", "this is sample text f2"]
     ] == await modclient.ft().get("doc2")
     assert [
-        ["f1", "some valid content dd1", "f2", "this is sample text ff1"],
-        ["f1", "some valid content dd2", "f2", "this is sample text ff2"],
+        ["f1", "some valid content dd1", "f2", "this is sample text f1"],
+        ["f1", "some valid content dd2", "f2", "this is sample text f2"],
     ] == await modclient.ft().get("doc1", "doc2")
 
 
@@ -897,122 +804,155 @@ async def test_aggregations_groupby(modclient: redis.Redis):
     )
 
     # Indexing a document
-    await modclient.ft().add_document(
+    await modclient.hset(
         "search",
-        title="RediSearch",
-        body="Redisearch impements a search engine on top of redis",
-        parent="redis",
-        random_num=10,
+        mapping={
+            "title": "RediSearch",
+            "body": "Redisearch impements a search engine on top of redis",
+            "parent": "redis",
+            "random_num": 10,
+        },
     )
-    await modclient.ft().add_document(
+    await modclient.hset(
         "ai",
-        title="RedisAI",
-        body="RedisAI executes Deep Learning/Machine Learning models and managing their data.",  # noqa
-        parent="redis",
-        random_num=3,
+        mapping={
+            "title": "RedisAI",
+            "body": "RedisAI executes Deep Learning/Machine Learning models and managing their data.",  # noqa
+            "parent": "redis",
+            "random_num": 3,
+        },
     )
-    await modclient.ft().add_document(
+    await modclient.hset(
         "json",
-        title="RedisJson",
-        body="RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.",  # noqa
-        parent="redis",
-        random_num=8,
+        mapping={
+            "title": "RedisJson",
+            "body": "RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.",  # noqa
+            "parent": "redis",
+            "random_num": 8,
+        },
     )
 
-    req = aggregations.AggregateRequest("redis").group_by("@parent", reducers.count())
+    for dialect in [1, 2]:
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.count())
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "3"
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "3"
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.count_distinct("@title")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.count_distinct("@title"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "3"
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "3"
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.count_distinctish("@title")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.count_distinctish("@title"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "3"
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "3"
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.sum("@random_num")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.sum("@random_num"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "21"  # 10+8+3
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "21"  # 10+8+3
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.min("@random_num")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.min("@random_num"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "3"  # min(10,8,3)
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "3"  # min(10,8,3)
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.max("@random_num")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.max("@random_num"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "10"  # max(10,8,3)
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "10"  # max(10,8,3)
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.avg("@random_num")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.avg("@random_num"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "7"  # (10+3+8)/3
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "7"  # (10+3+8)/3
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.stddev("random_num")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.stddev("random_num"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "3.60555127546"
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "3.60555127546"
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.quantile("@random_num", 0.5)
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.quantile("@random_num", 0.5))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == "8"  # median of 3,8,10
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[3] == "8"  # median of 3,8,10
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.tolist("@title")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.tolist("@title"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[3] == ["RediSearch", "RedisAI", "RedisJson"]
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert set(res[3]) == {"RediSearch", "RedisAI", "RedisJson"}
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.first_value("@title").alias("first")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.first_value("@title").alias("first"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res == ["parent", "redis", "first", "RediSearch"]
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res == ["parent", "redis", "first", "RediSearch"]
 
-    req = aggregations.AggregateRequest("redis").group_by(
-        "@parent", reducers.random_sample("@title", 2).alias("random")
-    )
+        req = (
+            aggregations.AggregateRequest("redis")
+            .group_by("@parent", reducers.random_sample("@title", 2).alias("random"))
+            .dialect(dialect)
+        )
 
-    res = (await modclient.ft().aggregate(req)).rows[0]
-    assert res[1] == "redis"
-    assert res[2] == "random"
-    assert len(res[3]) == 2
-    assert res[3][0] in ["RediSearch", "RedisAI", "RedisJson"]
+        res = (await modclient.ft().aggregate(req)).rows[0]
+        assert res[1] == "redis"
+        assert res[2] == "random"
+        assert len(res[3]) == 2
+        assert res[3][0] in ["RediSearch", "RedisAI", "RedisJson"]
 
 
 @pytest.mark.redismod
@@ -1046,3 +986,54 @@ async def test_aggregations_sort_by_and_limit(modclient: redis.Redis):
     res = await modclient.ft().aggregate(req)
     assert len(res.rows) == 1
     assert res.rows[0] == ["t1", "b"]
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+async def test_withsuffixtrie(modclient: redis.Redis):
+    # create index
+    assert await modclient.ft().create_index((TextField("txt"),))
+    await waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
+    info = await modclient.ft().info()
+    assert "WITHSUFFIXTRIE" not in info["attributes"][0]
+    assert await modclient.ft().dropindex("idx")
+
+    # create withsuffixtrie index (text field)
+    assert await modclient.ft().create_index((TextField("t", withsuffixtrie=True)))
+    await waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
+    info = await modclient.ft().info()
+    assert "WITHSUFFIXTRIE" in info["attributes"][0]
+    assert await modclient.ft().dropindex("idx")
+
+    # create withsuffixtrie index (tag field)
+    assert await modclient.ft().create_index((TagField("t", withsuffixtrie=True)))
+    await waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
+    info = await modclient.ft().info()
+    assert "WITHSUFFIXTRIE" in info["attributes"][0]
+
+
+@pytest.mark.redismod
+@skip_if_redis_enterprise()
+async def test_search_commands_in_pipeline(modclient: redis.Redis):
+    p = await modclient.ft().pipeline()
+    p.create_index((TextField("txt"),))
+    p.hset("doc1", mapping={"txt": "foo bar"})
+    p.hset("doc2", mapping={"txt": "foo bar"})
+    q = Query("foo bar").with_payloads()
+    await p.search(q)
+    res = await p.execute()
+    assert res[:3] == ["OK", True, True]
+    assert 2 == res[3][0]
+    assert "doc1" == res[3][1]
+    assert "doc2" == res[3][4]
+    assert res[3][5] is None
+    assert res[3][3] == res[3][6] == ["txt", "foo bar"]
+
+
+@pytest.mark.redismod
+async def test_query_timeout(modclient: redis.Redis):
+    q1 = Query("foo").timeout(5000)
+    assert q1.get_args() == ["foo", "TIMEOUT", 5000, "LIMIT", 0, 10]
+    q2 = Query("foo").timeout("not_a_number")
+    with pytest.raises(redis.ResponseError):
+        await modclient.ft().search(q2)
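
A minimal usage sketch (not part of the diff) of the DIALECT and per-query TIMEOUT options exercised by the tests above. The index name, schema, and sample document are illustrative and assume a server with the RediSearch module loaded:

    import redis
    import redis.commands.search.aggregation as aggregations
    import redis.commands.search.reducers as reducers
    from redis.commands.search.field import NumericField, TextField
    from redis.commands.search.query import Query

    r = redis.Redis(decode_responses=True)
    r.ft("idx").create_index((TextField("parent"), NumericField("random_num")))
    r.hset("doc1", mapping={"parent": "redis", "random_num": 3})

    # Pin an aggregation to a specific query dialect
    req = (
        aggregations.AggregateRequest("redis")
        .group_by("@parent", reducers.count())
        .dialect(2)
    )
    print(r.ft("idx").aggregate(req).rows)

    # Per-query timeout, in milliseconds
    print(r.ft("idx").search(Query("redis").timeout(5000)).total)
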
diff --git a/tests/test_asyncio/test_sentinel.py b/tests/test_asyncio/test_sentinel.py
index 4130e67..5a0533b 100644
--- a/tests/test_asyncio/test_sentinel.py
+++ b/tests/test_asyncio/test_sentinel.py
@@ -1,12 +1,7 @@
 import socket
-import sys
 
 import pytest
-
-if sys.version_info[0:2] == (3, 6):
-    import pytest as pytest_asyncio
-else:
-    import pytest_asyncio
+import pytest_asyncio
 
 import redis.asyncio.sentinel
 from redis import exceptions
@@ -17,8 +12,6 @@ from redis.asyncio.sentinel import (
     SlaveNotFoundError,
 )
 
-pytestmark = pytest.mark.asyncio
-
 
 @pytest_asyncio.fixture(scope="module")
 def master_ip(master_host):
diff --git a/tests/test_asyncio/test_sentinel_managed_connection.py b/tests/test_asyncio/test_sentinel_managed_connection.py
new file mode 100644
index 0000000..a6e9f37
--- /dev/null
+++ b/tests/test_asyncio/test_sentinel_managed_connection.py
@@ -0,0 +1,37 @@
+import socket
+
+import pytest
+
+from redis.asyncio.retry import Retry
+from redis.asyncio.sentinel import SentinelManagedConnection
+from redis.backoff import NoBackoff
+
+from .compat import mock
+
+pytestmark = pytest.mark.asyncio
+
+
+async def test_connect_retry_on_timeout_error():
+    """Test that the _connect function is retried in case of a timeout"""
+    connection_pool = mock.AsyncMock()
+    connection_pool.get_master_address = mock.AsyncMock(
+        return_value=("localhost", 6379)
+    )
+    conn = SentinelManagedConnection(
+        retry_on_timeout=True,
+        retry=Retry(NoBackoff(), 3),
+        connection_pool=connection_pool,
+    )
+    origin_connect = conn._connect
+    conn._connect = mock.AsyncMock()
+
+    async def mock_connect():
+        # connect only on the last retry
+        if conn._connect.call_count <= 2:
+            raise socket.timeout
+        else:
+            return await origin_connect()
+
+    conn._connect.side_effect = mock_connect
+    await conn.connect()
+    assert conn._connect.call_count == 3
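
For context, a minimal sketch of wiring a Retry policy into an asyncio Sentinel client, which is the behaviour the new test exercises at the connection level. The Sentinel address and the "mymaster" service name are placeholders:

    import asyncio

    from redis.asyncio.retry import Retry
    from redis.asyncio.sentinel import Sentinel
    from redis.backoff import ExponentialBackoff


    async def main():
        sentinel = Sentinel([("localhost", 26379)])
        # Retry timed-out connects and commands up to 3 times with exponential backoff
        master = sentinel.master_for(
            "mymaster",
            retry=Retry(ExponentialBackoff(), 3),
            retry_on_timeout=True,
        )
        print(await master.ping())


    asyncio.run(main())
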
diff --git a/tests/test_asyncio/test_timeseries.py b/tests/test_asyncio/test_timeseries.py
index ac2807f..a710993 100644
--- a/tests/test_asyncio/test_timeseries.py
+++ b/tests/test_asyncio/test_timeseries.py
@@ -6,8 +6,6 @@ import pytest
 import redis.asyncio as redis
 from tests.conftest import skip_ifmodversion_lt
 
-pytestmark = pytest.mark.asyncio
-
 
 @pytest.mark.redismod
 async def test_create(modclient: redis.Redis):
@@ -242,6 +240,9 @@ async def test_range_advanced(modclient: redis.Redis):
     assert [(0, 5.0), (5, 6.0)] == await modclient.ts().range(
         1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=5
     )
+    assert [(0, 2.55), (10, 3.0)] == await modclient.ts().range(
+        1, 0, 10, aggregation_type="twa", bucket_size_msec=10
+    )
 
 
 @pytest.mark.redismod
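
A minimal sketch of the new time-weighted average ("twa") aggregation asserted above. The key name and samples are illustrative and assume a server with the RedisTimeSeries module loaded:

    import redis

    r = redis.Redis(decode_responses=True)
    r.ts().create("sensor")
    r.ts().add("sensor", 0, 2.5)
    r.ts().add("sensor", 5, 3.0)
    r.ts().add("sensor", 10, 3.5)

    # Bucket the samples into 10 ms windows using the time-weighted average
    print(r.ts().range("sensor", 0, 10, aggregation_type="twa", bucket_size_msec=10))
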
diff --git a/tests/test_bloom.py b/tests/test_bloom.py
index 1f8201c..30d3219 100644
--- a/tests/test_bloom.py
+++ b/tests/test_bloom.py
@@ -1,9 +1,13 @@
+from math import inf
+
 import pytest
 
 import redis.commands.bf
 from redis.exceptions import ModuleError, RedisError
 from redis.utils import HIREDIS_AVAILABLE
 
+from .conftest import skip_ifmodversion_lt
+
 
 def intlist(obj):
     return [int(v) for v in obj]
@@ -36,6 +40,21 @@ def test_create(client):
     assert client.topk().reserve("topk", 5, 100, 5, 0.9)
 
 
+@pytest.mark.redismod
+def test_bf_reserve(client):
+    """Testing BF.RESERVE"""
+    assert client.bf().reserve("bloom", 0.01, 1000)
+    assert client.bf().reserve("bloom_e", 0.01, 1000, expansion=1)
+    assert client.bf().reserve("bloom_ns", 0.01, 1000, noScale=True)
+    assert client.cf().reserve("cuckoo", 1000)
+    assert client.cf().reserve("cuckoo_e", 1000, expansion=1)
+    assert client.cf().reserve("cuckoo_bs", 1000, bucket_size=4)
+    assert client.cf().reserve("cuckoo_mi", 1000, max_iterations=10)
+    assert client.cms().initbydim("cmsDim", 100, 5)
+    assert client.cms().initbyprob("cmsProb", 0.01, 0.01)
+    assert client.topk().reserve("topk", 5, 100, 5, 0.9)
+
+
 @pytest.mark.redismod
 @pytest.mark.experimental
 def test_tdigest_create(client):
@@ -146,6 +165,21 @@ def test_bf_info(client):
         assert True
 
 
+@pytest.mark.redismod
+def test_bf_card(client):
+    # return 0 if the key does not exist
+    assert client.bf().card("not_exist") == 0
+
+    # Store a filter
+    assert client.bf().add("bf1", "item_foo") == 1
+    assert client.bf().card("bf1") == 1
+
+    # Error when key is of a type other than Bloom filter.
+    with pytest.raises(redis.ResponseError):
+        client.set("setKey", "value")
+        client.bf().card("setKey")
+
+
 # region Test Cuckoo Filter
 @pytest.mark.redismod
 def test_cf_add_and_insert(client):
@@ -263,9 +297,10 @@ def test_topk(client):
     assert [1, 1, 0, 0, 1, 0, 0] == client.topk().query(
         "topk", "A", "B", "C", "D", "E", "F", "G"
     )
-    assert [4, 3, 2, 3, 3, 0, 1] == client.topk().count(
-        "topk", "A", "B", "C", "D", "E", "F", "G"
-    )
+    with pytest.deprecated_call():
+        assert [4, 3, 2, 3, 3, 0, 1] == client.topk().count(
+            "topk", "A", "B", "C", "D", "E", "F", "G"
+        )
 
     # test full list
     assert client.topk().reserve("topklist", 3, 50, 3, 0.9)
@@ -305,9 +340,10 @@ def test_topk_incrby(client):
         "topk", ["bar", "baz", "42"], [3, 6, 2]
     )
     assert [None, "bar"] == client.topk().incrby("topk", ["42", "xyzzy"], [8, 4])
-    assert [3, 6, 10, 4, 0] == client.topk().count(
-        "topk", "bar", "baz", "42", "xyzzy", 4
-    )
+    with pytest.deprecated_call():
+        assert [3, 6, 10, 4, 0] == client.topk().count(
+            "topk", "bar", "baz", "42", "xyzzy", 4
+        )
 
 
 # region Test T-Digest
@@ -318,11 +354,11 @@ def test_tdigest_reset(client):
     # reset on empty histogram
     assert client.tdigest().reset("tDigest")
     # insert data-points into sketch
-    assert client.tdigest().add("tDigest", list(range(10)), [1.0] * 10)
+    assert client.tdigest().add("tDigest", list(range(10)))
 
     assert client.tdigest().reset("tDigest")
     # assert we have 0 unmerged nodes
-    assert 0 == client.tdigest().info("tDigest").unmergedNodes
+    assert 0 == client.tdigest().info("tDigest").unmerged_nodes
 
 
 @pytest.mark.redismod
@@ -331,14 +367,24 @@ def test_tdigest_merge(client):
     assert client.tdigest().create("to-tDigest", 10)
     assert client.tdigest().create("from-tDigest", 10)
     # insert data-points into sketch
-    assert client.tdigest().add("from-tDigest", [1.0] * 10, [1.0] * 10)
-    assert client.tdigest().add("to-tDigest", [2.0] * 10, [10.0] * 10)
+    assert client.tdigest().add("from-tDigest", [1.0] * 10)
+    assert client.tdigest().add("to-tDigest", [2.0] * 10)
     # merge from-tdigest into to-tdigest
-    assert client.tdigest().merge("to-tDigest", "from-tDigest")
+    assert client.tdigest().merge("to-tDigest", 1, "from-tDigest")
     # we should now have 20 weight on to-histogram (two sketches of 10 unit-weight points each)
     info = client.tdigest().info("to-tDigest")
-    total_weight_to = float(info.mergedWeight) + float(info.unmergedWeight)
-    assert 110 == total_weight_to
+    total_weight_to = float(info.merged_weight) + float(info.unmerged_weight)
+    assert 20 == total_weight_to
+    # test override
+    assert client.tdigest().create("from-override", 10)
+    assert client.tdigest().create("from-override-2", 10)
+    assert client.tdigest().add("from-override", [3.0] * 10)
+    assert client.tdigest().add("from-override-2", [4.0] * 10)
+    assert client.tdigest().merge(
+        "to-tDigest", 2, "from-override", "from-override-2", override=True
+    )
+    assert 3.0 == client.tdigest().min("to-tDigest")
+    assert 4.0 == client.tdigest().max("to-tDigest")
 
 
 @pytest.mark.redismod
@@ -346,7 +392,7 @@ def test_tdigest_merge(client):
 def test_tdigest_min_and_max(client):
     assert client.tdigest().create("tDigest", 100)
     # insert data-points into sketch
-    assert client.tdigest().add("tDigest", [1, 2, 3], [1.0] * 3)
+    assert client.tdigest().add("tDigest", [1, 2, 3])
     # min/max
     assert 3 == client.tdigest().max("tDigest")
     assert 1 == client.tdigest().min("tDigest")
@@ -354,18 +400,24 @@ def test_tdigest_min_and_max(client):
 
 @pytest.mark.redismod
 @pytest.mark.experimental
+@skip_ifmodversion_lt("2.4.0", "bf")
 def test_tdigest_quantile(client):
     assert client.tdigest().create("tDigest", 500)
     # insert data-points into sketch
-    assert client.tdigest().add(
-        "tDigest", list([x * 0.01 for x in range(1, 10000)]), [1.0] * 10000
-    )
+    assert client.tdigest().add("tDigest", list([x * 0.01 for x in range(1, 10000)]))
     # assert min/max have same result as quantile 0 and 1
-    assert client.tdigest().max("tDigest") == client.tdigest().quantile("tDigest", 1.0)
-    assert client.tdigest().min("tDigest") == client.tdigest().quantile("tDigest", 0.0)
+    res = client.tdigest().quantile("tDigest", 1.0)
+    assert client.tdigest().max("tDigest") == res[0]
+    res = client.tdigest().quantile("tDigest", 0.0)
+    assert client.tdigest().min("tDigest") == res[0]
+
+    assert 1.0 == round(client.tdigest().quantile("tDigest", 0.01)[0], 2)
+    assert 99.0 == round(client.tdigest().quantile("tDigest", 0.99)[0], 2)
 
-    assert 1.0 == round(client.tdigest().quantile("tDigest", 0.01), 2)
-    assert 99.0 == round(client.tdigest().quantile("tDigest", 0.99), 2)
+    # test multiple quantiles
+    assert client.tdigest().create("t-digest", 100)
+    assert client.tdigest().add("t-digest", [1, 2, 3, 4, 5])
+    assert [3.0, 5.0] == client.tdigest().quantile("t-digest", 0.5, 0.8)
 
 
 @pytest.mark.redismod
@@ -373,9 +425,67 @@ def test_tdigest_quantile(client):
 def test_tdigest_cdf(client):
     assert client.tdigest().create("tDigest", 100)
     # insert data-points into sketch
-    assert client.tdigest().add("tDigest", list(range(1, 10)), [1.0] * 10)
-    assert 0.1 == round(client.tdigest().cdf("tDigest", 1.0), 1)
-    assert 0.9 == round(client.tdigest().cdf("tDigest", 9.0), 1)
+    assert client.tdigest().add("tDigest", list(range(1, 10)))
+    assert 0.1 == round(client.tdigest().cdf("tDigest", 1.0)[0], 1)
+    assert 0.9 == round(client.tdigest().cdf("tDigest", 9.0)[0], 1)
+    res = client.tdigest().cdf("tDigest", 1.0, 9.0)
+    assert [0.1, 0.9] == [round(x, 1) for x in res]
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+@skip_ifmodversion_lt("2.4.0", "bf")
+def test_tdigest_trimmed_mean(client):
+    assert client.tdigest().create("tDigest", 100)
+    # insert data-points into sketch
+    assert client.tdigest().add("tDigest", list(range(1, 10)))
+    assert 5 == client.tdigest().trimmed_mean("tDigest", 0.1, 0.9)
+    assert 4.5 == client.tdigest().trimmed_mean("tDigest", 0.4, 0.5)
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+def test_tdigest_rank(client):
+    assert client.tdigest().create("t-digest", 500)
+    assert client.tdigest().add("t-digest", list(range(0, 20)))
+    assert -1 == client.tdigest().rank("t-digest", -1)[0]
+    assert 0 == client.tdigest().rank("t-digest", 0)[0]
+    assert 10 == client.tdigest().rank("t-digest", 10)[0]
+    assert [-1, 20, 9] == client.tdigest().rank("t-digest", -20, 20, 9)
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+def test_tdigest_revrank(client):
+    assert client.tdigest().create("t-digest", 500)
+    assert client.tdigest().add("t-digest", list(range(0, 20)))
+    assert -1 == client.tdigest().revrank("t-digest", 20)[0]
+    assert 19 == client.tdigest().revrank("t-digest", 0)[0]
+    assert [-1, 19, 9] == client.tdigest().revrank("t-digest", 21, 0, 10)
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+def test_tdigest_byrank(client):
+    assert client.tdigest().create("t-digest", 500)
+    assert client.tdigest().add("t-digest", list(range(1, 11)))
+    assert 1 == client.tdigest().byrank("t-digest", 0)[0]
+    assert 10 == client.tdigest().byrank("t-digest", 9)[0]
+    assert client.tdigest().byrank("t-digest", 100)[0] == inf
+    with pytest.raises(redis.ResponseError):
+        client.tdigest().byrank("t-digest", -1)[0]
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+def test_tdigest_byrevrank(client):
+    assert client.tdigest().create("t-digest", 500)
+    assert client.tdigest().add("t-digest", list(range(1, 11)))
+    assert 10 == client.tdigest().byrevrank("t-digest", 0)[0]
+    assert 1 == client.tdigest().byrevrank("t-digest", 9)[0]
+    assert client.tdigest().byrevrank("t-digest", 100)[0] == -inf
+    with pytest.raises(redis.ResponseError):
+        client.tdigest().byrevrank("t-digest", -1)[0]
 
 
 # @pytest.mark.redismod
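
A minimal sketch of the reworked t-digest API these tests now target (TDIGEST.ADD without per-value weights, TDIGEST.MERGE with an explicit source-key count, list-returning TDIGEST.QUANTILE) plus the new BF.CARD helper; the key names are illustrative and RedisBloom >= 2.4 is assumed:

    import redis

    r = redis.Redis(decode_responses=True)

    r.tdigest().create("latencies", 100)
    r.tdigest().add("latencies", [1, 2, 3, 4, 5])        # values only, no weights argument
    print(r.tdigest().quantile("latencies", 0.5, 0.9))   # a list, one value per requested quantile
    print(r.tdigest().trimmed_mean("latencies", 0.1, 0.9))

    r.tdigest().create("combined", 100)
    r.tdigest().merge("combined", 1, "latencies")        # destination, key count, then source keys
    print(r.tdigest().info("combined").merged_weight)    # info fields are exposed in snake_case

    r.bf().add("bf1", "item_foo")
    print(r.bf().card("bf1"))                            # cardinality of an existing Bloom filter
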
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index 0353323..1bf57a3 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -7,6 +7,7 @@ from unittest.mock import DEFAULT, Mock, call, patch
 import pytest
 
 from redis import Redis
+from redis.backoff import ExponentialBackoff, NoBackoff, default_backoff
 from redis.cluster import (
     PRIMARY,
     REDIS_CLUSTER_HASH_SLOTS,
@@ -17,7 +18,7 @@ from redis.cluster import (
     get_node_name,
 )
 from redis.commands import CommandsParser
-from redis.connection import Connection
+from redis.connection import BlockingConnectionPool, Connection, ConnectionPool
 from redis.crc import key_slot
 from redis.exceptions import (
     AskError,
@@ -31,6 +32,7 @@ from redis.exceptions import (
     ResponseError,
     TimeoutError,
 )
+from redis.retry import Retry
 from redis.utils import str_if_bytes
 from tests.test_pubsub import wait_for_message
 
@@ -174,7 +176,7 @@ def moved_redirection_helper(request, failover=False):
     prev_primary = rc.nodes_manager.get_node_from_slot(slot)
     if failover:
         if len(rc.nodes_manager.slots_cache[slot]) < 2:
-            warnings.warn("Skipping this test since it requires to have a " "replica")
+            warnings.warn("Skipping this test since it requires to have a replica")
             return
         redirect_node = rc.nodes_manager.slots_cache[slot][1]
     else:
@@ -242,7 +244,7 @@ class TestRedisClusterObj:
             RedisCluster(startup_nodes=[])
 
         assert str(ex.value).startswith(
-            "RedisCluster requires at least one node to discover the " "cluster"
+            "RedisCluster requires at least one node to discover the cluster"
         ), str_if_bytes(ex.value)
 
     def test_from_url(self, r):
@@ -263,7 +265,7 @@ class TestRedisClusterObj:
         with pytest.raises(RedisClusterException) as ex:
             r.execute_command("GET")
         assert str(ex.value).startswith(
-            "No way to dispatch this command to " "Redis Cluster. Missing key."
+            "No way to dispatch this command to Redis Cluster. Missing key."
         )
 
     def test_execute_command_node_flag_primaries(self, r):
@@ -358,6 +360,60 @@ class TestRedisClusterObj:
 
             assert r.execute_command("SET", "foo", "bar") == "MOCK_OK"
 
+    def test_handling_cluster_failover_to_a_replica(self, r):
+        # Set the key we'll test for
+        key = "key"
+        r.set("key", "value")
+        primary = r.get_node_from_key(key, replica=False)
+        assert str_if_bytes(r.get("key")) == "value"
+        # Get the current output of cluster slots
+        cluster_slots = primary.redis_connection.execute_command("CLUSTER SLOTS")
+        replica_host = ""
+        replica_port = 0
+        # Replace one of the replicas to be the new primary based on the
+        # cluster slots output
+        for slot_range in cluster_slots:
+            primary_port = slot_range[2][1]
+            if primary_port == primary.port:
+                if len(slot_range) <= 3:
+                    # cluster doesn't have a replica, return
+                    return
+                replica_host = str_if_bytes(slot_range[3][0])
+                replica_port = slot_range[3][1]
+                # replace replica and primary in the cluster slots output
+                tmp_node = slot_range[2]
+                slot_range[2] = slot_range[3]
+                slot_range[3] = tmp_node
+                break
+
+        def raise_connection_error():
+            raise ConnectionError("error")
+
+        def mock_execute_command(*_args, **_kwargs):
+            if _args[0] == "CLUSTER SLOTS":
+                return cluster_slots
+            else:
+                raise Exception("Failed to mock cluster slots")
+
+        # Mock connection error for the current primary
+        mock_node_resp_func(primary, raise_connection_error)
+        primary.redis_connection.set_retry(Retry(NoBackoff(), 1))
+
+        # Mock the cluster slots response for all other nodes
+        redis_mock_node = Mock()
+        redis_mock_node.execute_command.side_effect = mock_execute_command
+        # Mock response value for all other commands
+        redis_mock_node.parse_response.return_value = "MOCK_OK"
+        for node in r.get_nodes():
+            if node.port != primary.port:
+                node.redis_connection = redis_mock_node
+
+        assert r.get(key) == "MOCK_OK"
+        new_primary = r.get_node_from_key(key, replica=False)
+        assert new_primary.host == replica_host
+        assert new_primary.port == replica_port
+        assert r.get_node(primary.host, primary.port).server_type == REPLICA
+
     def test_moved_redirection(self, request):
         """
         Test that the client handles MOVED response.
@@ -505,7 +561,15 @@ class TestRedisClusterObj:
                 read_cluster.get("foo")
                 read_cluster.get("foo")
                 read_cluster.get("foo")
-                mocks["send_command"].assert_has_calls([call("READONLY")])
+                mocks["send_command"].assert_has_calls(
+                    [
+                        call("READONLY"),
+                        call("GET", "foo"),
+                        call("READONLY"),
+                        call("GET", "foo"),
+                        call("GET", "foo"),
+                    ]
+                )
 
     def test_keyslot(self, r):
         """
@@ -672,7 +736,7 @@ class TestRedisClusterObj:
         with patch.object(Redis, "parse_response") as parse_response:
 
             def moved_redirect_effect(connection, *args, **options):
-                # raise a timeout for 5 times so we'll need to reinitilize the topology
+                # raise a timeout for 5 times so we'll need to reinitialize the topology
                 if count.val == 4:
                     parse_response.side_effect = real_func
                 count.val += 1
@@ -691,6 +755,73 @@ class TestRedisClusterObj:
                     cur_node = r.get_node(node_name=node_name)
                     assert conn == r.get_redis_connection(cur_node)
 
+    def test_cluster_get_set_retry_object(self, request):
+        retry = Retry(NoBackoff(), 2)
+        r = _get_client(RedisCluster, request, retry=retry)
+        assert r.get_retry()._retries == retry._retries
+        assert isinstance(r.get_retry()._backoff, NoBackoff)
+        for node in r.get_nodes():
+            assert node.redis_connection.get_retry()._retries == retry._retries
+            assert isinstance(node.redis_connection.get_retry()._backoff, NoBackoff)
+        rand_node = r.get_random_node()
+        existing_conn = rand_node.redis_connection.connection_pool.get_connection("_")
+        # Change retry policy
+        new_retry = Retry(ExponentialBackoff(), 3)
+        r.set_retry(new_retry)
+        assert r.get_retry()._retries == new_retry._retries
+        assert isinstance(r.get_retry()._backoff, ExponentialBackoff)
+        for node in r.get_nodes():
+            assert node.redis_connection.get_retry()._retries == new_retry._retries
+            assert isinstance(
+                node.redis_connection.get_retry()._backoff, ExponentialBackoff
+            )
+        assert existing_conn.retry._retries == new_retry._retries
+        new_conn = rand_node.redis_connection.connection_pool.get_connection("_")
+        assert new_conn.retry._retries == new_retry._retries
+
+    def test_cluster_retry_object(self, r) -> None:
+        # Test default retry
+        retry = r.get_connection_kwargs().get("retry")
+        assert isinstance(retry, Retry)
+        assert retry._retries == 0
+        assert isinstance(retry._backoff, type(default_backoff()))
+        node1 = r.get_node("127.0.0.1", 16379).redis_connection
+        node2 = r.get_node("127.0.0.1", 16380).redis_connection
+        assert node1.get_retry()._retries == node2.get_retry()._retries
+
+        # Test custom retry
+        retry = Retry(ExponentialBackoff(10, 5), 5)
+        rc_custom_retry = RedisCluster("127.0.0.1", 16379, retry=retry)
+        assert (
+            rc_custom_retry.get_node("127.0.0.1", 16379)
+            .redis_connection.get_retry()
+            ._retries
+            == retry._retries
+        )
+
+    def test_replace_cluster_node(self, r) -> None:
+        prev_default_node = r.get_default_node()
+        r.replace_default_node()
+        assert r.get_default_node() != prev_default_node
+        r.replace_default_node(prev_default_node)
+        assert r.get_default_node() == prev_default_node
+
+    def test_default_node_is_replaced_after_exception(self, r):
+        curr_default_node = r.get_default_node()
+        # CLUSTER NODES command is being executed on the default node
+        nodes = r.cluster_nodes()
+        assert "myself" in nodes.get(curr_default_node.name).get("flags")
+
+        def raise_connection_error():
+            raise ConnectionError("error")
+
+        # Mock connection error for the default node
+        mock_node_resp_func(curr_default_node, raise_connection_error)
+        # Test that the command succeeds from a different node
+        nodes = r.cluster_nodes()
+        assert "myself" not in nodes.get(curr_default_node.name).get("flags")
+        assert r.get_default_node() != curr_default_node
+
 
 @pytest.mark.onlycluster
 class TestClusterRedisCommands:
@@ -1166,6 +1297,14 @@ class TestClusterRedisCommands:
         for i in range(0, len(res) - 1, 2):
             assert res[i][3] == res[i + 1][3]
 
+    def test_cluster_flushslots_not_implemented(self, r):
+        with pytest.raises(NotImplementedError):
+            r.cluster_flushslots()
+
+    def test_cluster_bumpepoch_not_implemented(self, r):
+        with pytest.raises(NotImplementedError):
+            r.cluster_bumpepoch()
+
     @skip_if_redis_enterprise()
     def test_readonly(self):
         r = get_mocked_redis_client(host=default_host, port=default_port)
@@ -2123,6 +2262,57 @@ class TestNodesManager:
 
         assert len(n_manager.nodes_cache) == 6
 
+    def test_init_promote_server_type_for_node_in_cache(self):
+        """
+        When replica is promoted to master, nodes_cache must change the server type
+        accordingly
+        """
+        cluster_slots_before_promotion = [
+            [0, 16383, ["127.0.0.1", 7000], ["127.0.0.1", 7003]]
+        ]
+        cluster_slots_after_promotion = [
+            [0, 16383, ["127.0.0.1", 7003], ["127.0.0.1", 7004]]
+        ]
+
+        cluster_slots_results = [
+            cluster_slots_before_promotion,
+            cluster_slots_after_promotion,
+        ]
+
+        with patch.object(Redis, "execute_command") as execute_command_mock:
+
+            def execute_command(*_args, **_kwargs):
+                if _args[0] == "CLUSTER SLOTS":
+                    mock_cluster_slots = cluster_slots_results.pop(0)
+                    return mock_cluster_slots
+                elif _args[0] == "COMMAND":
+                    return {"get": [], "set": []}
+                elif _args[0] == "INFO":
+                    return {"cluster_enabled": True}
+                elif len(_args) > 1 and _args[1] == "cluster-require-full-coverage":
+                    return {"cluster-require-full-coverage": False}
+                else:
+                    return execute_command_mock(*_args, **_kwargs)
+
+            execute_command_mock.side_effect = execute_command
+
+            nm = NodesManager(
+                startup_nodes=[ClusterNode(host=default_host, port=default_port)],
+                from_url=False,
+                require_full_coverage=False,
+                dynamic_startup_nodes=True,
+            )
+
+            assert nm.default_node.host == "127.0.0.1"
+            assert nm.default_node.port == 7000
+            assert nm.default_node.server_type == PRIMARY
+
+            nm.initialize()
+
+            assert nm.default_node.host == "127.0.0.1"
+            assert nm.default_node.port == 7003
+            assert nm.default_node.server_type == PRIMARY
+
     def test_init_slots_cache_cluster_mode_disabled(self):
         """
         Test that creating a RedisCluster fails if one of the startup nodes
@@ -2306,6 +2496,21 @@ class TestNodesManager:
         else:
             assert startup_nodes == ["my@DNS.com:7000"]
 
+    @pytest.mark.parametrize(
+        "connection_pool_class", [ConnectionPool, BlockingConnectionPool]
+    )
+    def test_connection_pool_class(self, connection_pool_class):
+        rc = get_mocked_redis_client(
+            url="redis://my@DNS.com:7000",
+            cluster_slots=default_cluster_slots,
+            connection_pool_class=connection_pool_class,
+        )
+
+        for node in rc.nodes_manager.nodes_cache.values():
+            assert isinstance(
+                node.redis_connection.connection_pool, connection_pool_class
+            )
+
 
 @pytest.mark.onlycluster
 class TestClusterPubSubObject:
@@ -2498,6 +2703,25 @@ class TestClusterPipeline:
             with pytest.raises(RedisClusterException):
                 pipe.delete("a", "b")
 
+    def test_unlink_single(self, r):
+        """
+        Test a single unlink operation
+        """
+        r["a"] = 1
+        with r.pipeline(transaction=False) as pipe:
+            pipe.unlink("a")
+            assert pipe.execute() == [1]
+
+    def test_multi_unlink_unsupported(self, r):
+        """
+        Test that multi unlink operation is unsupported
+        """
+        with r.pipeline(transaction=False) as pipe:
+            r["a"] = 1
+            r["b"] = 2
+            with pytest.raises(RedisClusterException):
+                pipe.unlink("a", "b")
+
     def test_brpoplpush_disabled(self, r):
         """
         Test that brpoplpush is disabled for ClusterPipeline
@@ -2666,7 +2890,7 @@ class TestClusterPipeline:
                 ask_node = node
                 break
         if ask_node is None:
-            warnings.warn("skipping this test since the cluster has only one " "node")
+            warnings.warn("skipping this test since the cluster has only one node")
             return
         ask_msg = f"{r.keyslot(key)} {ask_node.host}:{ask_node.port}"
 
diff --git a/tests/test_command_parser.py b/tests/test_command_parser.py
index 708c069..6c3ede9 100644
--- a/tests/test_command_parser.py
+++ b/tests/test_command_parser.py
@@ -51,21 +51,13 @@ class TestCommandsParser:
         ]
         args7 = ["MIGRATE", "192.168.1.34", 6379, "key1", 0, 5000]
 
-        assert commands_parser.get_keys(r, *args1).sort() == ["key1", "key2"].sort()
-        assert (
-            commands_parser.get_keys(r, *args2).sort() == ["mystream", "writers"].sort()
-        )
-        assert (
-            commands_parser.get_keys(r, *args3).sort()
-            == ["out", "zset1", "zset2"].sort()
-        )
-        assert commands_parser.get_keys(r, *args4).sort() == ["Sicily", "out"].sort()
-        assert commands_parser.get_keys(r, *args5).sort() == ["foo"].sort()
-        assert (
-            commands_parser.get_keys(r, *args6).sort()
-            == ["key1", "key2", "key3"].sort()
-        )
-        assert commands_parser.get_keys(r, *args7).sort() == ["key1"].sort()
+        assert sorted(commands_parser.get_keys(r, *args1)) == ["key1", "key2"]
+        assert sorted(commands_parser.get_keys(r, *args2)) == ["mystream", "writers"]
+        assert sorted(commands_parser.get_keys(r, *args3)) == ["out", "zset1", "zset2"]
+        assert sorted(commands_parser.get_keys(r, *args4)) == ["Sicily", "out"]
+        assert sorted(commands_parser.get_keys(r, *args5)) == ["foo"]
+        assert sorted(commands_parser.get_keys(r, *args6)) == ["key1", "key2", "key3"]
+        assert sorted(commands_parser.get_keys(r, *args7)) == ["key1"]
 
     # A bug in redis<7.0 causes this to fail: https://github.com/redis/redis/issues/9493
     @skip_if_server_version_lt("7.0.0")
diff --git a/tests/test_commands.py b/tests/test_commands.py
index 715d18c..94249e9 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -9,7 +9,7 @@ import pytest
 
 import redis
 from redis import exceptions
-from redis.client import parse_info
+from redis.client import EMPTY_RESPONSE, NEVER_DECODE, parse_info
 
 from .conftest import (
     _get_client,
@@ -68,6 +68,14 @@ class TestResponseCallbacks:
 class TestRedisCommands:
     @skip_if_redis_enterprise()
     def test_auth(self, r, request):
+        # sending an AUTH command before setting a user/password on the
+        # server should return an AuthenticationError
+        with pytest.raises(exceptions.AuthenticationError):
+            r.auth("some_password")
+
+        with pytest.raises(exceptions.AuthenticationError):
+            r.auth("some_password", "some_user")
+
         # first, test for default user (`username` is supposed to be optional)
         default_username = "default"
         temp_pass = "temp_pass"
@@ -81,9 +89,19 @@ class TestRedisCommands:
 
         def teardown():
             try:
-                r.auth(temp_pass)
-            except exceptions.ResponseError:
-                r.auth("default", "")
+                # this is needed because after an AuthenticationError the connection
+                # is closed, and if we send an AUTH command a new connection is
+                # created, but in this case we'd get an "Authentication required"
+                # error when switching to the db 9 because we're not authenticated yet
+                # setting the password on the connection itself triggers the
+                # authentication in the connection's `on_connect` method
+                r.connection.password = temp_pass
+            except AttributeError:
+                # connection field is not set in Redis Cluster, but that's ok
+                # because the problem discussed above does not apply to Redis Cluster
+                pass
+
+            r.auth(temp_pass)
             r.config_set("requirepass", "")
             r.acl_deluser(username)
 
@@ -95,7 +113,7 @@ class TestRedisCommands:
 
         assert r.auth(username=username, password="strong_password") is True
 
-        with pytest.raises(exceptions.ResponseError):
+        with pytest.raises(exceptions.AuthenticationError):
             r.auth(username=username, password="wrong_password")
 
     def test_command_on_invalid_key_type(self, r):
@@ -256,7 +274,7 @@ class TestRedisCommands:
 
         # Resets and tests that hashed passwords are set properly.
         hashed_password = (
-            "5e884898da28047151d0e56f8dc629" "2773603d0d6aabbdd62a11ef721d1542d8"
+            "5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8"
         )
         assert r.acl_setuser(
             username, enabled=True, reset=True, hashed_passwords=["+" + hashed_password]
@@ -899,6 +917,16 @@ class TestRedisCommands:
         time.sleep(0.3)
         assert r.bgsave(True)
 
+    def test_never_decode_option(self, r: redis.Redis):
+        opts = {NEVER_DECODE: []}
+        r.delete("a")
+        assert r.execute_command("EXISTS", "a", **opts) == 0
+
+    def test_empty_response_option(self, r: redis.Redis):
+        opts = {EMPTY_RESPONSE: []}
+        r.delete("a")
+        assert r.execute_command("EXISTS", "a", **opts) == 0
+
     # BASIC KEY COMMANDS
     def test_append(self, r):
         assert r.append("a", "a1") == 2
@@ -1185,7 +1213,7 @@ class TestRedisCommands:
     def test_expireat_unixtime(self, r):
         expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
         r["a"] = "foo"
-        expire_at_seconds = int(time.mktime(expire_at.timetuple()))
+        expire_at_seconds = int(expire_at.timestamp())
         assert r.expireat("a", expire_at_seconds) is True
         assert 0 < r.ttl("a") <= 61
 
@@ -1428,8 +1456,8 @@ class TestRedisCommands:
     def test_pexpireat_unixtime(self, r):
         expire_at = redis_server_time(r) + datetime.timedelta(minutes=1)
         r["a"] = "foo"
-        expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000
-        assert r.pexpireat("a", expire_at_seconds) is True
+        expire_at_milliseconds = int(expire_at.timestamp() * 1000)
+        assert r.pexpireat("a", expire_at_milliseconds) is True
         assert 0 < r.pttl("a") <= 61000
 
     @skip_if_server_version_lt("7.0.0")
@@ -1572,6 +1600,13 @@ class TestRedisCommands:
         with pytest.raises(exceptions.DataError):
             assert r.set("a", "1", ex=10.0)
 
+    @skip_if_server_version_lt("2.6.0")
+    def test_set_ex_str(self, r):
+        assert r.set("a", "1", ex="10")
+        assert 0 < r.ttl("a") <= 10
+        with pytest.raises(exceptions.DataError):
+            assert r.set("a", "1", ex="10.5")
+
     @skip_if_server_version_lt("2.6.0")
     def test_set_ex_timedelta(self, r):
         expire_at = datetime.timedelta(seconds=60)
@@ -4438,6 +4473,19 @@ class TestRedisCommands:
         )
         assert resp == [0, None, 255]
 
+    @skip_if_server_version_lt("6.0.0")
+    def test_bitfield_ro(self, r: redis.Redis):
+        bf = r.bitfield("a")
+        resp = bf.set("u8", 8, 255).execute()
+        assert resp == [0]
+
+        resp = r.bitfield_ro("a", "u8", 0)
+        assert resp == [0]
+
+        items = [("u4", 8), ("u4", 12), ("u4", 13)]
+        resp = r.bitfield_ro("a", "u8", 0, items)
+        assert resp == [0, 15, 15, 14]
+
     @skip_if_server_version_lt("4.0.0")
     def test_memory_help(self, r):
         with pytest.raises(NotImplementedError):
@@ -4486,6 +4534,23 @@ class TestRedisCommands:
         with pytest.raises(NotImplementedError):
             r.latency_histogram()
 
+    def test_latency_graph_not_implemented(self, r: redis.Redis):
+        with pytest.raises(NotImplementedError):
+            r.latency_graph()
+
+    def test_latency_doctor_not_implemented(self, r: redis.Redis):
+        with pytest.raises(NotImplementedError):
+            r.latency_doctor()
+
+    def test_latency_history(self, r: redis.Redis):
+        assert r.latency_history("command") == []
+
+    def test_latency_latest(self, r: redis.Redis):
+        assert r.latency_latest() == []
+
+    def test_latency_reset(self, r: redis.Redis):
+        assert r.latency_reset() == 0
+
     @pytest.mark.onlynoncluster
     @skip_if_server_version_lt("4.0.0")
     @skip_if_redis_enterprise()
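
A minimal sketch of the read-only BITFIELD_RO wrapper added above; the key name is illustrative and BITFIELD_RO needs Redis >= 6.0:

    import redis

    r = redis.Redis()
    r.bitfield("a").set("u8", 8, 255).execute()

    # One encoding/offset pair, optionally followed by extra (encoding, offset) items
    print(r.bitfield_ro("a", "u8", 0))
    print(r.bitfield_ro("a", "u8", 0, [("u4", 8), ("u4", 12)]))
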
diff --git a/tests/test_connection.py b/tests/test_connection.py
index d9251c3..25b4118 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -5,13 +5,21 @@ from unittest.mock import patch
 
 import pytest
 
+import redis
 from redis.backoff import NoBackoff
-from redis.connection import Connection
+from redis.connection import (
+    Connection,
+    HiredisParser,
+    PythonParser,
+    SSLConnection,
+    UnixDomainSocketConnection,
+)
 from redis.exceptions import ConnectionError, InvalidResponse, TimeoutError
 from redis.retry import Retry
 from redis.utils import HIREDIS_AVAILABLE
 
 from .conftest import skip_if_server_version_lt
+from .mocks import MockSocket
 
 
 @pytest.mark.skipif(HIREDIS_AVAILABLE, reason="PythonParser only")
@@ -122,3 +130,78 @@ class TestConnection:
         assert conn._connect.call_count == 1
         assert str(e.value) == "Timeout connecting to server"
         self.clear(conn)
+
+
+@pytest.mark.onlynoncluster
+@pytest.mark.parametrize(
+    "parser_class", [PythonParser, HiredisParser], ids=["PythonParser", "HiredisParser"]
+)
+def test_connection_parse_response_resume(r: redis.Redis, parser_class):
+    """
+    This test verifies that the Connection parser,
+    be that PythonParser or HiredisParser,
+    can be interrupted at IO time and then resume parsing.
+    """
+    if parser_class is HiredisParser and not HIREDIS_AVAILABLE:
+        pytest.skip("Hiredis not available")
+    args = dict(r.connection_pool.connection_kwargs)
+    args["parser_class"] = parser_class
+    conn = Connection(**args)
+    conn.connect()
+    message = (
+        b"*3\r\n$7\r\nmessage\r\n$8\r\nchannel1\r\n"
+        b"$25\r\nhi\r\nthere\r\n+how\r\nare\r\nyou\r\n"
+    )
+    mock_socket = MockSocket(message, interrupt_every=2)
+
+    if isinstance(conn._parser, PythonParser):
+        conn._parser._buffer._sock = mock_socket
+    else:
+        conn._parser._sock = mock_socket
+    for i in range(100):
+        try:
+            response = conn.read_response()
+            break
+        except MockSocket.TestError:
+            pass
+
+    else:
+        pytest.fail("didn't receive a response")
+    assert response
+    assert i > 0
+
+
+@pytest.mark.onlynoncluster
+@pytest.mark.parametrize(
+    "Class",
+    [
+        Connection,
+        SSLConnection,
+        UnixDomainSocketConnection,
+    ],
+)
+def test_pack_command(Class):
+    """
+    This test verifies that the pack_command works
+    on all supported connections. #2581
+    """
+    cmd = (
+        "HSET",
+        "foo",
+        "key",
+        "value1",
+        b"key_b",
+        b"bytes str",
+        b"key_i",
+        67,
+        "key_f",
+        3.14159265359,
+    )
+    expected = (
+        b"*10\r\n$4\r\nHSET\r\n$3\r\nfoo\r\n$3\r\nkey\r\n$6\r\nvalue1\r\n"
+        b"$5\r\nkey_b\r\n$9\r\nbytes str\r\n$5\r\nkey_i\r\n$2\r\n67\r\n$5"
+        b"\r\nkey_f\r\n$13\r\n3.14159265359\r\n"
+    )
+
+    actual = Class().pack_command(*cmd)[0]
+    assert actual == expected, f"actual = {actual}, expected = {expected}"
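
A minimal sketch of what Connection.pack_command produces, mirroring the RESP framing the new test asserts; the command and values are arbitrary:

    from redis.connection import Connection

    packed = Connection().pack_command("SET", "foo", "bar")[0]
    print(packed)  # b'*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n'
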
diff --git a/tests/test_connection_pool.py b/tests/test_connection_pool.py
index a836f5b..e8a4269 100644
--- a/tests/test_connection_pool.py
+++ b/tests/test_connection_pool.py
@@ -314,7 +314,7 @@ class TestConnectionPoolURLParsing:
     def test_invalid_extra_typed_querystring_options(self):
         with pytest.raises(ValueError):
             redis.ConnectionPool.from_url(
-                "redis://localhost/2?socket_timeout=_&" "socket_connect_timeout=abc"
+                "redis://localhost/2?socket_timeout=_&socket_connect_timeout=abc"
             )
 
     def test_extra_querystring_options(self):
@@ -339,6 +339,14 @@ class TestConnectionPoolURLParsing:
             "(redis://, rediss://, unix://)"
         )
 
+    def test_invalid_scheme_raises_error_when_double_slash_missing(self):
+        with pytest.raises(ValueError) as cm:
+            redis.ConnectionPool.from_url("redis:foo.bar.com:12345")
+        assert str(cm.value) == (
+            "Redis URL must specify one of the following schemes "
+            "(redis://, rediss://, unix://)"
+        )
+
 
 class TestConnectionPoolUnixSocketURLParsing:
     def test_defaults(self):
@@ -545,22 +553,40 @@ class TestConnection:
         )
 
     @skip_if_redis_enterprise()
-    def test_connect_no_auth_supplied_when_required(self, r):
+    def test_connect_no_auth_configured(self, r):
         """
-        AuthenticationError should be raised when the server requires a
-        password but one isn't supplied.
+        AuthenticationError should be raised when the server is not configured with auth
+        but credentials are supplied by the user.
         """
+        # Redis < 6
         with pytest.raises(redis.AuthenticationError):
             r.execute_command(
                 "DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set"
             )
 
+        # Redis >= 6
+        with pytest.raises(redis.AuthenticationError):
+            r.execute_command(
+                "DEBUG",
+                "ERROR",
+                "ERR AUTH <password> called without any password "
+                "configured for the default user. Are you sure "
+                "your configuration is correct?",
+            )
+
     @skip_if_redis_enterprise()
-    def test_connect_invalid_password_supplied(self, r):
-        "AuthenticationError should be raised when sending the wrong password"
+    def test_connect_invalid_auth_credentials_supplied(self, r):
+        """
+        AuthenticationError should be raised when sending invalid username/password
+        """
+        # Redis < 6
         with pytest.raises(redis.AuthenticationError):
             r.execute_command("DEBUG", "ERROR", "ERR invalid password")
 
+        # Redis >= 6
+        with pytest.raises(redis.AuthenticationError):
+            r.execute_command("DEBUG", "ERROR", "WRONGPASS")
+
 
 @pytest.mark.onlynoncluster
 class TestMultiConnectionClient:
diff --git a/tests/test_credentials.py b/tests/test_credentials.py
new file mode 100644
index 0000000..9aeb1ef
--- /dev/null
+++ b/tests/test_credentials.py
@@ -0,0 +1,245 @@
+import functools
+import random
+import string
+from typing import Optional, Tuple, Union
+
+import pytest
+
+import redis
+from redis import AuthenticationError, DataError, ResponseError
+from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider
+from redis.utils import str_if_bytes
+from tests.conftest import _get_client, skip_if_redis_enterprise
+
+
+class NoPassCredProvider(CredentialProvider):
+    def get_credentials(self) -> Union[Tuple[str], Tuple[str, str]]:
+        return "username", ""
+
+
+class RandomAuthCredProvider(CredentialProvider):
+    def __init__(self, user: Optional[str], endpoint: str):
+        self.user = user
+        self.endpoint = endpoint
+
+    @functools.lru_cache(maxsize=10)
+    def get_credentials(self) -> Union[Tuple[str, str], Tuple[str]]:
+        def get_random_string(length):
+            letters = string.ascii_lowercase
+            result_str = "".join(random.choice(letters) for i in range(length))
+            return result_str
+
+        if self.user:
+            auth_token: str = get_random_string(5) + self.user + "_" + self.endpoint
+            return self.user, auth_token
+        else:
+            auth_token: str = get_random_string(5) + self.endpoint
+            return (auth_token,)
+
+
+def init_acl_user(r, request, username, password):
+    # reset the user
+    r.acl_deluser(username)
+    if password:
+        assert (
+            r.acl_setuser(
+                username,
+                enabled=True,
+                passwords=["+" + password],
+                keys="~*",
+                commands=[
+                    "+ping",
+                    "+command",
+                    "+info",
+                    "+select",
+                    "+flushdb",
+                    "+cluster",
+                ],
+            )
+            is True
+        )
+    else:
+        assert (
+            r.acl_setuser(
+                username,
+                enabled=True,
+                keys="~*",
+                commands=[
+                    "+ping",
+                    "+command",
+                    "+info",
+                    "+select",
+                    "+flushdb",
+                    "+cluster",
+                ],
+                nopass=True,
+            )
+            is True
+        )
+
+    if request is not None:
+
+        def teardown():
+            r.acl_deluser(username)
+
+        request.addfinalizer(teardown)
+
+
+def init_required_pass(r, request, password):
+    r.config_set("requirepass", password)
+
+    def teardown():
+        try:
+            r.auth(password)
+        except (ResponseError, AuthenticationError):
+            r.auth("default", "")
+        r.config_set("requirepass", "")
+
+    request.addfinalizer(teardown)
+
+
+class TestCredentialsProvider:
+    @skip_if_redis_enterprise()
+    def test_only_pass_without_creds_provider(self, r, request):
+        # test for default user (`username` is supposed to be optional)
+        password = "password"
+        init_required_pass(r, request, password)
+        assert r.auth(password) is True
+
+        r2 = _get_client(redis.Redis, request, flushdb=False, password=password)
+
+        assert r2.ping() is True
+
+    @skip_if_redis_enterprise()
+    def test_user_and_pass_without_creds_provider(self, r, request):
+        """
+        Test backward compatibility with username and password
+        """
+        # test for other users
+        username = "username"
+        password = "password"
+
+        init_acl_user(r, request, username, password)
+        r2 = _get_client(
+            redis.Redis, request, flushdb=False, username=username, password=password
+        )
+
+        assert r2.ping() is True
+
+    @pytest.mark.parametrize("username", ["username", None])
+    @skip_if_redis_enterprise()
+    @pytest.mark.onlynoncluster
+    def test_credential_provider_with_supplier(self, r, request, username):
+        creds_provider = RandomAuthCredProvider(
+            user=username,
+            endpoint="localhost",
+        )
+
+        password = creds_provider.get_credentials()[-1]
+
+        if username:
+            init_acl_user(r, request, username, password)
+        else:
+            init_required_pass(r, request, password)
+
+        r2 = _get_client(
+            redis.Redis, request, flushdb=False, credential_provider=creds_provider
+        )
+
+        assert r2.ping() is True
+
+    def test_credential_provider_no_password_success(self, r, request):
+        init_acl_user(r, request, "username", "")
+        r2 = _get_client(
+            redis.Redis,
+            request,
+            flushdb=False,
+            credential_provider=NoPassCredProvider(),
+        )
+        assert r2.ping() is True
+
+    @pytest.mark.onlynoncluster
+    def test_credential_provider_no_password_error(self, r, request):
+        init_acl_user(r, request, "username", "password")
+        with pytest.raises(AuthenticationError) as e:
+            _get_client(
+                redis.Redis,
+                request,
+                flushdb=False,
+                credential_provider=NoPassCredProvider(),
+            )
+        assert e.match("invalid username-password")
+
+    @pytest.mark.onlynoncluster
+    def test_password_and_username_together_with_cred_provider_raise_error(
+        self, r, request
+    ):
+        init_acl_user(r, request, "username", "password")
+        cred_provider = UsernamePasswordCredentialProvider(
+            username="username", password="password"
+        )
+        with pytest.raises(DataError) as e:
+            _get_client(
+                redis.Redis,
+                request,
+                flushdb=False,
+                username="username",
+                password="password",
+                credential_provider=cred_provider,
+            )
+        assert e.match(
+            "'username' and 'password' cannot be passed along with "
+            "'credential_provider'."
+        )
+
+    @pytest.mark.onlynoncluster
+    def test_change_username_password_on_existing_connection(self, r, request):
+        username = "origin_username"
+        password = "origin_password"
+        new_username = "new_username"
+        new_password = "new_password"
+        init_acl_user(r, request, username, password)
+        r2 = _get_client(
+            redis.Redis, request, flushdb=False, username=username, password=password
+        )
+        assert r2.ping() is True
+        conn = r2.connection_pool.get_connection("_")
+        conn.send_command("PING")
+        assert str_if_bytes(conn.read_response()) == "PONG"
+        assert conn.username == username
+        assert conn.password == password
+        init_acl_user(r, request, new_username, new_password)
+        conn.password = new_password
+        conn.username = new_username
+        conn.send_command("PING")
+        assert str_if_bytes(conn.read_response()) == "PONG"
+
+
+class TestUsernamePasswordCredentialProvider:
+    def test_user_pass_credential_provider_acl_user_and_pass(self, r, request):
+        username = "username"
+        password = "password"
+        provider = UsernamePasswordCredentialProvider(username, password)
+        assert provider.username == username
+        assert provider.password == password
+        assert provider.get_credentials() == (username, password)
+        init_acl_user(r, request, provider.username, provider.password)
+        r2 = _get_client(
+            redis.Redis, request, flushdb=False, credential_provider=provider
+        )
+        assert r2.ping() is True
+
+    def test_user_pass_provider_only_password(self, r, request):
+        password = "password"
+        provider = UsernamePasswordCredentialProvider(password=password)
+        assert provider.username == ""
+        assert provider.password == password
+        assert provider.get_credentials() == (password,)
+
+        init_required_pass(r, request, password)
+
+        r2 = _get_client(
+            redis.Redis, request, flushdb=False, credential_provider=provider
+        )
+        assert r2.auth(provider.password) is True
+        assert r2.ping() is True
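
A minimal sketch of the credential-provider API the new test file covers; the username and password are placeholders, and the custom class only illustrates the CredentialProvider interface rather than a real secret store:

    from typing import Tuple, Union

    import redis
    from redis.credentials import CredentialProvider, UsernamePasswordCredentialProvider

    # Built-in provider, equivalent to passing username/password directly
    r = redis.Redis(
        credential_provider=UsernamePasswordCredentialProvider("app_user", "app_password")
    )
    print(r.ping())


    class RotatingCredProvider(CredentialProvider):
        """Hypothetical provider that could fetch rotated credentials on demand."""

        def get_credentials(self) -> Union[Tuple[str], Tuple[str, str]]:
            return ("app_user", "app_password")  # placeholder values


    r2 = redis.Redis(credential_provider=RotatingCredProvider())
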
diff --git a/tests/test_encoding.py b/tests/test_encoding.py
index 2867640..cb9c4e2 100644
--- a/tests/test_encoding.py
+++ b/tests/test_encoding.py
@@ -2,6 +2,7 @@ import pytest
 
 import redis
 from redis.connection import Connection
+from redis.utils import HIREDIS_PACK_AVAILABLE
 
 from .conftest import _get_client
 
@@ -75,6 +76,10 @@ class TestEncodingErrors:
         assert r.get("a") == "foo\ufffd"
 
 
+@pytest.mark.skipif(
+    HIREDIS_PACK_AVAILABLE,
+    reason="Packing via hiredis does not preserve memoryviews",
+)
 class TestMemoryviewsAreNotPacked:
     def test_memoryviews_are_not_packed(self):
         c = Connection()
diff --git a/tests/test_graph.py b/tests/test_graph.py
index 76f8794..4721b2f 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -1,7 +1,24 @@
+from unittest.mock import patch
+
 import pytest
 
 from redis.commands.graph import Edge, Node, Path
 from redis.commands.graph.execution_plan import Operation
+from redis.commands.graph.query_result import (
+    CACHED_EXECUTION,
+    INDICES_CREATED,
+    INDICES_DELETED,
+    INTERNAL_EXECUTION_TIME,
+    LABELS_ADDED,
+    LABELS_REMOVED,
+    NODES_CREATED,
+    NODES_DELETED,
+    PROPERTIES_REMOVED,
+    PROPERTIES_SET,
+    RELATIONSHIPS_CREATED,
+    RELATIONSHIPS_DELETED,
+    QueryResult,
+)
 from redis.exceptions import ResponseError
 from tests.conftest import skip_if_redis_enterprise
 
@@ -107,7 +124,7 @@ def test_path(client):
 
 @pytest.mark.redismod
 def test_param(client):
-    params = [1, 2.3, "str", True, False, None, [0, 1, 2]]
+    params = [1, 2.3, "str", True, False, None, [0, 1, 2], r"\" RETURN 1337 //"]
     query = "RETURN $param"
     for param in params:
         result = client.graph().query(query, {"param": param})
@@ -263,7 +280,8 @@ def test_cached_execution(client):
 
 @pytest.mark.redismod
 def test_slowlog(client):
-    create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
+    create_query = """CREATE (:Rider
+    {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
     (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
     (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
     client.graph().query(create_query)
@@ -349,11 +367,11 @@ def test_list_keys(client):
     result = client.graph().list_keys()
     assert result == []
 
-    client.execute_command("GRAPH.EXPLAIN", "G", "RETURN 1")
+    client.graph("G").query("CREATE (n)")
     result = client.graph().list_keys()
     assert result == ["G"]
 
-    client.execute_command("GRAPH.EXPLAIN", "X", "RETURN 1")
+    client.graph("X").query("CREATE (m)")
     result = client.graph().list_keys()
     assert result == ["G", "X"]
 
@@ -469,7 +487,8 @@ def test_cache_sync(client):
 @pytest.mark.redismod
 def test_execution_plan(client):
     redis_graph = client.graph("execution_plan")
-    create_query = """CREATE (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
+    create_query = """CREATE
+    (:Rider {name:'Valentino Rossi'})-[:rides]->(:Team {name:'Yamaha'}),
     (:Rider {name:'Dani Pedrosa'})-[:rides]->(:Team {name:'Honda'}),
     (:Rider {name:'Andrea Dovizioso'})-[:rides]->(:Team {name:'Ducati'})"""
     redis_graph.query(create_query)
@@ -478,7 +497,7 @@ def test_execution_plan(client):
         "MATCH (r:Rider)-[:rides]->(t:Team) WHERE t.name = $name RETURN r.name, t.name, $params",  # noqa
         {"name": "Yehuda"},
     )
-    expected = "Results\n    Project\n        Conditional Traverse | (t:Team)->(r:Rider)\n            Filter\n                Node By Label Scan | (t:Team)"  # noqa
+    expected = "Results\n    Project\n        Conditional Traverse | (t)->(r:Rider)\n            Filter\n                Node By Label Scan | (t:Team)"  # noqa
     assert result == expected
 
     redis_graph.delete()
@@ -509,11 +528,11 @@ Results
 Distinct
     Join
         Project
-            Conditional Traverse | (t:Team)->(r:Rider)
+            Conditional Traverse | (t)->(r:Rider)
                 Filter
                     Node By Label Scan | (t:Team)
         Project
-            Conditional Traverse | (t:Team)->(r:Rider)
+            Conditional Traverse | (t)->(r:Rider)
                 Filter
                     Node By Label Scan | (t:Team)"""
     assert str(result).replace(" ", "").replace("\n", "") == expected.replace(
@@ -525,9 +544,7 @@ Distinct
             Operation("Join")
             .append_child(
                 Operation("Project").append_child(
-                    Operation(
-                        "Conditional Traverse", "(t:Team)->(r:Rider)"
-                    ).append_child(
+                    Operation("Conditional Traverse", "(t)->(r:Rider)").append_child(
                         Operation("Filter").append_child(
                             Operation("Node By Label Scan", "(t:Team)")
                         )
@@ -536,9 +553,7 @@ Distinct
             )
             .append_child(
                 Operation("Project").append_child(
-                    Operation(
-                        "Conditional Traverse", "(t:Team)->(r:Rider)"
-                    ).append_child(
+                    Operation("Conditional Traverse", "(t)->(r:Rider)").append_child(
                         Operation("Filter").append_child(
                             Operation("Node By Label Scan", "(t:Team)")
                         )
@@ -575,3 +590,33 @@ Project
     assert result.structured_plan == expected
 
     redis_graph.delete()
+
+
+@pytest.mark.redismod
+def test_resultset_statistics(client):
+    with patch.object(target=QueryResult, attribute="_get_stat") as mock_get_stats:
+        result = client.graph().query("RETURN 1")
+        result.labels_added
+        mock_get_stats.assert_called_with(LABELS_ADDED)
+        result.labels_removed
+        mock_get_stats.assert_called_with(LABELS_REMOVED)
+        result.nodes_created
+        mock_get_stats.assert_called_with(NODES_CREATED)
+        result.nodes_deleted
+        mock_get_stats.assert_called_with(NODES_DELETED)
+        result.properties_set
+        mock_get_stats.assert_called_with(PROPERTIES_SET)
+        result.properties_removed
+        mock_get_stats.assert_called_with(PROPERTIES_REMOVED)
+        result.relationships_created
+        mock_get_stats.assert_called_with(RELATIONSHIPS_CREATED)
+        result.relationships_deleted
+        mock_get_stats.assert_called_with(RELATIONSHIPS_DELETED)
+        result.indices_created
+        mock_get_stats.assert_called_with(INDICES_CREATED)
+        result.indices_deleted
+        mock_get_stats.assert_called_with(INDICES_DELETED)
+        result.cached_execution
+        mock_get_stats.assert_called_with(CACHED_EXECUTION)
+        result.run_time_ms
+        mock_get_stats.assert_called_with(INTERNAL_EXECUTION_TIME)
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 3595829..57a94d2 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -80,3 +80,9 @@ def test_quote_string():
     assert quote_string("hello world!") == '"hello world!"'
     assert quote_string("") == '""'
     assert quote_string("hello world!") == '"hello world!"'
+    assert quote_string("abc") == '"abc"'
+    assert quote_string("") == '""'
+    assert quote_string('"') == r'"\""'
+    assert quote_string(r"foo \ bar") == r'"foo \\ bar"'
+    assert quote_string(r"foo \" bar") == r'"foo \\\" bar"'
+    assert quote_string('a"a') == r'"a\"a"'
diff --git a/tests/test_json.py b/tests/test_json.py
index 1cc448c..a776e9e 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -824,7 +824,7 @@ def test_objlen_dollar(client):
         },
     )
     # Test multi
-    assert client.json().objlen("doc1", "$..a") == [2, None, 1]
+    assert client.json().objlen("doc1", "$..a") == [None, 2, 1]
     # Test single
     assert client.json().objlen("doc1", "$.nested1.a") == [2]
 
@@ -1326,17 +1326,10 @@ def test_arrindex_dollar(client):
         [],
     ]
 
-    # Fail with none-scalar value
-    with pytest.raises(exceptions.ResponseError):
-        client.json().arrindex("test_None", "$..nested42_empty_arr.arr", {"arr": []})
-
-    # Do not fail with none-scalar value in legacy mode
-    assert (
-        client.json().arrindex(
-            "test_None", ".[4][1].nested42_empty_arr.arr", '{"arr":[]}'
-        )
-        == -1
-    )
+    # Test with none-scalar value
+    assert client.json().arrindex(
+        "test_None", "$..nested42_empty_arr.arr", {"arr": []}
+    ) == [-1]
 
     # Test legacy (path begins with dot)
     # Test index of int scalar in single value
@@ -1411,7 +1404,8 @@ def test_set_path(client):
 
     with open(jsonfile, "w+") as fp:
         fp.write(json.dumps({"hello": "world"}))
-    open(nojsonfile, "a+").write("hello")
+    with open(nojsonfile, "a+") as fp:
+        fp.write("hello")
 
     result = {jsonfile: True, nojsonfile: False}
     assert client.json().set_path(Path.root_path(), root) == result
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 03377d8..716cd0f 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -122,7 +122,7 @@ class TestPipeline:
             with pytest.raises(redis.ResponseError) as ex:
                 pipe.execute()
             assert str(ex.value).startswith(
-                "Command # 3 (LPUSH c 3) of " "pipeline caused error: "
+                "Command # 3 (LPUSH c 3) of pipeline caused error: "
             )
 
             # make sure the pipe was restored to a working state
@@ -167,7 +167,7 @@ class TestPipeline:
                 pipe.execute()
 
             assert str(ex.value).startswith(
-                "Command # 2 (ZREM b) of " "pipeline caused error: "
+                "Command # 2 (ZREM b) of pipeline caused error: "
             )
 
             # make sure the pipe was restored to a working state
@@ -184,7 +184,7 @@ class TestPipeline:
                 pipe.execute()
 
             assert str(ex.value).startswith(
-                "Command # 2 (ZREM b) of " "pipeline caused error: "
+                "Command # 2 (ZREM b) of pipeline caused error: "
             )
 
             # make sure the pipe was restored to a working state
@@ -331,7 +331,7 @@ class TestPipeline:
                 pipe.execute()
 
             assert str(ex.value).startswith(
-                "Command # 1 (LLEN a) of " "pipeline caused error: "
+                "Command # 1 (LLEN a) of pipeline caused error: "
             )
 
         assert r["a"] == b"1"
diff --git a/tests/test_pubsub.py b/tests/test_pubsub.py
index 6456370..5d86934 100644
--- a/tests/test_pubsub.py
+++ b/tests/test_pubsub.py
@@ -1,4 +1,6 @@
 import platform
+import queue
+import socket
 import threading
 import time
 from unittest import mock
@@ -608,3 +610,170 @@ class TestPubSubDeadlock:
             p = r.pubsub()
             p.subscribe("my-channel-1", "my-channel-2")
             pool.reset()
+
+
+@pytest.mark.timeout(5, method="thread")
+@pytest.mark.parametrize("method", ["get_message", "listen"])
+@pytest.mark.onlynoncluster
+class TestPubSubAutoReconnect:
+    def mysetup(self, r, method):
+        self.messages = queue.Queue()
+        self.pubsub = r.pubsub()
+        self.state = 0
+        self.cond = threading.Condition()
+        if method == "get_message":
+            self.get_message = self.loop_step_get_message
+        else:
+            self.get_message = self.loop_step_listen
+
+        self.thread = threading.Thread(target=self.loop)
+        self.thread.daemon = True
+        self.thread.start()
+        # get the initial connect message
+        message = self.messages.get(timeout=1)
+        assert message == {
+            "channel": b"foo",
+            "data": 1,
+            "pattern": None,
+            "type": "subscribe",
+        }
+
+    def wait_for_reconnect(self):
+        self.cond.wait_for(lambda: self.pubsub.connection._sock is not None, timeout=2)
+        assert self.pubsub.connection._sock is not None  # we didn't time out
+        assert self.state == 3
+
+        message = self.messages.get(timeout=1)
+        assert message == {
+            "channel": b"foo",
+            "data": 1,
+            "pattern": None,
+            "type": "subscribe",
+        }
+
+    def mycleanup(self):
+        # kill thread
+        with self.cond:
+            self.state = 4  # quit
+            self.cond.notify()
+        self.thread.join()
+
+    def test_reconnect_socket_error(self, r: redis.Redis, method):
+        """
+        Test that a socket error will cause reconnect
+        """
+        self.mysetup(r, method)
+        try:
+            # now, disconnect the connection, and wait for it to be re-established
+            with self.cond:
+                self.state = 1
+                with mock.patch.object(self.pubsub.connection, "_parser") as mockobj:
+                    mockobj.read_response.side_effect = socket.error
+                    mockobj.can_read.side_effect = socket.error
+                    # wait until thread notices the disconnect until we undo the patch
+                    self.cond.wait_for(lambda: self.state >= 2)
+                    assert (
+                        self.pubsub.connection._sock is None
+                    )  # it is in a disconnected state
+                self.wait_for_reconnect()
+
+        finally:
+            self.mycleanup()
+
+    def test_reconnect_disconnect(self, r: redis.Redis, method):
+        """
+        Test that a manual disconnect() will cause reconnect
+        """
+        self.mysetup(r, method)
+        try:
+            # now, disconnect the connection, and wait for it to be re-established
+            with self.cond:
+                self.state = 1
+                self.pubsub.connection.disconnect()
+                assert self.pubsub.connection._sock is None
+                # wait for reconnect
+                self.wait_for_reconnect()
+        finally:
+            self.mycleanup()
+
+    def loop(self):
+        # reader loop, performing state transitions as it
+        # discovers disconnects and reconnects
+        self.pubsub.subscribe("foo")
+        while True:
+            time.sleep(0.01)  # give main thread chance to get lock
+            with self.cond:
+                old_state = self.state
+                try:
+                    if self.state == 4:
+                        break
+                    # print ('state, %s, sock %s' % (state, pubsub.connection._sock))
+                    got_msg = self.get_message()
+                    assert got_msg
+                    if self.state in (1, 2):
+                        self.state = 3  # successful reconnect
+                except redis.ConnectionError:
+                    assert self.state in (1, 2)
+                    self.state = 2
+                finally:
+                    self.cond.notify()
+                # assert that we noticed a connect error, or automatically
+                # reconnected without error
+                if old_state == 1:
+                    assert self.state in (2, 3)
+
+    def loop_step_get_message(self):
+        # get a single message via listen()
+        message = self.pubsub.get_message(timeout=0.1)
+        if message is not None:
+            self.messages.put(message)
+            return True
+        return False
+
+    def loop_step_listen(self):
+        # get a single message via listen()
+        for message in self.pubsub.listen():
+            self.messages.put(message)
+            return True
+
+
+@pytest.mark.onlynoncluster
+class TestBaseException:
+    def test_base_exception(self, r: redis.Redis):
+        """
+        Manually trigger a BaseException inside the parser's .read_response method
+        and verify that it isn't caught
+        """
+        pubsub = r.pubsub()
+        pubsub.subscribe("foo")
+
+        def is_connected():
+            return pubsub.connection._sock is not None
+
+        assert is_connected()
+
+        def get_msg():
+            # blocking method to return messages
+            while True:
+                response = pubsub.parse_response(block=True)
+                message = pubsub.handle_message(
+                    response, ignore_subscribe_messages=False
+                )
+                if message is not None:
+                    return message
+
+        # get subscribe message
+        msg = get_msg()
+        assert msg is not None
+        # timeout waiting for another message which never arrives
+        assert is_connected()
+        with patch("redis.connection.PythonParser.read_response") as mock1:
+            mock1.side_effect = BaseException("boom")
+            with patch("redis.connection.HiredisParser.read_response") as mock2:
+                mock2.side_effect = BaseException("boom")
+
+                with pytest.raises(BaseException):
+                    get_msg()
+
+        # the timeout on the read should not cause disconnect
+        assert is_connected()
diff --git a/tests/test_retry.py b/tests/test_retry.py
index f844fd0..3cfea5c 100644
--- a/tests/test_retry.py
+++ b/tests/test_retry.py
@@ -2,7 +2,7 @@ from unittest.mock import patch
 
 import pytest
 
-from redis.backoff import NoBackoff
+from redis.backoff import ExponentialBackoff, NoBackoff
 from redis.client import Redis
 from redis.connection import Connection, UnixDomainSocketConnection
 from redis.exceptions import (
@@ -203,3 +203,17 @@ class TestRedisClientRetry:
                     r.get("foo")
                 finally:
                     assert parse_response.call_count == retries + 1
+
+    def test_get_set_retry_object(self, request):
+        retry = Retry(NoBackoff(), 2)
+        r = _get_client(Redis, request, retry_on_timeout=True, retry=retry)
+        exist_conn = r.connection_pool.get_connection("_")
+        assert r.get_retry()._retries == retry._retries
+        assert isinstance(r.get_retry()._backoff, NoBackoff)
+        new_retry_policy = Retry(ExponentialBackoff(), 3)
+        r.set_retry(new_retry_policy)
+        assert r.get_retry()._retries == new_retry_policy._retries
+        assert isinstance(r.get_retry()._backoff, ExponentialBackoff)
+        assert exist_conn.retry._retries == new_retry_policy._retries
+        new_conn = r.connection_pool.get_connection("_")
+        assert new_conn.retry._retries == new_retry_policy._retries
diff --git a/tests/test_scripting.py b/tests/test_scripting.py
index bbe845c..b6b5f9f 100644
--- a/tests/test_scripting.py
+++ b/tests/test_scripting.py
@@ -67,7 +67,6 @@ class TestScripting:
 
     @skip_if_server_version_lt("7.0.0")
     @skip_if_redis_enterprise()
-    @pytest.mark.onlynoncluster
     def test_eval_ro(self, r):
         r.set("a", "b")
         assert r.eval_ro("return redis.call('GET', KEYS[1])", 1, "a") == b"b"
@@ -157,7 +156,6 @@ class TestScripting:
 
     @skip_if_server_version_lt("7.0.0")
     @skip_if_redis_enterprise()
-    @pytest.mark.onlynoncluster
     def test_evalsha_ro(self, r):
         r.set("a", "b")
         get_sha = r.script_load("return redis.call('GET', KEYS[1])")
diff --git a/tests/test_search.py b/tests/test_search.py
index f0a1190..57d4338 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -93,7 +93,7 @@ def createIndex(client, num_docs=100, definition=None):
     assert 50 == indexer.chunk_size
 
     for key, doc in chapters.items():
-        indexer.add_document(key, **doc)
+        indexer.client.client.hset(key, mapping=doc)
     indexer.commit()
 
 
@@ -196,7 +196,7 @@ def test_client(client):
     assert 167 == client.ft().search(Query("henry king").slop(100)).total
 
     # test delete document
-    client.ft().add_document("doc-5ghs2", play="Death of a Salesman")
+    client.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
     res = client.ft().search(Query("death of a salesman"))
     assert 1 == res.total
 
@@ -205,36 +205,19 @@ def test_client(client):
     assert 0 == res.total
     assert 0 == client.ft().delete_document("doc-5ghs2")
 
-    client.ft().add_document("doc-5ghs2", play="Death of a Salesman")
+    client.hset("doc-5ghs2", mapping={"play": "Death of a Salesman"})
     res = client.ft().search(Query("death of a salesman"))
     assert 1 == res.total
     client.ft().delete_document("doc-5ghs2")
 
 
-@pytest.mark.redismod
-@skip_ifmodversion_lt("2.2.0", "search")
-def test_payloads(client):
-    client.ft().create_index((TextField("txt"),))
-
-    client.ft().add_document("doc1", payload="foo baz", txt="foo bar")
-    client.ft().add_document("doc2", txt="foo bar")
-
-    q = Query("foo bar").with_payloads()
-    res = client.ft().search(q)
-    assert 2 == res.total
-    assert "doc1" == res.docs[0].id
-    assert "doc2" == res.docs[1].id
-    assert "foo baz" == res.docs[0].payload
-    assert res.docs[1].payload is None
-
-
 @pytest.mark.redismod
 @pytest.mark.onlynoncluster
 def test_scores(client):
     client.ft().create_index((TextField("txt"),))
 
-    client.ft().add_document("doc1", txt="foo baz")
-    client.ft().add_document("doc2", txt="foo bar")
+    client.hset("doc1", mapping={"txt": "foo baz"})
+    client.hset("doc2", mapping={"txt": "foo bar"})
 
     q = Query("foo ~bar").with_scores()
     res = client.ft().search(q)
@@ -246,32 +229,11 @@ def test_scores(client):
     # self.assertEqual(0.2, res.docs[1].score)
 
 
-@pytest.mark.redismod
-def test_replace(client):
-    client.ft().create_index((TextField("txt"),))
-
-    client.ft().add_document("doc1", txt="foo bar")
-    client.ft().add_document("doc2", txt="foo bar")
-    waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
-
-    res = client.ft().search("foo bar")
-    assert 2 == res.total
-    client.ft().add_document("doc1", replace=True, txt="this is a replaced doc")
-
-    res = client.ft().search("foo bar")
-    assert 1 == res.total
-    assert "doc2" == res.docs[0].id
-
-    res = client.ft().search("replaced doc")
-    assert 1 == res.total
-    assert "doc1" == res.docs[0].id
-
-
 @pytest.mark.redismod
 def test_stopwords(client):
     client.ft().create_index((TextField("txt"),), stopwords=["foo", "bar", "baz"])
-    client.ft().add_document("doc1", txt="foo bar")
-    client.ft().add_document("doc2", txt="hello world")
+    client.hset("doc1", mapping={"txt": "foo bar"})
+    client.hset("doc2", mapping={"txt": "hello world"})
     waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
 
     q1 = Query("foo bar").no_content()
@@ -284,8 +246,10 @@ def test_stopwords(client):
 @pytest.mark.redismod
 def test_filters(client):
     client.ft().create_index((TextField("txt"), NumericField("num"), GeoField("loc")))
-    client.ft().add_document("doc1", txt="foo bar", num=3.141, loc="-0.441,51.458")
-    client.ft().add_document("doc2", txt="foo baz", num=2, loc="-0.1,51.2")
+    client.hset(
+        "doc1", mapping={"txt": "foo bar", "num": 3.141, "loc": "-0.441,51.458"}
+    )
+    client.hset("doc2", mapping={"txt": "foo baz", "num": 2, "loc": "-0.1,51.2"})
 
     waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
     # Test numerical filter
@@ -317,23 +281,12 @@ def test_filters(client):
     assert ["doc1", "doc2"] == res
 
 
-@pytest.mark.redismod
-def test_payloads_with_no_content(client):
-    client.ft().create_index((TextField("txt"),))
-    client.ft().add_document("doc1", payload="foo baz", txt="foo bar")
-    client.ft().add_document("doc2", payload="foo baz2", txt="foo bar")
-
-    q = Query("foo bar").with_payloads().no_content()
-    res = client.ft().search(q)
-    assert 2 == len(res.docs)
-
-
 @pytest.mark.redismod
 def test_sort_by(client):
     client.ft().create_index((TextField("txt"), NumericField("num", sortable=True)))
-    client.ft().add_document("doc1", txt="foo bar", num=1)
-    client.ft().add_document("doc2", txt="foo baz", num=2)
-    client.ft().add_document("doc3", txt="foo qux", num=3)
+    client.hset("doc1", mapping={"txt": "foo bar", "num": 1})
+    client.hset("doc2", mapping={"txt": "foo baz", "num": 2})
+    client.hset("doc3", mapping={"txt": "foo qux", "num": 3})
 
     # Test sort
     q1 = Query("foo").sort_by("num", asc=True).no_content()
@@ -375,10 +328,12 @@ def test_example(client):
     client.ft().create_index((TextField("title", weight=5.0), TextField("body")))
 
     # Indexing a document
-    client.ft().add_document(
+    client.hset(
         "doc1",
-        title="RediSearch",
-        body="Redisearch impements a search engine on top of redis",
+        mapping={
+            "title": "RediSearch",
+            "body": "Redisearch impements a search engine on top of redis",
+        },
     )
 
     # Searching with complex parameters:
@@ -450,11 +405,13 @@ def test_no_index(client):
         )
     )
 
-    client.ft().add_document(
-        "doc1", field="aaa", text="1", numeric="1", geo="1,1", tag="1"
+    client.hset(
+        "doc1",
+        mapping={"field": "aaa", "text": "1", "numeric": "1", "geo": "1,1", "tag": "1"},
     )
-    client.ft().add_document(
-        "doc2", field="aab", text="2", numeric="2", geo="2,2", tag="2"
+    client.hset(
+        "doc2",
+        mapping={"field": "aab", "text": "2", "numeric": "2", "geo": "2,2", "tag": "2"},
     )
     waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
 
@@ -491,45 +448,6 @@ def test_no_index(client):
         TagField("name", no_index=True, sortable=False)
 
 
-@pytest.mark.redismod
-def test_partial(client):
-    client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
-    client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
-    client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
-    client.ft().add_document("doc1", f3="f3_val", partial=True)
-    client.ft().add_document("doc2", f3="f3_val", replace=True)
-    waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
-
-    # Search for f3 value. All documents should have it
-    res = client.ft().search("@f3:f3_val")
-    assert 2 == res.total
-
-    # Only the document updated with PARTIAL should still have f1 and f2 values
-    res = client.ft().search("@f3:f3_val @f2:f2_val @f1:f1_val")
-    assert 1 == res.total
-
-
-@pytest.mark.redismod
-def test_no_create(client):
-    client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
-    client.ft().add_document("doc1", f1="f1_val", f2="f2_val")
-    client.ft().add_document("doc2", f1="f1_val", f2="f2_val")
-    client.ft().add_document("doc1", f3="f3_val", no_create=True)
-    client.ft().add_document("doc2", f3="f3_val", no_create=True, partial=True)
-    waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
-
-    # Search for f3 value. All documents should have it
-    res = client.ft().search("@f3:f3_val")
-    assert 2 == res.total
-
-    # Only the document updated with PARTIAL should still have f1 and f2 values
-    res = client.ft().search("@f3:f3_val @f2:f2_val @f1:f1_val")
-    assert 1 == res.total
-
-    with pytest.raises(redis.ResponseError):
-        client.ft().add_document("doc3", f2="f2_val", f3="f3_val", no_create=True)
-
-
 @pytest.mark.redismod
 def test_explain(client):
     client.ft().create_index((TextField("f1"), TextField("f2"), TextField("f3")))
@@ -618,11 +536,11 @@ def test_alias_basic(client):
     index1 = getClient(client).ft("testAlias")
 
     index1.create_index((TextField("txt"),))
-    index1.add_document("doc1", txt="text goes here")
+    index1.client.hset("doc1", mapping={"txt": "text goes here"})
 
     index2 = getClient(client).ft("testAlias2")
     index2.create_index((TextField("txt"),))
-    index2.add_document("doc2", txt="text goes here")
+    index2.client.hset("doc2", mapping={"txt": "text goes here"})
 
     # add the actual alias and check
     index1.aliasadd("myalias")
@@ -646,36 +564,6 @@ def test_alias_basic(client):
         _ = alias_client2.search("*").docs[0]
 
 
-@pytest.mark.redismod
-def test_tags(client):
-    client.ft().create_index((TextField("txt"), TagField("tags")))
-    tags = "foo,foo bar,hello;world"
-    tags2 = "soba,ramen"
-
-    client.ft().add_document("doc1", txt="fooz barz", tags=tags)
-    client.ft().add_document("doc2", txt="noodles", tags=tags2)
-    waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
-
-    q = Query("@tags:{foo}")
-    res = client.ft().search(q)
-    assert 1 == res.total
-
-    q = Query("@tags:{foo bar}")
-    res = client.ft().search(q)
-    assert 1 == res.total
-
-    q = Query("@tags:{foo\\ bar}")
-    res = client.ft().search(q)
-    assert 1 == res.total
-
-    q = Query("@tags:{hello\\;world}")
-    res = client.ft().search(q)
-    assert 1 == res.total
-
-    q2 = client.ft().tagvals("tags")
-    assert (tags.split(",") + tags2.split(",")).sort() == q2.sort()
-
-
 @pytest.mark.redismod
 def test_textfield_sortable_nostem(client):
     # Creating the index definition with sortable and no_stem
@@ -696,8 +584,8 @@ def test_alter_schema_add(client):
     client.ft().alter_schema_add(TextField("body"))
 
     # Indexing a document
-    client.ft().add_document(
-        "doc1", title="MyTitle", body="Some content only in the body"
+    client.hset(
+        "doc1", mapping={"title": "MyTitle", "body": "Some content only in the body"}
     )
 
     # Searching with parameter only in the body (the added field)
@@ -712,8 +600,10 @@ def test_alter_schema_add(client):
 def test_spell_check(client):
     client.ft().create_index((TextField("f1"), TextField("f2")))
 
-    client.ft().add_document("doc1", f1="some valid content", f2="this is sample text")
-    client.ft().add_document("doc2", f1="very important", f2="lorem ipsum")
+    client.hset(
+        "doc1", mapping={"f1": "some valid content", "f2": "this is sample text"}
+    )
+    client.hset("doc2", mapping={"f1": "very important", "f2": "lorem ipsum"})
     waitForIndex(client, getattr(client.ft(), "index_name", "idx"))
 
     # test spellcheck
@@ -767,8 +657,8 @@ def test_dict_operations(client):
 @pytest.mark.redismod
 def test_phonetic_matcher(client):
     client.ft().create_index((TextField("name"),))
-    client.ft().add_document("doc1", name="Jon")
-    client.ft().add_document("doc2", name="John")
+    client.hset("doc1", mapping={"name": "Jon"})
+    client.hset("doc2", mapping={"name": "John"})
 
     res = client.ft().search(Query("Jon"))
     assert 1 == len(res.docs)
@@ -778,8 +668,8 @@ def test_phonetic_matcher(client):
     client.flushdb()
 
     client.ft().create_index((TextField("name", phonetic_matcher="dm:en"),))
-    client.ft().add_document("doc1", name="Jon")
-    client.ft().add_document("doc2", name="John")
+    client.hset("doc1", mapping={"name": "Jon"})
+    client.hset("doc2", mapping={"name": "John"})
 
     res = client.ft().search(Query("Jon"))
     assert 2 == len(res.docs)
@@ -791,12 +681,14 @@ def test_phonetic_matcher(client):
 def test_scorer(client):
     client.ft().create_index((TextField("description"),))
 
-    client.ft().add_document(
-        "doc1", description="The quick brown fox jumps over the lazy dog"
+    client.hset(
+        "doc1", mapping={"description": "The quick brown fox jumps over the lazy dog"}
     )
-    client.ft().add_document(
+    client.hset(
         "doc2",
-        description="Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.",  # noqa
+        mapping={
+            "description": "Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do."  # noqa
+        },
     )
 
     # default scorer is TFIDF
@@ -823,19 +715,19 @@ def test_get(client):
     assert [None] == client.ft().get("doc1")
     assert [None, None] == client.ft().get("doc2", "doc1")
 
-    client.ft().add_document(
-        "doc1", f1="some valid content dd1", f2="this is sample text ff1"
+    client.hset(
+        "doc1", mapping={"f1": "some valid content dd1", "f2": "this is sample text f1"}
     )
-    client.ft().add_document(
-        "doc2", f1="some valid content dd2", f2="this is sample text ff2"
+    client.hset(
+        "doc2", mapping={"f1": "some valid content dd2", "f2": "this is sample text f2"}
     )
 
     assert [
-        ["f1", "some valid content dd2", "f2", "this is sample text ff2"]
+        ["f1", "some valid content dd2", "f2", "this is sample text f2"]
     ] == client.ft().get("doc2")
     assert [
-        ["f1", "some valid content dd1", "f2", "this is sample text ff1"],
-        ["f1", "some valid content dd2", "f2", "this is sample text ff2"],
+        ["f1", "some valid content dd1", "f2", "this is sample text f1"],
+        ["f1", "some valid content dd2", "f2", "this is sample text f2"],
     ] == client.ft().get("doc1", "doc2")
 
 
@@ -866,26 +758,32 @@ def test_aggregations_groupby(client):
     )
 
     # Indexing a document
-    client.ft().add_document(
+    client.hset(
         "search",
-        title="RediSearch",
-        body="Redisearch impements a search engine on top of redis",
-        parent="redis",
-        random_num=10,
+        mapping={
+            "title": "RediSearch",
+            "body": "Redisearch impements a search engine on top of redis",
+            "parent": "redis",
+            "random_num": 10,
+        },
     )
-    client.ft().add_document(
+    client.hset(
         "ai",
-        title="RedisAI",
-        body="RedisAI executes Deep Learning/Machine Learning models and managing their data.",  # noqa
-        parent="redis",
-        random_num=3,
+        mapping={
+            "title": "RedisAI",
+            "body": "RedisAI executes Deep Learning/Machine Learning models and managing their data.",  # noqa
+            "parent": "redis",
+            "random_num": 3,
+        },
     )
-    client.ft().add_document(
+    client.hset(
         "json",
-        title="RedisJson",
-        body="RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.",  # noqa
-        parent="redis",
-        random_num=8,
+        mapping={
+            "title": "RedisJson",
+            "body": "RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.",  # noqa
+            "parent": "redis",
+            "random_num": 8,
+        },
     )
 
     req = aggregations.AggregateRequest("redis").group_by("@parent", reducers.count())
@@ -965,7 +863,7 @@ def test_aggregations_groupby(client):
 
     res = client.ft().aggregate(req).rows[0]
     assert res[1] == "redis"
-    assert res[3] == ["RediSearch", "RedisAI", "RedisJson"]
+    assert set(res[3]) == {"RediSearch", "RedisAI", "RedisJson"}
 
     req = aggregations.AggregateRequest("redis").group_by(
         "@parent", reducers.first_value("@title").alias("first")
@@ -1075,16 +973,26 @@ def test_aggregations_filter(client):
     client.ft().client.hset("doc1", mapping={"name": "bar", "age": "25"})
     client.ft().client.hset("doc2", mapping={"name": "foo", "age": "19"})
 
-    req = aggregations.AggregateRequest("*").filter("@name=='foo' && @age < 20")
-    res = client.ft().aggregate(req)
-    assert len(res.rows) == 1
-    assert res.rows[0] == ["name", "foo", "age", "19"]
-
-    req = aggregations.AggregateRequest("*").filter("@age > 15").sort_by("@age")
-    res = client.ft().aggregate(req)
-    assert len(res.rows) == 2
-    assert res.rows[0] == ["age", "19"]
-    assert res.rows[1] == ["age", "25"]
+    for dialect in [1, 2]:
+        req = (
+            aggregations.AggregateRequest("*")
+            .filter("@name=='foo' && @age < 20")
+            .dialect(dialect)
+        )
+        res = client.ft().aggregate(req)
+        assert len(res.rows) == 1
+        assert res.rows[0] == ["name", "foo", "age", "19"]
+
+        req = (
+            aggregations.AggregateRequest("*")
+            .filter("@age > 15")
+            .sort_by("@age")
+            .dialect(dialect)
+        )
+        res = client.ft().aggregate(req)
+        assert len(res.rows) == 2
+        assert res.rows[0] == ["age", "19"]
+        assert res.rows[1] == ["age", "25"]
 
 
 @pytest.mark.redismod
@@ -1134,7 +1042,7 @@ def test_index_definition(client):
 @pytest.mark.redismod
 @pytest.mark.onlynoncluster
 @skip_if_redis_enterprise()
-def testExpire(client):
+def test_expire(client):
     client.ft().create_index((TextField("txt", sortable=True),), temporary=4)
     ttl = client.execute_command("ft.debug", "TTL", "idx")
     assert ttl > 2
@@ -1143,20 +1051,9 @@ def testExpire(client):
         ttl = client.execute_command("ft.debug", "TTL", "idx")
         time.sleep(0.01)
 
-    # add document - should reset the ttl
-    client.ft().add_document("doc", txt="foo bar", text="this is a simple test")
-    ttl = client.execute_command("ft.debug", "TTL", "idx")
-    assert ttl > 2
-    try:
-        while True:
-            ttl = client.execute_command("ft.debug", "TTL", "idx")
-            time.sleep(0.5)
-    except redis.exceptions.ResponseError:
-        assert ttl == 0
-
 
 @pytest.mark.redismod
-def testSkipInitialScan(client):
+def test_skip_initial_scan(client):
     client.hset("doc1", "foo", "bar")
     q = Query("@foo:bar")
 
@@ -1165,23 +1062,23 @@ def testSkipInitialScan(client):
 
 
 @pytest.mark.redismod
-def testSummarizeDisabled_nooffset(client):
+def test_summarize_disabled_nooffset(client):
     client.ft().create_index((TextField("txt"),), no_term_offsets=True)
-    client.ft().add_document("doc1", txt="foo bar")
+    client.hset("doc1", mapping={"txt": "foo bar"})
     with pytest.raises(Exception):
         client.ft().search(Query("foo").summarize(fields=["txt"]))
 
 
 @pytest.mark.redismod
-def testSummarizeDisabled_nohl(client):
+def test_summarize_disabled_nohl(client):
     client.ft().create_index((TextField("txt"),), no_highlight=True)
-    client.ft().add_document("doc1", txt="foo bar")
+    client.hset("doc1", mapping={"txt": "foo bar"})
     with pytest.raises(Exception):
         client.ft().search(Query("foo").summarize(fields=["txt"]))
 
 
 @pytest.mark.redismod
-def testMaxTextFields(client):
+def test_max_text_fields(client):
     # Creating the index definition
     client.ft().create_index((TextField("f0"),))
     for x in range(1, 32):
@@ -1337,10 +1234,10 @@ def test_synupdate(client):
     )
 
     client.ft().synupdate("id1", True, "boy", "child", "offspring")
-    client.ft().add_document("doc1", title="he is a baby", body="this is a test")
+    client.hset("doc1", mapping={"title": "he is a baby", "body": "this is a test"})
 
     client.ft().synupdate("id1", True, "baby")
-    client.ft().add_document("doc2", title="he is another baby", body="another test")
+    client.hset("doc2", mapping={"title": "he is another baby", "body": "another test"})
 
     res = client.ft().search(Query("child").expander("SYNONYM"))
     assert res.docs[0].id == "doc2"
@@ -1448,9 +1345,9 @@ def test_json_with_jsonpath(client):
     assert res.docs[0].id == "doc:1"
     assert res.docs[0].json == '{"prod:name":"RediSearch"}'
 
-    # query for an unsupported field fails
+    # query for an unsupported field
     res = client.ft().search("@name_unsupported:RediSearch")
-    assert res.total == 0
+    assert res.total == 1
 
     # return of a supported field succeeds
     res = client.ft().search(Query("@name:RediSearch").return_field("name"))
@@ -1458,13 +1355,6 @@ def test_json_with_jsonpath(client):
     assert res.docs[0].id == "doc:1"
     assert res.docs[0].name == "RediSearch"
 
-    # return of an unsupported field fails
-    res = client.ft().search(Query("@name:RediSearch").return_field("name_unsupported"))
-    assert res.total == 1
-    assert res.docs[0].id == "doc:1"
-    with pytest.raises(Exception):
-        res.docs[0].name_unsupported
-
 
 @pytest.mark.redismod
 @pytest.mark.onlynoncluster
@@ -1586,9 +1476,9 @@ def test_text_params(modclient):
     modclient.flushdb()
     modclient.ft().create_index((TextField("name"),))
 
-    modclient.ft().add_document("doc1", name="Alice")
-    modclient.ft().add_document("doc2", name="Bob")
-    modclient.ft().add_document("doc3", name="Carol")
+    modclient.hset("doc1", mapping={"name": "Alice"})
+    modclient.hset("doc2", mapping={"name": "Bob"})
+    modclient.hset("doc3", mapping={"name": "Carol"})
 
     params_dict = {"name1": "Alice", "name2": "Bob"}
     q = Query("@name:($name1 | $name2 )").dialect(2)
@@ -1604,9 +1494,9 @@ def test_numeric_params(modclient):
     modclient.flushdb()
     modclient.ft().create_index((NumericField("numval"),))
 
-    modclient.ft().add_document("doc1", numval=101)
-    modclient.ft().add_document("doc2", numval=102)
-    modclient.ft().add_document("doc3", numval=103)
+    modclient.hset("doc1", mapping={"numval": 101})
+    modclient.hset("doc2", mapping={"numval": 102})
+    modclient.hset("doc3", mapping={"numval": 103})
 
     params_dict = {"min": 101, "max": 102}
     q = Query("@numval:[$min $max]").dialect(2)
@@ -1623,9 +1513,9 @@ def test_geo_params(modclient):
 
     modclient.flushdb()
     modclient.ft().create_index((GeoField("g")))
-    modclient.ft().add_document("doc1", g="29.69465, 34.95126")
-    modclient.ft().add_document("doc2", g="29.69350, 34.94737")
-    modclient.ft().add_document("doc3", g="29.68746, 34.94882")
+    modclient.hset("doc1", mapping={"g": "29.69465, 34.95126"})
+    modclient.hset("doc2", mapping={"g": "29.69350, 34.94737"})
+    modclient.hset("doc3", mapping={"g": "29.68746, 34.94882"})
 
     params_dict = {"lat": "34.95126", "lon": "29.69465", "radius": 1000, "units": "km"}
     q = Query("@g:[$lon $lat $radius $units]").dialect(2)
@@ -1641,16 +1531,15 @@ def test_geo_params(modclient):
 def test_search_commands_in_pipeline(client):
     p = client.ft().pipeline()
     p.create_index((TextField("txt"),))
-    p.add_document("doc1", payload="foo baz", txt="foo bar")
-    p.add_document("doc2", txt="foo bar")
+    p.hset("doc1", mapping={"txt": "foo bar"})
+    p.hset("doc2", mapping={"txt": "foo bar"})
     q = Query("foo bar").with_payloads()
     p.search(q)
     res = p.execute()
-    assert res[:3] == ["OK", "OK", "OK"]
+    assert res[:3] == ["OK", True, True]
     assert 2 == res[3][0]
     assert "doc1" == res[3][1]
     assert "doc2" == res[3][4]
-    assert "foo baz" == res[3][2]
     assert res[3][5] is None
     assert res[3][3] == res[3][6] == ["txt", "foo bar"]
 
@@ -1698,3 +1587,50 @@ def test_dialect(modclient: redis.Redis):
     with pytest.raises(redis.ResponseError) as err:
         modclient.ft().explain(Query("@title:(@num:[0 10])").dialect(2))
     assert "Syntax error" in str(err)
+
+
+@pytest.mark.redismod
+def test_expire_while_search(modclient: redis.Redis):
+    modclient.ft().create_index((TextField("txt"),))
+    modclient.hset("hset:1", "txt", "a")
+    modclient.hset("hset:2", "txt", "b")
+    modclient.hset("hset:3", "txt", "c")
+    assert 3 == modclient.ft().search(Query("*")).total
+    modclient.pexpire("hset:2", 300)
+    for _ in range(500):
+        modclient.ft().search(Query("*")).docs[1]
+    time.sleep(1)
+    assert 2 == modclient.ft().search(Query("*")).total
+
+
+@pytest.mark.redismod
+@pytest.mark.experimental
+def test_withsuffixtrie(modclient: redis.Redis):
+    # create index
+    assert modclient.ft().create_index((TextField("txt"),))
+    waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
+    info = modclient.ft().info()
+    assert "WITHSUFFIXTRIE" not in info["attributes"][0]
+    assert modclient.ft().dropindex("idx")
+
+    # create withsuffixtrie index (text fiels)
+    assert modclient.ft().create_index((TextField("t", withsuffixtrie=True)))
+    waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
+    info = modclient.ft().info()
+    assert "WITHSUFFIXTRIE" in info["attributes"][0]
+    assert modclient.ft().dropindex("idx")
+
+    # create withsuffixtrie index (tag field)
+    assert modclient.ft().create_index((TagField("t", withsuffixtrie=True)))
+    waitForIndex(modclient, getattr(modclient.ft(), "index_name", "idx"))
+    info = modclient.ft().info()
+    assert "WITHSUFFIXTRIE" in info["attributes"][0]
+
+
+@pytest.mark.redismod
+def test_query_timeout(modclient: redis.Redis):
+    q1 = Query("foo").timeout(5000)
+    assert q1.get_args() == ["foo", "TIMEOUT", 5000, "LIMIT", 0, 10]
+    q2 = Query("foo").timeout("not_a_number")
+    with pytest.raises(redis.ResponseError):
+        modclient.ft().search(q2)
diff --git a/tests/test_ssl.py b/tests/test_ssl.py
index d029b80..ed38a31 100644
--- a/tests/test_ssl.py
+++ b/tests/test_ssl.py
@@ -68,8 +68,8 @@ class TestSSL:
         assert r.ping()
 
     def test_validating_self_signed_string_certificate(self, request):
-        f = open(self.SERVER_CERT)
-        cert_data = f.read()
+        with open(self.SERVER_CERT) as f:
+            cert_data = f.read()
         ssl_url = request.config.option.redis_ssl_url
         p = urlparse(ssl_url)[1].split(":")
         r = redis.Redis(
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
index 7d42147..6ced535 100644
--- a/tests/test_timeseries.py
+++ b/tests/test_timeseries.py
@@ -1,8 +1,11 @@
+import math
 import time
 from time import sleep
 
 import pytest
 
+import redis
+
 from .conftest import skip_ifmodversion_lt
 
 
@@ -230,6 +233,84 @@ def test_range_advanced(client):
     assert [(0, 5.0), (5, 6.0)] == client.ts().range(
         1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=5
     )
+    assert [(0, 2.55), (10, 3.0)] == client.ts().range(
+        1, 0, 10, aggregation_type="twa", bucket_size_msec=10
+    )
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_range_latest(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.create("t2")
+    timeseries.createrule("t1", "t2", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.add("t1", 1, 1)
+    timeseries.add("t1", 2, 3)
+    timeseries.add("t1", 11, 7)
+    timeseries.add("t1", 13, 1)
+    res = timeseries.range("t1", 0, 20)
+    assert res == [(1, 1.0), (2, 3.0), (11, 7.0), (13, 1.0)]
+    res = timeseries.range("t2", 0, 10)
+    assert res == [(0, 4.0)]
+    res = timeseries.range("t2", 0, 10, latest=True)
+    assert res == [(0, 4.0), (10, 8.0)]
+    res = timeseries.range("t2", 0, 9, latest=True)
+    assert res == [(0, 4.0)]
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_range_bucket_timestamp(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.add("t1", 15, 1)
+    timeseries.add("t1", 17, 4)
+    timeseries.add("t1", 51, 3)
+    timeseries.add("t1", 73, 5)
+    timeseries.add("t1", 75, 3)
+    assert [(10, 4.0), (50, 3.0), (70, 5.0)] == timeseries.range(
+        "t1", 0, 100, align=0, aggregation_type="max", bucket_size_msec=10
+    )
+    assert [(20, 4.0), (60, 3.0), (80, 5.0)] == timeseries.range(
+        "t1",
+        0,
+        100,
+        align=0,
+        aggregation_type="max",
+        bucket_size_msec=10,
+        bucket_timestamp="+",
+    )
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_range_empty(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.add("t1", 15, 1)
+    timeseries.add("t1", 17, 4)
+    timeseries.add("t1", 51, 3)
+    timeseries.add("t1", 73, 5)
+    timeseries.add("t1", 75, 3)
+    assert [(10, 4.0), (50, 3.0), (70, 5.0)] == timeseries.range(
+        "t1", 0, 100, align=0, aggregation_type="max", bucket_size_msec=10
+    )
+    res = timeseries.range(
+        "t1", 0, 100, align=0, aggregation_type="max", bucket_size_msec=10, empty=True
+    )
+    for i in range(len(res)):
+        if math.isnan(res[i][1]):
+            res[i] = (res[i][0], None)
+    assert [
+        (10, 4.0),
+        (20, None),
+        (30, None),
+        (40, None),
+        (50, 3.0),
+        (60, None),
+        (70, 5.0),
+    ] == res
 
 
 @pytest.mark.redismod
@@ -262,11 +343,87 @@ def test_rev_range(client):
     assert [(1, 10.0), (0, 1.0)] == client.ts().revrange(
         1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=1
     )
+    assert [(10, 3.0), (0, 2.55)] == client.ts().revrange(
+        1, 0, 10, aggregation_type="twa", bucket_size_msec=10
+    )
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_revrange_latest(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.create("t2")
+    timeseries.createrule("t1", "t2", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.add("t1", 1, 1)
+    timeseries.add("t1", 2, 3)
+    timeseries.add("t1", 11, 7)
+    timeseries.add("t1", 13, 1)
+    res = timeseries.revrange("t2", 0, 10)
+    assert res == [(0, 4.0)]
+    res = timeseries.revrange("t2", 0, 10, latest=True)
+    assert res == [(10, 8.0), (0, 4.0)]
+    res = timeseries.revrange("t2", 0, 9, latest=True)
+    assert res == [(0, 4.0)]
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_revrange_bucket_timestamp(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.add("t1", 15, 1)
+    timeseries.add("t1", 17, 4)
+    timeseries.add("t1", 51, 3)
+    timeseries.add("t1", 73, 5)
+    timeseries.add("t1", 75, 3)
+    assert [(70, 5.0), (50, 3.0), (10, 4.0)] == timeseries.revrange(
+        "t1", 0, 100, align=0, aggregation_type="max", bucket_size_msec=10
+    )
+    assert [(20, 4.0), (60, 3.0), (80, 5.0)] == timeseries.range(
+        "t1",
+        0,
+        100,
+        align=0,
+        aggregation_type="max",
+        bucket_size_msec=10,
+        bucket_timestamp="+",
+    )
+
+
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_revrange_empty(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.add("t1", 15, 1)
+    timeseries.add("t1", 17, 4)
+    timeseries.add("t1", 51, 3)
+    timeseries.add("t1", 73, 5)
+    timeseries.add("t1", 75, 3)
+    assert [(70, 5.0), (50, 3.0), (10, 4.0)] == timeseries.revrange(
+        "t1", 0, 100, align=0, aggregation_type="max", bucket_size_msec=10
+    )
+    res = timeseries.revrange(
+        "t1", 0, 100, align=0, aggregation_type="max", bucket_size_msec=10, empty=True
+    )
+    for i in range(len(res)):
+        if math.isnan(res[i][1]):
+            res[i] = (res[i][0], None)
+    assert [
+        (70, 5.0),
+        (60, None),
+        (50, 3.0),
+        (40, None),
+        (30, None),
+        (20, None),
+        (10, 4.0),
+    ] == res
 
 
 @pytest.mark.redismod
 @pytest.mark.onlynoncluster
-def testMultiRange(client):
+def test_mrange(client):
     client.ts().create(1, labels={"Test": "This", "team": "ny"})
     client.ts().create(2, labels={"Test": "This", "Taste": "That", "team": "sf"})
     for i in range(100):
@@ -351,6 +508,31 @@ def test_multi_range_advanced(client):
     assert [(0, 5.0), (5, 6.0)] == res[0]["1"][1]
 
 
+@pytest.mark.redismod
+@pytest.mark.onlynoncluster
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_mrange_latest(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.create("t2", labels={"is_compaction": "true"})
+    timeseries.create("t3")
+    timeseries.create("t4", labels={"is_compaction": "true"})
+    timeseries.createrule("t1", "t2", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.createrule("t3", "t4", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.add("t1", 1, 1)
+    timeseries.add("t1", 2, 3)
+    timeseries.add("t1", 11, 7)
+    timeseries.add("t1", 13, 1)
+    timeseries.add("t3", 1, 1)
+    timeseries.add("t3", 2, 3)
+    timeseries.add("t3", 11, 7)
+    timeseries.add("t3", 13, 1)
+    assert client.ts().mrange(0, 10, filters=["is_compaction=true"], latest=True) == [
+        {"t2": [{}, [(0, 4.0), (10, 8.0)]]},
+        {"t4": [{}, [(0, 4.0), (10, 8.0)]]},
+    ]
+
+
 @pytest.mark.redismod
 @pytest.mark.onlynoncluster
 @skip_ifmodversion_lt("99.99.99", "timeseries")
@@ -434,6 +616,30 @@ def test_multi_reverse_range(client):
     assert [(1, 10.0), (0, 1.0)] == res[0]["1"][1]
 
 
+@pytest.mark.redismod
+@pytest.mark.onlynoncluster
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_mrevrange_latest(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.create("t2", labels={"is_compaction": "true"})
+    timeseries.create("t3")
+    timeseries.create("t4", labels={"is_compaction": "true"})
+    timeseries.createrule("t1", "t2", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.createrule("t3", "t4", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.add("t1", 1, 1)
+    timeseries.add("t1", 2, 3)
+    timeseries.add("t1", 11, 7)
+    timeseries.add("t1", 13, 1)
+    timeseries.add("t3", 1, 1)
+    timeseries.add("t3", 2, 3)
+    timeseries.add("t3", 11, 7)
+    timeseries.add("t3", 13, 1)
+    assert client.ts().mrevrange(
+        0, 10, filters=["is_compaction=true"], latest=True
+    ) == [{"t2": [{}, [(10, 8.0), (0, 4.0)]]}, {"t4": [{}, [(10, 8.0), (0, 4.0)]]}]
+
+
 @pytest.mark.redismod
 def test_get(client):
     name = "test"
@@ -445,6 +651,21 @@ def test_get(client):
     assert 4 == client.ts().get(name)[1]
 
 
+@pytest.mark.redismod
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_get_latest(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.create("t2")
+    timeseries.createrule("t1", "t2", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.add("t1", 1, 1)
+    timeseries.add("t1", 2, 3)
+    timeseries.add("t1", 11, 7)
+    timeseries.add("t1", 13, 1)
+    assert (0, 4.0) == timeseries.get("t2")
+    assert (10, 8.0) == timeseries.get("t2", latest=True)
+
+
 @pytest.mark.redismod
 @pytest.mark.onlynoncluster
 def test_mget(client):
@@ -467,6 +688,24 @@ def test_mget(client):
     assert {"Taste": "That", "Test": "This"} == res[0]["2"][0]
 
 
+@pytest.mark.redismod
+@pytest.mark.onlynoncluster
+@skip_ifmodversion_lt("1.8.0", "timeseries")
+def test_mget_latest(client: redis.Redis):
+    timeseries = client.ts()
+    timeseries.create("t1")
+    timeseries.create("t2", labels={"is_compaction": "true"})
+    timeseries.createrule("t1", "t2", aggregation_type="sum", bucket_size_msec=10)
+    timeseries.add("t1", 1, 1)
+    timeseries.add("t1", 2, 3)
+    timeseries.add("t1", 11, 7)
+    timeseries.add("t1", 13, 1)
+    assert timeseries.mget(filters=["is_compaction=true"]) == [{"t2": [{}, 0, 4.0]}]
+    assert [{"t2": [{}, 10, 8.0]}] == timeseries.mget(
+        filters=["is_compaction=true"], latest=True
+    )
+
+
 @pytest.mark.redismod
 def test_info(client):
     client.ts().create(1, retention_msecs=5, labels={"currentLabel": "currentData"})
@@ -506,7 +745,7 @@ def test_pipeline(client):
     pipeline.execute()
 
     info = client.ts().info("with_pipeline")
-    assert info.lastTimeStamp == 99
+    assert info.last_timestamp == 99
     assert info.total_samples == 100
     assert client.ts().get("with_pipeline")[1] == 99 * 1.1
 
diff --git a/tox.ini b/tox.ini
index 0ceb008..553c77b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,15 +9,16 @@ markers =
     asyncio: marker for async tests
     replica: replica tests
     experimental: run only experimental tests
+asyncio_mode = auto
 
 [tox]
 minversion = 3.2.0
 requires = tox-docker
-envlist = {standalone,cluster}-{plain,hiredis,ocsp}-{uvloop,asyncio}-{py36,py37,py38,py39,pypy3},linters,docs
+envlist = {standalone,cluster}-{plain,hiredis,ocsp}-{uvloop,asyncio}-{py37,py38,py39,pypy3},linters,docs
 
 [docker:master]
 name = master
-image = redisfab/redis-py:6.2.6-buster
+image = redisfab/redis-py:6.2.6
 ports =
     6379:6379/tcp
 healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6379)) else False"
@@ -26,7 +27,7 @@ volumes =
 
 [docker:replica]
 name = replica
-image = redisfab/redis-py:6.2.6-buster
+image = redisfab/redis-py:6.2.6
 links =
     master:master
 ports =
@@ -37,7 +38,7 @@ volumes =
 
 [docker:unstable]
 name = unstable
-image = redisfab/redis-py:unstable-bionic
+image = redisfab/redis-py:unstable
 ports =
     6378:6378/tcp
 healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',6378)) else False"
@@ -46,7 +47,7 @@ volumes =
 
 [docker:unstable_cluster]
 name = unstable_cluster
-image = redisfab/redis-py-cluster:unstable-bionic
+image = redisfab/redis-py-cluster:unstable
 ports =
     6372:6372/tcp
     6373:6373/tcp
@@ -60,7 +61,7 @@ volumes =
 
 [docker:sentinel_1]
 name = sentinel_1
-image = redisfab/redis-py-sentinel:6.2.6-buster
+image = redisfab/redis-py-sentinel:6.2.6
 links =
     master:master
 ports =
@@ -71,7 +72,7 @@ volumes =
 
 [docker:sentinel_2]
 name = sentinel_2
-image = redisfab/redis-py-sentinel:6.2.6-buster
+image = redisfab/redis-py-sentinel:6.2.6
 links =
     master:master
 ports =
@@ -82,7 +83,7 @@ volumes =
 
 [docker:sentinel_3]
 name = sentinel_3
-image = redisfab/redis-py-sentinel:6.2.6-buster
+image = redisfab/redis-py-sentinel:6.2.6
 links =
     master:master
 ports =
@@ -91,16 +92,16 @@ healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(sock
 volumes =
     bind:rw:{toxinidir}/docker/redis6.2/sentinel/sentinel_3.conf:/sentinel.conf
 
-[docker:redismod]
-name = redismod
-image = redislabs/redismod:edge
+[docker:redis_stack]
+name = redis_stack
+image = redis/redis-stack-server:edge
 ports =
     36379:6379/tcp
 healtcheck_cmd = python -c "import socket;print(True) if 0 == socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(('127.0.0.1',36379)) else False"
 
 [docker:redis_cluster]
 name = redis_cluster
-image = redisfab/redis-py-cluster:6.2.6-buster
+image = redisfab/redis-py-cluster:6.2.6
 ports =
     16379:16379/tcp
     16380:16380/tcp
@@ -114,7 +115,7 @@ volumes =
 
 [docker:redismod_cluster]
 name = redismod_cluster
-image = redisfab/redis-py-modcluster:6.2.6
+image = redisfab/redis-py-modcluster:edge
 ports =
     46379:46379/tcp
     46380:46380/tcp
@@ -278,8 +279,7 @@ docker =
     sentinel_2
     sentinel_3
     redis_cluster
-    redismod
-    redismod_cluster
+    redis_stack
     stunnel
 extras =
     hiredis: hiredis
@@ -288,10 +288,10 @@ setenv =
     CLUSTER_URL = "redis://localhost:16379/0"
     UNSTABLE_CLUSTER_URL = "redis://localhost:6372/0"
 commands =
-    standalone: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' {posargs}
-    standalone-uvloop: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --uvloop {posargs}
-    cluster: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} --redis-unstable-url={env:UNSTABLE_CLUSTER_URL:} {posargs}
-    cluster-uvloop: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} --redis-unstable-url={env:UNSTABLE_CLUSTER_URL:} --uvloop {posargs}
+    standalone: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --junit-xml=standalone-results.xml {posargs}
+    standalone-uvloop: pytest --cov=./ --cov-report=xml:coverage_redis.xml -W always -m 'not onlycluster' --junit-xml=standalone-uvloop-results.xml --uvloop {posargs}
+    cluster: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} --redis-unstable-url={env:UNSTABLE_CLUSTER_URL:} --junit-xml=cluster-results.xml {posargs}
+    cluster-uvloop: pytest --cov=./ --cov-report=xml:coverage_cluster.xml -W always -m 'not onlynoncluster and not redismod' --redis-url={env:CLUSTER_URL:} --redis-unstable-url={env:UNSTABLE_CLUSTER_URL:} --junit-xml=cluster-uvloop-results.xml --uvloop {posargs}
 
 [testenv:redis5]
 deps =
@@ -343,7 +343,7 @@ deps_files = dev_requirements.txt
 docker =
 commands =
     flake8
-    black --target-version py36 --check --diff .
+    black --target-version py37 --check --diff .
     isort --check-only --diff .
     vulture redis whitelist.py --min-confidence 80
     flynt --fail-on-change --dry-run .
