diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..9d5f997
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+github: [mna]
+custom: ["https://www.buymeacoffee.com/mna"]
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..cda91e0
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,47 @@
+name: test
+on: [push, pull_request]
+
+env:
+  GOPROXY: https://proxy.golang.org,direct
+
+jobs:
+  test:
+    strategy:
+      matrix:
+        go-version: [1.15.x, 1.16.x, 1.17.x]
+        redis-version: [5.x, 6.x]
+        os: [ubuntu-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
+
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ matrix.go-version }}
+
+      - name: Install Redis
+        uses: shogo82148/actions-setup-redis@v1
+        with:
+          redis-version: ${{ matrix.redis-version }}
+          auto-start: "false"
+
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Test
+        run: go test ./... -v -cover
+
+      - name: Data Race
+        run: go test ./... -race
+
+  golangci:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: latest
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..3cceefd
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,57 @@
+linters:
+  disable-all: true
+  enable:
+    - deadcode
+    - errcheck
+    - gci
+    - gochecknoinits
+    - gofmt
+    - gosec
+    - gosimple
+    - govet
+    - importas
+    - ineffassign
+    - misspell
+    - nakedret
+    - prealloc
+    - revive
+    - staticcheck
+    - structcheck
+    - typecheck
+    - unconvert
+    - unparam
+    - unused
+    - varcheck
+
+linters-settings:
+  revive:
+    ignoreGeneratedHeader: false
+    severity: "warning"
+    confidence: 0.8
+    errorCode: 0
+    warningCode: 0
+
+    rules:
+      - name: blank-imports
+      - name: context-as-argument
+      - name: context-keys-type
+      - name: dot-imports
+      - name: error-return
+      - name: error-strings
+      - name: error-naming
+      - name: exported
+      - name: increment-decrement
+      - name: var-naming
+      - name: var-declaration
+      - name: package-comments
+      - name: range
+      - name: receiver-naming
+      - name: time-naming
+      - name: unexported-return
+      - name: indent-error-flow
+      - name: errorf
+      - name: empty-block
+      - name: superfluous-else
+      - name: unused-parameter
+      - name: unreachable-code
+      - name: redefines-builtin-id
diff --git a/README.md b/README.md
index 8842133..2329693 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,9 @@
-# redisc [![GoDoc](https://godoc.org/github.com/mna/redisc?status.png)][godoc] [![Build Status](https://semaphoreci.com/api/v1/mna/redisc/branches/master/badge.svg)](https://semaphoreci.com/mna/redisc)
+[![Go Reference](https://pkg.go.dev/badge/github.com/mna/redisc.svg)](https://pkg.go.dev/github.com/mna/redisc)
+[![Build Status](https://github.com/mna/redisc/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/mna/redisc/actions)
 
-Package redisc implements a redis cluster client built on top of the [redigo package][redigo]. See the [godoc][] for details.
+# redisc
+
+Package redisc implements a redis cluster client built on top of the [redigo package][redigo]. See the [documentation][godoc] for details.
 
 ## Installation
 
@@ -8,6 +11,14 @@ Package redisc implements a redis cluster client built on top of the [redigo pac
 
 ## Releases
 
+* **v1.3.2** : Export the `HashSlots` constant to make it nicer to write the `Cluster.LayoutRefresh` function signature.
+
+* **v1.3.1** : Fix closing/releasing of connections used in `Cluster.EachNode`.
+
+* **v1.3.0** : Add `Cluster.EachNode` to call a function with a connection for each known node in the cluster (e.g. to run diagnostics commands on each node or to collect all keys in a cluster); add the optional Cluster function field `BgError` to receive notifications of errors that happen in background topology refreshes and when a `RetryConn` closes its previous connection after following a redirection; add the optional Cluster function field `LayoutRefresh` to receive the old and new mappings of cluster slots to server address(es); prevent unnecessary cluster layout refreshes when the internal mapping is already the same as the one in the redirection error; better handling of a closed Cluster; move CI to GitHub Actions; drop support for old Go versions (currently tested on 1.15+); enable more static analysis/linters; refactor tests to create fewer separate clusters and run faster.
+
+* **v1.2.0** : Use Go modules, fix a failing test due to a changed error message on Redis 6.
+
 * **v1.1.7** : Do not bind to a random node if `Do` is called without a command and the connection is not already bound (thanks to [@tysonmote][tysonmote]).
 
 * **v1.1.6** : Append the actual error messages when a refresh returns "all nodes failed" error.
@@ -28,15 +39,15 @@ Package redisc implements a redis cluster client built on top of the [redigo pac
 
 ## Documentation
 
-The [godoc][] is the canonical source for documentation.
+The [code documentation][godoc] is the canonical source for documentation.
 
-The design goal of redisc is to be as compatible as possible with the [redigo][] package. As such, the `Cluster` type can be used as a drop-in replacement to a `redis.Pool`, and the connections returned by the cluster implement the `redis.Conn` interface. The package offers additional features specific to dealing with a cluster that may be needed for more advanced scenarios.
+The design goal of redisc is to be as compatible as possible with the [redigo][] package. As such, the `Cluster` type can be used as a drop-in replacement for a `redis.Pool` when moving from a standalone Redis to a Redis Cluster setup, and the connections returned by the cluster implement redigo's `redis.Conn` interface. The package offers additional features specific to dealing with a cluster that may be needed for more advanced scenarios.
 
 The main features are:
 
 * Drop-in replacement for `redis.Pool` (the `Cluster` type implements the same `Get` and `Close` method signatures).
 * Connections are `redis.Conn` interfaces and use the `redigo` package to execute commands, `redisc` only handles the cluster part.
-* Support for all cluster-supported commands including scripting, transactions and pub-sub.
+* Support for all cluster-supported commands including scripting, transactions and pub-sub (within the limitations imposed by Redis Cluster).
 * Support for READONLY/READWRITE commands to allow reading data from replicas.
 * Client-side smart routing, automatically keeps track of which node holds which key slots.
 * Automatic retry of MOVED, ASK and TRYAGAIN errors when desired, via `RetryConn`.
@@ -45,6 +56,8 @@ The main features are:
 * Explicit selection of the node to call via `BindConn` when needed.
 * Support for optimal batch calls via `SplitBySlot`.
 
+Note that to make efficient use of Redis Cluster, some upfront work is usually required. A good understanding of Redis Cluster is highly recommended, and the official Redis website has [good documentation that covers this](https://redis.io/topics/cluster-spec). In particular, [Migrating to Redis Cluster](https://redis.io/topics/cluster-tutorial#migrating-to-redis-cluster) will help you understand how straightforward (or not) the migration may be for your specific case.
+
 ## Alternatives
 
 * [redis-go-cluster][rgc].
@@ -61,17 +74,15 @@ There are a number of ways you can support the project:
 * Pull requests: please discuss new code in an issue first, unless the fix is really trivial.
   - Make sure new code is tested.
   - Be mindful of existing code - PRs that break existing code have a high probability of being declined, unless it fixes a serious issue.
-
-If you desperately want to send money my way, I have a BuyMeACoffee.com page:
-
-<a href="https://www.buymeacoffee.com/mna" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
+* Sponsor the developer
+  - See the GitHub Sponsors button at the top of the repository on GitHub.
 
 ## License
 
 The [BSD 3-Clause license][bsd].
 
 [bsd]: http://opensource.org/licenses/BSD-3-Clause
-[godoc]: http://godoc.org/github.com/mna/redisc
+[godoc]: https://pkg.go.dev/github.com/mna/redisc
 [redigo]: https://github.com/gomodule/redigo
 [oldredigo]: https://github.com/garyburd/redigo
 [rgc]: https://github.com/chasex/redis-go-cluster
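
The README changes above describe the `Cluster` type as a drop-in replacement for `redis.Pool`, with `RetryConn` handling MOVED/ASK/TRYAGAIN redirections. A minimal usage sketch based on the API in this diff follows; the node addresses, pool settings and retry parameters are illustrative only, not part of the change:

```go
package main

import (
	"log"
	"time"

	"github.com/gomodule/redigo/redis"
	"github.com/mna/redisc"
)

func main() {
	cluster := &redisc.Cluster{
		// illustrative addresses of a few cluster nodes
		StartupNodes: []string{"127.0.0.1:7000", "127.0.0.1:7001", "127.0.0.1:7002"},
		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
		CreatePool: func(addr string, opts ...redis.DialOption) (*redis.Pool, error) {
			return &redis.Pool{
				MaxIdle:     10,
				IdleTimeout: time.Minute,
				Dial: func() (redis.Conn, error) {
					return redis.Dial("tcp", addr, opts...)
				},
			}, nil
		},
	}
	defer cluster.Close()

	// populate the initial slot mapping before issuing commands
	if err := cluster.Refresh(); err != nil {
		log.Fatal(err)
	}

	conn := cluster.Get()
	defer conn.Close()

	// optionally wrap the connection so MOVED/ASK/TRYAGAIN errors are retried
	retryConn, err := redisc.RetryConn(conn, 3, 100*time.Millisecond)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := retryConn.Do("SET", "some-key", "some-value"); err != nil {
		log.Fatal(err)
	}
}
```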
diff --git a/ccheck/ccheck.go b/ccheck/ccheck.go
index c1cd121..168a9fa 100644
--- a/ccheck/ccheck.go
+++ b/ccheck/ccheck.go
@@ -175,10 +175,11 @@ func printStats() {
 
 func genKey() string {
 	ks := workingSet
+	//nolint:gosec
 	if rand.Float64() > 0.5 {
 		ks = keySpace
 	}
-	return "key_" + strconv.Itoa(rand.Intn(ks))
+	return "key_" + strconv.Itoa(rand.Intn(ks)) //nolint:gosec
 }
 
 func createPool(addr string, opts ...redis.DialOption) (*redis.Pool, error) {
diff --git a/cluster.go b/cluster.go
index 5b57875..a046ee2 100644
--- a/cluster.go
+++ b/cluster.go
@@ -1,6 +1,7 @@
 package redisc
 
 import (
+	"context"
 	"errors"
 	"math/rand"
 	"strconv"
@@ -11,52 +12,82 @@ import (
 	"github.com/gomodule/redigo/redis"
 )
 
-const hashSlots = 16384
+// HashSlots is the number of slots supported by redis cluster.
+const HashSlots = 16384
+
+// BgErrorSrc identifies the origin of a background error as reported by calls
+// to Cluster.BgError, when set.
+type BgErrorSrc uint
+
+// List of possible BgErrorSrc values.
+const (
+	// ClusterRefresh indicates the error comes from a background refresh of
+	// cluster slots mapping, e.g. following reception of a MOVED error.
+	ClusterRefresh BgErrorSrc = iota
+	// RetryCloseConn indicates the error comes from the call to Close for a
+	// previous connection, before retrying a command with a new one.
+	RetryCloseConn
+)
 
-// Cluster manages a redis cluster. If the CreatePool field is not nil,
-// a redis.Pool is used for each node in the cluster to get connections
-// via Get. If it is nil or if Dial is called, redis.Dial
-// is used to get the connection.
+// A Cluster manages a redis cluster. If the CreatePool field is not nil, a
+// redis.Pool is used for each node in the cluster to get connections via Get.
+// If it is nil or if Dial is called, redis.Dial is used to get the connection.
+//
+// All fields must be set prior to using the Cluster value, and must not be
+// changed afterwards, as that could be a data race.
 type Cluster struct {
-	// StartupNodes is the list of initial nodes that make up
-	// the cluster. The values are expected as "address:port"
-	// (e.g.: "127.0.0.1:6379").
+	// StartupNodes is the list of initial nodes that make up the cluster. The
+	// values are expected as "address:port" (e.g.: "127.0.0.1:6379").
 	StartupNodes []string
 
 	// DialOptions is the list of options to set on each new connection.
 	DialOptions []redis.DialOption
 
-	// CreatePool is the function to call to create a redis.Pool for
-	// the specified TCP address, using the provided options
-	// as set in DialOptions. If this field is not nil, a
-	// redis.Pool is created for each node in the cluster and the
-	// pool is used to manage the connections returned by Get.
+	// CreatePool is the function to call to create a redis.Pool for the
+	// specified TCP address, using the provided options as set in DialOptions.
+	// If this field is not nil, a redis.Pool is created for each node in the
+	// cluster and the pool is used to manage the connections returned by Get.
 	CreatePool func(address string, options ...redis.DialOption) (*redis.Pool, error)
 
-	// PoolWaitTime is the time to wait when getting a connection from
-	// a pool configured with MaxActive > 0 and Wait set to true, and
-	// MaxActive connections are already in use.
+	// PoolWaitTime is the time to wait when getting a connection from a pool
+	// configured with MaxActive > 0 and Wait set to true, and MaxActive
+	// connections are already in use.
 	//
 	// If <= 0 (or with Go < 1.7), there is no wait timeout, it will wait
 	// indefinitely if Pool.Wait is true.
 	PoolWaitTime time.Duration
 
+	// BgError is an optional function to call when a background error occurs
+	// that would otherwise go unnoticed. The source of the error is indicated
+	// by the parameter of type BgErrorSrc; see the list of BgErrorSrc values
+	// for possible error sources. The function may be called in a distinct
+	// goroutine, so it should not access shared values that are not meant to
+	// be used concurrently.
+	BgError func(BgErrorSrc, error)
+
+	// LayoutRefresh is an optional function that is called each time a cluster
+	// refresh is successfully executed, either by an explicit call to
+	// Cluster.Refresh or, e.g., as required following a MOVED error. Note that
+	// even though it is unlikely, the old and new mappings could be identical.
+	// The function may be called in a separate goroutine, so it should not
+	// access shared values that are not meant to be used concurrently.
+	LayoutRefresh func(old, new [HashSlots][]string)
+
 	mu         sync.RWMutex           // protects following fields
-	err        error                  // broken connection error
-	pools      map[string]*redis.Pool // created pools per node
-	masters    map[string]bool        // set of known active master nodes, kept up-to-date
-	replicas   map[string]bool        // set of known active replica nodes, kept up-to-date
-	mapping    [hashSlots][]string    // hash slot number to master and replica(s) server addresses, master is always at [0]
+	err        error                  // closed cluster error
+	pools      map[string]*redis.Pool // created pools per node address
+	masters    map[string]bool        // set of known active master node addresses, kept up-to-date
+	replicas   map[string]bool        // set of known active replica node addresses, kept up-to-date
+	mapping    [HashSlots][]string    // hash slot number to master and replica(s) addresses, master is always at [0]
 	refreshing bool                   // indicates if there's a refresh in progress
 }
 
-// Refresh updates the cluster's internal mapping of hash slots
-// to redis node. It calls CLUSTER SLOTS on each known node until one
-// of them succeeds.
+// Refresh updates the cluster's internal mapping of hash slots to redis node.
+// It calls CLUSTER SLOTS on each known node until one of them succeeds.
 //
-// It should typically be called after creating the Cluster and before
-// using it. The cluster automatically keeps its mapping up-to-date
-// afterwards, based on the redis commands' MOVED responses.
+// It should typically be called after creating the Cluster and before using
+// it. The cluster automatically keeps its mapping up-to-date afterwards, based
+// on the redis commands' MOVED responses.
 func (c *Cluster) Refresh() error {
 	c.mu.Lock()
 	err := c.err
@@ -68,13 +99,14 @@ func (c *Cluster) Refresh() error {
 		return err
 	}
 
-	return c.refresh()
+	return c.refresh(false)
 }
 
-func (c *Cluster) refresh() error {
+func (c *Cluster) refresh(bg bool) error {
 	var errMsgs []string
+	var oldm, newm [HashSlots][]string
 
-	addrs := c.getNodeAddrs(false)
+	addrs, _ := c.getNodeAddrs(false)
 	for _, addr := range addrs {
 		m, err := c.getClusterSlots(addr)
 		if err != nil {
@@ -84,6 +116,8 @@ func (c *Cluster) refresh() error {
 
 		// succeeded, save as mapping
 		c.mu.Lock()
+
+		oldm = c.mapping
 		// mark all current nodes as false
 		for k := range c.masters {
 			c.masters[k] = false
@@ -115,6 +149,7 @@ func (c *Cluster) refresh() error {
 
 					// close and remove all existing pools for removed nodes
 					if p := c.pools[k]; p != nil {
+						// Pool.Close always returns nil
 						p.Close()
 						delete(c.pools, k)
 					}
@@ -124,8 +159,13 @@ func (c *Cluster) refresh() error {
 
 		// mark that no refresh is needed until another MOVED
 		c.refreshing = false
+		newm = c.mapping
 		c.mu.Unlock()
 
+		if c.LayoutRefresh != nil {
+			c.LayoutRefresh(oldm, newm)
+		}
+
 		return nil
 	}
 
@@ -134,36 +174,43 @@ func (c *Cluster) refresh() error {
 	c.refreshing = false
 	c.mu.Unlock()
 
-	var sb strings.Builder
-	sb.WriteString("redisc: all nodes failed")
-	for _, msg := range errMsgs {
-		sb.WriteByte('\n')
-		sb.WriteString(msg)
+	msg := "redisc: all nodes failed\n"
+	msg += strings.Join(errMsgs, "\n")
+	err := errors.New(msg)
+	if bg && c.BgError != nil {
+		// in bg mode, this is already called in a distinct goroutine, so do not
+		// call BgError in a distinct one.
+		c.BgError(ClusterRefresh, err)
 	}
-	return errors.New(sb.String())
+	return err
 }
 
-// needsRefresh handles automatic update of the mapping.
+// needsRefresh handles automatic update of the mapping, either because no node
+// was found for the slot, or because a MOVED error was received.
 func (c *Cluster) needsRefresh(re *RedirError) {
 	c.mu.Lock()
 	if re != nil {
-		// update the mapping only if the address has changed, so that if
-		// a READONLY replica read returns a MOVED to a master, it doesn't
-		// overwrite that slot's replicas by setting just the master (i.e. this
-		// is not a MOVED because the cluster is updating, it is a MOVED
-		// because the replica cannot serve that key). Same goes for a request
-		// to a random connection that gets a MOVED, should not overwrite
-		// the moved-to slot's configuration if the master's address is the same.
+		// update the mapping only if the address has changed, so that if a
+		// READONLY replica read returns a MOVED to a master, it doesn't overwrite
+		// that slot's replicas by setting just the master (i.e. this is not a
+		// MOVED because the cluster is updating, it is a MOVED because the replica
+		// cannot serve that key). The same goes for a request to a random
+		// connection that gets a MOVED: it should not overwrite the moved-to
+		// slot's configuration if the master's address is the same.
 		if current := c.mapping[re.NewSlot]; len(current) == 0 || current[0] != re.Addr {
 			c.mapping[re.NewSlot] = []string{re.Addr}
+		} else {
+			// no refresh needed, the mapping already points to this address
+			c.mu.Unlock()
+			return
 		}
 	}
 	if !c.refreshing {
-		// refreshing is reset to only once the goroutine has
-		// finished updating the mapping, so a new refresh goroutine
-		// will only be started if none is running.
+		// refreshing is reset only once the goroutine has finished updating the
+		// mapping, so a new refresh goroutine will only be started if none is
+		// running.
 		c.refreshing = true
-		go c.refresh()
+		go c.refresh(true) //nolint:errcheck
 	}
 	c.mu.Unlock()
 }
@@ -223,13 +270,17 @@ func (c *Cluster) getClusterSlots(addr string) ([]slotMapping, error) {
 }
 
 func (c *Cluster) getConnForAddr(addr string, forceDial bool) (redis.Conn, error) {
-	// non-pooled doesn't require a lock
+	c.mu.Lock()
+
+	if err := c.err; err != nil {
+		c.mu.Unlock()
+		return nil, err
+	}
 	if c.CreatePool == nil || forceDial {
+		c.mu.Unlock()
 		return redis.Dial("tcp", addr, c.DialOptions...)
 	}
 
-	c.mu.Lock()
-
 	p := c.pools[addr]
 	if p == nil {
 		c.mu.Unlock()
@@ -249,7 +300,8 @@ func (c *Cluster) getConnForAddr(addr string, forceDial bool) (redis.Conn, error
 		} else {
 			// Don't assume CreatePool just returned the pool struct, it may have
 			// used a connection or something - always match CreatePool with Close.
-			// Do it in a defer to keep lock time short.
+			// Do it in a defer to keep lock time short. Pool.Close always returns
+			// nil.
 			defer pool.Close()
 		}
 	}
@@ -258,6 +310,20 @@ func (c *Cluster) getConnForAddr(addr string, forceDial bool) (redis.Conn, error
 	return c.getFromPool(p)
 }
 
+// getFromPool gets a connection from the pool. It uses GetContext with a
+// timeout if PoolWaitTime > 0, otherwise it calls Get.
+func (c *Cluster) getFromPool(p *redis.Pool) (redis.Conn, error) {
+	if c.PoolWaitTime <= 0 {
+		conn := p.Get()
+		return conn, conn.Err()
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.PoolWaitTime)
+	defer cancel()
+
+	return p.GetContext(ctx)
+}
+
 var errNoNodeForSlot = errors.New("redisc: no node for slot")
 
 func (c *Cluster) getConnForSlot(slot int, forceDial, readOnly bool) (redis.Conn, string, error) {
@@ -286,7 +352,7 @@ func (c *Cluster) getConnForSlot(slot int, forceDial, readOnly bool) (redis.Conn
 	}
 	conn, err := c.getConnForAddr(addr, forceDial)
 	if err == nil && readOnly {
-		conn.Do("READONLY")
+		_, _ = conn.Do("READONLY")
 	}
 	return conn, addr, err
 }
@@ -295,25 +361,32 @@ func (c *Cluster) getConnForSlot(slot int, forceDial, readOnly bool) (redis.Conn
 var rnd = struct {
 	sync.Mutex
 	*rand.Rand
-}{Rand: rand.New(rand.NewSource(time.Now().UnixNano()))}
+}{Rand: rand.New(rand.NewSource(time.Now().UnixNano()))} //nolint:gosec
 
 func (c *Cluster) getRandomConn(forceDial, readOnly bool) (redis.Conn, string, error) {
-	addrs := c.getNodeAddrs(readOnly)
+	addrs, _ := c.getNodeAddrs(readOnly)
 	rnd.Lock()
 	perms := rnd.Perm(len(addrs))
 	rnd.Unlock()
 
+	var errMsgs []string //nolint:prealloc
 	for _, ix := range perms {
 		addr := addrs[ix]
 		conn, err := c.getConnForAddr(addr, forceDial)
 		if err == nil {
 			if readOnly {
-				conn.Do("READONLY")
+				_, _ = conn.Do("READONLY")
 			}
 			return conn, addr, nil
 		}
+		errMsgs = append(errMsgs, err.Error())
+	}
+	msg := "redisc: failed to get a connection"
+	if len(errMsgs) > 0 {
+		msg += "\n"
+		msg += strings.Join(errMsgs, "\n")
 	}
-	return nil, "", errors.New("redisc: failed to get a connection")
+	return nil, "", errors.New(msg)
 }
 
 func (c *Cluster) getConn(preferredSlot int, forceDial, readOnly bool) (conn redis.Conn, addr string, err error) {
@@ -329,12 +402,12 @@ func (c *Cluster) getConn(preferredSlot int, forceDial, readOnly bool) (conn red
 	return conn, addr, err
 }
 
-func (c *Cluster) getNodeAddrs(preferReplicas bool) []string {
+func (c *Cluster) getNodeAddrs(preferReplicas bool) (addrs []string, replicas bool) {
 	c.mu.Lock()
 
 	// populate nodes lazily, only once
 	if c.masters == nil {
-		c.masters = make(map[string]bool)
+		c.masters = make(map[string]bool, len(c.StartupNodes))
 		c.replicas = make(map[string]bool)
 
 		// StartupNodes should be masters
@@ -346,22 +419,22 @@ func (c *Cluster) getNodeAddrs(preferReplicas bool) []string {
 	from := c.masters
 	if preferReplicas && len(c.replicas) > 0 {
 		from = c.replicas
+		replicas = true
 	}
 
 	// grab a slice of addresses
-	addrs := make([]string, 0, len(from))
+	addrs = make([]string, 0, len(from))
 	for addr := range from {
 		addrs = append(addrs, addr)
 	}
 	c.mu.Unlock()
 
-	return addrs
+	return addrs, replicas
 }
 
-// Dial returns a connection the same way as Get, but
-// it guarantees that the connection will not be managed by the
-// pool, even if CreatePool is set. The actual returned
-// type is *Conn, see its documentation for details.
+// Dial returns a connection the same way as Get, but it guarantees that the
+// connection will not be managed by the pool, even if CreatePool is set. The
+// actual returned type is *Conn, see its documentation for details.
 func (c *Cluster) Dial() (redis.Conn, error) {
 	c.mu.Lock()
 	err := c.err
@@ -377,10 +450,9 @@ func (c *Cluster) Dial() (redis.Conn, error) {
 	}, nil
 }
 
-// Get returns a redis.Conn interface that can be used to call
-// redis commands on the cluster. The application must close the
-// returned connection. The actual returned type is *Conn,
-// see its documentation for details.
+// Get returns a redis.Conn interface that can be used to call redis commands
+// on the cluster. The application must close the returned connection. The
+// actual returned type is *Conn, see its documentation for details.
 func (c *Cluster) Get() redis.Conn {
 	c.mu.Lock()
 	err := c.err
@@ -392,8 +464,55 @@ func (c *Cluster) Get() redis.Conn {
 	}
 }
 
-// Close releases the resources used by the cluster. It closes all the
-// pools that were created, if any.
+// EachNode calls fn for each node in the cluster, with a connection bound to
+// that node. The connection is automatically closed (and potentially returned
+// to the pool if Cluster.CreatePool is set) after the function executes. Note
+// that conn is not a RetryConn and using one is inappropriate, as the goal of
+// EachNode is to connect to specific nodes, not to target specific keys. The
+// visited nodes are those that are known at the time of the call - it does not
+// force a refresh of the cluster layout. If no nodes are known, it returns an
+// error.
+//
+// If fn returns an error, no more nodes are visited and that error is returned
+// by EachNode. If replicas is true, it will visit each replica node instead,
+// otherwise the primary nodes are visited. Keep in mind that if replicas is
+// true, it will visit all known replicas - which is great e.g. to run
+// diagnostics on each node, but can be surprising if the goal is e.g. to
+// collect all keys, as it is possible that more than one node is acting as
+// replica for the same primary, meaning that the same keys could be seen
+// multiple times - you should be prepared to handle this scenario. The
+// connection provided to fn is not a ReadOnly connection (conn.ReadOnly hasn't
+// been called on it), it is up to fn to execute the READONLY redis command if
+// required.
+func (c *Cluster) EachNode(replicas bool, fn func(addr string, conn redis.Conn) error) error {
+	addrs, ok := c.getNodeAddrs(replicas)
+	if len(addrs) == 0 || replicas && !ok {
+		return errors.New("redisc: no known node address")
+	}
+
+	for _, addr := range addrs {
+		conn, err := c.getConnForAddr(addr, false)
+		cconn := &Conn{
+			cluster:   c,
+			boundAddr: addr,
+			rc:        conn,
+			// in case of error, create a failed connection and still call fn, so
+			// that it can decide whether or not to keep visiting nodes.
+			err: err,
+		}
+		err = func() error {
+			defer cconn.Close()
+			return fn(addr, cconn)
+		}()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close releases the resources used by the cluster. It closes all the pools
+// that were created, if any.
 func (c *Cluster) Close() error {
 	c.mu.Lock()
 	err := c.err
@@ -401,25 +520,26 @@ func (c *Cluster) Close() error {
 		c.err = errors.New("redisc: closed")
 		for _, p := range c.pools {
 			if e := p.Close(); e != nil && err == nil {
+				// note that Pool.Close always returns nil.
 				err = e
 			}
 		}
+		// keep c.pools around so that Stats can still be called after Close
 	}
 	c.mu.Unlock()
 
 	return err
 }
 
-// Stats returns the current statistics for all pools. Keys are node's addresses.
+// Stats returns the current statistics for all pools. Keys are the nodes'
+// addresses.
 func (c *Cluster) Stats() map[string]redis.PoolStats {
 	c.mu.RLock()
 	defer c.mu.RUnlock()
 
 	stats := make(map[string]redis.PoolStats, len(c.pools))
-
 	for address, pool := range c.pools {
 		stats[address] = pool.Stats()
 	}
-
 	return stats
 }
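
The new optional `BgError` and `LayoutRefresh` fields added to `Cluster` above are plain function fields. A short sketch of how a caller might wire them up; the startup address and the logging are illustrative assumptions, not part of the change:

```go
package main

import (
	"log"

	"github.com/mna/redisc"
)

// newObservedCluster sketches wiring the new optional Cluster hooks.
func newObservedCluster() *redisc.Cluster {
	return &redisc.Cluster{
		StartupNodes: []string{"127.0.0.1:7000"}, // placeholder address

		// BgError surfaces errors from background refreshes and from closing
		// the previous connection when a RetryConn follows a redirection. It
		// may run in a separate goroutine, so only do goroutine-safe work here.
		BgError: func(src redisc.BgErrorSrc, err error) {
			switch src {
			case redisc.ClusterRefresh:
				log.Printf("background cluster refresh failed: %v", err)
			case redisc.RetryCloseConn:
				log.Printf("closing redirected connection failed: %v", err)
			}
		},

		// LayoutRefresh receives the old and new slot-to-address mappings
		// after each successful refresh; the exported HashSlots constant is
		// what makes this signature writable outside the package.
		LayoutRefresh: func(old, new [redisc.HashSlots][]string) {
			var changed int
			for slot := range new {
				if len(old[slot]) == 0 || len(new[slot]) == 0 || old[slot][0] != new[slot][0] {
					changed++
				}
			}
			log.Printf("cluster layout refreshed, %d slots changed primary", changed)
		},
	}
}
```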
diff --git a/cluster_go16.go b/cluster_go16.go
deleted file mode 100644
index 806783c..0000000
--- a/cluster_go16.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !go1.7
-
-package redisc
-
-import (
-	"github.com/gomodule/redigo/redis"
-)
-
-// get connection from the pool
-// pre go1.7, Pool has no GetContext method, so it always
-// calls Get.
-func (c *Cluster) getFromPool(p *redis.Pool) (redis.Conn, error) {
-	conn := p.Get()
-	return conn, conn.Err()
-}
diff --git a/cluster_go16_test.go b/cluster_go16_test.go
deleted file mode 100644
index 4880256..0000000
--- a/cluster_go16_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build !go1.7
-
-package redisc
-
-import (
-	"testing"
-
-	"github.com/gomodule/redigo/redis"
-	"github.com/mna/redisc/redistest"
-	"github.com/stretchr/testify/assert"
-)
-
-// TestGetPool
-func TestGetPool(t *testing.T) {
-	s := redistest.StartMockServer(t, func(cmd string, args ...string) interface{} {
-		return nil
-	})
-	defer s.Close()
-
-	p := &redis.Pool{
-		MaxActive: 1,
-		Dial: func() (redis.Conn, error) {
-			return redis.Dial("tcp", s.Addr)
-		},
-	}
-	c := Cluster{}
-
-	// fist connection is OK
-	conn, err := c.getFromPool(p)
-	if assert.NoError(t, err) {
-		defer conn.Close()
-	}
-
-	// second connection should be failed because we only have 1 MaxActive
-	_, err = c.getFromPool(p)
-	assert.Error(t, err)
-}
diff --git a/cluster_go17.go b/cluster_go17.go
deleted file mode 100644
index 20dfe75..0000000
--- a/cluster_go17.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build go1.7
-
-package redisc
-
-import (
-	"context"
-
-	"github.com/gomodule/redigo/redis"
-)
-
-// get connection from the pool.
-// use GetContext if PoolWaitTime > 0
-func (c *Cluster) getFromPool(p *redis.Pool) (redis.Conn, error) {
-	if c.PoolWaitTime <= 0 {
-		conn := p.Get()
-		return conn, conn.Err()
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), c.PoolWaitTime)
-	defer cancel()
-
-	return p.GetContext(ctx)
-}
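
With the build-tagged files removed, `getFromPool` now lives only in cluster.go and uses `Pool.GetContext` when `PoolWaitTime` is set. A sketch of a pool configuration that exercises this path; the address and limits are illustrative assumptions:

```go
package main

import (
	"time"

	"github.com/gomodule/redigo/redis"
	"github.com/mna/redisc"
)

// newBoundedWaitCluster shows PoolWaitTime combined with a pool created with
// MaxActive and Wait: Get blocks while all connections are in use, and the
// cluster waits via Pool.GetContext, failing with context.DeadlineExceeded
// once PoolWaitTime expires.
func newBoundedWaitCluster() *redisc.Cluster {
	return &redisc.Cluster{
		StartupNodes: []string{"127.0.0.1:7000"}, // placeholder address
		CreatePool: func(addr string, opts ...redis.DialOption) (*redis.Pool, error) {
			return &redis.Pool{
				MaxActive: 10,
				Wait:      true, // block when all 10 connections are in use...
				Dial: func() (redis.Conn, error) {
					return redis.Dial("tcp", addr, opts...)
				},
			}, nil
		},
		// ...but give up after waiting 250ms for a free connection.
		PoolWaitTime: 250 * time.Millisecond,
	}
}
```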
diff --git a/cluster_go17_test.go b/cluster_go17_test.go
deleted file mode 100644
index 2891d1b..0000000
--- a/cluster_go17_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// +build go1.7
-
-package redisc
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"github.com/gomodule/redigo/redis"
-	"github.com/mna/redisc/redistest"
-	"github.com/stretchr/testify/assert"
-)
-
-// TestGetPoolTimedOut test case where we can't get the connection because the pool
-// is full
-func TestGetPoolTimedOut(t *testing.T) {
-	s := redistest.StartMockServer(t, func(cmd string, args ...string) interface{} {
-		return nil
-	})
-	defer s.Close()
-
-	p := &redis.Pool{
-		MaxActive: 1,
-		Dial: func() (redis.Conn, error) {
-			return redis.Dial("tcp", s.Addr)
-		},
-		Wait: true,
-	}
-	c := Cluster{
-		PoolWaitTime: 100 * time.Millisecond,
-	}
-	conn, err := c.getFromPool(p)
-	if assert.NoError(t, err) {
-		defer conn.Close()
-	}
-
-	// second connection should be failed because we only have 1 MaxActive
-	start := time.Now()
-	_, err = c.getFromPool(p)
-	if assert.Error(t, err) {
-		assert.Equal(t, context.DeadlineExceeded, err)
-		assert.True(t, time.Since(start) >= 100*time.Millisecond)
-	}
-}
-
-// TestGetPoolWaitOnFull test that we could get the connection when the pool
-// is full and we can wait for it
-func TestGetPoolWaitOnFull(t *testing.T) {
-	s := redistest.StartMockServer(t, func(cmd string, args ...string) interface{} {
-		return nil
-	})
-	defer s.Close()
-
-	var (
-		usageTime = 100 * time.Millisecond // how long the connection will be used
-		waitTime  = 3 * usageTime          // how long we want to wait
-	)
-
-	p := &redis.Pool{
-		MaxActive: 1,
-		Dial: func() (redis.Conn, error) {
-			return redis.Dial("tcp", s.Addr)
-		},
-		Wait: true,
-	}
-	c := Cluster{
-		PoolWaitTime: waitTime,
-	}
-
-	// first connection OK
-	conn, err := c.getFromPool(p)
-	assert.NoError(t, err)
-
-	// second connection should be failed because we only have 1 MaxActive
-	start := time.Now()
-	_, err = c.getFromPool(p)
-	if assert.Error(t, err) {
-		assert.Equal(t, context.DeadlineExceeded, err)
-		assert.True(t, time.Since(start) >= waitTime)
-	}
-
-	go func() {
-		time.Sleep(usageTime) // sleep before close, to simulate waiting for connection
-		conn.Close()
-	}()
-
-	start = time.Now()
-	conn2, err := c.getFromPool(p)
-	if assert.NoError(t, err) {
-		assert.True(t, time.Since(start) >= usageTime)
-	}
-	conn2.Close()
-}
diff --git a/cluster_test.go b/cluster_test.go
index be1a0f3..36596c2 100644
--- a/cluster_test.go
+++ b/cluster_test.go
@@ -1,8 +1,10 @@
 package redisc
 
 import (
+	"context"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ -13,54 +15,398 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func TestClusterRefreshNormalServer(t *testing.T) {
+func TestStandaloneRedis(t *testing.T) {
 	cmd, port := redistest.StartServer(t, nil, "")
-	defer cmd.Process.Kill()
+	defer cmd.Process.Kill() //nolint:errcheck
+	port = ":" + port
 
-	c := &Cluster{
-		StartupNodes: []string{":" + port},
+	t.Run("refresh", func(t *testing.T) {
+		c := &Cluster{
+			StartupNodes: []string{port},
+		}
+		err := c.Refresh()
+		if assert.Error(t, err, "Refresh") {
+			assert.Contains(t, err.Error(), "redisc: all nodes failed", "expected redisc error message")
+			assert.Contains(t, err.Error(), "cluster support disabled", "expected redis error message")
+		}
+	})
+}
+
+func TestClusterRedis(t *testing.T) {
+	fn, ports := redistest.StartCluster(t, nil)
+	defer fn()
+	for i, p := range ports {
+		ports[i] = ":" + p
 	}
-	err := c.Refresh()
-	if assert.Error(t, err, "Refresh") {
-		assert.Contains(t, err.Error(), "redisc: all nodes failed", "expected redisc error message")
-		assert.Contains(t, err.Error(), "cluster support disabled", "expected redis error message")
+
+	t.Run("refresh", func(t *testing.T) { testClusterRefresh(t, ports) })
+	t.Run("needs refresh", func(t *testing.T) { testClusterNeedsRefresh(t, ports) })
+	t.Run("close", func(t *testing.T) { testClusterClose(t, ports) })
+	t.Run("closed refresh", func(t *testing.T) { testClusterClosedRefresh(t, ports) })
+	t.Run("conn readonly no replica", func(t *testing.T) { testConnReadOnlyNoReplica(t, ports) })
+	t.Run("conn bind", func(t *testing.T) { testConnBind(t, ports) })
+	t.Run("conn blank do", func(t *testing.T) { testConnBlankDo(t, ports) })
+	t.Run("conn with timeout", func(t *testing.T) { testConnWithTimeout(t, ports) })
+	t.Run("retry conn too many attempts", func(t *testing.T) { testRetryConnTooManyAttempts(t, ports) })
+	t.Run("retry conn moved", func(t *testing.T) { testRetryConnMoved(t, ports) })
+	t.Run("each node none", func(t *testing.T) { testEachNodeNone(t, ports) })
+	t.Run("each node some", func(t *testing.T) { testEachNodeSome(t, ports) })
+	t.Run("retry conn trigger refresh", func(t *testing.T) { testRetryConnTriggerRefreshes(t, ports) })
+}
+
+func TestClusterRedisWithReplica(t *testing.T) {
+	fn, ports := redistest.StartClusterWithReplicas(t, nil)
+	defer fn()
+	for i, p := range ports {
+		ports[i] = ":" + p
 	}
+
+	t.Run("refresh startup nodes a replica", func(t *testing.T) { testClusterRefreshStartWithReplica(t, ports) })
+	t.Run("conn readonly", func(t *testing.T) { testConnReadOnlyWithReplicas(t, ports) })
+	t.Run("each node some", func(t *testing.T) { testEachNodeSomeWithReplica(t, ports) })
+	t.Run("each node scan keys", func(t *testing.T) { testEachNodeScanKeysWithReplica(t, ports) })
+	t.Run("layout refresh", func(t *testing.T) { testLayoutRefreshWithReplica(t, ports) })
+	t.Run("layout moved", func(t *testing.T) { testLayoutMovedWithReplica(t, ports) })
 }
 
-func assertMapping(t *testing.T, mapping [hashSlots][]string, masterPorts, replicaPorts []string) {
-	var prev string
-	pix := -1
+func assertMapping(t *testing.T, mapping [HashSlots][]string, masterPorts, replicaPorts []string) {
 	expectedMappingNodes := 1 // at least a master node
 	if len(replicaPorts) > 0 {
-		// if there are replicase, then we expected 2 mapping nodes (master+replica)
+		// if there are replicas, then we expect 2 mapping nodes (master+replica)
 		expectedMappingNodes = 2
 	}
-	for ix, maps := range mapping {
+	for _, maps := range mapping {
 		if assert.Equal(t, expectedMappingNodes, len(maps), "Mapping has %d node(s)", expectedMappingNodes) {
-			if maps[0] != prev || ix == len(mapping)-1 {
-				prev = maps[0]
-				t.Logf("%5d: %s\n", ix, maps[0])
-				pix++
-			}
 			if assert.NotEmpty(t, maps[0]) {
 				split := strings.Index(maps[0], ":")
-				assert.Contains(t, masterPorts, maps[0][split+1:], "expected master")
+				assert.Contains(t, masterPorts, maps[0][split:], "expected master")
 			}
 			if len(maps) > 1 && assert.NotEmpty(t, maps[1]) {
 				split := strings.Index(maps[1], ":")
-				assert.Contains(t, replicaPorts, maps[1][split+1:], "expected replica")
+				assert.Contains(t, replicaPorts, maps[1][split:], "expected replica")
 			}
 		}
 	}
 }
 
-func TestClusterRefresh(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
+func testEachNodeNone(t *testing.T, _ []string) {
+	c := &Cluster{}
+	defer c.Close()
+
+	// no known node
+	var count int
+	err := c.EachNode(false, func(addr string, conn redis.Conn) error {
+		count++
+		return nil
+	})
+	if assert.Error(t, err) {
+		assert.Contains(t, err.Error(), "no known node")
+	}
+	assert.Equal(t, 0, count)
+
+	// no known replica
+	count = 0
+	err = c.EachNode(true, func(addr string, conn redis.Conn) error {
+		count++
+		return nil
+	})
+	if assert.Error(t, err) {
+		assert.Contains(t, err.Error(), "no known node")
+	}
+	assert.Equal(t, 0, count)
+}
+
+func assertNodeIdentity(t *testing.T, conn redis.Conn, gotAddr, wantPort, wantRole string) {
+	assertNodeIdentityIn(t, conn, gotAddr, wantRole, map[string]bool{wantPort: true})
+}
+
+func assertNodeIdentityIn(t *testing.T, conn redis.Conn, gotAddr, wantRole string, portIn map[string]bool) {
+	var foundPort string
+	for port := range portIn {
+		if strings.HasSuffix(gotAddr, port) {
+			foundPort = port
+			delete(portIn, port)
+		}
+	}
+	assert.NotEmpty(t, foundPort, "address not in %#v", portIn)
+	vs, err := redis.Values(conn.Do("ROLE"))
+	require.NoError(t, err)
+
+	var role string
+	_, err = redis.Scan(vs, &role)
+	require.NoError(t, err)
+	assert.Equal(t, wantRole, role)
+
+	info, err := redis.String(conn.Do("INFO", "server"))
+	require.NoError(t, err)
+	assert.Contains(t, info, "tcp_port"+foundPort)
+}
+
+func testEachNodeSome(t *testing.T, ports []string) {
+	c := &Cluster{
+		StartupNodes: []string{ports[0]},
+	}
+	defer c.Close()
+
+	// only the single startup node at the moment
+	var count int
+	err := c.EachNode(false, func(addr string, conn redis.Conn) error {
+		count++
+		assertNodeIdentity(t, conn, addr, ports[0], "master")
+		return nil
+	})
+	assert.NoError(t, err)
+	assert.Equal(t, 1, count)
+
+	// no known replica
+	count = 0
+	err = c.EachNode(true, func(addr string, conn redis.Conn) error {
+		count++
+		return nil
+	})
+	if assert.Error(t, err) {
+		assert.Contains(t, err.Error(), "no known node")
+	}
+	assert.Equal(t, 0, count)
+
+	require.NoError(t, c.Refresh())
+
+	portsIn := make(map[string]bool, len(ports))
+	for _, port := range ports {
+		portsIn[port] = true
+	}
+
+	count = 0
+	err = c.EachNode(false, func(addr string, conn redis.Conn) error {
+		count++
+		assertNodeIdentityIn(t, conn, addr, "master", portsIn)
+		return nil
+	})
+	assert.NoError(t, err)
+	assert.Equal(t, len(ports), count)
+
+	// no known replica
+	count = 0
+	err = c.EachNode(true, func(addr string, conn redis.Conn) error {
+		count++
+		return nil
+	})
+	if assert.Error(t, err) {
+		assert.Contains(t, err.Error(), "no known node")
+	}
+	assert.Equal(t, 0, count)
+}
+
+func testEachNodeSomeWithReplica(t *testing.T, ports []string) {
+	c := &Cluster{
+		StartupNodes: []string{ports[0]},
+	}
+	defer c.Close()
+
+	// only the single startup node at the moment
+	var count int
+	err := c.EachNode(false, func(addr string, conn redis.Conn) error {
+		count++
+		assertNodeIdentity(t, conn, addr, ports[0], "master")
+		return nil
+	})
+	assert.NoError(t, err)
+	assert.Equal(t, 1, count)
+
+	// no known replica
+	count = 0
+	err = c.EachNode(true, func(addr string, conn redis.Conn) error {
+		count++
+		return nil
+	})
+	if assert.Error(t, err) {
+		assert.Contains(t, err.Error(), "no known node")
+	}
+	assert.Equal(t, 0, count)
+
+	require.NoError(t, c.Refresh())
+
+	// visit each primary
+	primaries, replicas := ports[:redistest.NumClusterNodes], ports[redistest.NumClusterNodes:]
+	portsIn := make(map[string]bool, len(primaries))
+	for _, port := range primaries {
+		portsIn[port] = true
+	}
+
+	count = 0
+	err = c.EachNode(false, func(addr string, conn redis.Conn) error {
+		count++
+		assertNodeIdentityIn(t, conn, addr, "master", portsIn)
+		return nil
+	})
+	assert.NoError(t, err)
+	assert.Equal(t, len(primaries), count)
+
+	// visit each replica
+	portsIn = make(map[string]bool, len(replicas))
+	for _, port := range replicas {
+		portsIn[port] = true
+	}
+
+	count = 0
+	err = c.EachNode(true, func(addr string, conn redis.Conn) error {
+		count++
+		assertNodeIdentityIn(t, conn, addr, "slave", portsIn)
+		return nil
+	})
+	assert.NoError(t, err)
+	assert.Equal(t, len(replicas), count)
+}
+
+func testEachNodeScanKeysWithReplica(t *testing.T, ports []string) {
+	c := &Cluster{
+		StartupNodes: []string{"127.0.0.1" + ports[0]},
+		CreatePool:   createPool,
+	}
+	defer c.Close()
+	require.NoError(t, c.Refresh())
+
+	conn := c.Get()
+	conn, _ = RetryConn(conn, 3, 100*time.Millisecond)
+	defer conn.Close()
+
+	const prefix = "eachnode:"
+	keys := []string{"a", "b", "c", "d", "e"}
+	for i, k := range keys {
+		k = prefix + "{" + k + "}"
+		keys[i] = k
+		_, err := conn.Do("SET", k, i)
+		require.NoError(t, err)
+	}
+	conn.Close() // close it now so it does not show up as in use in stats
+
+	// collect from primaries
+	var gotKeys []string
+	err := c.EachNode(false, func(addr string, conn redis.Conn) error {
+		var cursor int
+		for {
+			var keyList []string
+			vs, err := redis.Values(conn.Do("SCAN", cursor, "MATCH", prefix+"*"))
+			require.NoError(t, err)
+			_, err = redis.Scan(vs, &cursor, &keyList)
+			require.NoError(t, err)
+			gotKeys = append(gotKeys, keyList...)
+			if cursor == 0 {
+				return nil
+			}
+		}
+	})
+	require.NoError(t, err)
+	assert.ElementsMatch(t, keys, gotKeys)
+
+	// collect from replicas
+	gotKeys = nil
+	err = c.EachNode(true, func(addr string, conn redis.Conn) error {
+		var cursor int
+		for {
+			var keyList []string
+			vs, err := redis.Values(conn.Do("SCAN", cursor, "MATCH", prefix+"*"))
+			require.NoError(t, err)
+			_, err = redis.Scan(vs, &cursor, &keyList)
+			require.NoError(t, err)
+			gotKeys = append(gotKeys, keyList...)
+			if cursor == 0 {
+				return nil
+			}
+		}
+	})
+	require.NoError(t, err)
+	assert.ElementsMatch(t, keys, gotKeys)
+
+	var inuse, idle int
+	stats := c.Stats()
+	for _, st := range stats {
+		inuse += st.ActiveCount - st.IdleCount
+		idle += st.IdleCount
+	}
+	assert.Equal(t, 0, inuse)         // all connections were closed/returned to the pool
+	assert.Equal(t, len(ports), idle) // one for each node, primary + replica
+}
+
+func testLayoutRefreshWithReplica(t *testing.T, ports []string) {
+	var count int
+	c := &Cluster{
+		StartupNodes: []string{ports[0]},
+		LayoutRefresh: func(old, new [HashSlots][]string) {
+			for slot, maps := range old {
+				assert.Len(t, maps, 0, "slot %d", slot)
+			}
+			for slot, maps := range new {
+				assert.Len(t, maps, 2, "slot %d", slot)
+			}
+			count++
+		},
+	}
+	defer c.Close()
+
+	// LayoutRefresh is called synchronously when the Refresh call is explicit
+	require.NoError(t, c.Refresh())
+	require.Equal(t, 1, count)
+}
+
+func testLayoutMovedWithReplica(t *testing.T, ports []string) {
+	var count int64
+	done := make(chan bool, 1)
+	c := &Cluster{
+		StartupNodes: []string{ports[0]},
+		LayoutRefresh: func(old, new [HashSlots][]string) {
+			for slot, maps := range old {
+				if slot == 15495 { // slot of key "a"
+					assert.Len(t, maps, 1, "slot %d", slot)
+					continue
+				}
+				assert.Len(t, maps, 0, "slot %d", slot)
+			}
+			for slot, maps := range new {
+				assert.Len(t, maps, 2, "slot %d", slot)
+			}
+			atomic.AddInt64(&count, 1)
+			done <- true
+		},
+	}
+	defer c.Close()
+
+	conn := c.Get()
+	defer conn.Close()
 
+	// to trigger this properly, first do EachNode (which only knows about the
+	// current node, which serves the bottom tier slots), and request key "a"
+	// which hashes to a high slot. This will result in a MOVED error that will
+	// update the single mapping and trigger a full refresh.
+	var eachCalls int
+	_ = c.EachNode(false, func(_ string, conn redis.Conn) error {
+		eachCalls++
+		_, err := conn.Do("GET", "a")
+		if assert.Error(t, err) {
+			assert.Contains(t, err.Error(), "MOVED")
+		}
+		return nil
+	})
+	assert.Equal(t, 1, eachCalls)
+
+	// LayoutRefresh call might not have completed yet, so wait for the channel
+	// receive, or fail after a second.
+	waitForClusterRefresh(c, nil)
+
+	select {
+	case <-time.After(time.Second):
+		require.Fail(t, "LayoutRefresh call not done after timeout")
+	case <-done:
+		got := atomic.LoadInt64(&count)
+		require.Equal(t, int64(1), got)
+	}
+}
+
+func testClusterRefresh(t *testing.T, ports []string) {
 	c := &Cluster{
-		StartupNodes: []string{":" + ports[0]},
+		StartupNodes: []string{ports[0]},
 	}
+	defer c.Close()
 
 	err := c.Refresh()
 	if assert.NoError(t, err, "Refresh") {
@@ -68,13 +414,12 @@ func TestClusterRefresh(t *testing.T) {
 	}
 }
 
-func TestClusterRefreshStartWithReplica(t *testing.T) {
-	fn, ports := redistest.StartClusterWithReplicas(t, nil)
-	defer fn()
-
+func testClusterRefreshStartWithReplica(t *testing.T, ports []string) {
 	c := &Cluster{
-		StartupNodes: []string{":" + ports[len(ports)-1]}, // last port is a replica
+		StartupNodes: []string{ports[len(ports)-1]}, // last port is a replica
 	}
+	defer c.Close()
+
 	err := c.Refresh()
 	if assert.NoError(t, err, "Refresh") {
 		assertMapping(t, c.mapping, ports[:redistest.NumClusterNodes], ports[redistest.NumClusterNodes:])
@@ -90,6 +435,8 @@ func TestClusterRefreshAllFail(t *testing.T) {
 	c := &Cluster{
 		StartupNodes: []string{s.Addr},
 	}
+	defer c.Close()
+
 	if err := c.Refresh(); assert.Error(t, err, "Refresh") {
 		assert.Contains(t, err.Error(), "all nodes failed", "expected message")
 		assert.Contains(t, err.Error(), "nope", "expected server message")
@@ -99,6 +446,8 @@ func TestClusterRefreshAllFail(t *testing.T) {
 
 func TestClusterNoNode(t *testing.T) {
 	c := &Cluster{}
+	defer c.Close()
+
 	conn := c.Get()
 	_, err := conn.Do("A")
 	if assert.Error(t, err, "Do") {
@@ -112,13 +461,7 @@ func TestClusterNoNode(t *testing.T) {
 	}
 }
 
-func TestClusterNeedsRefresh(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
-	for i, p := range ports {
-		ports[i] = ":" + p
-	}
+func testClusterNeedsRefresh(t *testing.T, ports []string) {
 	c := &Cluster{
 		StartupNodes: ports,
 	}
@@ -138,30 +481,42 @@ func TestClusterNeedsRefresh(t *testing.T) {
 
 	// calling Do may or may not generate a MOVED error (it will get a
 	// random node, because no mapping is known yet)
-	conn.Do("GET", "b")
+	_, _ = conn.Do("GET", "b")
 
-	// wait for refreshing to become false again
-	c.mu.Lock()
-	for c.refreshing {
-		c.mu.Unlock()
-		time.Sleep(100 * time.Millisecond)
-		c.mu.Lock()
-	}
-	for i, v := range c.mapping {
-		if !assert.NotEmpty(t, v, "Addr for %d", i) {
-			break
+	waitForClusterRefresh(c, func() {
+		for i, v := range c.mapping {
+			if !assert.NotEmpty(t, v, "Addr for %d", i) {
+				break
+			}
 		}
-	}
-	c.mu.Unlock()
+	})
 }
 
-func TestClusterClose(t *testing.T) {
+func testClusterClose(t *testing.T, ports []string) {
 	c := &Cluster{
-		StartupNodes: []string{":6379"},
+		StartupNodes: []string{ports[0]},
 		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
 		CreatePool:   createPool,
 	}
+	defer c.Close()
+
+	require.NoError(t, c.Refresh())
+
+	// get some connections before closing
+	connUnbound := c.Get()
+	defer connUnbound.Close()
+
+	connBound := c.Get()
+	defer connBound.Close()
+	_ = BindConn(connBound, "b")
+
+	connRetry := c.Get()
+	defer connRetry.Close()
+	connRetry, _ = RetryConn(connRetry, 3, time.Millisecond)
+
+	// close the cluster and check that all API works as expected
 	assert.NoError(t, c.Close(), "Close")
+
 	if err := c.Close(); assert.Error(t, err, "Close after Close") {
 		assert.Contains(t, err.Error(), "redisc: closed", "expected message")
 	}
@@ -174,6 +529,152 @@ func TestClusterClose(t *testing.T) {
 	if err := c.Refresh(); assert.Error(t, err, "Refresh after Close") {
 		assert.Contains(t, err.Error(), "redisc: closed", "expected message")
 	}
+	if err := c.EachNode(false, func(addr string, c redis.Conn) error { return c.Err() }); assert.Error(t, err, "EachNode after Close") {
+		assert.Contains(t, err.Error(), "redisc: closed", "expected message")
+	}
+
+	if _, err := connUnbound.Do("SET", "a", 1); assert.Error(t, err, "unbound connection Do") {
+		assert.Contains(t, err.Error(), "redisc: closed", "expected message")
+	}
+	// connection was bound pre-cluster-close, so it already has a valid connection
+	if _, err := connBound.Do("SET", "b", 1); assert.NoError(t, err, "bound connection Do") {
+		err = connBound.Close()
+		assert.NoError(t, err)
+	}
+	if _, err := connRetry.Do("GET", "a"); assert.Error(t, err, "retry connection Do") {
+		assert.Contains(t, err.Error(), "redisc: closed", "expected message")
+	}
+
+	// Stats still works after Close
+	stats := c.Stats()
+	assert.True(t, len(stats) > 0)
+}
+
+func testClusterClosedRefresh(t *testing.T, ports []string) {
+	var clusterRefreshCount int64
+	var clusterRefreshErr atomic.Value
+
+	done := make(chan bool)
+	c := &Cluster{
+		StartupNodes: []string{ports[0]},
+		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
+		CreatePool:   createPool,
+		BgError: func(src BgErrorSrc, err error) {
+			if src == ClusterRefresh {
+				atomic.AddInt64(&clusterRefreshCount, 1)
+				clusterRefreshErr.Store(err)
+				done <- true
+			}
+		},
+	}
+	defer c.Close()
+
+	conn := c.Get()
+	defer conn.Close()
+
+	// close the cluster and check that all API works as expected
+	assert.NoError(t, c.Close(), "Close")
+	if _, err := conn.Do("SET", "a", 1); assert.Error(t, err, "connection Do") {
+		assert.Contains(t, err.Error(), "redisc: closed", "expected message")
+	}
+	waitForClusterRefresh(c, nil)
+
+	// BgError call might not have completed yet, so wait for the channel
+	// receive, or fail after a second.
+	select {
+	case <-time.After(time.Second):
+		require.Fail(t, "BgError call not done after timeout")
+	case <-done:
+		count := atomic.LoadInt64(&clusterRefreshCount)
+		require.Equal(t, 1, int(count))
+		if err := clusterRefreshErr.Load().(error); assert.Error(t, err, "refresh error") {
+			assert.Contains(t, err.Error(), "redisc: closed", "expected message")
+		}
+	}
+}
+
+// TestGetPoolTimedOut tests the case where we can't get the connection
+// because the pool is full.
+func TestGetPoolTimedOut(t *testing.T) {
+	s := redistest.StartMockServer(t, func(cmd string, args ...string) interface{} {
+		return nil
+	})
+	defer s.Close()
+
+	p := &redis.Pool{
+		MaxActive: 1,
+		Dial: func() (redis.Conn, error) {
+			return redis.Dial("tcp", s.Addr)
+		},
+		Wait: true,
+	}
+	c := Cluster{
+		PoolWaitTime: 100 * time.Millisecond,
+	}
+	defer c.Close()
+
+	conn, err := c.getFromPool(p)
+	if assert.NoError(t, err) {
+		defer conn.Close()
+	}
+
+	// second connection should fail because we only have 1 MaxActive
+	start := time.Now()
+	_, err = c.getFromPool(p)
+	if assert.Error(t, err) {
+		assert.Equal(t, context.DeadlineExceeded, err)
+		assert.True(t, time.Since(start) >= 100*time.Millisecond)
+	}
+}
+
+// TestGetPoolWaitOnFull tests that we can get the connection when the pool
+// is full and we can wait for it.
+func TestGetPoolWaitOnFull(t *testing.T) {
+	s := redistest.StartMockServer(t, func(cmd string, args ...string) interface{} {
+		return nil
+	})
+	defer s.Close()
+
+	var (
+		usageTime = 100 * time.Millisecond // how long the connection will be used
+		waitTime  = 3 * usageTime          // how long we want to wait
+	)
+
+	p := &redis.Pool{
+		MaxActive: 1,
+		Dial: func() (redis.Conn, error) {
+			return redis.Dial("tcp", s.Addr)
+		},
+		Wait: true,
+	}
+	c := Cluster{
+		PoolWaitTime: waitTime,
+	}
+	defer c.Close()
+
+	// first connection OK
+	conn, err := c.getFromPool(p)
+	assert.NoError(t, err)
+
+	// second connection should fail because we only have 1 MaxActive
+	start := time.Now()
+	_, err = c.getFromPool(p)
+	if assert.Error(t, err) {
+		assert.Equal(t, context.DeadlineExceeded, err)
+		assert.True(t, time.Since(start) >= waitTime)
+	}
+
+	go func() {
+		time.Sleep(usageTime) // sleep before close, to simulate waiting for connection
+		conn.Close()
+	}()
+
+	start = time.Now()
+	conn2, err := c.getFromPool(p)
+	if assert.NoError(t, err) {
+		assert.True(t, time.Since(start) >= usageTime)
+	}
+	conn2.Close()
 }
 
 func createPool(addr string, opts ...redis.DialOption) (*redis.Pool, error) {
@@ -191,6 +692,24 @@ func createPool(addr string, opts ...redis.DialOption) (*redis.Pool, error) {
 	}, nil
 }
 
+// waitForClusterRefresh waits for a running Cluster.refresh call to complete
+// before calling fn.
+// Note that fn is called while the Cluster's lock is held - to just wait
+// for refresh to complete and continue without holding the lock, simply
+// pass nil as fn - the lock is released before this call returns.
+func waitForClusterRefresh(cluster *Cluster, fn func()) {
+	// wait for refreshing to become false again
+	cluster.mu.Lock()
+	for cluster.refreshing {
+		cluster.mu.Unlock()
+		time.Sleep(100 * time.Millisecond)
+		cluster.mu.Lock()
+	}
+	if fn != nil {
+		fn()
+	}
+	cluster.mu.Unlock()
+}
+
 type redisCmd struct {
 	name   string
 	args   redis.Args
@@ -201,9 +720,6 @@ type redisCmd struct {
 type lenResult int
 
 func TestCommands(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
 	cmdsPerGroup := map[string][]redisCmd{
 		"cluster": {
 			{"CLUSTER", redis.Args{"INFO"}, lenResult(10), ""},
@@ -214,7 +730,7 @@ func TestCommands(t *testing.T) {
 			{"CLUSTER", redis.Args{"NODES"}, lenResult(100), ""},
 		},
 		"connection": {
-			{"AUTH", redis.Args{"pwd"}, nil, "ERR Client sent AUTH, but no password is set"},
+			{"AUTH", redis.Args{"pwd"}, nil, "AUTH"},
 			{"ECHO", redis.Args{"a"}, []byte("a"), ""},
 			{"PING", nil, "PONG", ""},
 			{"SELECT", redis.Args{1}, nil, "ERR SELECT is not allowed in cluster mode"},
@@ -245,6 +761,7 @@ func TestCommands(t *testing.T) {
 			{"PFMERGE", redis.Args{"hll", "hll2"}, nil, "CROSSSLOT"},
 		},
 		"keys": {
+			// connection will bind to the node that serves slot of "k1"
 			{"SET", redis.Args{"k1", "z"}, "OK", ""},
 			{"EXISTS", redis.Args{"k1"}, int64(1), ""},
 			{"DUMP", redis.Args{"k1"}, lenResult(10), ""},
@@ -263,8 +780,9 @@ func TestCommands(t *testing.T) {
 			{"TTL", redis.Args{"k1"}, lenResult(3000), ""},
 			{"TYPE", redis.Args{"k1"}, "string", ""},
 			{"DEL", redis.Args{"k1"}, int64(1), ""},
-			{"SADD", redis.Args{"k3", "a", "z", "d"}, int64(3), ""},
-			{"SORT", redis.Args{"k3", "ALPHA"}, []interface{}{[]byte("a"), []byte("d"), []byte("z")}, ""},
+			{"SADD", redis.Args{"k1", "a", "z", "d"}, int64(3), ""},
+			{"SORT", redis.Args{"k1", "ALPHA"}, []interface{}{[]byte("a"), []byte("d"), []byte("z")}, ""},
+			{"DEL", redis.Args{"a", "b"}, nil, "CROSSSLOT"},
 		},
 		"lists": {
 			{"LPUSH", redis.Args{"l1", "a", "b", "c"}, int64(3), ""},
@@ -312,8 +830,8 @@ func TestCommands(t *testing.T) {
 			{"SISMEMBER", redis.Args{"t1", "a"}, int64(1), ""},
 			{"SMEMBERS", redis.Args{"t1"}, lenResult(2), ""}, // order is not deterministic
 			{"SMOVE", redis.Args{"t1", "{t1}.c", "a"}, int64(1), ""},
-			{"SPOP", redis.Args{"t3"}, nil, ""},
-			{"SRANDMEMBER", redis.Args{"t3"}, nil, ""},
+			{"SPOP", redis.Args{"t3{t1}"}, nil, ""},
+			{"SRANDMEMBER", redis.Args{"t3{t1}"}, nil, ""},
 			{"SREM", redis.Args{"t1", "b"}, int64(1), ""},
 			{"SSCAN", redis.Args{"{t1}.b", 0}, lenResult(2), ""},
 			{"SUNION", redis.Args{"{t1}.b", "{t1}.c"}, lenResult(3), ""},
@@ -342,7 +860,8 @@ func TestCommands(t *testing.T) {
 			{"SET", redis.Args{"s{b}", "b"}, "OK", ""},
 			{"SET", redis.Args{"s{bcd}", "c"}, "OK", ""},
 			// keys "b" (3300) and "bcd" (1872) are both in a hash slot < 5000, so on same node for this test
-			// yet it still fails with CROSSSLOT.
+			// yet it still fails with CROSSSLOT (i.e. redis does not accept multi-key commands that don't
+			// strictly hash to the same slot, regardless of which host serves them).
 			{"MGET", redis.Args{"s{b}", "s{bcd}"}, "", "CROSSSLOT"},
 		},
 		"transactions": {
@@ -356,6 +875,9 @@ func TestCommands(t *testing.T) {
 		},
 	}
 
+	fn, ports := redistest.StartCluster(t, nil)
+	defer fn()
+
 	for i, p := range ports {
 		ports[i] = ":" + p
 	}
@@ -364,6 +886,8 @@ func TestCommands(t *testing.T) {
 		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
 		CreatePool:   createPool,
 	}
+	defer c.Close()
+
 	require.NoError(t, c.Refresh(), "Refresh")
 
 	var wg sync.WaitGroup
@@ -393,9 +917,7 @@ func runTransactionsCommands(t *testing.T, c *Cluster, wg *sync.WaitGroup) {
 
 	conn := c.Get()
 	defer conn.Close()
-	if conn, ok := conn.(*Conn); ok {
-		require.NoError(t, conn.Bind("tr{a}1", "tr{a}2"), "Bind")
-	}
+	require.NoError(t, BindConn(conn, "tr{a}1", "tr{a}2"), "Bind")
 
 	_, err := conn.Do("WATCH", "tr{a}1")
 	assert.NoError(t, err, "WATCH")
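
The transactions test above now uses the package-level BindConn helper instead of type-asserting to *Conn. For reference, a hedged sketch of the same WATCH/MULTI/EXEC pattern in client code; the key names reuse the {a} hash tag from the test so both keys map to one slot, and the surrounding function is illustrative only:

import "github.com/mna/redisc"

func transferTr(cluster *redisc.Cluster) error {
	conn := cluster.Get()
	defer conn.Close()

	// Bind to the node serving the slot shared by both keys; Bind fails if
	// the keys hash to different slots.
	if err := redisc.BindConn(conn, "tr{a}1", "tr{a}2"); err != nil {
		return err
	}
	if _, err := conn.Do("WATCH", "tr{a}1"); err != nil {
		return err
	}
	if err := conn.Send("MULTI"); err != nil {
		return err
	}
	if err := conn.Send("INCR", "tr{a}1"); err != nil {
		return err
	}
	if err := conn.Send("DECR", "tr{a}2"); err != nil {
		return err
	}
	// EXEC flushes the queued commands and returns their replies.
	_, err := conn.Do("EXEC")
	return err
}
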
diff --git a/conn.go b/conn.go
index 931be43..7a7002d 100644
--- a/conn.go
+++ b/conn.go
@@ -13,22 +13,20 @@ import (
 
 var _ redis.ConnWithTimeout = (*Conn)(nil)
 
-// Conn is a redis cluster connection. When returned by Get
-// or Dial, it is not yet bound to any node in the cluster.
-// Only when a call to Do, Send, Receive or Bind is made is a connection
-// to a specific node established:
+// Conn is a redis cluster connection. When returned by Get or Dial, it is not
+// yet bound to any node in the cluster.  Only when a call to Do, Send, Receive
+// or Bind is made is a connection to a specific node established:
 //
-//     - if Do or Send is called first, the command's first parameter
-//       is assumed to be the key, and its slot is used to find the node
-//     - if Receive is called first, or if Do or Send is called first
-//       but with no parameter for the command (or no command), a
-//       random node is selected in the cluster
-//     - if Bind is called first, the node corresponding to the slot of
-//       the specified key(s) is selected
+//     - if Do or Send is called first, the command's first parameter is
+//     assumed to be the key, and its slot is used to find the node
+//     - if Receive is called first, or if Do or Send is called first but with
+//     no parameter for the command (or no command), a random node is selected
+//     in the cluster
+//     - if Bind is called first, the node corresponding to the slot of the
+//     specified key(s) is selected
 //
-// Because Get and Dial return a redis.Conn interface,
-// a type assertion can be used to call Bind or ReadOnly on this
-// concrete Conn type:
+// Because Get and Dial return a redis.Conn interface, a type assertion can be
+// used to call Bind or ReadOnly on this concrete Conn type:
 //
 //     redisConn := cluster.Get()
 //     if conn, ok := redisConn.(*redisc.Conn); ok {
@@ -37,15 +35,14 @@ var _ redis.ConnWithTimeout = (*Conn)(nil)
 //       }
 //     }
 //
-// Or call the package-level BindConn or ReadOnlyConn helper functions.
-//
+// Alternatively, the package-level BindConn or ReadOnlyConn helper functions
+// may be used.
 type Conn struct {
-	cluster   *Cluster
-	forceDial bool // immutable
+	cluster   *Cluster // immutable
+	forceDial bool     // immutable
 
 	// redigo allows concurrent reader and writer (conn.Receive and
-	// conn.Send/conn.Flush), a mutex is needed to protect concurrent
-	// accesses.
+	// conn.Send/conn.Flush), so a mutex is needed to protect concurrent accesses.
 	mu        sync.Mutex
 	readOnly  bool
 	boundAddr string
@@ -53,9 +50,8 @@ type Conn struct {
 	rc        redis.Conn
 }
 
-// RedirError is a cluster redirection error. It indicates that
-// the redis node returned either a MOVED or an ASK error, as
-// specified by the Type field.
+// RedirError is a cluster redirection error. It indicates that the redis node
+// returned either a MOVED or an ASK error, as specified by the Type field.
 type RedirError struct {
 	// Type indicates if the redirection is a MOVED or an ASK.
 	Type string
@@ -67,8 +63,8 @@ type RedirError struct {
 	raw string
 }
 
-// Error returns the error message of a RedirError. This is the
-// message as received from redis.
+// Error returns the error message of a RedirError. This is the message as
+// received from redis.
 func (e *RedirError) Error() string {
 	return e.raw
 }
@@ -82,23 +78,21 @@ func isRedisErr(err error, typ string) bool {
 	return len(parts) > 0 && parts[0] == typ
 }
 
-// IsTryAgain returns true if the error is a redis cluster
-// error of type TRYAGAIN, meaning that the command is valid,
-// but the cluster is in an unstable state and it can't complete
-// the request at the moment.
+// IsTryAgain returns true if the error is a redis cluster error of type
+// TRYAGAIN, meaning that the command is valid, but the cluster is in an
+// unstable state and it can't complete the request at the moment.
 func IsTryAgain(err error) bool {
 	return isRedisErr(err, "TRYAGAIN")
 }
 
-// IsCrossSlot returns true if the error is a redis cluster
-// error of type CROSSSLOT, meaning that a command was sent
-// with keys from different slots.
+// IsCrossSlot returns true if the error is a redis cluster error of type
+// CROSSSLOT, meaning that a command was sent with keys from different slots.
 func IsCrossSlot(err error) bool {
 	return isRedisErr(err, "CROSSSLOT")
 }
 
-// ParseRedir parses err into a RedirError. If err is
-// not a MOVED or ASK error or if it is nil, it returns nil.
+// ParseRedir parses err into a RedirError. If err is not a MOVED or ASK error
+// or if it is nil, it returns nil.
 func ParseRedir(err error) *RedirError {
 	re, ok := err.(redis.Error)
 	if !ok {
@@ -120,10 +114,10 @@ func ParseRedir(err error) *RedirError {
 	}
 }
 
-// binds the connection to a specific node, the one holding the slot
-// or a random node if slot is -1, iff the connection is not broken
-// and is not already bound. It returns the redis conn, true if it
-// successfully bound to this slot, or any error.
+// binds the connection to a specific node, the one holding the slot or a
+// random node if slot is -1, iff the connection is not broken and is not
+// already bound. It returns the redis conn, true if it successfully bound to
+// this slot, or any error.
 func (c *Conn) bind(slot int) (rc redis.Conn, ok bool, err error) {
 	c.mu.Lock()
 	rc, err = c.rc, c.err
@@ -143,7 +137,7 @@ func (c *Conn) bind(slot int) (rc redis.Conn, ok bool, err error) {
 	return rc, ok, err
 }
 
-func cmdSlot(cmd string, args []interface{}) int {
+func cmdSlot(_ string, args []interface{}) int {
 	slot := -1
 	if len(args) > 0 {
 		key := fmt.Sprintf("%s", args[0])
@@ -152,10 +146,9 @@ func cmdSlot(cmd string, args []interface{}) int {
 	return slot
 }
 
-// BindConn is a convenience function that checks if c implements
-// a Bind method with the right signature such as the one for
-// a *Conn, and calls that method. If c doesn't implement that
-// method, it returns an error.
+// BindConn is a convenience function that checks if c implements a Bind method
+// with the right signature such as the one for a *Conn, and calls that method.
+// If c doesn't implement that method, it returns an error.
 func BindConn(c redis.Conn, keys ...string) error {
 	if cc, ok := c.(interface {
 		Bind(...string) error
@@ -165,11 +158,11 @@ func BindConn(c redis.Conn, keys ...string) error {
 	return errors.New("redisc: no Bind method")
 }
 
-// Bind binds the connection to the cluster node corresponding to
-// the slot of the provided keys. If the keys don't belong to the
-// same slot, an error is returned and the connection is not bound.
-// If the connection is already bound, an error is returned.
-// If no key is provided, it binds to a random node.
+// Bind binds the connection to the cluster node corresponding to the slot of
+// the provided keys. If the keys don't belong to the same slot, an error is
+// returned and the connection is not bound.  If the connection is already
+// bound, an error is returned.  If no key is provided, it binds to a random
+// node.
 func (c *Conn) Bind(keys ...string) error {
 	slot := -1
 	for _, k := range keys {
@@ -191,10 +184,9 @@ func (c *Conn) Bind(keys ...string) error {
 	return nil
 }
 
-// ReadOnlyConn is a convenience function that checks if c implements
-// a ReadOnly method with the right signature such as the one for
-// a *Conn, and calls that method. If c doesn't implement that
-// method, it returns an error.
+// ReadOnlyConn is a convenience function that checks if c implements a
+// ReadOnly method with the right signature such as the one for a *Conn, and
+// calls that method. If c doesn't implement that method, it returns an error.
 func ReadOnlyConn(c redis.Conn) error {
 	if cc, ok := c.(interface {
 		ReadOnly() error
@@ -204,16 +196,16 @@ func ReadOnlyConn(c redis.Conn) error {
 	return errors.New("redisc: no ReadOnly method")
 }
 
-// ReadOnly marks the connection as read-only, meaning that when it is
-// bound to a cluster node, it will attempt to connect to a replica instead
-// of the master and will automatically emit a READONLY command so that
-// the replica agrees to serve read commands. Be aware that reading
-// from a replica may return stale data. Sending write commands on a
-// read-only connection will fail with a MOVED error.
-// See http://redis.io/commands/readonly for more details.
+// ReadOnly marks the connection as read-only, meaning that when it is bound to
+// a cluster node, it will attempt to connect to a replica instead of the
+// master and will automatically emit a READONLY command so that the replica
+// agrees to serve read commands. Be aware that reading from a replica may
+// return stale data. Sending write commands on a read-only connection will
+// fail with a MOVED error.  See http://redis.io/commands/readonly for more
+// details.
 //
-// If the connection is already bound to a node, either via a call to
-// Do, Send, Receive or Bind, ReadOnly returns an error.
+// If the connection is already bound to a node, either via a call to Do, Send,
+// Receive or Bind, ReadOnly returns an error.
 func (c *Conn) ReadOnly() error {
 	c.mu.Lock()
 	defer c.mu.Unlock()
@@ -229,23 +221,23 @@ func (c *Conn) ReadOnly() error {
 	return nil
 }
 
-// Do sends a command to the server and returns the received reply.
-// If the connection is not yet bound to a cluster node, it will be
-// after this call, based on the rules documented in the Conn type.
+// Do sends a command to the server and returns the received reply.  If the
+// connection is not yet bound to a cluster node, it will be after this call,
+// based on the rules documented in the Conn type.
 func (c *Conn) Do(cmd string, args ...interface{}) (interface{}, error) {
 	return c.DoWithTimeout(-1, cmd, args...)
 }
 
 // DoWithTimeout sends a command to the server and returns the received reply.
-// If the connection is not yet bound to a cluster node, it will be
-// after this call, based on the rules documented in the Conn type.
+// If the connection is not yet bound to a cluster node, it will be after this
+// call, based on the rules documented in the Conn type.
 //
-// The timeout overrides the read timeout set when dialing the
-// connection (in the DialOptions of the Cluster).
+// The timeout overrides the read timeout set when dialing the connection (in
+// the DialOptions of the Cluster).
 func (c *Conn) DoWithTimeout(timeout time.Duration, cmd string, args ...interface{}) (v interface{}, err error) {
 	// The blank command is a special redigo/redis command that flushes the
 	// output buffer and receives all pending replies. This is used, for example,
-	// when returning a Redis conneciton back to the pool. If we recieve the
+	// when returning a Redis connection back to the pool. If we receive the
 	// blank command, don't bind to a random node if this connection is not bound
 	// yet.
 	if cmd == "" && len(args) == 0 {
@@ -277,9 +269,9 @@ func (c *Conn) DoWithTimeout(timeout time.Duration, cmd string, args ...interfac
 	return v, err
 }
 
-// Send writes the command to the client's output buffer. If the
-// connection is not yet bound to a cluster node, it will be after
-// this call, based on the rules documented in the Conn type.
+// Send writes the command to the client's output buffer. If the connection is
+// not yet bound to a cluster node, it will be after this call, based on the
+// rules documented in the Conn type.
 func (c *Conn) Send(cmd string, args ...interface{}) error {
 	rc, _, err := c.bind(cmdSlot(cmd, args))
 	if err != nil {
@@ -288,19 +280,19 @@ func (c *Conn) Send(cmd string, args ...interface{}) error {
 	return rc.Send(cmd, args...)
 }
 
-// Receive receives a single reply from the server. If the connection
-// is not yet bound to a cluster node, it will be after this call,
-// based on the rules documented in the Conn type.
+// Receive receives a single reply from the server. If the connection is not
+// yet bound to a cluster node, it will be after this call, based on the rules
+// documented in the Conn type.
 func (c *Conn) Receive() (interface{}, error) {
 	return c.ReceiveWithTimeout(-1)
 }
 
-// ReceiveWithTimeout receives a single reply from the Redis server.
-// If the connection is not yet bound to a cluster node, it will be
-// after this call, based on the rules documented in the Conn type.
+// ReceiveWithTimeout receives a single reply from the Redis server.  If the
+// connection is not yet bound to a cluster node, it will be after this call,
+// based on the rules documented in the Conn type.
 //
-// The timeout overrides the read timeout set when dialing the
-// connection (in the DialOptions of the Cluster).
+// The timeout overrides the read timeout set when dialing the connection (in
+// the DialOptions of the Cluster).
 func (c *Conn) ReceiveWithTimeout(timeout time.Duration) (v interface{}, err error) {
 	rc, _, err := c.bind(-1)
 	if err != nil {
@@ -361,7 +353,7 @@ func (c *Conn) closeLocked() (err error) {
 	if c.rc != nil {
 		// this may be a pooled connection, so make sure the readOnly flag is reset
 		if c.readOnly {
-			c.rc.Do("READWRITE")
+			_, _ = c.rc.Do("READWRITE")
 		}
 		err = c.rc.Close()
 	}
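
The reworked Conn documentation above describes the binding rules and the BindConn/ReadOnlyConn helpers. A minimal sketch of the read-from-replica path under those rules; the cluster setup is omitted and the key name is illustrative:

import (
	"github.com/gomodule/redigo/redis"
	"github.com/mna/redisc"
)

func getUser(cluster *redisc.Cluster, id string) (string, error) {
	conn := cluster.Get()
	defer conn.Close()

	// ReadOnly must be requested before the connection binds to a node.
	if err := redisc.ReadOnlyConn(conn); err != nil {
		return "", err
	}
	// Bind to the node (here, a replica) serving the key's slot.
	if err := redisc.BindConn(conn, "user:"+id); err != nil {
		return "", err
	}
	return redis.String(conn.Do("GET", "user:"+id))
}
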
diff --git a/conn_test.go b/conn_test.go
index a0cfa5e..ae7a9f8 100644
--- a/conn_test.go
+++ b/conn_test.go
@@ -14,27 +14,32 @@ import (
 )
 
 // Test the conn.ReadOnly behaviour in a cluster setup with 1 replica per
-// node. Runs multiple tests in the same function because setting up
-// such a cluster is slow.
-func TestConnReadOnlyWithReplicas(t *testing.T) {
-	fn, ports := redistest.StartClusterWithReplicas(t, nil)
-	defer fn()
-
-	c := &Cluster{}
-	testWithReplicaBindRandomWithoutNode(t, c)
-
-	c = &Cluster{StartupNodes: []string{":" + ports[0]}}
-	testWithReplicaBindEmptySlot(t, c)
-
-	c = &Cluster{StartupNodes: []string{":" + ports[0]}}
-	testWithReplicaClusterRefresh(t, c, ports)
-
-	// at this point the cluster has refreshed its mapping
-	testReadWriteFromReplica(t, c, ports[redistest.NumClusterNodes:])
-
-	testReadOnlyWithRandomConn(t, c, ports[redistest.NumClusterNodes:])
-
-	testRetryReadOnlyConn(t, c, ports[:redistest.NumClusterNodes], ports[redistest.NumClusterNodes:])
+// node.
+func testConnReadOnlyWithReplicas(t *testing.T, ports []string) {
+	t.Run("bind random without node", func(t *testing.T) {
+		c := &Cluster{}
+		defer c.Close()
+		testWithReplicaBindRandomWithoutNode(t, c)
+	})
+
+	t.Run("bind empty slot", func(t *testing.T) {
+		c := &Cluster{StartupNodes: []string{ports[0]}}
+		defer c.Close()
+		testWithReplicaBindEmptySlot(t, c)
+	})
+
+	t.Run("with refresh", func(t *testing.T) {
+		c := &Cluster{StartupNodes: []string{ports[0]}}
+		defer c.Close()
+		testWithReplicaClusterRefresh(t, c, ports)
+
+		// at this point the cluster has refreshed its mapping
+		testReadWriteFromReplica(t, c, ports[redistest.NumClusterNodes:])
+
+		testReadOnlyWithRandomConn(t, c, ports[redistest.NumClusterNodes:])
+
+		testRetryReadOnlyConn(t, c, ports[:redistest.NumClusterNodes], ports[redistest.NumClusterNodes:])
+	})
 }
 
 func testRetryReadOnlyConn(t *testing.T, c *Cluster, masters []string, replicas []string) {
@@ -73,7 +78,7 @@ func assertBoundTo(t *testing.T, conn *Conn, ports []string) string {
 
 	found := false
 	for _, port := range ports {
-		if strings.HasSuffix(addr, ":"+port) {
+		if strings.HasSuffix(addr, port) {
 			found = true
 			break
 		}
@@ -102,7 +107,7 @@ func testReadWriteFromReplica(t *testing.T, c *Cluster, replicas []string) {
 
 	conn2 := c.Get().(*Conn)
 	defer conn2.Close()
-	ReadOnlyConn(conn2)
+	_ = ReadOnlyConn(conn2)
 
 	// can read the key from the replica (may take a moment to replicate,
 	// so retry a few times)
@@ -148,19 +153,13 @@ func testWithReplicaBindEmptySlot(t *testing.T, c *Cluster) {
 		assert.Contains(t, err.Error(), "MOVED", "MOVED error")
 	}
 
-	// wait for refreshing to become false again
-	c.mu.Lock()
-	for c.refreshing {
-		c.mu.Unlock()
-		time.Sleep(100 * time.Millisecond)
-		c.mu.Lock()
-	}
-	for i, v := range c.mapping {
-		if !assert.NotEmpty(t, v, "Addr for %d", i) {
-			break
+	waitForClusterRefresh(c, func() {
+		for i, v := range c.mapping {
+			if !assert.NotEmpty(t, v, "Addr for %d", i) {
+				break
+			}
 		}
-	}
-	c.mu.Unlock()
+	})
 }
 
 func testWithReplicaBindRandomWithoutNode(t *testing.T, c *Cluster) {
@@ -185,8 +184,8 @@ func testWithReplicaClusterRefresh(t *testing.T, c *Cluster, ports []string) {
 				}
 				if assert.NotEmpty(t, node[0]) {
 					split0, split1 := strings.Index(node[0], ":"), strings.Index(node[1], ":")
-					assert.Contains(t, ports, node[0][split0+1:], "expected address")
-					assert.Contains(t, ports, node[1][split1+1:], "expected address")
+					assert.Contains(t, ports, node[0][split0:], "expected address")
+					assert.Contains(t, ports, node[1][split1:], "expected address")
 				}
 			} else {
 				break
@@ -195,13 +194,11 @@ func testWithReplicaClusterRefresh(t *testing.T, c *Cluster, ports []string) {
 	}
 }
 
-func TestConnReadOnly(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
+func testConnReadOnlyNoReplica(t *testing.T, ports []string) {
 	c := &Cluster{
-		StartupNodes: []string{":" + ports[0]},
+		StartupNodes: []string{ports[0]},
 	}
+	defer c.Close()
 	require.NoError(t, c.Refresh(), "Refresh")
 
 	conn := c.Get()
@@ -224,17 +221,12 @@ func TestConnReadOnly(t *testing.T) {
 	assert.Error(t, cc2.ReadOnly(), "ReadOnly after Bind")
 }
 
-func TestConnBind(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
-	for i, p := range ports {
-		ports[i] = ":" + p
-	}
+func testConnBind(t *testing.T, ports []string) {
 	c := &Cluster{
 		StartupNodes: ports,
 		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
 	}
+	defer c.Close()
 	require.NoError(t, c.Refresh(), "Refresh")
 
 	conn := c.Get()
@@ -254,17 +246,12 @@ func TestConnBind(t *testing.T) {
 	assert.NoError(t, BindConn(conn2), "Bind without key")
 }
 
-func TestConnBlankDo(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
-	for i, p := range ports {
-		ports[i] = ":" + p
-	}
+func testConnBlankDo(t *testing.T, ports []string) {
 	c := &Cluster{
 		StartupNodes: ports,
 		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
 	}
+	defer c.Close()
 	require.NoError(t, c.Refresh(), "Refresh")
 
 	conn := c.Get()
@@ -280,17 +267,15 @@ func TestConnBlankDo(t *testing.T) {
 	assert.NotNil(t, cconn.rc)
 }
 
-func TestConnWithTimeout(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
+func testConnWithTimeout(t *testing.T, ports []string) {
 	c := &Cluster{
-		StartupNodes: []string{":" + ports[0]},
+		StartupNodes: []string{ports[0]},
 		DialOptions: []redis.DialOption{
 			redis.DialConnectTimeout(2 * time.Second),
 			redis.DialReadTimeout(time.Second),
 		},
 	}
+	defer c.Close()
 	require.NoError(t, c.Refresh(), "Refresh")
 
 	testConnDoWithTimeout(t, c)
@@ -350,6 +335,8 @@ func TestConnClose(t *testing.T) {
 	c := &Cluster{
 		StartupNodes: []string{":6379"},
 	}
+	defer c.Close()
+
 	conn := c.Get()
 	require.NoError(t, conn.Close(), "Close")
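
The TestConn* functions above become testConn* helpers that take the cluster ports, so a single cluster can be shared across them. The parent test that starts that shared cluster is added elsewhere in this changeset and is not shown here; a hedged sketch of how such a driver could look (the subtest names are illustrative):

func TestConnSharedCluster(t *testing.T) {
	fn, ports := redistest.StartCluster(t, nil)
	defer fn()
	// The refactored helpers expect ports already prefixed with ":".
	for i, p := range ports {
		ports[i] = ":" + p
	}
	t.Run("ReadOnlyNoReplica", func(t *testing.T) { testConnReadOnlyNoReplica(t, ports) })
	t.Run("Bind", func(t *testing.T) { testConnBind(t, ports) })
	t.Run("BlankDo", func(t *testing.T) { testConnBlankDo(t, ports) })
	t.Run("WithTimeout", func(t *testing.T) { testConnWithTimeout(t, ports) })
}
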
 
diff --git a/doc.go b/doc.go
index e9075b0..66cf998 100644
--- a/doc.go
+++ b/doc.go
@@ -57,7 +57,9 @@
 // Along with some additional methods specific to a cluster:
 //
 //     Dial() (redis.Conn, error)
+//     EachNode(bool, func(string, redis.Conn) error) error
 //     Refresh() error
+//     Stats() map[string]redis.PoolStats
 //
 // If the CreatePool function field is set, then a
 // redis.Pool is created to manage connections to each of the
@@ -78,6 +80,14 @@
 // It is automatically kept up-to-date based on the redis MOVED
 // responses afterwards.
 //
+// The EachNode method visits each node in the cluster and calls
+// the provided function with a connection to that node, which may
+// be useful to run diagnostic commands on each node or to collect
+// keys across the whole cluster.
+//
+// The Stats method returns the pool statistics for each node, with
+// the node's address as the key of the map.
+//
 // A cluster must be closed once it is no longer used to release
 // its resources.
 //
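
A short usage sketch of the two methods documented above; the PING diagnostic and the printed fields are illustrative, and the false argument to EachNode visits the primary nodes, as in the tests later in this changeset:

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
	"github.com/mna/redisc"
)

func inspect(cluster *redisc.Cluster) error {
	// Run a diagnostic command against every primary node.
	err := cluster.EachNode(false, func(addr string, conn redis.Conn) error {
		pong, err := redis.String(conn.Do("PING"))
		fmt.Println(addr, pong)
		return err
	})
	if err != nil {
		return err
	}
	// Pool statistics, keyed by node address.
	for addr, st := range cluster.Stats() {
		fmt.Printf("%s: %d active, %d idle\n", addr, st.ActiveCount, st.IdleCount)
	}
	return nil
}
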
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..9f4d5cd
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,8 @@
+module github.com/mna/redisc
+
+go 1.16
+
+require (
+	github.com/gomodule/redigo v1.8.5
+	github.com/stretchr/testify v1.7.0
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..8fbcce4
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,17 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/gomodule/redigo v1.8.4 h1:Z5JUg94HMTR1XpwBaSH4vq3+PNSIykBLxMdglbw10gg=
+github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
+github.com/gomodule/redigo v1.8.5 h1:nRAxCa+SVsyjSBrtZmG/cqb6VbTmuRzpg/PoTFlpumc=
+github.com/gomodule/redigo v1.8.5/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/hash.go b/hash.go
index d6baf76..2177060 100644
--- a/hash.go
+++ b/hash.go
@@ -13,7 +13,7 @@ func Slot(key string) int {
 			key = key[start+1 : end]
 		}
 	}
-	return int(crc16(key) % hashSlots)
+	return int(crc16(key) % HashSlots)
 }
 
 // SplitBySlot takes a list of keys and returns a list of list of keys,
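
The renamed HashSlots constant above is now exported alongside Slot and SplitBySlot. A hedged sketch of using them from client code; the SplitBySlot signature is assumed from its doc comment (variadic keys, grouped per slot):

import (
	"fmt"

	"github.com/mna/redisc"
)

func demoSlots() {
	// Keys sharing a {tag} hash to the same slot out of redisc.HashSlots (16384).
	fmt.Println(redisc.Slot("{t1}.b"), redisc.Slot("t3{t1}")) // same slot
	fmt.Println(redisc.Slot("a"), redisc.Slot("b"))           // usually different slots

	// Group keys by slot before issuing multi-key commands; the signature
	// SplitBySlot(keys ...string) [][]string is assumed here.
	for _, group := range redisc.SplitBySlot("a", "b", "{t1}.b", "t3{t1}") {
		fmt.Println(group)
	}
}
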
diff --git a/misc/git/pre-commit b/misc/git/pre-commit
deleted file mode 100755
index 8c51c09..0000000
--- a/misc/git/pre-commit
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-
-echo ">>> golint"
-for dir in $(go list ./... | grep -v /vendor/)
-do
-    golint $dir
-done
-echo "<<< golint"
-echo
-
-echo ">>> go vet"
-go vet $(go list ./... | grep -v /vendor/)
-echo "<<< go vet"
-echo
-
-# Check for gofmt problems and report if any.
-gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$' | grep -v /vendor/)
-[ -z "$gofiles" ] && echo "EXIT $vetres" && exit $vetres
-
-if [ -n "$gofiles" ]; then
-    unformatted=$(gofmt -l $gofiles)
-
-    if [ -n "$unformatted" ]; then
-        # Some files are not gofmt'd.
-        echo >&2 "Go files must be formatted with gofmt. Please run:"
-        for fn in $unformatted; do
-            echo >&2 "  gofmt -w $PWD/$fn"
-        done
-    fi
-fi
-echo
-
diff --git a/misc/lint_tester.goo b/misc/lint_tester.goo
deleted file mode 100644
index 92995fd..0000000
--- a/misc/lint_tester.goo
+++ /dev/null
@@ -1,9 +0,0 @@
-package misc
-
-import "fmt"
-
-// golint error
-func Exported(s string) {
-	fmt.Printf("%d", s) // go vet error
-return // gofmt error
-}
diff --git a/redistest/mock_server.go b/redistest/mock_server.go
index 141f646..203a021 100644
--- a/redistest/mock_server.go
+++ b/redistest/mock_server.go
@@ -27,7 +27,7 @@ type MockServer struct {
 // encoded in the redis protocol and sent to the client. The caller should close
 // the server after use.
 func StartMockServer(t *testing.T, handler func(cmd string, args ...string) interface{}) *MockServer {
-	l, err := net.Listen("tcp", ":0")
+	l, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err, "net.Listen")
 
 	_, port, _ := net.SplitHostPort(l.Addr().String())
diff --git a/redistest/resp/encode_test.go b/redistest/resp/encode_test.go
index 63fce9b..d2930ca 100644
--- a/redistest/resp/encode_test.go
+++ b/redistest/resp/encode_test.go
@@ -46,7 +46,7 @@ func TestEncode(t *testing.T) {
 			t.Errorf("%d: got error %s", i, err)
 			continue
 		}
-		if bytes.Compare(buf.Bytes(), c.enc) != 0 {
+		if !bytes.Equal(buf.Bytes(), c.enc) {
 			t.Errorf("%d: expected %x (%q), got %x (%q)", i, c.enc, string(c.enc), buf.Bytes(), buf.String())
 		}
 	}
diff --git a/redistest/server.go b/redistest/server.go
index d3f9b6e..58232ed 100644
--- a/redistest/server.go
+++ b/redistest/server.go
@@ -7,9 +7,7 @@ import (
 	"fmt"
 	"io"
 	"net"
-	"os"
 	"os/exec"
-	"path/filepath"
 	"strconv"
 	"strings"
 	"testing"
@@ -43,7 +41,7 @@ const NumClusterNodes = 3
 // If w is not nil, both stdout and stderr of the server are
 // written to it. If a configuration is specified, it is supplied
 // to the server via stdin.
-func StartServer(t *testing.T, w io.Writer, conf string) (*exec.Cmd, string) {
+func StartServer(t testing.TB, w io.Writer, conf string) (*exec.Cmd, string) {
 	if _, err := exec.LookPath("redis-server"); err != nil {
 		t.Skip("redis-server not found in $PATH")
 	}
@@ -56,12 +54,12 @@ func StartServer(t *testing.T, w io.Writer, conf string) (*exec.Cmd, string) {
 // 1 replica each. It returns the cleanup function to call after use
 // (typically in a defer) and the list of ports for each node,
 // masters first, then replicas.
-func StartClusterWithReplicas(t *testing.T, w io.Writer) (func(), []string) {
+func StartClusterWithReplicas(t testing.TB, w io.Writer) (func(), []string) {
 	fn, ports := StartCluster(t, w)
 	mapping := getClusterNodeIDs(t, ports...)
 
-	var replicaPorts []string
-	var replicaCmds []*exec.Cmd
+	replicaPorts := make([]string, 0, len(ports))
+	replicaCmds := make([]*exec.Cmd, 0, len(ports))
 	replicaMaster := make(map[string]string)
 	for _, master := range ports {
 		port := getClusterFreePort(t)
@@ -83,13 +81,7 @@ func StartClusterWithReplicas(t *testing.T, w io.Writer) (func(), []string) {
 
 	return func() {
 		for _, c := range replicaCmds {
-			c.Process.Kill()
-		}
-		for _, port := range replicaPorts {
-			if strings.HasPrefix(port, ":") {
-				port = port[1:]
-			}
-			os.Remove(filepath.Join(os.TempDir(), fmt.Sprintf("nodes.%s.conf", port)))
+			_ = c.Process.Kill()
 		}
 		fn()
 	}, append(ports, replicaPorts...)
@@ -102,7 +94,7 @@ func StartClusterWithReplicas(t *testing.T, w io.Writer) (func(), []string) {
 // It returns a function that should be called after the test
 // (typically in a defer), and the list of ports for all nodes
 // in the cluster.
-func StartCluster(t *testing.T, w io.Writer) (func(), []string) {
+func StartCluster(t testing.TB, w io.Writer) (func(), []string) {
 	if _, err := exec.LookPath("redis-server"); err != nil {
 		t.Skip("redis-server not found in $PATH")
 	}
@@ -139,18 +131,13 @@ func StartCluster(t *testing.T, w io.Writer) (func(), []string) {
 
 	return func() {
 		for _, c := range cmds {
-			c.Process.Kill()
-		}
-		for _, port := range ports {
-			if strings.HasPrefix(port, ":") {
-				port = port[1:]
-			}
-			os.Remove(filepath.Join(os.TempDir(), fmt.Sprintf("nodes.%s.conf", port)))
+			_ = c.Process.Kill()
 		}
 	}, ports
 }
 
-func printClusterNodes(t *testing.T, port string) {
+//nolint:deadcode,unused
+func printClusterNodes(t testing.TB, port string) {
 	conn, err := redis.Dial("tcp", ":"+port)
 	require.NoError(t, err, "Dial to cluster node")
 	defer conn.Close()
@@ -160,7 +147,8 @@ func printClusterNodes(t *testing.T, port string) {
 	fmt.Println(string(res.([]byte)))
 }
 
-func printClusterSlots(t *testing.T, port string) {
+//nolint:deadcode,unused
+func printClusterSlots(t testing.TB, port string) {
 	conn, err := redis.Dial("tcp", ":"+port)
 	require.NoError(t, err, "Dial to cluster node")
 	defer conn.Close()
@@ -171,7 +159,7 @@ func printClusterSlots(t *testing.T, port string) {
 	fmt.Println(string(b))
 }
 
-func joinCluster(t *testing.T, nodePort, clusterPort string) {
+func joinCluster(t testing.TB, nodePort, clusterPort string) {
 	conn, err := redis.Dial("tcp", ":"+nodePort)
 	require.NoError(t, err, "Dial to node")
 	defer conn.Close()
@@ -181,7 +169,7 @@ func joinCluster(t *testing.T, nodePort, clusterPort string) {
 	require.NoError(t, err, "CLUSTER MEET")
 }
 
-func getClusterNodeIDs(t *testing.T, ports ...string) map[string]string {
+func getClusterNodeIDs(t testing.TB, ports ...string) map[string]string {
 	if len(ports) == 0 {
 		return nil
 	}
@@ -212,7 +200,7 @@ func getClusterNodeIDs(t *testing.T, ports ...string) map[string]string {
 	return mapping
 }
 
-func setupReplica(t *testing.T, replicaPort, masterID string) {
+func setupReplica(t testing.TB, replicaPort, masterID string) {
 	conn, err := redis.Dial("tcp", ":"+replicaPort)
 	require.NoError(t, err, "Dial to replica node")
 	defer conn.Close()
@@ -221,7 +209,7 @@ func setupReplica(t *testing.T, replicaPort, masterID string) {
 	require.NoError(t, err, "CLUSTER REPLICATE")
 }
 
-func setupClusterNode(t *testing.T, port string, start, count int) {
+func setupClusterNode(t testing.TB, port string, start, count int) {
 	conn, err := redis.Dial("tcp", ":"+port)
 	require.NoError(t, err, "Dial to cluster node")
 	defer conn.Close()
@@ -235,7 +223,7 @@ func setupClusterNode(t *testing.T, port string, start, count int) {
 	require.NoError(t, err, "CLUSTER ADDSLOTS")
 }
 
-func waitForReplicas(t *testing.T, timeout time.Duration, ports ...string) bool {
+func waitForReplicas(t testing.TB, timeout time.Duration, ports ...string) bool {
 	deadline := time.Now().Add(timeout)
 
 	for _, port := range ports {
@@ -273,7 +261,7 @@ func waitForReplicas(t *testing.T, timeout time.Duration, ports ...string) bool
 	return true
 }
 
-func waitForCluster(t *testing.T, timeout time.Duration, ports ...string) bool {
+func waitForCluster(t testing.TB, timeout time.Duration, ports ...string) bool {
 	deadline := time.Now().Add(timeout)
 
 	for _, port := range ports {
@@ -297,7 +285,7 @@ func waitForCluster(t *testing.T, timeout time.Duration, ports ...string) bool {
 	return true
 }
 
-func startServerWithConfig(t *testing.T, port string, w io.Writer, conf string) *exec.Cmd {
+func startServerWithConfig(t testing.TB, port string, w io.Writer, conf string) *exec.Cmd {
 	var args []string
 	if conf == "" {
 		args = []string{"--port", port}
@@ -305,7 +293,7 @@ func startServerWithConfig(t *testing.T, port string, w io.Writer, conf string)
 		args = []string{"-"}
 	}
 	c := exec.Command("redis-server", args...)
-	c.Dir = os.TempDir()
+	c.Dir = t.TempDir()
 
 	if w != nil {
 		c.Stderr = w
@@ -339,7 +327,7 @@ func waitForPort(port string, timeout time.Duration) bool {
 	return false
 }
 
-func getClusterFreePort(t *testing.T) string {
+func getClusterFreePort(t testing.TB) string {
 	const maxPort = 55535
 
 	// the port number in a redis-cluster must be below 55535 because
@@ -353,8 +341,8 @@ func getClusterFreePort(t *testing.T) string {
 	return port
 }
 
-func getFreePort(t *testing.T) string {
-	l, err := net.Listen("tcp", ":0")
+func getFreePort(t testing.TB) string {
+	l, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err, "listen on port 0")
 	defer l.Close()
 	_, p, err := net.SplitHostPort(l.Addr().String())
@@ -364,7 +352,7 @@ func getFreePort(t *testing.T) string {
 
 // NewPool creates a redis pool to return connections on the specified
 // addr.
-func NewPool(t *testing.T, addr string) *redis.Pool {
+func NewPool(_ testing.TB, addr string) *redis.Pool {
 	return &redis.Pool{
 		MaxIdle:     2,
 		MaxActive:   10,
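
NewPool above returns a ready-made redis.Pool for one address; the cluster tests in this changeset pass a similar function as the Cluster's CreatePool field (createPool, not shown here). A hedged sketch of such a function, with its signature assumed rather than taken from this diff:

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

// createPool is assumed to match the Cluster.CreatePool field's signature:
// it builds a small pool for a single cluster node address.
func createPool(addr string, opts ...redis.DialOption) (*redis.Pool, error) {
	return &redis.Pool{
		MaxIdle:     5,
		MaxActive:   10,
		IdleTimeout: time.Minute,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", addr, opts...)
		},
	}, nil
}
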
diff --git a/retry_conn.go b/retry_conn.go
index b27bebd..9dd883a 100644
--- a/retry_conn.go
+++ b/retry_conn.go
@@ -7,15 +7,17 @@ import (
 	"github.com/gomodule/redigo/redis"
 )
 
-// RetryConn wraps the connection c (which must be a *Conn)
-// into a connection that automatically handles cluster redirections
-// (MOVED and ASK replies) and retries for TRYAGAIN errors.
-// Only Do, Close and Err can be called on that connection,
-// all other methods return an error.
+// RetryConn wraps the connection c (which must be a *redisc.Conn) into a
+// connection that automatically handles cluster redirections (MOVED and ASK
+// replies) and retries for TRYAGAIN errors.  Only Do, Close and Err can be
+// called on that connection; all other methods return an error.
 //
-// The maxAtt parameter indicates the maximum number of attempts
-// to successfully execute the command. The tryAgainDelay is the
-// duration to wait before retrying a TRYAGAIN error.
+// The maxAtt parameter indicates the maximum number of attempts to
+// successfully execute the command. The tryAgainDelay is the duration to wait
+// before retrying a TRYAGAIN error.
+//
+// The only case where it returns a non-nil error is if c is not a
+// *redisc.Conn.
 func RetryConn(c redis.Conn, maxAtt int, tryAgainDelay time.Duration) (redis.Conn, error) {
 	cc, ok := c.(*Conn)
 	if !ok {
@@ -27,8 +29,8 @@ func RetryConn(c redis.Conn, maxAtt int, tryAgainDelay time.Duration) (redis.Con
 type retryConn struct {
 	c *Conn
 
-	maxAttempts   int
-	tryAgainDelay time.Duration
+	maxAttempts   int           // immutable
+	tryAgainDelay time.Duration // immutable
 }
 
 func (rc *retryConn) Do(cmd string, args ...interface{}) (interface{}, error) {
@@ -68,11 +70,11 @@ func (rc *retryConn) do(cmd string, args ...interface{}) (interface{}, error) {
 		connAddr := rc.c.boundAddr
 		rc.c.mu.Unlock()
 		if readOnly {
-			// check if the connection was already made to that slot, meaning
-			// that the redirection is because the command can't be served
-			// by the replica and a non-readonly connection must be made to
-			// the slot's master. If that's not the case, then keep the
-			// readonly flag to true, meaning that it will attempt a connection
+			// check if the connection was already made to that slot, meaning that
+			// the redirection is because the command can't be served by the replica
+			// and a non-readonly connection must be made to the slot's master. If
+			// that's not the case, then keep the readonly flag set to true, meaning
+			// that it will attempt a connection
 			// to a replica for the new slot.
 			cluster.mu.Lock()
 			slotMappings := cluster.mapping[re.NewSlot]
@@ -87,21 +89,19 @@ func (rc *retryConn) do(cmd string, args ...interface{}) (interface{}, error) {
 		asking = re.Type == "ASK"
 
 		if asking {
-			// if redirecting due to ASK, use the address that was
-			// provided in the ASK error reply.
+			// if redirecting due to ASK, use the address that was provided in the
+			// ASK error reply.
 			conn, err = cluster.getConnForAddr(addr, rc.c.forceDial)
 			if err != nil {
 				return nil, err
 			}
-			// TODO(mna): does redis cluster send ASK replies that
-			// redirect to replicas if the source node was a replica?
-			// Assume no for now.
+			// TODO(mna): does redis cluster send ASK replies that redirect to
+			// replicas if the source node was a replica?  Assume no for now.
 			readOnly = false
 		} else {
-			// if redirecting due to a MOVED, the slot mapping is already
-			// updated to reflect the new server for that slot (done in
-			// rc.c.Do), so getConnForSlot will return a connection to
-			// the correct address.
+			// if redirecting due to a MOVED, the slot mapping is already updated to
+			// reflect the new server for that slot (done in rc.c.Do), so
+			// getConnForSlot will return a connection to the correct address.
 			conn, addr, err = cluster.getConnForSlot(re.NewSlot, rc.c.forceDial, readOnly)
 			if err != nil {
 				// could not get connection to that node, return that error
@@ -109,14 +109,19 @@ func (rc *retryConn) do(cmd string, args ...interface{}) (interface{}, error) {
 			}
 		}
 
+		var cerr error
 		rc.c.mu.Lock()
 		// close and replace the old connection (close must come before assignments)
-		rc.c.closeLocked()
+		cerr = rc.c.closeLocked()
 		rc.c.rc = conn
 		rc.c.boundAddr = addr
 		rc.c.readOnly = readOnly
 		rc.c.mu.Unlock()
 
+		if cerr != nil && cluster.BgError != nil {
+			go cluster.BgError(RetryCloseConn, cerr)
+		}
+
 		att++
 	}
 	return nil, errors.New("redisc: too many attempts")
@@ -130,7 +135,7 @@ func (rc *retryConn) Close() error {
 	return rc.c.Close()
 }
 
-func (rc *retryConn) Send(cmd string, args ...interface{}) error {
+func (rc *retryConn) Send(_ string, _ ...interface{}) error {
 	return errors.New("redisc: unsupported call to Send")
 }
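
A minimal usage sketch of RetryConn as documented above; the cluster setup is omitted and the retry parameters are illustrative:

import (
	"time"

	"github.com/gomodule/redigo/redis"
	"github.com/mna/redisc"
)

func getWithRetry(cluster *redisc.Cluster, key string) (string, error) {
	conn := cluster.Get()
	defer conn.Close()

	// Handle MOVED/ASK redirections and TRYAGAIN errors automatically,
	// with at most 4 attempts and a 100ms wait before TRYAGAIN retries.
	rc, err := redisc.RetryConn(conn, 4, 100*time.Millisecond)
	if err != nil {
		return "", err // only fails if conn is not a *redisc.Conn
	}
	return redis.String(rc.Do("GET", key))
}
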
 
diff --git a/retry_conn_test.go b/retry_conn_test.go
index 7d556f1..6e81692 100644
--- a/retry_conn_test.go
+++ b/retry_conn_test.go
@@ -3,6 +3,7 @@ package redisc
 import (
 	"net"
 	"strconv"
+	"strings"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -28,7 +29,7 @@ func TestRetryConnAsk(t *testing.T) {
 			nPort, _ := strconv.Atoi(port)
 			// reply that all slots are served by this server
 			return resp.Array{
-				0: resp.Array{0: int64(0), 1: int64(hashSlots - 1), 2: resp.Array{0: addr, 1: int64(nPort)}},
+				0: resp.Array{0: int64(0), 1: int64(HashSlots - 1), 2: resp.Array{0: addr, 1: int64(nPort)}},
 			}
 
 		case "GET":
@@ -90,7 +91,7 @@ func TestRetryConnAskDistinctServers(t *testing.T) {
 			nPort, _ := strconv.Atoi(port)
 			// reply that all slots are served by this server
 			return resp.Array{
-				0: resp.Array{0: int64(0), 1: int64(hashSlots - 1), 2: resp.Array{0: addr, 1: int64(nPort)}},
+				0: resp.Array{0: int64(0), 1: int64(HashSlots - 1), 2: resp.Array{0: addr, 1: int64(nPort)}},
 			}
 		case "GET":
 			// reply with ASK redirection
@@ -180,6 +181,8 @@ func TestRetryConnErrs(t *testing.T) {
 	c := &Cluster{
 		StartupNodes: []string{":6379"},
 	}
+	defer c.Close()
+
 	conn := c.Get()
 	require.NoError(t, conn.Close(), "Close")
 
@@ -198,17 +201,13 @@ func TestRetryConnErrs(t *testing.T) {
 	assert.Error(t, err, "RetryConn with a non-*Conn")
 }
 
-func TestRetryConnTooManyAttempts(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
-	for i, p := range ports {
-		ports[i] = ":" + p
-	}
+func testRetryConnTooManyAttempts(t *testing.T, ports []string) {
 	c := &Cluster{
 		StartupNodes: ports,
 		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
 	}
+	defer c.Close()
+
 	require.NoError(t, c.Refresh(), "Refresh")
 
 	// create a connection and bind to key "a"
@@ -226,17 +225,13 @@ func TestRetryConnTooManyAttempts(t *testing.T) {
 	}
 }
 
-func TestRetryConnMoved(t *testing.T) {
-	fn, ports := redistest.StartCluster(t, nil)
-	defer fn()
-
-	for i, p := range ports {
-		ports[i] = ":" + p
-	}
+func testRetryConnMoved(t *testing.T, ports []string) {
 	c := &Cluster{
 		StartupNodes: ports,
 		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
 	}
+	defer c.Close()
+
 	require.NoError(t, c.Refresh(), "Refresh")
 
 	// create a connection and bind to key "a"
@@ -286,3 +281,83 @@ func TestRetryConnMoved(t *testing.T) {
 		assert.Equal(t, "x", v, "GET value")
 	}
 }
+
+func testRetryConnTriggerRefreshes(t *testing.T, ports []string) {
+	var count int64
+	done := make(chan bool, 1)
+	c := &Cluster{
+		StartupNodes: []string{ports[0]},
+		DialOptions:  []redis.DialOption{redis.DialConnectTimeout(2 * time.Second)},
+		CreatePool:   createPool,
+		LayoutRefresh: func(old, new [HashSlots][]string) {
+			atomic.AddInt64(&count, 1)
+			select {
+			case done <- true:
+			default:
+			}
+		},
+	}
+	defer c.Close()
+
+	conn := c.Get()
+	conn, _ = RetryConn(conn, 3, 100*time.Millisecond)
+	defer conn.Close()
+
+	// set keys from different slots served by different servers
+	// (a=15495, b=3300, abc=7638).
+	_, err := conn.Do("SET", "a", 1)
+	assert.NoError(t, err, "SET a")
+	_, err = conn.Do("SET", "b", 2)
+	assert.NoError(t, err, "SET b")
+	_, err = conn.Do("SET", "abc", 3)
+	assert.NoError(t, err, "SET abc")
+	_, err = conn.Do("INCR", "a")
+	assert.NoError(t, err, "INCR a")
+	_, err = conn.Do("INCR", "b")
+	assert.NoError(t, err, "INCR b")
+	_, err = conn.Do("INCR", "abc")
+	assert.NoError(t, err, "INCR abc")
+
+	v, err := redis.Int(conn.Do("GET", "a"))
+	if assert.NoError(t, err, "GET a") {
+		assert.Equal(t, 2, v)
+	}
+	v, err = redis.Int(conn.Do("GET", "b"))
+	if assert.NoError(t, err, "GET b") {
+		assert.Equal(t, 3, v)
+	}
+	v, err = redis.Int(conn.Do("GET", "abc"))
+	if assert.NoError(t, err, "GET abc") {
+		assert.Equal(t, 4, v)
+	}
+	// return the conn to the pool
+	assert.NoError(t, conn.Close(), "Close conn")
+
+	waitForClusterRefresh(c, nil)
+	<-done
+	// only the first command triggered a refresh; the rest were all known
+	assert.Equal(t, 1, int(atomic.LoadInt64(&count)))
+
+	stats := c.Stats()
+	assert.Len(t, stats, 3)
+	var inuse, idle int
+	for _, st := range stats {
+		inuse += st.ActiveCount - st.IdleCount
+		idle += st.IdleCount
+	}
+	assert.Equal(t, 0, inuse, "connections in use")
+	assert.Equal(t, len(ports), idle, "idle connections in pools")
+
+	// verify the connections count from the servers
+	var clients int
+	err = c.EachNode(false, func(addr string, conn redis.Conn) error {
+		s, err := redis.String(conn.Do("CLIENT", "LIST", "TYPE", "normal"))
+		if err != nil {
+			return err
+		}
+		clients += strings.Count(s, "\n")
+		return nil
+	})
+	require.NoError(t, err)
+	assert.Equal(t, idle, clients, "server-reported clients count")
+}
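
The test above also exercises the new LayoutRefresh callback (the BgError callback, seen with RetryCloseConn in retry_conn.go, is not sketched here because its exact signature is not shown in this section). A hedged sketch of wiring LayoutRefresh into a cluster; the logging is illustrative:

import (
	"log"

	"github.com/mna/redisc"
)

func newCluster(nodes []string) *redisc.Cluster {
	return &redisc.Cluster{
		StartupNodes: nodes,
		// Called whenever the slot mapping is refreshed; previous and updated
		// hold the per-slot node addresses before and after the refresh.
		LayoutRefresh: func(previous, updated [redisc.HashSlots][]string) {
			log.Println("cluster layout refreshed")
		},
	}
}
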