New Upstream Release - golang-github-coreos-bbolt

Ready changes

Summary

Merged new upstream version: 1.3.7 (was: 1.3.6).

Diff

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..aafb8a2
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+version: 2
+updates:
+  - package-ecosystem: github-actions
+    directory: /
+    schedule:
+      interval: weekly
+
+  - package-ecosystem: gomod
+    directory: /
+    schedule:
+      interval: weekly
diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml
new file mode 100644
index 0000000..37f3681
--- /dev/null
+++ b/.github/workflows/failpoint_test.yaml
@@ -0,0 +1,18 @@
+name: Failpoint test
+on: [push, pull_request]
+permissions: read-all
+jobs:
+  test:
+    strategy:
+      matrix:
+        os: [ubuntu-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
+        with:
+          go-version: "1.17.13"
+      - run: |
+          make gofail-enable
+          make test-failpoint
+
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
new file mode 100644
index 0000000..30eed1c
--- /dev/null
+++ b/.github/workflows/tests.yaml
@@ -0,0 +1,99 @@
+name: Tests
+on: [push, pull_request]
+jobs:
+  test-linux:
+    strategy:
+      fail-fast: false
+      matrix:
+        target:
+        - linux-amd64-unit-test-1-cpu
+        - linux-amd64-unit-test-2-cpu
+        - linux-amd64-unit-test-4-cpu
+        - linux-amd64-unit-test-4-cpu-race
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v3
+    - uses: actions/setup-go@v3
+      with:
+        go-version: "1.17.13"
+    - run: make fmt
+    - env:
+        TARGET: ${{ matrix.target }}
+      run: |
+        case "${TARGET}" in
+          linux-amd64-unit-test-1-cpu)
+            CPU=1 make test
+            ;;
+          linux-amd64-unit-test-2-cpu)
+            CPU=2 make test
+            ;;
+          linux-amd64-unit-test-4-cpu)
+            CPU=4 make test
+            ;;
+          linux-amd64-unit-test-4-cpu-race)
+            # XXX: By default, the Github Action runner will terminate the process
+            # if it has high resource usage. Try to use GOGC to limit memory and
+            # cpu usage here to prevent unexpected terminating. It can be replaced
+            # with GOMEMLIMIT=2048MiB if the go-version is updated to >=1.19.x.
+            #
+            # REF: https://github.com/actions/runner-images/issues/6680#issuecomment-1335778010
+            GOGC=30 CPU=4 ENABLE_RACE=true make test
+            ;;
+          *)
+            echo "Failed to find target"
+            exit 1
+            ;;
+        esac
+    - name: golangci-lint
+      uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # v3.4.0
+
+  test-windows:
+    strategy:
+      fail-fast: false
+      matrix:
+        target:
+        - windows-amd64-unit-test-4-cpu
+        # FIXME(fuweid):
+        #
+        # The windows will throws the following error when enable race.
+        # We skip it until we have solution.
+        #
+        #   ThreadSanitizer failed to allocate 0x000200000000 (8589934592) bytes at 0x0400c0000000 (error code: 1455)
+        #
+        #- windows-amd64-unit-test-4-cpu-race
+    runs-on: windows-latest
+    steps:
+    - uses: actions/checkout@v3
+    - uses: actions/setup-go@v3
+      with:
+        go-version: "1.17.13"
+    - run: make fmt
+    - env:
+        TARGET: ${{ matrix.target }}
+      run: |
+        case "${TARGET}" in
+          windows-amd64-unit-test-4-cpu)
+            CPU=4 make test
+            ;;
+          *)
+            echo "Failed to find target"
+            exit 1
+            ;;
+        esac
+      shell: bash
+    - name: golangci-lint
+      uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # v3.4.0
+
+  coverage:
+    needs: ["test-linux", "test-windows"]
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+    - uses: actions/checkout@v3
+    - uses: actions/setup-go@v3
+      with:
+        go-version: "1.17.13"
+    - run: make coverage
+
diff --git a/.gitignore b/.gitignore
index 18312f0..9fa948e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,5 +3,8 @@
 *.swp
 /bin/
 cover.out
+cover-*.out
 /.idea
 *.iml
+/cmd/bbolt/bbolt
+
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 452601e..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-go_import_path: go.etcd.io/bbolt
-
-sudo: false
-
-go:
-- 1.15
-
-before_install:
-- go get -v golang.org/x/sys/unix
-- go get -v honnef.co/go/tools/...
-- go get -v github.com/kisielk/errcheck
-
-script:
-- make fmt
-- make test
-- make race
-# - make errcheck
diff --git a/Makefile b/Makefile
index 21ecf48..18154c6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,35 +2,62 @@ BRANCH=`git rev-parse --abbrev-ref HEAD`
 COMMIT=`git rev-parse --short HEAD`
 GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
 
-race:
-	@TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
-	@echo "array freelist test"
-	@TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
-
+TESTFLAGS_RACE=-race=false
+ifdef ENABLE_RACE
+	TESTFLAGS_RACE=-race=true
+endif
+
+TESTFLAGS_CPU=
+ifdef CPU
+	TESTFLAGS_CPU=-cpu=$(CPU)
+endif
+TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS)
+
+.PHONY: fmt
 fmt:
 	!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
 
-# go get honnef.co/go/tools/simple
-gosimple:
-	gosimple ./...
+.PHONY: lint
+lint:
+	golangci-lint run ./...
 
-# go get honnef.co/go/tools/unused
-unused:
-	unused ./...
+.PHONY: test
+test:
+	@echo "hashmap freelist test"
+	TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m
+	TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt
 
-# go get github.com/kisielk/errcheck
-errcheck:
-	@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
+	@echo "array freelist test"
+	TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m
+	TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt
 
-test:
-	TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
-	# Note: gets "program not an importable package" in out of path builds
-	TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
+.PHONY: coverage
+coverage:
+	@echo "hashmap freelist test"
+	TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \
+		-coverprofile cover-freelist-hashmap.out -covermode atomic
 
 	@echo "array freelist test"
+	TEST_FREELIST_TYPE=array go test -v -timeout 30m \
+		-coverprofile cover-freelist-array.out -covermode atomic
+
+.PHONY: gofail-enable
+gofail-enable: install-gofail
+	gofail enable .
+
+.PHONY: gofail-disable
+gofail-disable:
+	gofail disable .
+
+.PHONY: install-gofail
+install-gofail:
+	go install go.etcd.io/gofail
+
+.PHONY: test-failpoint
+test-failpoint:
+	@echo "[failpoint] hashmap freelist test"
+	TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
 
-	@TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
-	# Note: gets "program not an importable package" in out of path builds
-	@TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
+	@echo "[failpoint] array freelist test"
+	TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
 
-.PHONY: race fmt errcheck test gosimple unused
diff --git a/README.md b/README.md
index f1b4a7b..2be669a 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ and setting values. That's it.
 [gh_ben]: https://github.com/benbjohnson
 [bolt]: https://github.com/boltdb/bolt
 [hyc_symas]: https://twitter.com/hyc_symas
-[lmdb]: http://symas.com/mdb/
+[lmdb]: https://www.symas.com/symas-embedded-database-lmdb
 
 ## Project Status
 
@@ -78,14 +78,23 @@ New minor versions may add additional features to the API.
 ### Installing
 
 To start using Bolt, install Go and run `go get`:
-
 ```sh
-$ go get go.etcd.io/bbolt/...
+$ go get go.etcd.io/bbolt@latest
 ```
 
-This will retrieve the library and install the `bolt` command line utility into
-your `$GOBIN` path.
+This will retrieve the library and update your `go.mod` and `go.sum` files.
+
+To run the command line utility, execute:
+```sh
+$ go run go.etcd.io/bbolt/cmd/bbolt@latest
+```
 
+Run `go install` to install the `bbolt` command line utility into
+your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the
+`GOPATH` environment variable is not set.
+```sh
+$ go install go.etcd.io/bbolt/cmd/bbolt@latest
+```
 
 ### Importing bbolt
 
@@ -933,7 +942,7 @@ Below is a list of public, open source projects that use Bolt:
 * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
 * [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
 * [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
-* [Key Value Access Langusge (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
+* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
 * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
 * [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
 * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
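
The README changes above split the module-aware workflow into fetching the library (`go get`), running the CLI ad hoc (`go run`), and installing it (`go install`). For orientation, a minimal, self-contained sketch of the library usage the README goes on to describe (file, bucket, and key names are arbitrary):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open (or create) the database file; bbolt holds a file lock on it.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: create a bucket and store one pair.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: returned slices are only valid inside it.
	if err := db.View(func(tx *bolt.Tx) error {
		fmt.Printf("foo = %s\n", tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```
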
diff --git a/allocate_test.go b/allocate_test.go
index 98b06b4..94e9116 100644
--- a/allocate_test.go
+++ b/allocate_test.go
@@ -18,14 +18,16 @@ func TestTx_allocatePageStats(t *testing.T) {
 		pages: make(map[pgid]*page),
 	}
 
-	prePageCnt := tx.Stats().PageCount
+	txStats := tx.Stats()
+	prePageCnt := txStats.GetPageCount()
 	allocateCnt := f.free_count()
 
 	if _, err := tx.allocate(allocateCnt); err != nil {
 		t.Fatal(err)
 	}
 
-	if tx.Stats().PageCount != prePageCnt+allocateCnt {
-		t.Errorf("Allocated %d but got %d page in stats", allocateCnt, tx.Stats().PageCount)
+	txStats = tx.Stats()
+	if txStats.GetPageCount() != prePageCnt+int64(allocateCnt) {
+		t.Errorf("Allocated %d but got %d page in stats", allocateCnt, txStats.GetPageCount())
 	}
 }
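
The test now reads transaction statistics through accessors — `tx.Stats()` returns a snapshot whose fields are read via methods such as `GetPageCount()`, which returns an `int64` — rather than touching struct fields directly. A caller-side sketch under that assumption (`reportTxPages` is a hypothetical helper):

```go
package example

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// reportTxPages prints how many pages the write transaction has
// allocated, using the GetPageCount accessor shown in the test above.
func reportTxPages(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		txStats := tx.Stats()
		fmt.Printf("pages allocated: %d\n", txStats.GetPageCount())
		return nil
	})
}
```
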
diff --git a/bolt_arm64.go b/bolt_loong64.go
similarity index 85%
rename from bolt_arm64.go
rename to bolt_loong64.go
index 810dfd5..31c17c1 100644
--- a/bolt_arm64.go
+++ b/bolt_loong64.go
@@ -1,4 +1,5 @@
-// +build arm64
+//go:build loong64
+// +build loong64
 
 package bbolt
 
diff --git a/bolt_mips64x.go b/bolt_mips64x.go
index dd8ffe1..a9385be 100644
--- a/bolt_mips64x.go
+++ b/bolt_mips64x.go
@@ -1,3 +1,4 @@
+//go:build mips64 || mips64le
 // +build mips64 mips64le
 
 package bbolt
diff --git a/bolt_mipsx.go b/bolt_mipsx.go
index a669703..ed734ff 100644
--- a/bolt_mipsx.go
+++ b/bolt_mipsx.go
@@ -1,3 +1,4 @@
+//go:build mips || mipsle
 // +build mips mipsle
 
 package bbolt
diff --git a/bolt_ppc.go b/bolt_ppc.go
index 84e545e..e403f57 100644
--- a/bolt_ppc.go
+++ b/bolt_ppc.go
@@ -1,3 +1,4 @@
+//go:build ppc
 // +build ppc
 
 package bbolt
diff --git a/bolt_ppc64.go b/bolt_ppc64.go
index a761209..fcd8652 100644
--- a/bolt_ppc64.go
+++ b/bolt_ppc64.go
@@ -1,3 +1,4 @@
+//go:build ppc64
 // +build ppc64
 
 package bbolt
diff --git a/bolt_ppc64le.go b/bolt_ppc64le.go
index c830f2f..20234ac 100644
--- a/bolt_ppc64le.go
+++ b/bolt_ppc64le.go
@@ -1,3 +1,4 @@
+//go:build ppc64le
 // +build ppc64le
 
 package bbolt
diff --git a/bolt_riscv64.go b/bolt_riscv64.go
index c967613..060f30c 100644
--- a/bolt_riscv64.go
+++ b/bolt_riscv64.go
@@ -1,3 +1,4 @@
+//go:build riscv64
 // +build riscv64
 
 package bbolt
diff --git a/bolt_s390x.go b/bolt_s390x.go
index ff2a560..92d2755 100644
--- a/bolt_s390x.go
+++ b/bolt_s390x.go
@@ -1,3 +1,4 @@
+//go:build s390x
 // +build s390x
 
 package bbolt
diff --git a/bolt_unix.go b/bolt_unix.go
index 4e5f65c..757ae4d 100644
--- a/bolt_unix.go
+++ b/bolt_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows && !plan9 && !solaris && !aix
 // +build !windows,!plan9,!solaris,!aix
 
 package bbolt
diff --git a/bolt_unix_aix.go b/bolt_unix_aix.go
index a64c16f..6dea429 100644
--- a/bolt_unix_aix.go
+++ b/bolt_unix_aix.go
@@ -1,3 +1,4 @@
+//go:build aix
 // +build aix
 
 package bbolt
diff --git a/bolt_windows.go b/bolt_windows.go
index fca178b..e5dde27 100644
--- a/bolt_windows.go
+++ b/bolt_windows.go
@@ -6,40 +6,10 @@ import (
 	"syscall"
 	"time"
 	"unsafe"
-)
-
-// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
-var (
-	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
-	procLockFileEx   = modkernel32.NewProc("LockFileEx")
-	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
-)
-
-const (
-	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
-	flagLockExclusive       = 2
-	flagLockFailImmediately = 1
 
-	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
-	errLockViolation syscall.Errno = 0x21
+	"golang.org/x/sys/windows"
 )
 
-func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
-	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
-	if r == 0 {
-		return err
-	}
-	return nil
-}
-
-func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
-	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
-	if r == 0 {
-		return err
-	}
-	return nil
-}
-
 // fdatasync flushes written data to a file descriptor.
 func fdatasync(db *DB) error {
 	return db.file.Sync()
@@ -51,22 +21,22 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
 	if timeout != 0 {
 		t = time.Now()
 	}
-	var flag uint32 = flagLockFailImmediately
+	var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY
 	if exclusive {
-		flag |= flagLockExclusive
+		flags |= windows.LOCKFILE_EXCLUSIVE_LOCK
 	}
 	for {
 		// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
 		// -1..0 as the lock on the database file.
 		var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
-		err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
+		err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{
 			Offset:     m1,
 			OffsetHigh: m1,
 		})
 
 		if err == nil {
 			return nil
-		} else if err != errLockViolation {
+		} else if err != windows.ERROR_LOCK_VIOLATION {
 			return err
 		}
 
@@ -83,34 +53,37 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
 // funlock releases an advisory lock on a file descriptor.
 func funlock(db *DB) error {
 	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
-	err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
+	return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{
 		Offset:     m1,
 		OffsetHigh: m1,
 	})
-	return err
 }
 
 // mmap memory maps a DB's data file.
 // Based on: https://github.com/edsrzf/mmap-go
 func mmap(db *DB, sz int) error {
+	var sizelo, sizehi uint32
+
 	if !db.readOnly {
 		// Truncate the database to the size of the mmap.
 		if err := db.file.Truncate(int64(sz)); err != nil {
 			return fmt.Errorf("truncate: %s", err)
 		}
+		sizehi = uint32(sz >> 32)
+		sizelo = uint32(sz) & 0xffffffff
 	}
 
 	// Open a file mapping handle.
-	sizelo := uint32(sz >> 32)
-	sizehi := uint32(sz) & 0xffffffff
-	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
+	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
 	if h == 0 {
 		return os.NewSyscallError("CreateFileMapping", errno)
 	}
 
 	// Create the memory map.
-	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
+	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
 	if addr == 0 {
+		// Do our best and report error returned from MapViewOfFile.
+		_ = syscall.CloseHandle(h)
 		return os.NewSyscallError("MapViewOfFile", errno)
 	}
 
@@ -134,8 +107,11 @@ func munmap(db *DB) error {
 	}
 
 	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
+	var err1 error
 	if err := syscall.UnmapViewOfFile(addr); err != nil {
-		return os.NewSyscallError("UnmapViewOfFile", err)
+		err1 = os.NewSyscallError("UnmapViewOfFile", err)
 	}
-	return nil
+	db.data = nil
+	db.datasz = 0
+	return err1
 }
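
The bolt_windows.go rewrite drops the hand-rolled `LockFileEx` bindings in favor of `golang.org/x/sys/windows` and fixes an argument mix-up in `mmap`: Win32's `CreateFileMapping` takes the maximum mapping size as two 32-bit halves, high word first, but the old code computed them under swapped names and passed them in the wrong order. A minimal sketch of the corrected split (illustrative, not taken from the diff):

```go
package main

import "fmt"

// splitSize breaks a 64-bit mapping size into the high/low 32-bit
// halves that CreateFileMapping expects, high word first.
func splitSize(sz int64) (sizehi, sizelo uint32) {
	sizehi = uint32(sz >> 32)
	sizelo = uint32(sz) & 0xffffffff
	return sizehi, sizelo
}

func main() {
	hi, lo := splitSize(5 << 30)           // a 5 GiB mapping
	fmt.Printf("hi=%#x lo=%#x\n", hi, lo) // hi=0x1 lo=0x40000000
}
```

The same hunk also closes the mapping handle when `MapViewOfFile` fails, and `munmap` now clears `db.data` and `db.datasz` even when `UnmapViewOfFile` reports an error.
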
diff --git a/boltsync_unix.go b/boltsync_unix.go
index 9587afe..81e09a5 100644
--- a/boltsync_unix.go
+++ b/boltsync_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows && !plan9 && !linux && !openbsd
 // +build !windows,!plan9,!linux,!openbsd
 
 package bbolt
diff --git a/bucket.go b/bucket.go
index d8750b1..054467a 100644
--- a/bucket.go
+++ b/bucket.go
@@ -81,7 +81,7 @@ func (b *Bucket) Writable() bool {
 // Do not use a cursor after the transaction is closed.
 func (b *Bucket) Cursor() *Cursor {
 	// Update transaction statistics.
-	b.tx.stats.CursorCount++
+	b.tx.stats.IncCursorCount(1)
 
 	// Allocate and return a cursor.
 	return &Cursor{
@@ -229,11 +229,9 @@ func (b *Bucket) DeleteBucket(key []byte) error {
 
 	// Recursively delete all child buckets.
 	child := b.Bucket(key)
-	err := child.ForEach(func(k, v []byte) error {
-		if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
-			if err := child.DeleteBucket(k); err != nil {
-				return fmt.Errorf("delete bucket: %s", err)
-			}
+	err := child.ForEachBucket(func(k []byte) error {
+		if err := child.DeleteBucket(k); err != nil {
+			return fmt.Errorf("delete bucket: %s", err)
 		}
 		return nil
 	})
@@ -353,7 +351,7 @@ func (b *Bucket) SetSequence(v uint64) error {
 		_ = b.node(b.root, nil)
 	}
 
-	// Increment and return the sequence.
+	// Set the sequence.
 	b.bucket.sequence = v
 	return nil
 }
@@ -378,6 +376,7 @@ func (b *Bucket) NextSequence() (uint64, error) {
 }
 
 // ForEach executes a function for each key/value pair in a bucket.
+// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order.
 // If the provided function returns an error then the iteration is stopped and
 // the error is returned to the caller. The provided function must not modify
 // the bucket; this will result in undefined behavior.
@@ -394,7 +393,22 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
 	return nil
 }
 
-// Stat returns stats on a bucket.
+func (b *Bucket) ForEachBucket(fn func(k []byte) error) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	}
+	c := b.Cursor()
+	for k, _, flags := c.first(); k != nil; k, _, flags = c.next() {
+		if flags&bucketLeafFlag != 0 {
+			if err := fn(k); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Stats returns stats on a bucket.
 func (b *Bucket) Stats() BucketStats {
 	var s, subStats BucketStats
 	pageSize := b.tx.db.pageSize
@@ -402,7 +416,7 @@ func (b *Bucket) Stats() BucketStats {
 	if b.root == 0 {
 		s.InlineBucketN += 1
 	}
-	b.forEachPage(func(p *page, depth int) {
+	b.forEachPage(func(p *page, depth int, pgstack []pgid) {
 		if (p.flags & leafPageFlag) != 0 {
 			s.KeyN += int(p.count)
 
@@ -461,7 +475,7 @@ func (b *Bucket) Stats() BucketStats {
 
 		// Keep track of maximum page depth.
 		if depth+1 > s.Depth {
-			s.Depth = (depth + 1)
+			s.Depth = depth + 1
 		}
 	})
 
@@ -477,15 +491,15 @@ func (b *Bucket) Stats() BucketStats {
 }
 
 // forEachPage iterates over every page in a bucket, including inline pages.
-func (b *Bucket) forEachPage(fn func(*page, int)) {
+func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) {
 	// If we have an inline page then just use that.
 	if b.page != nil {
-		fn(b.page, 0)
+		fn(b.page, 0, []pgid{b.root})
 		return
 	}
 
 	// Otherwise traverse the page hierarchy.
-	b.tx.forEachPage(b.root, 0, fn)
+	b.tx.forEachPage(b.root, fn)
 }
 
 // forEachPageNode iterates over every page (or node) in a bucket.
@@ -499,8 +513,8 @@ func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
 	b._forEachPageNode(b.root, 0, fn)
 }
 
-func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
-	var p, n = b.pageNode(pgid)
+func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) {
+	var p, n = b.pageNode(pgId)
 
 	// Execute function.
 	fn(p, n, depth)
@@ -640,11 +654,11 @@ func (b *Bucket) rebalance() {
 }
 
 // node creates a node from a page and associates it with a given parent.
-func (b *Bucket) node(pgid pgid, parent *node) *node {
+func (b *Bucket) node(pgId pgid, parent *node) *node {
 	_assert(b.nodes != nil, "nodes map expected")
 
 	// Retrieve node if it's already been created.
-	if n := b.nodes[pgid]; n != nil {
+	if n := b.nodes[pgId]; n != nil {
 		return n
 	}
 
@@ -659,15 +673,15 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
 	// Use the inline page if this is an inline bucket.
 	var p = b.page
 	if p == nil {
-		p = b.tx.page(pgid)
+		p = b.tx.page(pgId)
 	}
 
 	// Read the page into the node and cache it.
 	n.read(p)
-	b.nodes[pgid] = n
+	b.nodes[pgId] = n
 
 	// Update statistics.
-	b.tx.stats.NodeCount++
+	b.tx.stats.IncNodeCount(1)
 
 	return n
 }
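
`Bucket` gains an exported `ForEachBucket`, which visits only the keys that are sub-buckets; `DeleteBucket` above now uses it to recurse instead of cursor-seeking every key. A caller-side sketch, assuming an already opened `*bolt.DB` with a top-level "widgets" bucket:

```go
package example

import bolt "go.etcd.io/bbolt"

// listSubBuckets collects the names of the direct sub-buckets of
// "widgets", skipping plain key/value pairs. As with ForEach, the
// iteration is in lexicographical key order.
func listSubBuckets(db *bolt.DB) ([]string, error) {
	var names []string
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil // nothing created yet
		}
		return b.ForEachBucket(func(k []byte) error {
			names = append(names, string(k))
			return nil
		})
	})
	return names, err
}
```
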
diff --git a/bucket_test.go b/bucket_test.go
index 2ac9263..1370612 100644
--- a/bucket_test.go
+++ b/bucket_test.go
@@ -13,13 +13,16 @@ import (
 	"testing"
 	"testing/quick"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
 	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
 )
 
 // Ensure that a bucket that gets a non-existent key returns nil.
 func TestBucket_Get_NonExistent(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -37,8 +40,7 @@ func TestBucket_Get_NonExistent(t *testing.T) {
 
 // Ensure that a bucket can read a value that is not flushed yet.
 func TestBucket_Get_FromNode(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -59,8 +61,7 @@ func TestBucket_Get_FromNode(t *testing.T) {
 
 // Ensure that a bucket retrieved via Get() returns a nil.
 func TestBucket_Get_IncompatibleValue(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -85,8 +86,7 @@ func TestBucket_Get_IncompatibleValue(t *testing.T) {
 //
 // https://github.com/boltdb/bolt/issues/544
 func TestBucket_Get_Capacity(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Write key to a bucket.
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -123,8 +123,7 @@ func TestBucket_Get_Capacity(t *testing.T) {
 
 // Ensure that a bucket can write a key/value.
 func TestBucket_Put(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -146,8 +145,7 @@ func TestBucket_Put(t *testing.T) {
 
 // Ensure that a bucket can rewrite a key in the same transaction.
 func TestBucket_Put_Repeat(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -172,8 +170,7 @@ func TestBucket_Put_Repeat(t *testing.T) {
 
 // Ensure that a bucket can write a bunch of large values.
 func TestBucket_Put_Large(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	count, factor := 100, 200
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -214,8 +211,7 @@ func TestDB_Put_VeryLarge(t *testing.T) {
 	n, batchN := 400000, 200000
 	ksize, vsize := 8, 500
 
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	for i := 0; i < n; i += batchN {
 		if err := db.Update(func(tx *bolt.Tx) error {
@@ -239,8 +235,7 @@ func TestDB_Put_VeryLarge(t *testing.T) {
 
 // Ensure that a setting a value on a key with a bucket value returns an error.
 func TestBucket_Put_IncompatibleValue(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b0, err := tx.CreateBucket([]byte("widgets"))
@@ -262,8 +257,7 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) {
 
 // Ensure that a setting a value while the transaction is closed returns an error.
 func TestBucket_Put_Closed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	tx, err := db.Begin(true)
 	if err != nil {
 		t.Fatal(err)
@@ -285,8 +279,7 @@ func TestBucket_Put_Closed(t *testing.T) {
 
 // Ensure that setting a value on a read-only bucket returns an error.
 func TestBucket_Put_ReadOnly(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
@@ -310,8 +303,7 @@ func TestBucket_Put_ReadOnly(t *testing.T) {
 
 // Ensure that a bucket can delete an existing key.
 func TestBucket_Delete(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -335,8 +327,7 @@ func TestBucket_Delete(t *testing.T) {
 
 // Ensure that deleting a large set of keys will work correctly.
 func TestBucket_Delete_Large(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -386,11 +377,11 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
 		t.Skip("skipping test in short mode.")
 	}
 
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	k := make([]byte, 16)
-	for i := uint64(0); i < 10000; i++ {
+	// The bigger the pages - the more values we need to write.
+	for i := uint64(0); i < 2*uint64(db.Info().PageSize); i++ {
 		if err := db.Update(func(tx *bolt.Tx) error {
 			b, err := tx.CreateBucketIfNotExists([]byte("0"))
 			if err != nil {
@@ -433,9 +424,7 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
 	}
 
 	// Free page count should be preserved on reopen.
-	if err := db.DB.Close(); err != nil {
-		t.Fatal(err)
-	}
+	db.MustClose()
 	db.MustReopen()
 	if reopenFreePages := db.Stats().FreePageN; freePages != reopenFreePages {
 		t.Fatalf("expected %d free pages, got %+v", freePages, db.Stats())
@@ -444,8 +433,7 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
 
 // Ensure that deleting of non-existing key is a no-op.
 func TestBucket_Delete_NonExisting(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -477,8 +465,7 @@ func TestBucket_Delete_NonExisting(t *testing.T) {
 
 // Ensure that accessing and updating nested buckets is ok across transactions.
 func TestBucket_Nested(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		// Create a widgets bucket.
@@ -564,8 +551,7 @@ func TestBucket_Nested(t *testing.T) {
 
 // Ensure that deleting a bucket using Delete() returns an error.
 func TestBucket_Delete_Bucket(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -585,8 +571,7 @@ func TestBucket_Delete_Bucket(t *testing.T) {
 
 // Ensure that deleting a key on a read-only bucket returns an error.
 func TestBucket_Delete_ReadOnly(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
@@ -609,8 +594,7 @@ func TestBucket_Delete_ReadOnly(t *testing.T) {
 
 // Ensure that a deleting value while the transaction is closed returns an error.
 func TestBucket_Delete_Closed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	tx, err := db.Begin(true)
 	if err != nil {
@@ -632,8 +616,7 @@ func TestBucket_Delete_Closed(t *testing.T) {
 
 // Ensure that deleting a bucket causes nested buckets to be deleted.
 func TestBucket_DeleteBucket_Nested(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		widgets, err := tx.CreateBucket([]byte("widgets"))
@@ -664,8 +647,7 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) {
 
 // Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
 func TestBucket_DeleteBucket_Nested2(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		widgets, err := tx.CreateBucket([]byte("widgets"))
@@ -731,8 +713,7 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) {
 // Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
 // NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly.
 func TestBucket_DeleteBucket_Large(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		widgets, err := tx.CreateBucket([]byte("widgets"))
@@ -767,8 +748,7 @@ func TestBucket_DeleteBucket_Large(t *testing.T) {
 
 // Ensure that a simple value retrieved via Bucket() returns a nil.
 func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		widgets, err := tx.CreateBucket([]byte("widgets"))
@@ -790,8 +770,7 @@ func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
 
 // Ensure that creating a bucket on an existing non-bucket key returns an error.
 func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		widgets, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -812,8 +791,7 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
 
 // Ensure that deleting a bucket on an existing non-bucket key returns an error.
 func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		widgets, err := tx.CreateBucket([]byte("widgets"))
@@ -834,8 +812,7 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
 
 // Ensure bucket can set and update its sequence number.
 func TestBucket_Sequence(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		bkt, err := tx.CreateBucket([]byte("0"))
@@ -876,8 +853,7 @@ func TestBucket_Sequence(t *testing.T) {
 
 // Ensure that a bucket can return an autoincrementing sequence.
 func TestBucket_NextSequence(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		widgets, err := tx.CreateBucket([]byte("widgets"))
@@ -919,8 +895,7 @@ func TestBucket_NextSequence(t *testing.T) {
 // the only thing updated on the bucket.
 // https://github.com/boltdb/bolt/issues/296
 func TestBucket_NextSequence_Persist(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
@@ -955,8 +930,7 @@ func TestBucket_NextSequence_Persist(t *testing.T) {
 
 // Ensure that retrieving the next sequence on a read-only bucket returns an error.
 func TestBucket_NextSequence_ReadOnly(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
@@ -980,8 +954,7 @@ func TestBucket_NextSequence_ReadOnly(t *testing.T) {
 
 // Ensure that retrieving the next sequence for a bucket on a closed database return an error.
 func TestBucket_NextSequence_Closed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	tx, err := db.Begin(true)
 	if err != nil {
 		t.Fatal(err)
@@ -1000,66 +973,138 @@ func TestBucket_NextSequence_Closed(t *testing.T) {
 
 // Ensure a user can loop over all key/value pairs in a bucket.
 func TestBucket_ForEach(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
-	if err := db.Update(func(tx *bolt.Tx) error {
+	type kv struct {
+		k []byte
+		v []byte
+	}
+
+	expectedItems := []kv{
+		{k: []byte("bar"), v: []byte("0002")},
+		{k: []byte("baz"), v: []byte("0001")},
+		{k: []byte("csubbucket"), v: nil},
+		{k: []byte("foo"), v: []byte("0000")},
+	}
+
+	verifyReads := func(b *bolt.Bucket) {
+		var items []kv
+		err := b.ForEach(func(k, v []byte) error {
+			items = append(items, kv{k: k, v: v})
+			return nil
+		})
+		assert.NoErrorf(t, err, "b.ForEach failed")
+		assert.Equal(t, expectedItems, items, "what we iterated (ForEach) is not what we put")
+	}
+
+	err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
-			t.Fatal(err)
-		}
-		if err := b.Put([]byte("baz"), []byte("0001")); err != nil {
-			t.Fatal(err)
-		}
-		if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err, "bucket creation failed")
 
-		var index int
-		if err := b.ForEach(func(k, v []byte) error {
-			switch index {
-			case 0:
-				if !bytes.Equal(k, []byte("bar")) {
-					t.Fatalf("unexpected key: %v", k)
-				} else if !bytes.Equal(v, []byte("0002")) {
-					t.Fatalf("unexpected value: %v", v)
-				}
-			case 1:
-				if !bytes.Equal(k, []byte("baz")) {
-					t.Fatalf("unexpected key: %v", k)
-				} else if !bytes.Equal(v, []byte("0001")) {
-					t.Fatalf("unexpected value: %v", v)
-				}
-			case 2:
-				if !bytes.Equal(k, []byte("foo")) {
-					t.Fatalf("unexpected key: %v", k)
-				} else if !bytes.Equal(v, []byte("0000")) {
-					t.Fatalf("unexpected value: %v", v)
-				}
-			}
-			index++
+		require.NoErrorf(t, b.Put([]byte("foo"), []byte("0000")), "put 'foo' failed")
+		require.NoErrorf(t, b.Put([]byte("baz"), []byte("0001")), "put 'baz' failed")
+		require.NoErrorf(t, b.Put([]byte("bar"), []byte("0002")), "put 'bar' failed")
+		_, err = b.CreateBucket([]byte("csubbucket"))
+		require.NoErrorf(t, err, "creation of subbucket failed")
+
+		verifyReads(b)
+
+		return nil
+	})
+	require.NoErrorf(t, err, "db.Update failed")
+	err = db.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		require.NotNil(t, b, "bucket opening failed")
+		verifyReads(b)
+		return nil
+	})
+	assert.NoErrorf(t, err, "db.View failed")
+}
+
+func TestBucket_ForEachBucket(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+
+	expectedItems := [][]byte{
+		[]byte("csubbucket"),
+		[]byte("zsubbucket"),
+	}
+
+	verifyReads := func(b *bolt.Bucket) {
+		var items [][]byte
+		err := b.ForEachBucket(func(k []byte) error {
+			items = append(items, k)
 			return nil
-		}); err != nil {
-			t.Fatal(err)
-		}
+		})
+		assert.NoErrorf(t, err, "b.ForEach failed")
+		assert.Equal(t, expectedItems, items, "what we iterated (ForEach) is not what we put")
+	}
 
-		if index != 3 {
-			t.Fatalf("unexpected index: %d", index)
-		}
+	err := db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		require.NoError(t, err, "bucket creation failed")
+
+		require.NoErrorf(t, b.Put([]byte("foo"), []byte("0000")), "put 'foo' failed")
+		_, err = b.CreateBucket([]byte("zsubbucket"))
+		require.NoErrorf(t, err, "creation of subbucket failed")
+		require.NoErrorf(t, b.Put([]byte("baz"), []byte("0001")), "put 'baz' failed")
+		require.NoErrorf(t, b.Put([]byte("bar"), []byte("0002")), "put 'bar' failed")
+		_, err = b.CreateBucket([]byte("csubbucket"))
+		require.NoErrorf(t, err, "creation of subbucket failed")
+
+		verifyReads(b)
 
 		return nil
-	}); err != nil {
-		t.Fatal(err)
+	})
+	assert.NoErrorf(t, err, "db.Update failed")
+	err = db.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		require.NotNil(t, b, "bucket opening failed")
+		verifyReads(b)
+		return nil
+	})
+	assert.NoErrorf(t, err, "db.View failed")
+}
+
+func TestBucket_ForEachBucket_NoBuckets(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+
+	verifyReads := func(b *bolt.Bucket) {
+		var items [][]byte
+		err := b.ForEachBucket(func(k []byte) error {
+			items = append(items, k)
+			return nil
+		})
+		assert.NoErrorf(t, err, "b.ForEach failed")
+		assert.Emptyf(t, items, "what we iterated (ForEach) is not what we put")
 	}
+
+	err := db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		require.NoError(t, err, "bucket creation failed")
+
+		require.NoErrorf(t, b.Put([]byte("foo"), []byte("0000")), "put 'foo' failed")
+		require.NoErrorf(t, err, "creation of subbucket failed")
+		require.NoErrorf(t, b.Put([]byte("baz"), []byte("0001")), "put 'baz' failed")
+		require.NoErrorf(t, err, "creation of subbucket failed")
+
+		verifyReads(b)
+
+		return nil
+	})
+	require.NoErrorf(t, err, "db.Update failed")
+
+	err = db.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		require.NotNil(t, b, "bucket opening failed")
+		verifyReads(b)
+		return nil
+	})
+	assert.NoErrorf(t, err, "db.View failed")
 }
 
 // Ensure a database can stop iteration early.
 func TestBucket_ForEach_ShortCircuit(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -1097,8 +1142,7 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) {
 
 // Ensure that looping over a bucket on a closed database returns an error.
 func TestBucket_ForEach_Closed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	tx, err := db.Begin(true)
 	if err != nil {
@@ -1121,8 +1165,7 @@ func TestBucket_ForEach_Closed(t *testing.T) {
 
 // Ensure that an error is returned when inserting with an empty key.
 func TestBucket_Put_EmptyKey(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -1143,8 +1186,7 @@ func TestBucket_Put_EmptyKey(t *testing.T) {
 
 // Ensure that an error is returned when inserting with a key that's too large.
 func TestBucket_Put_KeyTooLarge(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -1166,8 +1208,7 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) {
 		t.Skip("not enough RAM for test")
 	}
 
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -1189,8 +1230,7 @@ func TestBucket_Stats(t *testing.T) {
 		t.Skip("skipping test in short mode")
 	}
 
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Add bucket with fewer keys but one big value.
 	bigKey := []byte("really-big-value")
@@ -1209,8 +1249,9 @@ func TestBucket_Stats(t *testing.T) {
 			t.Fatal(err)
 		}
 	}
+	longKeyLength := 10*db.Info().PageSize + 17
 	if err := db.Update(func(tx *bolt.Tx) error {
-		if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil {
+		if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", longKeyLength))); err != nil {
 			t.Fatal(err)
 		}
 		return nil
@@ -1220,54 +1261,53 @@ func TestBucket_Stats(t *testing.T) {
 
 	db.MustCheck()
 
+	pageSize2stats := map[int]bolt.BucketStats{
+		4096: {
+			BranchPageN:     1,
+			BranchOverflowN: 0,
+			LeafPageN:       7,
+			LeafOverflowN:   10,
+			KeyN:            501,
+			Depth:           2,
+			BranchAlloc:     4096,
+			BranchInuse:     149,
+			LeafAlloc:       69632,
+			LeafInuse: 0 +
+				7*16 + // leaf page header (x LeafPageN)
+				501*16 + // leaf elements
+				500*3 + len(bigKey) + // leaf keys
+				1*10 + 2*90 + 3*400 + longKeyLength, // leaf values: 10 * 1digit, 90*2digits, ...
+			BucketN:           1,
+			InlineBucketN:     0,
+			InlineBucketInuse: 0},
+		16384: {
+			BranchPageN:     1,
+			BranchOverflowN: 0,
+			LeafPageN:       3,
+			LeafOverflowN:   10,
+			KeyN:            501,
+			Depth:           2,
+			BranchAlloc:     16384,
+			BranchInuse:     73,
+			LeafAlloc:       212992,
+			LeafInuse: 0 +
+				3*16 + // leaf page header (x LeafPageN)
+				501*16 + // leaf elements
+				500*3 + len(bigKey) + // leaf keys
+				1*10 + 2*90 + 3*400 + longKeyLength, // leaf values: 10 * 1digit, 90*2digits, ...
+			BucketN:           1,
+			InlineBucketN:     0,
+			InlineBucketInuse: 0},
+	}
+
 	if err := db.View(func(tx *bolt.Tx) error {
 		stats := tx.Bucket([]byte("woojits")).Stats()
-		if stats.BranchPageN != 1 {
-			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
-		} else if stats.BranchOverflowN != 0 {
-			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
-		} else if stats.LeafPageN != 7 {
-			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
-		} else if stats.LeafOverflowN != 2 {
-			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
-		} else if stats.KeyN != 501 {
-			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
-		} else if stats.Depth != 2 {
-			t.Fatalf("unexpected Depth: %d", stats.Depth)
-		}
-
-		branchInuse := 16     // branch page header
-		branchInuse += 7 * 16 // branch elements
-		branchInuse += 7 * 3  // branch keys (6 3-byte keys)
-		if stats.BranchInuse != branchInuse {
-			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
-		}
-
-		leafInuse := 7 * 16                      // leaf page header
-		leafInuse += 501 * 16                    // leaf elements
-		leafInuse += 500*3 + len(bigKey)         // leaf keys
-		leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
-		if stats.LeafInuse != leafInuse {
-			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
+		t.Logf("Stats: %#v", stats)
+		if expected, ok := pageSize2stats[db.Info().PageSize]; ok {
+			assert.EqualValues(t, expected, stats, "stats differs from expectations")
+		} else {
+			t.Skipf("No expectations for page size: %d", db.Info().PageSize)
 		}
-
-		// Only check allocations for 4KB pages.
-		if db.Info().PageSize == 4096 {
-			if stats.BranchAlloc != 4096 {
-				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
-			} else if stats.LeafAlloc != 36864 {
-				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
-			}
-		}
-
-		if stats.BucketN != 1 {
-			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
-		} else if stats.InlineBucketN != 0 {
-			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
-		} else if stats.InlineBucketInuse != 0 {
-			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
-		}
-
 		return nil
 	}); err != nil {
 		t.Fatal(err)
@@ -1282,8 +1322,7 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
 		t.Skip("invalid page size for test")
 	}
 
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Add a set of values in random order. It will be the same random
 	// order so we can maintain consistency between test runs.
@@ -1344,8 +1383,7 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
 
 // Ensure a bucket can calculate stats.
 func TestBucket_Stats_Small(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		// Add a bucket that fits on a single root leaf.
@@ -1408,8 +1446,7 @@ func TestBucket_Stats_Small(t *testing.T) {
 }
 
 func TestBucket_Stats_EmptyBucket(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		// Add a bucket that fits on a single root leaf.
@@ -1468,8 +1505,7 @@ func TestBucket_Stats_EmptyBucket(t *testing.T) {
 
 // Ensure a bucket can calculate stats.
 func TestBucket_Stats_Nested(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("foo"))
@@ -1574,8 +1610,7 @@ func TestBucket_Stats_Large(t *testing.T) {
 		t.Skip("skipping test in short mode.")
 	}
 
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var index int
 	for i := 0; i < 100; i++ {
@@ -1599,42 +1634,45 @@ func TestBucket_Stats_Large(t *testing.T) {
 
 	db.MustCheck()
 
+	pageSize2stats := map[int]bolt.BucketStats{
+		4096: {
+			BranchPageN:       13,
+			BranchOverflowN:   0,
+			LeafPageN:         1196,
+			LeafOverflowN:     0,
+			KeyN:              100000,
+			Depth:             3,
+			BranchAlloc:       53248,
+			BranchInuse:       25257,
+			LeafAlloc:         4898816,
+			LeafInuse:         2596916,
+			BucketN:           1,
+			InlineBucketN:     0,
+			InlineBucketInuse: 0},
+		16384: {
+			BranchPageN:       1,
+			BranchOverflowN:   0,
+			LeafPageN:         292,
+			LeafOverflowN:     0,
+			KeyN:              100000,
+			Depth:             2,
+			BranchAlloc:       16384,
+			BranchInuse:       6094,
+			LeafAlloc:         4784128,
+			LeafInuse:         2582452,
+			BucketN:           1,
+			InlineBucketN:     0,
+			InlineBucketInuse: 0},
+	}
+
 	if err := db.View(func(tx *bolt.Tx) error {
 		stats := tx.Bucket([]byte("widgets")).Stats()
-		if stats.BranchPageN != 13 {
-			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
-		} else if stats.BranchOverflowN != 0 {
-			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
-		} else if stats.LeafPageN != 1196 {
-			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
-		} else if stats.LeafOverflowN != 0 {
-			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
-		} else if stats.KeyN != 100000 {
-			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
-		} else if stats.Depth != 3 {
-			t.Fatalf("unexpected Depth: %d", stats.Depth)
-		} else if stats.BranchInuse != 25257 {
-			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
-		} else if stats.LeafInuse != 2596916 {
-			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
-		}
-
-		if db.Info().PageSize == 4096 {
-			if stats.BranchAlloc != 53248 {
-				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
-			} else if stats.LeafAlloc != 4898816 {
-				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
-			}
+		t.Logf("Stats: %#v", stats)
+		if expected, ok := pageSize2stats[db.Info().PageSize]; ok {
+			assert.EqualValues(t, expected, stats, "stats differs from expectations")
+		} else {
+			t.Skipf("No expectations for page size: %d", db.Info().PageSize)
 		}
-
-		if stats.BucketN != 1 {
-			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
-		} else if stats.InlineBucketN != 0 {
-			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
-		} else if stats.InlineBucketInuse != 0 {
-			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
-		}
-
 		return nil
 	}); err != nil {
 		t.Fatal(err)
@@ -1649,7 +1687,7 @@ func TestBucket_Put_Single(t *testing.T) {
 
 	index := 0
 	if err := quick.Check(func(items testdata) bool {
-		db := MustOpenDB()
+		db := btesting.MustCreateDB(t)
 		defer db.MustClose()
 
 		m := make(map[string][]byte)
@@ -1706,7 +1744,7 @@ func TestBucket_Put_Multiple(t *testing.T) {
 	}
 
 	if err := quick.Check(func(items testdata) bool {
-		db := MustOpenDB()
+		db := btesting.MustCreateDB(t)
 		defer db.MustClose()
 
 		// Bulk insert all values.
@@ -1759,7 +1797,7 @@ func TestBucket_Delete_Quick(t *testing.T) {
 	}
 
 	if err := quick.Check(func(items testdata) bool {
-		db := MustOpenDB()
+		db := btesting.MustCreateDB(t)
 		defer db.MustClose()
 
 		// Bulk insert all values.
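
Throughout bucket_test.go the hand-rolled `if err != nil { t.Fatal(err) }` checks give way to testify — `require` aborts the current test on failure, `assert` records the failure and continues — and the new internal `btesting.MustCreateDB` registers database cleanup with the testing framework instead of relying on `defer db.MustClose()`. The testify pattern in isolation (`compute` is a hypothetical helper):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func compute() (int, error) { return 42, nil } // hypothetical helper

func TestPattern(t *testing.T) {
	v, err := compute()
	require.NoError(t, err, "compute failed")  // fatal: stops this test
	assert.Equal(t, 42, v, "unexpected value") // non-fatal: test continues
}
```
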
diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go
index 93e6760..f76cd82 100644
--- a/cmd/bbolt/main.go
+++ b/cmd/bbolt/main.go
@@ -2,12 +2,13 @@ package main
 
 import (
 	"bytes"
+	"crypto/sha256"
 	"encoding/binary"
+	"encoding/hex"
 	"errors"
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"runtime"
@@ -19,6 +20,8 @@ import (
 	"unicode/utf8"
 	"unsafe"
 
+	"go.etcd.io/bbolt/internal/guts_cli"
+
 	bolt "go.etcd.io/bbolt"
 )
 
@@ -39,9 +42,6 @@ var (
 	// ErrInvalidValue is returned when a benchmark reads an unexpected value.
 	ErrInvalidValue = errors.New("invalid value")
 
-	// ErrCorrupt is returned when a checking a data file finds errors.
-	ErrCorrupt = errors.New("invalid value")
-
 	// ErrNonDivisibleBatchSize is returned when the batch size can't be evenly
 	// divided by the iteration count.
 	ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size")
@@ -62,9 +62,6 @@ var (
 	ErrKeyNotFound = errors.New("key not found")
 )
 
-// PageHeaderSize represents the size of the bolt.page header.
-const PageHeaderSize = 16
-
 func main() {
 	m := NewMain()
 	if err := m.Run(os.Args[1:]...); err == ErrUsage {
@@ -75,19 +72,25 @@ func main() {
 	}
 }
 
-// Main represents the main program execution.
-type Main struct {
+type baseCommand struct {
 	Stdin  io.Reader
 	Stdout io.Writer
 	Stderr io.Writer
 }
 
+// Main represents the main program execution.
+type Main struct {
+	baseCommand
+}
+
 // NewMain returns a new instance of Main connect to the standard input/output.
 func NewMain() *Main {
 	return &Main{
-		Stdin:  os.Stdin,
-		Stdout: os.Stdout,
-		Stderr: os.Stderr,
+		baseCommand: baseCommand{
+			Stdin:  os.Stdin,
+			Stdout: os.Stdout,
+			Stderr: os.Stderr,
+		},
 	}
 }
 
@@ -128,6 +131,8 @@ func (m *Main) Run(args ...string) error {
 		return newPagesCommand(m).Run(args[1:]...)
 	case "stats":
 		return newStatsCommand(m).Run(args[1:]...)
+	case "surgery":
+		return newSurgeryCommand(m).Run(args[1:]...)
 	default:
 		return ErrUnknownCommand
 	}
@@ -157,29 +162,26 @@ The commands are:
     pages       print list of pages with their types
     page-item   print the key and value of a page item.
     stats       iterate over all pages and generate usage stats
+    surgery     perform surgery on bbolt database
 
 Use "bbolt [command] -h" for more information about a command.
 `, "\n")
 }
 
-// CheckCommand represents the "check" command execution.
-type CheckCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// checkCommand represents the "check" command execution.
+type checkCommand struct {
+	baseCommand
 }
 
-// NewCheckCommand returns a CheckCommand.
-func newCheckCommand(m *Main) *CheckCommand {
-	return &CheckCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newCheckCommand returns a checkCommand.
+func newCheckCommand(m *Main) *checkCommand {
+	c := &checkCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *CheckCommand) Run(args ...string) error {
+func (cmd *checkCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
 	help := fs.Bool("h", false, "")
@@ -199,7 +201,10 @@ func (cmd *CheckCommand) Run(args ...string) error {
 	}
 
 	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{
+		ReadOnly:        true,
+		PreLoadFreelist: true,
+	})
 	if err != nil {
 		return err
 	}
@@ -208,7 +213,7 @@ func (cmd *CheckCommand) Run(args ...string) error {
 	// Perform consistency check.
 	return db.View(func(tx *bolt.Tx) error {
 		var count int
-		for err := range tx.Check() {
+		for err := range tx.CheckWithOptions(bolt.WithKVStringer(CmdKvStringer())) {
 			fmt.Fprintln(cmd.Stdout, err)
 			count++
 		}
@@ -216,7 +221,7 @@ func (cmd *CheckCommand) Run(args ...string) error {
 		// Print summary of errors.
 		if count > 0 {
 			fmt.Fprintf(cmd.Stdout, "%d errors found\n", count)
-			return ErrCorrupt
+			return guts_cli.ErrCorrupt
 		}
 
 		// Notify user that database is valid.
@@ -226,7 +231,7 @@ func (cmd *CheckCommand) Run(args ...string) error {
 }
 
 // Usage returns the help message.
-func (cmd *CheckCommand) Usage() string {
+func (cmd *checkCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt check PATH
 
@@ -239,24 +244,20 @@ return after all pages have been checked.
 `, "\n")
 }
 
-// InfoCommand represents the "info" command execution.
-type InfoCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// infoCommand represents the "info" command execution.
+type infoCommand struct {
+	baseCommand
 }
 
-// NewInfoCommand returns a InfoCommand.
-func newInfoCommand(m *Main) *InfoCommand {
-	return &InfoCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newInfoCommand returns a infoCommand.
+func newInfoCommand(m *Main) *infoCommand {
+	c := &infoCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *InfoCommand) Run(args ...string) error {
+func (cmd *infoCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
 	help := fs.Bool("h", false, "")
@@ -276,7 +277,7 @@ func (cmd *InfoCommand) Run(args ...string) error {
 	}
 
 	// Open the database.
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		return err
 	}
@@ -290,7 +291,7 @@ func (cmd *InfoCommand) Run(args ...string) error {
 }
 
 // Usage returns the help message.
-func (cmd *InfoCommand) Usage() string {
+func (cmd *infoCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt info PATH
 
@@ -298,24 +299,20 @@ Info prints basic information about the Bolt database at PATH.
 `, "\n")
 }
 
-// DumpCommand represents the "dump" command execution.
-type DumpCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// dumpCommand represents the "dump" command execution.
+type dumpCommand struct {
+	baseCommand
 }
 
-// newDumpCommand returns a DumpCommand.
-func newDumpCommand(m *Main) *DumpCommand {
-	return &DumpCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newDumpCommand returns a dumpCommand.
+func newDumpCommand(m *Main) *dumpCommand {
+	c := &dumpCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *DumpCommand) Run(args ...string) error {
+func (cmd *dumpCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
 	help := fs.Bool("h", false, "")
@@ -335,7 +332,7 @@ func (cmd *DumpCommand) Run(args ...string) error {
 	}
 
 	// Read page ids.
-	pageIDs, err := atois(fs.Args()[1:])
+	pageIDs, err := stringToPages(fs.Args()[1:])
 	if err != nil {
 		return err
 	} else if len(pageIDs) == 0 {
@@ -343,7 +340,7 @@ func (cmd *DumpCommand) Run(args ...string) error {
 	}
 
 	// Open database to retrieve page size.
-	pageSize, err := ReadPageSize(path)
+	pageSize, _, err := guts_cli.ReadPageAndHWMSize(path)
 	if err != nil {
 		return err
 	}
@@ -363,7 +360,7 @@ func (cmd *DumpCommand) Run(args ...string) error {
 		}
 
 		// Print page to stdout.
-		if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil {
+		if err := cmd.PrintPage(cmd.Stdout, f, pageID, uint64(pageSize)); err != nil {
 			return err
 		}
 	}
@@ -372,22 +369,22 @@ func (cmd *DumpCommand) Run(args ...string) error {
 }
 
 // PrintPage prints a given page as hexadecimal.
-func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
+func (cmd *dumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID uint64, pageSize uint64) error {
 	const bytesPerLineN = 16
 
 	// Read page into buffer.
 	buf := make([]byte, pageSize)
-	addr := pageID * pageSize
+	addr := pageID * uint64(pageSize)
 	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
 		return err
-	} else if n != pageSize {
+	} else if uint64(n) != pageSize {
 		return io.ErrUnexpectedEOF
 	}
 
 	// Write out to writer in 16-byte lines.
 	var prev []byte
 	var skipped bool
-	for offset := 0; offset < pageSize; offset += bytesPerLineN {
+	for offset := uint64(0); offset < pageSize; offset += bytesPerLineN {
 		// Retrieve current 16-byte line.
 		line := buf[offset : offset+bytesPerLineN]
 		isLastLine := (offset == (pageSize - bytesPerLineN))
@@ -417,7 +414,7 @@ func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSi
 }
 
 // Usage returns the help message.
-func (cmd *DumpCommand) Usage() string {
+func (cmd *dumpCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt dump PATH pageid [pageid...]
 
@@ -425,20 +422,16 @@ Dump prints a hexadecimal dump of one or more pages.
 `, "\n")
 }
 
-// PageItemCommand represents the "page-item" command execution.
-type PageItemCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// pageItemCommand represents the "page-item" command execution.
+type pageItemCommand struct {
+	baseCommand
 }
 
-// newPageItemCommand returns a PageItemCommand.
-func newPageItemCommand(m *Main) *PageItemCommand {
-	return &PageItemCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newPageItemCommand returns a pageItemCommand.
+func newPageItemCommand(m *Main) *pageItemCommand {
+	c := &pageItemCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 type pageItemOptions struct {
@@ -449,13 +442,13 @@ type pageItemOptions struct {
 }
 
 // Run executes the command.
-func (cmd *PageItemCommand) Run(args ...string) error {
+func (cmd *pageItemCommand) Run(args ...string) error {
 	// Parse flags.
 	options := &pageItemOptions{}
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
 	fs.BoolVar(&options.keyOnly, "key-only", false, "Print only the key")
 	fs.BoolVar(&options.valueOnly, "value-only", false, "Print only the value")
-	fs.StringVar(&options.format, "format", "ascii-encoded", "Output format. One of: ascii-encoded|hex|bytes")
+	fs.StringVar(&options.format, "format", "ascii-encoded", "Output format. One of: "+FORMAT_MODES)
 	fs.BoolVar(&options.help, "h", false, "")
 	if err := fs.Parse(args); err != nil {
 		return err
@@ -477,13 +470,13 @@ func (cmd *PageItemCommand) Run(args ...string) error {
 	}
 
 	// Read page id.
-	pageID, err := strconv.Atoi(fs.Arg(1))
+	pageID, err := strconv.ParseUint(fs.Arg(1), 10, 64)
 	if err != nil {
 		return err
 	}
 
 	// Read item id.
-	itemID, err := strconv.Atoi(fs.Arg(2))
+	itemID, err := strconv.ParseUint(fs.Arg(2), 10, 64)
 	if err != nil {
 		return err
 	}
@@ -496,7 +489,7 @@ func (cmd *PageItemCommand) Run(args ...string) error {
 	defer func() { _ = f.Close() }()
 
 	// Retrieve page info and page size.
-	_, buf, err := ReadPage(path, pageID)
+	_, buf, err := guts_cli.ReadPage(path, pageID)
 	if err != nil {
 		return err
 	}
@@ -517,62 +510,80 @@ func (cmd *PageItemCommand) Run(args ...string) error {
 }
 
 // leafPageElement retrieves a leaf page element.
-func (cmd *PageItemCommand) leafPageElement(pageBytes []byte, index uint16) (*leafPageElement, error) {
-	p := (*page)(unsafe.Pointer(&pageBytes[0]))
-	if index >= p.count {
-		return nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d.", p.count, index)
+func (cmd *pageItemCommand) leafPageElement(pageBytes []byte, index uint16) (*guts_cli.LeafPageElement, error) {
+	p := (*guts_cli.Page)(unsafe.Pointer(&pageBytes[0]))
+	if index >= p.Count() {
+		return nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d.", p.Count(), index)
 	}
 	if p.Type() != "leaf" {
 		return nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Type())
 	}
-	return p.leafPageElement(index), nil
+	return p.LeafPageElement(index), nil
 }
 
-// writeBytes writes the byte to the writer. Supported formats: ascii-encoded, hex, bytes.
-func (cmd *PageItemCommand) writeBytes(w io.Writer, b []byte, format string) error {
+const FORMAT_MODES = "auto|ascii-encoded|hex|bytes|redacted"
+
+// formatBytes converts bytes into string according to format.
+// Supported formats: ascii-encoded, hex, bytes.
+func formatBytes(b []byte, format string) (string, error) {
 	switch format {
 	case "ascii-encoded":
-		_, err := fmt.Fprintf(w, "%q", b)
-		if err != nil {
-			return err
-		}
-		_, err = fmt.Fprintf(w, "\n")
-		return err
+		return fmt.Sprintf("%q", b), nil
 	case "hex":
-		_, err := fmt.Fprintf(w, "%x", b)
-		if err != nil {
-			return err
-		}
-		_, err = fmt.Fprintf(w, "\n")
-		return err
+		return fmt.Sprintf("%x", b), nil
 	case "bytes":
-		_, err := w.Write(b)
-		return err
+		return string(b), nil
+	case "auto":
+		return bytesToAsciiOrHex(b), nil
+	case "redacted":
+		return fmt.Sprintf("<redacted len:%d sha256:%x>", len(b), sha256.New().Sum(b)), nil
+	default:
+		return "", fmt.Errorf("formatBytes: unsupported format: %s", format)
+	}
+}
+
+func parseBytes(str string, format string) ([]byte, error) {
+	switch format {
+	case "ascii-encoded":
+		return []byte(str), nil
+	case "hex":
+		return hex.DecodeString(str)
 	default:
-		return fmt.Errorf("writeBytes: unsupported format: %s", format)
+		return nil, fmt.Errorf("parseBytes: unsupported format: %s", format)
 	}
 }
 
 +// writelnBytes writes the bytes to the writer. Supported formats: ascii-encoded, hex, bytes, auto, redacted.
 +// It terminates the write with a newline character.
+func writelnBytes(w io.Writer, b []byte, format string) error {
+	str, err := formatBytes(b, format)
+	if err != nil {
+		return err
+	}
+	_, err = fmt.Fprintln(w, str)
+	return err
+}
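The new formatBytes/parseBytes/writelnBytes helpers above split the old writeBytes into a pure conversion step and a write step. A minimal sketch of how they compose inside this package (the values are illustrative only):

// exampleFormats shows a round trip through the helpers defined above.
func exampleFormats() error {
	// Decode a hex-encoded CLI argument into raw bytes.
	key, err := parseBytes("deadbeef", "hex")
	if err != nil {
		return err
	}

	// "auto" returns printable input as-is and hex-encodes the rest;
	// 0xde 0xad 0xbe 0xef is not printable, so this yields "deadbeef".
	s, err := formatBytes(key, "auto")
	if err != nil {
		return err
	}
	fmt.Println(s)

	// writelnBytes is the same conversion plus a trailing newline.
	var buf bytes.Buffer
	return writelnBytes(&buf, key, "hex") // buf now holds "deadbeef\n"
}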
+
 // PrintLeafItemKey writes the bytes of a leaf element's key.
-func (cmd *PageItemCommand) PrintLeafItemKey(w io.Writer, pageBytes []byte, index uint16, format string) error {
+func (cmd *pageItemCommand) PrintLeafItemKey(w io.Writer, pageBytes []byte, index uint16, format string) error {
 	e, err := cmd.leafPageElement(pageBytes, index)
 	if err != nil {
 		return err
 	}
-	return cmd.writeBytes(w, e.key(), format)
+	return writelnBytes(w, e.Key(), format)
 }
 
 -// PrintLeafItemKey writes the bytes of a leaf element's value.
 +// PrintLeafItemValue writes the bytes of a leaf element's value.
-func (cmd *PageItemCommand) PrintLeafItemValue(w io.Writer, pageBytes []byte, index uint16, format string) error {
+func (cmd *pageItemCommand) PrintLeafItemValue(w io.Writer, pageBytes []byte, index uint16, format string) error {
 	e, err := cmd.leafPageElement(pageBytes, index)
 	if err != nil {
 		return err
 	}
-	return cmd.writeBytes(w, e.value(), format)
+	return writelnBytes(w, e.Value(), format)
 }
 
 // Usage returns the help message.
-func (cmd *PageItemCommand) Usage() string {
+func (cmd *pageItemCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt page-item [options] PATH pageid itemid
 
@@ -583,276 +594,26 @@ Additional options include:
 	--value-only
 		Print only the value
 	--format
-		Output format. One of: ascii-encoded|hex|bytes (default=ascii-encoded)
+		Output format. One of: `+FORMAT_MODES+` (default=ascii-encoded)
 
 page-item prints a page item key and value.
 `, "\n")
 }
 
-// PageCommand represents the "page" command execution.
-type PageCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// pagesCommand represents the "pages" command execution.
+type pagesCommand struct {
+	baseCommand
 }
 
-// newPageCommand returns a PageCommand.
-func newPageCommand(m *Main) *PageCommand {
-	return &PageCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newPagesCommand returns a pagesCommand.
+func newPagesCommand(m *Main) *pagesCommand {
+	c := &pagesCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *PageCommand) Run(args ...string) error {
-	// Parse flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	help := fs.Bool("h", false, "")
-	if err := fs.Parse(args); err != nil {
-		return err
-	} else if *help {
-		fmt.Fprintln(cmd.Stderr, cmd.Usage())
-		return ErrUsage
-	}
-
-	// Require database path and page id.
-	path := fs.Arg(0)
-	if path == "" {
-		return ErrPathRequired
-	} else if _, err := os.Stat(path); os.IsNotExist(err) {
-		return ErrFileNotFound
-	}
-
-	// Read page ids.
-	pageIDs, err := atois(fs.Args()[1:])
-	if err != nil {
-		return err
-	} else if len(pageIDs) == 0 {
-		return ErrPageIDRequired
-	}
-
-	// Open database file handler.
-	f, err := os.Open(path)
-	if err != nil {
-		return err
-	}
-	defer func() { _ = f.Close() }()
-
-	// Print each page listed.
-	for i, pageID := range pageIDs {
-		// Print a separator.
-		if i > 0 {
-			fmt.Fprintln(cmd.Stdout, "===============================================")
-		}
-
-		// Retrieve page info and page size.
-		p, buf, err := ReadPage(path, pageID)
-		if err != nil {
-			return err
-		}
-
-		// Print basic page info.
-		fmt.Fprintf(cmd.Stdout, "Page ID:    %d\n", p.id)
-		fmt.Fprintf(cmd.Stdout, "Page Type:  %s\n", p.Type())
-		fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf))
-
-		// Print type-specific data.
-		switch p.Type() {
-		case "meta":
-			err = cmd.PrintMeta(cmd.Stdout, buf)
-		case "leaf":
-			err = cmd.PrintLeaf(cmd.Stdout, buf)
-		case "branch":
-			err = cmd.PrintBranch(cmd.Stdout, buf)
-		case "freelist":
-			err = cmd.PrintFreelist(cmd.Stdout, buf)
-		}
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// PrintMeta prints the data from the meta page.
-func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
-	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
-	fmt.Fprintf(w, "Version:    %d\n", m.version)
-	fmt.Fprintf(w, "Page Size:  %d bytes\n", m.pageSize)
-	fmt.Fprintf(w, "Flags:      %08x\n", m.flags)
-	fmt.Fprintf(w, "Root:       <pgid=%d>\n", m.root.root)
-	fmt.Fprintf(w, "Freelist:   <pgid=%d>\n", m.freelist)
-	fmt.Fprintf(w, "HWM:        <pgid=%d>\n", m.pgid)
-	fmt.Fprintf(w, "Txn ID:     %d\n", m.txid)
-	fmt.Fprintf(w, "Checksum:   %016x\n", m.checksum)
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintLeaf prints the data for a leaf page.
-func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each key/value.
-	for i := uint16(0); i < p.count; i++ {
-		e := p.leafPageElement(i)
-
-		// Format key as string.
-		var k string
-		if isPrintable(string(e.key())) {
-			k = fmt.Sprintf("%q", string(e.key()))
-		} else {
-			k = fmt.Sprintf("%x", string(e.key()))
-		}
-
-		// Format value as string.
-		var v string
-		if (e.flags & uint32(bucketLeafFlag)) != 0 {
-			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
-			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
-		} else if isPrintable(string(e.value())) {
-			v = fmt.Sprintf("%q", string(e.value()))
-		} else {
-			v = fmt.Sprintf("%x", string(e.value()))
-		}
-
-		fmt.Fprintf(w, "%s: %s\n", k, v)
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintBranch prints the data for a leaf page.
-func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each key/value.
-	for i := uint16(0); i < p.count; i++ {
-		e := p.branchPageElement(i)
-
-		// Format key as string.
-		var k string
-		if isPrintable(string(e.key())) {
-			k = fmt.Sprintf("%q", string(e.key()))
-		} else {
-			k = fmt.Sprintf("%x", string(e.key()))
-		}
-
-		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintFreelist prints the data for a freelist page.
-func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Check for overflow and, if present, adjust starting index and actual element count.
-	idx, count := 0, int(p.count)
-	if p.count == 0xFFFF {
-		idx = 1
-		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
-	}
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", count)
-	fmt.Fprintf(w, "Overflow: %d\n", p.overflow)
-
-	fmt.Fprintf(w, "\n")
-
-	// Print each page in the freelist.
-	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
-	for i := idx; i < count; i++ {
-		fmt.Fprintf(w, "%d\n", ids[i])
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintPage prints a given page as hexadecimal.
-func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
-	const bytesPerLineN = 16
-
-	// Read page into buffer.
-	buf := make([]byte, pageSize)
-	addr := pageID * pageSize
-	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
-		return err
-	} else if n != pageSize {
-		return io.ErrUnexpectedEOF
-	}
-
-	// Write out to writer in 16-byte lines.
-	var prev []byte
-	var skipped bool
-	for offset := 0; offset < pageSize; offset += bytesPerLineN {
-		// Retrieve current 16-byte line.
-		line := buf[offset : offset+bytesPerLineN]
-		isLastLine := (offset == (pageSize - bytesPerLineN))
-
-		// If it's the same as the previous line then print a skip.
-		if bytes.Equal(line, prev) && !isLastLine {
-			if !skipped {
-				fmt.Fprintf(w, "%07x *\n", addr+offset)
-				skipped = true
-			}
-		} else {
-			// Print line as hexadecimal in 2-byte groups.
-			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset,
-				line[0:2], line[2:4], line[4:6], line[6:8],
-				line[8:10], line[10:12], line[12:14], line[14:16],
-			)
-
-			skipped = false
-		}
-
-		// Save the previous line.
-		prev = line
-	}
-	fmt.Fprint(w, "\n")
-
-	return nil
-}
-
-// Usage returns the help message.
-func (cmd *PageCommand) Usage() string {
-	return strings.TrimLeft(`
-usage: bolt page PATH pageid [pageid...]
-
-Page prints one or more pages in human readable format.
-`, "\n")
-}
-
-// PagesCommand represents the "pages" command execution.
-type PagesCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// NewPagesCommand returns a PagesCommand.
-func newPagesCommand(m *Main) *PagesCommand {
-	return &PagesCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
-}
-
-// Run executes the command.
-func (cmd *PagesCommand) Run(args ...string) error {
+func (cmd *pagesCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
 	help := fs.Bool("h", false, "")
@@ -872,7 +633,10 @@ func (cmd *PagesCommand) Run(args ...string) error {
 	}
 
 	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{
+		ReadOnly:        true,
+		PreLoadFreelist: true,
+	})
 	if err != nil {
 		return err
 	}
@@ -915,7 +679,7 @@ func (cmd *PagesCommand) Run(args ...string) error {
 }
 
 // Usage returns the help message.
-func (cmd *PagesCommand) Usage() string {
+func (cmd *pagesCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt pages PATH
 
@@ -929,24 +693,20 @@ a single page to take up multiple blocks.
 `, "\n")
 }
 
-// StatsCommand represents the "stats" command execution.
-type StatsCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// statsCommand represents the "stats" command execution.
+type statsCommand struct {
+	baseCommand
 }
 
-// NewStatsCommand returns a StatsCommand.
-func newStatsCommand(m *Main) *StatsCommand {
-	return &StatsCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newStatsCommand returns a statsCommand.
+func newStatsCommand(m *Main) *statsCommand {
+	c := &statsCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *StatsCommand) Run(args ...string) error {
+func (cmd *statsCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
 	help := fs.Bool("h", false, "")
@@ -966,7 +726,7 @@ func (cmd *StatsCommand) Run(args ...string) error {
 	}
 
 	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		return err
 	}
@@ -1029,7 +789,7 @@ func (cmd *StatsCommand) Run(args ...string) error {
 }
 
 // Usage returns the help message.
-func (cmd *StatsCommand) Usage() string {
+func (cmd *statsCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt stats PATH
 
@@ -1064,24 +824,20 @@ experience corruption, please submit a ticket to the Bolt project page:
 `, "\n")
 }
 
-// BucketsCommand represents the "buckets" command execution.
-type BucketsCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// bucketsCommand represents the "buckets" command execution.
+type bucketsCommand struct {
+	baseCommand
 }
 
-// NewBucketsCommand returns a BucketsCommand.
-func newBucketsCommand(m *Main) *BucketsCommand {
-	return &BucketsCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newBucketsCommand returns a bucketsCommand.
+func newBucketsCommand(m *Main) *bucketsCommand {
+	c := &bucketsCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *BucketsCommand) Run(args ...string) error {
+func (cmd *bucketsCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
 	help := fs.Bool("h", false, "")
@@ -1101,7 +857,7 @@ func (cmd *BucketsCommand) Run(args ...string) error {
 	}
 
 	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		return err
 	}
@@ -1117,7 +873,7 @@ func (cmd *BucketsCommand) Run(args ...string) error {
 }
 
 // Usage returns the help message.
-func (cmd *BucketsCommand) Usage() string {
+func (cmd *bucketsCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt buckets PATH
 
@@ -1125,26 +881,23 @@ Print a list of buckets.
 `, "\n")
 }
 
-// KeysCommand represents the "keys" command execution.
-type KeysCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// keysCommand represents the "keys" command execution.
+type keysCommand struct {
+	baseCommand
 }
 
-// NewKeysCommand returns a KeysCommand.
-func newKeysCommand(m *Main) *KeysCommand {
-	return &KeysCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newKeysCommand returns a keysCommand.
+func newKeysCommand(m *Main) *keysCommand {
+	c := &keysCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *KeysCommand) Run(args ...string) error {
+func (cmd *keysCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	optionsFormat := fs.String("format", "bytes", "Output format. One of: "+FORMAT_MODES+" (default: bytes)")
 	help := fs.Bool("h", false, "")
 	if err := fs.Parse(args); err != nil {
 		return err
@@ -1154,17 +907,18 @@ func (cmd *KeysCommand) Run(args ...string) error {
 	}
 
 	// Require database path and bucket.
-	path, bucket := fs.Arg(0), fs.Arg(1)
 +	relevantArgs := fs.Args()
 +	// Guard against zero arguments to avoid indexing out of range.
 +	if len(relevantArgs) == 0 {
 +		return ErrPathRequired
 +	}
 +	path, buckets := relevantArgs[0], relevantArgs[1:]
 	if path == "" {
 		return ErrPathRequired
 	} else if _, err := os.Stat(path); os.IsNotExist(err) {
 		return ErrFileNotFound
-	} else if bucket == "" {
+	} else if len(buckets) == 0 {
 		return ErrBucketRequired
 	}
 
 	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		return err
 	}
@@ -1173,48 +927,62 @@ func (cmd *KeysCommand) Run(args ...string) error {
 	// Print keys.
 	return db.View(func(tx *bolt.Tx) error {
 		// Find bucket.
-		b := tx.Bucket([]byte(bucket))
-		if b == nil {
 +		lastbucket := tx.Bucket([]byte(buckets[0]))
+		if lastbucket == nil {
 			return ErrBucketNotFound
 		}
+		for _, bucket := range buckets[1:] {
+			lastbucket = lastbucket.Bucket([]byte(bucket))
+			if lastbucket == nil {
+				return ErrBucketNotFound
+			}
+		}
 
 		// Iterate over each key.
-		return b.ForEach(func(key, _ []byte) error {
-			fmt.Fprintln(cmd.Stdout, string(key))
-			return nil
+		return lastbucket.ForEach(func(key, _ []byte) error {
+			return writelnBytes(cmd.Stdout, key, *optionsFormat)
 		})
 	})
 }
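Both keys and get now descend through a chain of bucket names to reach a nested (sub)bucket before operating on it. The traversal, factored into a standalone helper for clarity (findBucket is our name, not part of the diff; it assumes names is non-empty, which both callers enforce):

// findBucket walks a chain of nested bucket names inside a transaction.
func findBucket(tx *bolt.Tx, names []string) (*bolt.Bucket, error) {
	b := tx.Bucket([]byte(names[0]))
	if b == nil {
		return nil, ErrBucketNotFound
	}
	for _, name := range names[1:] {
		if b = b.Bucket([]byte(name)); b == nil {
			return nil, ErrBucketNotFound
		}
	}
	return b, nil
}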
 
 // Usage returns the help message.
-func (cmd *KeysCommand) Usage() string {
+// TODO: Use https://pkg.go.dev/flag#FlagSet.PrintDefaults to print supported flags.
+func (cmd *keysCommand) Usage() string {
 	return strings.TrimLeft(`
-usage: bolt keys PATH BUCKET
+usage: bolt keys PATH [BUCKET...]
+
 +Print a list of keys in the given (sub)bucket.
 +
 +Additional options include:
 +
 +	--format
 +		Output format. One of: `+FORMAT_MODES+` (default=bytes)
 
 -Print a list of keys in the given bucket.
 `, "\n")
 }
 
-// GetCommand represents the "get" command execution.
-type GetCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// getCommand represents the "get" command execution.
+type getCommand struct {
+	baseCommand
 }
 
-// NewGetCommand returns a GetCommand.
-func newGetCommand(m *Main) *GetCommand {
-	return &GetCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+// newGetCommand returns a getCommand.
+func newGetCommand(m *Main) *getCommand {
+	c := &getCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *GetCommand) Run(args ...string) error {
+func (cmd *getCommand) Run(args ...string) error {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	var parseFormat string
+	var format string
+	fs.StringVar(&parseFormat, "parse-format", "ascii-encoded", "Input format. One of: ascii-encoded|hex (default: ascii-encoded)")
+	fs.StringVar(&format, "format", "bytes", "Output format. One of: "+FORMAT_MODES+" (default: bytes)")
 	help := fs.Bool("h", false, "")
 	if err := fs.Parse(args); err != nil {
 		return err
@@ -1224,19 +992,24 @@ func (cmd *GetCommand) Run(args ...string) error {
 	}
 
 	// Require database path, bucket and key.
-	path, bucket, key := fs.Arg(0), fs.Arg(1), fs.Arg(2)
 +	relevantArgs := fs.Args()
 +	// Guard against too few arguments to avoid slicing out of range.
 +	if len(relevantArgs) < 2 {
 +		fmt.Fprintln(cmd.Stderr, cmd.Usage())
 +		return ErrUsage
 +	}
 +	path, buckets := relevantArgs[0], relevantArgs[1:len(relevantArgs)-1]
+	key, err := parseBytes(relevantArgs[len(relevantArgs)-1], parseFormat)
+	if err != nil {
+		return err
+	}
 	if path == "" {
 		return ErrPathRequired
 	} else if _, err := os.Stat(path); os.IsNotExist(err) {
 		return ErrFileNotFound
-	} else if bucket == "" {
+	} else if len(buckets) == 0 {
 		return ErrBucketRequired
-	} else if key == "" {
+	} else if len(key) == 0 {
 		return ErrKeyRequired
 	}
 
 	// Open database.
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		return err
 	}
@@ -1245,51 +1018,60 @@ func (cmd *GetCommand) Run(args ...string) error {
 	// Print value.
 	return db.View(func(tx *bolt.Tx) error {
 		// Find bucket.
-		b := tx.Bucket([]byte(bucket))
-		if b == nil {
 +		lastbucket := tx.Bucket([]byte(buckets[0]))
+		if lastbucket == nil {
 			return ErrBucketNotFound
 		}
+		for _, bucket := range buckets[1:] {
+			lastbucket = lastbucket.Bucket([]byte(bucket))
+			if lastbucket == nil {
+				return ErrBucketNotFound
+			}
+		}
 
 		// Find value for given key.
-		val := b.Get([]byte(key))
+		val := lastbucket.Get(key)
 		if val == nil {
-			return ErrKeyNotFound
+			return fmt.Errorf("Error %w for key: %q hex: \"%x\"", ErrKeyNotFound, key, string(key))
 		}
 
-		fmt.Fprintln(cmd.Stdout, string(val))
-		return nil
+		// TODO: In this particular case, it would be better to not terminate with '\n'
+		return writelnBytes(cmd.Stdout, val, format)
 	})
 }
 
 // Usage returns the help message.
-func (cmd *GetCommand) Usage() string {
+func (cmd *getCommand) Usage() string {
 	return strings.TrimLeft(`
-usage: bolt get PATH BUCKET KEY
 +usage: bolt get PATH [BUCKET...] KEY
+
+Print the value of the given key in the given (sub)bucket.
 
-Print the value of the given key in the given bucket.
+Additional options include:
+
+	--format
+		Output format. One of: `+FORMAT_MODES+` (default=bytes)
+	--parse-format
 +		Input format (of key). One of: ascii-encoded|hex (default=ascii-encoded)
 `, "\n")
 }
 
 var benchBucketName = []byte("bench")
 
-// BenchCommand represents the "bench" command execution.
-type BenchCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// benchCommand represents the "bench" command execution.
+type benchCommand struct {
+	baseCommand
 }
 
-// NewBenchCommand returns a BenchCommand using the
-func newBenchCommand(m *Main) *BenchCommand {
-	return &BenchCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
 +// newBenchCommand returns a benchCommand.
+func newBenchCommand(m *Main) *benchCommand {
+	c := &benchCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the "bench" command.
-func (cmd *BenchCommand) Run(args ...string) error {
+func (cmd *benchCommand) Run(args ...string) error {
 	// Parse CLI arguments.
 	options, err := cmd.ParseFlags(args)
 	if err != nil {
@@ -1330,7 +1112,7 @@ func (cmd *BenchCommand) Run(args ...string) error {
 }
 
 // ParseFlags parses the command line flags.
-func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) {
+func (cmd *benchCommand) ParseFlags(args []string) (*BenchOptions, error) {
 	var options BenchOptions
 
 	// Parse flagset.
@@ -1364,7 +1146,7 @@ func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) {
 
 	// Generate temp path if one is not passed in.
 	if options.Path == "" {
-		f, err := ioutil.TempFile("", "bolt-bench-")
+		f, err := os.CreateTemp("", "bolt-bench-")
 		if err != nil {
 			return nil, fmt.Errorf("temp file: %s", err)
 		}
@@ -1377,7 +1159,7 @@ func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) {
 }
 
 // Writes to the database.
-func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	// Start profiling for writes.
 	if options.ProfileMode == "rw" || options.ProfileMode == "w" {
 		cmd.startProfiling(options)
@@ -1410,27 +1192,27 @@ func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *
 	return err
 }
 
-func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	var i = uint32(0)
 	return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i })
 }
 
-func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() })
 }
 
-func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	var i = uint32(0)
 	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i })
 }
 
-func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() })
 }
 
-func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
+func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
 	results.WriteOps = options.Iterations
 
 	for i := 0; i < options.Iterations; i += options.BatchSize {
@@ -1459,7 +1241,7 @@ func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions,
 	return nil
 }
 
-func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
+func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
 	results.WriteOps = options.Iterations
 
 	for i := 0; i < options.Iterations; i += options.BatchSize {
@@ -1503,7 +1285,7 @@ func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOp
 }
 
 // Reads from the database.
-func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	// Start profiling for reads.
 	if options.ProfileMode == "r" {
 		cmd.startProfiling(options)
@@ -1535,7 +1317,7 @@ func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *B
 	return err
 }
 
-func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	return db.View(func(tx *bolt.Tx) error {
 		t := time.Now()
 
@@ -1566,7 +1348,7 @@ func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions,
 	})
 }
 
-func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
+func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
 	return db.View(func(tx *bolt.Tx) error {
 		t := time.Now()
 
@@ -1608,7 +1390,7 @@ func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt
 var cpuprofile, memprofile, blockprofile *os.File
 
 // Starts all profiles set on the options.
-func (cmd *BenchCommand) startProfiling(options *BenchOptions) {
+func (cmd *benchCommand) startProfiling(options *BenchOptions) {
 	var err error
 
 	// Start CPU profiling.
@@ -1618,7 +1400,11 @@ func (cmd *BenchCommand) startProfiling(options *BenchOptions) {
 			fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err)
 			os.Exit(1)
 		}
-		pprof.StartCPUProfile(cpuprofile)
+		err = pprof.StartCPUProfile(cpuprofile)
+		if err != nil {
+			fmt.Fprintf(cmd.Stderr, "bench: could not start cpu profile %q: %v\n", options.CPUProfile, err)
+			os.Exit(1)
+		}
 	}
 
 	// Start memory profiling.
@@ -1643,7 +1429,7 @@ func (cmd *BenchCommand) startProfiling(options *BenchOptions) {
 }
 
 // Stops all profiles.
-func (cmd *BenchCommand) stopProfiling() {
+func (cmd *benchCommand) stopProfiling() {
 	if cpuprofile != nil {
 		pprof.StopCPUProfile()
 		cpuprofile.Close()
@@ -1651,13 +1437,19 @@ func (cmd *BenchCommand) stopProfiling() {
 	}
 
 	if memprofile != nil {
-		pprof.Lookup("heap").WriteTo(memprofile, 0)
+		err := pprof.Lookup("heap").WriteTo(memprofile, 0)
+		if err != nil {
+			fmt.Fprintf(cmd.Stderr, "bench: could not write mem profile")
+		}
 		memprofile.Close()
 		memprofile = nil
 	}
 
 	if blockprofile != nil {
-		pprof.Lookup("block").WriteTo(blockprofile, 0)
+		err := pprof.Lookup("block").WriteTo(blockprofile, 0)
+		if err != nil {
+			fmt.Fprintf(cmd.Stderr, "bench: could not write block profile")
+		}
 		blockprofile.Close()
 		blockprofile = nil
 		runtime.SetBlockProfileRate(0)
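The profiling hunks above stop discarding errors from pprof calls. As a standalone illustration of that pattern (profileCPU and the file name are ours, not part of the diff):

package main

import (
	"fmt"
	"os"
	"runtime/pprof"
)

// profileCPU runs work() under a CPU profile, checking every pprof
// error instead of ignoring it, as the diff above now does.
func profileCPU(path string, work func()) error {
	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("create profile %q: %w", path, err)
	}
	defer f.Close()

	if err := pprof.StartCPUProfile(f); err != nil {
		return fmt.Errorf("start cpu profile: %w", err)
	}
	defer pprof.StopCPUProfile()

	work()
	return nil
}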
@@ -1747,72 +1539,24 @@ func isPrintable(s string) bool {
 	return true
 }
 
-// ReadPage reads page info & full page data from a path.
-// This is not transactionally safe.
-func ReadPage(path string, pageID int) (*page, []byte, error) {
-	// Find page size.
-	pageSize, err := ReadPageSize(path)
-	if err != nil {
-		return nil, nil, fmt.Errorf("read page size: %s", err)
-	}
-
-	// Open database file.
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer f.Close()
-
-	// Read one block into buffer.
-	buf := make([]byte, pageSize)
-	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
-		return nil, nil, err
-	} else if n != len(buf) {
-		return nil, nil, io.ErrUnexpectedEOF
-	}
-
-	// Determine total number of blocks.
-	p := (*page)(unsafe.Pointer(&buf[0]))
-	overflowN := p.overflow
-
-	// Re-read entire page (with overflow) into buffer.
-	buf = make([]byte, (int(overflowN)+1)*pageSize)
-	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
-		return nil, nil, err
-	} else if n != len(buf) {
-		return nil, nil, io.ErrUnexpectedEOF
+func bytesToAsciiOrHex(b []byte) string {
+	sb := string(b)
+	if isPrintable(sb) {
+		return sb
+	} else {
+		return hex.EncodeToString(b)
 	}
-	p = (*page)(unsafe.Pointer(&buf[0]))
-
-	return p, buf, nil
 }
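bytesToAsciiOrHex backs the new "auto" format mode: printable input passes through unchanged, anything else is hex-encoded. For instance (illustrative values):

func exampleAuto() {
	fmt.Println(bytesToAsciiOrHex([]byte("hello")))    // printable: prints "hello"
	fmt.Println(bytesToAsciiOrHex([]byte{0x00, 0xff})) // not printable: prints "00ff"
}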
 
-// ReadPageSize reads page size a path.
-// This is not transactionally safe.
-func ReadPageSize(path string) (int, error) {
-	// Open database file.
-	f, err := os.Open(path)
-	if err != nil {
-		return 0, err
-	}
-	defer f.Close()
-
-	// Read 4KB chunk.
-	buf := make([]byte, 4096)
-	if _, err := io.ReadFull(f, buf); err != nil {
-		return 0, err
-	}
-
-	// Read page size from metadata.
-	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
-	return int(m.pageSize), nil
+func stringToPage(str string) (uint64, error) {
+	return strconv.ParseUint(str, 10, 64)
 }
 
-// atois parses a slice of strings into integers.
-func atois(strs []string) ([]int, error) {
-	var a []int
+// stringToPages parses a slice of strings into page ids.
+func stringToPages(strs []string) ([]uint64, error) {
+	var a []uint64
 	for _, str := range strs {
-		i, err := strconv.Atoi(str)
+		i, err := stringToPage(str)
 		if err != nil {
 			return nil, err
 		}
@@ -1821,117 +1565,9 @@ func atois(strs []string) ([]int, error) {
 	return a, nil
 }
 
-// DO NOT EDIT. Copied from the "bolt" package.
-const maxAllocSize = 0xFFFFFFF
-
-// DO NOT EDIT. Copied from the "bolt" package.
-const (
-	branchPageFlag   = 0x01
-	leafPageFlag     = 0x02
-	metaPageFlag     = 0x04
-	freelistPageFlag = 0x10
-)
-
-// DO NOT EDIT. Copied from the "bolt" package.
-const bucketLeafFlag = 0x01
-
-// DO NOT EDIT. Copied from the "bolt" package.
-type pgid uint64
-
-// DO NOT EDIT. Copied from the "bolt" package.
-type txid uint64
-
-// DO NOT EDIT. Copied from the "bolt" package.
-type meta struct {
-	magic    uint32
-	version  uint32
-	pageSize uint32
-	flags    uint32
-	root     bucket
-	freelist pgid
-	pgid     pgid
-	txid     txid
-	checksum uint64
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-type bucket struct {
-	root     pgid
-	sequence uint64
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-type page struct {
-	id       pgid
-	flags    uint16
-	count    uint16
-	overflow uint32
-	ptr      uintptr
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-func (p *page) Type() string {
-	if (p.flags & branchPageFlag) != 0 {
-		return "branch"
-	} else if (p.flags & leafPageFlag) != 0 {
-		return "leaf"
-	} else if (p.flags & metaPageFlag) != 0 {
-		return "meta"
-	} else if (p.flags & freelistPageFlag) != 0 {
-		return "freelist"
-	}
-	return fmt.Sprintf("unknown<%02x>", p.flags)
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-func (p *page) leafPageElement(index uint16) *leafPageElement {
-	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
-	return n
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-func (p *page) branchPageElement(index uint16) *branchPageElement {
-	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-type branchPageElement struct {
-	pos   uint32
-	ksize uint32
-	pgid  pgid
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-func (n *branchPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return buf[n.pos : n.pos+n.ksize]
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-type leafPageElement struct {
-	flags uint32
-	pos   uint32
-	ksize uint32
-	vsize uint32
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-func (n *leafPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return buf[n.pos : n.pos+n.ksize]
-}
-
-// DO NOT EDIT. Copied from the "bolt" package.
-func (n *leafPageElement) value() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]
-}
-
-// CompactCommand represents the "compact" command execution.
-type CompactCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
+// compactCommand represents the "compact" command execution.
+type compactCommand struct {
+	baseCommand
 
 	SrcPath   string
 	DstPath   string
@@ -1939,19 +1575,17 @@ type CompactCommand struct {
 }
 
 -// newCompactCommand returns a CompactCommand.
 +// newCompactCommand returns a compactCommand.
-func newCompactCommand(m *Main) *CompactCommand {
-	return &CompactCommand{
-		Stdin:  m.Stdin,
-		Stdout: m.Stdout,
-		Stderr: m.Stderr,
-	}
+func newCompactCommand(m *Main) *compactCommand {
+	c := &compactCommand{}
+	c.baseCommand = m.baseCommand
+	return c
 }
 
 // Run executes the command.
-func (cmd *CompactCommand) Run(args ...string) (err error) {
+func (cmd *compactCommand) Run(args ...string) (err error) {
 	// Parse flags.
 	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	fs.SetOutput(ioutil.Discard)
+	fs.SetOutput(io.Discard)
 	fs.StringVar(&cmd.DstPath, "o", "", "")
 	fs.Int64Var(&cmd.TxMaxSize, "tx-max-size", 65536, "")
 	if err := fs.Parse(args); err == flag.ErrHelp {
@@ -1979,7 +1613,7 @@ func (cmd *CompactCommand) Run(args ...string) (err error) {
 	initialSize := fi.Size()
 
 	// Open source database.
-	src, err := bolt.Open(cmd.SrcPath, 0444, nil)
+	src, err := bolt.Open(cmd.SrcPath, 0444, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		return err
 	}
@@ -2010,7 +1644,7 @@ func (cmd *CompactCommand) Run(args ...string) (err error) {
 }
 
 // Usage returns the help message.
-func (cmd *CompactCommand) Usage() string {
+func (cmd *compactCommand) Usage() string {
 	return strings.TrimLeft(`
 usage: bolt compact [options] -o DST SRC
 
@@ -2026,3 +1660,17 @@ Additional options include:
 		Defaults to 64KB.
 `, "\n")
 }
+
+type cmdKvStringer struct{}
+
+func (_ cmdKvStringer) KeyToString(key []byte) string {
+	return bytesToAsciiOrHex(key)
+}
+
+func (_ cmdKvStringer) ValueToString(value []byte) string {
+	return bytesToAsciiOrHex(value)
+}
+
+func CmdKvStringer() bolt.KVStringer {
+	return cmdKvStringer{}
+}
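CmdKvStringer feeds the same "auto" formatting into bbolt's KVStringer hook. Assuming the interface requires exactly the two methods implemented above, an always-hex variant could look like this (hexKvStringer is our illustration, not part of the release; it needs the encoding/hex import):

type hexKvStringer struct{}

func (hexKvStringer) KeyToString(key []byte) string {
	return hex.EncodeToString(key)
}

func (hexKvStringer) ValueToString(value []byte) string {
	return hex.EncodeToString(value)
}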
diff --git a/cmd/bbolt/main_test b/cmd/bbolt/main_test.go
similarity index 78%
rename from cmd/bbolt/main_test
rename to cmd/bbolt/main_test.go
index b4871ff..7d0cfd2 100644
--- a/cmd/bbolt/main_test
+++ b/cmd/bbolt/main_test.go
@@ -6,24 +6,29 @@ import (
 	"encoding/binary"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"strconv"
 	"testing"
 
+	"go.etcd.io/bbolt/internal/btesting"
+
+	"github.com/stretchr/testify/require"
+
 	bolt "go.etcd.io/bbolt"
+	main "go.etcd.io/bbolt/cmd/bbolt"
 )
 
 // Ensure the "info" command can print information about a database.
 func TestInfoCommand_Run(t *testing.T) {
-	db := MustOpen(0666, nil)
-	db.DB.Close()
-	defer db.Close()
+	db := btesting.MustCreateDB(t)
+	db.Close()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
 
 	// Run the info command.
 	m := NewMain()
-	if err := m.Run("info", db.Path); err != nil {
+	if err := m.Run("info", db.Path()); err != nil {
 		t.Fatal(err)
 	}
 }
@@ -35,9 +40,10 @@ func TestStatsCommand_Run_EmptyDatabase(t *testing.T) {
 		t.Skip("system does not use 4KB page size")
 	}
 
-	db := MustOpen(0666, nil)
-	defer db.Close()
-	db.DB.Close()
+	db := btesting.MustCreateDB(t)
+	db.Close()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
 
 	// Generate expected result.
 	exp := "Aggregate statistics for 0 buckets\n\n" +
@@ -61,7 +67,7 @@ func TestStatsCommand_Run_EmptyDatabase(t *testing.T) {
 
 	// Run the command.
 	m := NewMain()
-	if err := m.Run("stats", db.Path); err != nil {
+	if err := m.Run("stats", db.Path()); err != nil {
 		t.Fatal(err)
 	} else if m.Stdout.String() != exp {
 		t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String())
@@ -75,8 +81,7 @@ func TestStatsCommand_Run(t *testing.T) {
 		t.Skip("system does not use 4KB page size")
 	}
 
-	db := MustOpen(0666, nil)
-	defer db.Close()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		// Create "foo" bucket.
@@ -114,7 +119,9 @@ func TestStatsCommand_Run(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	db.DB.Close()
+	db.Close()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
 
 	// Generate expected result.
 	exp := "Aggregate statistics for 3 buckets\n\n" +
@@ -138,7 +145,7 @@ func TestStatsCommand_Run(t *testing.T) {
 
 	// Run the command.
 	m := NewMain()
-	if err := m.Run("stats", db.Path); err != nil {
+	if err := m.Run("stats", db.Path()); err != nil {
 		t.Fatal(err)
 	} else if m.Stdout.String() != exp {
 		t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String())
@@ -147,8 +154,7 @@ func TestStatsCommand_Run(t *testing.T) {
 
 // Ensure the "buckets" command can print a list of buckets.
 func TestBucketsCommand_Run(t *testing.T) {
-	db := MustOpen(0666, nil)
-	defer db.Close()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		for _, name := range []string{"foo", "bar", "baz"} {
@@ -161,13 +167,15 @@ func TestBucketsCommand_Run(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	db.DB.Close()
+	db.Close()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
 
 	expected := "bar\nbaz\nfoo\n"
 
 	// Run the command.
 	m := NewMain()
-	if err := m.Run("buckets", db.Path); err != nil {
+	if err := m.Run("buckets", db.Path()); err != nil {
 		t.Fatal(err)
 	} else if actual := m.Stdout.String(); actual != expected {
 		t.Fatalf("unexpected stdout:\n\n%s", actual)
@@ -176,8 +184,7 @@ func TestBucketsCommand_Run(t *testing.T) {
 
 // Ensure the "keys" command can print a list of keys for a bucket.
 func TestKeysCommand_Run(t *testing.T) {
-	db := MustOpen(0666, nil)
-	defer db.Close()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		for _, name := range []string{"foo", "bar"} {
@@ -196,13 +203,15 @@ func TestKeysCommand_Run(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	db.DB.Close()
+	db.Close()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
 
 	expected := "foo-0\nfoo-1\nfoo-2\n"
 
 	// Run the command.
 	m := NewMain()
-	if err := m.Run("keys", db.Path, "foo"); err != nil {
+	if err := m.Run("keys", db.Path(), "foo"); err != nil {
 		t.Fatal(err)
 	} else if actual := m.Stdout.String(); actual != expected {
 		t.Fatalf("unexpected stdout:\n\n%s", actual)
@@ -211,8 +220,7 @@ func TestKeysCommand_Run(t *testing.T) {
 
 // Ensure the "get" command can print the value of a key in a bucket.
 func TestGetCommand_Run(t *testing.T) {
-	db := MustOpen(0666, nil)
-	defer db.Close()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		for _, name := range []string{"foo", "bar"} {
@@ -232,19 +240,52 @@ func TestGetCommand_Run(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	db.DB.Close()
+	db.Close()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
 
 	expected := "val-foo-1\n"
 
 	// Run the command.
 	m := NewMain()
-	if err := m.Run("get", db.Path, "foo", "foo-1"); err != nil {
+	if err := m.Run("get", db.Path(), "foo", "foo-1"); err != nil {
 		t.Fatal(err)
 	} else if actual := m.Stdout.String(); actual != expected {
 		t.Fatalf("unexpected stdout:\n\n%s", actual)
 	}
 }
 
+// Ensure the "pages" command neither panic, nor change the db file.
+func TestPagesCommand_Run(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+
+	err := db.Update(func(tx *bolt.Tx) error {
+		for _, name := range []string{"foo", "bar"} {
+			b, err := tx.CreateBucket([]byte(name))
+			if err != nil {
+				return err
+			}
+			for i := 0; i < 3; i++ {
+				key := fmt.Sprintf("%s-%d", name, i)
+				val := fmt.Sprintf("val-%s-%d", name, i)
+				if err := b.Put([]byte(key), []byte(val)); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	})
+	require.NoError(t, err)
+	db.Close()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
+
+	// Run the command.
+	m := NewMain()
+	err = m.Run("pages", db.Path())
+	require.NoError(t, err)
+}
+
 // Main represents a test wrapper for main.Main that records output.
 type Main struct {
 	*main.Main
@@ -262,32 +303,6 @@ func NewMain() *Main {
 	return m
 }
 
-// MustOpen creates a Bolt database in a temporary location.
-func MustOpen(mode os.FileMode, options *bolt.Options) *DB {
-	// Create temporary path.
-	f, _ := ioutil.TempFile("", "bolt-")
-	f.Close()
-	os.Remove(f.Name())
-
-	db, err := bolt.Open(f.Name(), mode, options)
-	if err != nil {
-		panic(err.Error())
-	}
-	return &DB{DB: db, Path: f.Name()}
-}
-
-// DB is a test wrapper for bolt.DB.
-type DB struct {
-	*bolt.DB
-	Path string
-}
-
-// Close closes and removes the database.
-func (db *DB) Close() error {
-	defer os.Remove(db.Path)
-	return db.DB.Close()
-}
-
 func TestCompactCommand_Run(t *testing.T) {
 	var s int64
 	if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil {
@@ -295,11 +310,11 @@ func TestCompactCommand_Run(t *testing.T) {
 	}
 	rand.Seed(s)
 
-	dstdb := MustOpen(0666, nil)
+	dstdb := btesting.MustCreateDB(t)
 	dstdb.Close()
 
 	// fill the db
-	db := MustOpen(0666, nil)
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		n := 2 + rand.Intn(5)
 		for i := 0; i < n; i++ {
@@ -317,7 +332,6 @@ func TestCompactCommand_Run(t *testing.T) {
 		}
 		return nil
 	}); err != nil {
-		db.Close()
 		t.Fatal(err)
 	}
 
@@ -340,7 +354,6 @@ func TestCompactCommand_Run(t *testing.T) {
 		}
 		return nil
 	}); err != nil {
-		db.Close()
 		t.Fatal(err)
 	}
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -352,29 +365,26 @@ func TestCompactCommand_Run(t *testing.T) {
 		}
 		return tx.DeleteBucket([]byte("large_vals"))
 	}); err != nil {
-		db.Close()
 		t.Fatal(err)
 	}
-	db.DB.Close()
-	defer db.Close()
-	defer dstdb.Close()
+	db.Close()
 
-	dbChk, err := chkdb(db.Path)
+	dbChk, err := chkdb(db.Path())
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	m := NewMain()
-	if err := m.Run("compact", "-o", dstdb.Path, db.Path); err != nil {
+	if err := m.Run("compact", "-o", dstdb.Path(), db.Path()); err != nil {
 		t.Fatal(err)
 	}
 
-	dbChkAfterCompact, err := chkdb(db.Path)
+	dbChkAfterCompact, err := chkdb(db.Path())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	dstdbChk, err := chkdb(dstdb.Path)
+	dstdbChk, err := chkdb(dstdb.Path())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -420,7 +430,7 @@ func fillBucket(b *bolt.Bucket, prefix []byte) error {
 }
 
 func chkdb(path string) ([]byte, error) {
-	db, err := bolt.Open(path, 0666, nil)
+	db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		return nil, err
 	}
@@ -453,3 +463,17 @@ func walkBucket(parent *bolt.Bucket, k []byte, v []byte, w io.Writer) error {
 		return walkBucket(parent, k, v, w)
 	})
 }
+
+func dbData(t *testing.T, filePath string) []byte {
+	data, err := os.ReadFile(filePath)
+	require.NoError(t, err)
+	return data
+}
+
+func requireDBNoChange(t *testing.T, oldData []byte, filePath string) {
+	newData, err := os.ReadFile(filePath)
+	require.NoError(t, err)
+
+	noChange := bytes.Equal(oldData, newData)
+	require.True(t, noChange)
+}
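The new dbData/requireDBNoChange helpers give these tests a snapshot-and-compare guard: read the database file before running a read-only command, re-read it afterwards, and require byte-for-byte equality. The pattern in isolation (TestSomeReadOnlyCommand is a placeholder name):

func TestSomeReadOnlyCommand(t *testing.T) {
	db := btesting.MustCreateDB(t)
	db.Close()

	// The deferred check re-reads the file after the command has run
	// and fails the test on any byte-level difference.
	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())

	m := NewMain()
	require.NoError(t, m.Run("info", db.Path()))
}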
diff --git a/cmd/bbolt/page_command.go b/cmd/bbolt/page_command.go
new file mode 100644
index 0000000..6789ba5
--- /dev/null
+++ b/cmd/bbolt/page_command.go
@@ -0,0 +1,288 @@
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"go.etcd.io/bbolt/internal/guts_cli"
+)
+
+// pageCommand represents the "page" command execution.
+type pageCommand struct {
+	baseCommand
+}
+
+// newPageCommand returns a pageCommand.
+func newPageCommand(m *Main) *pageCommand {
+	c := &pageCommand{}
+	c.baseCommand = m.baseCommand
+	return c
+}
+
+// Run executes the command.
+func (cmd *pageCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	all := fs.Bool("all", false, "list all pages")
 +	formatValue := fs.String("format-value", "auto", "One of: "+FORMAT_MODES+". Applies to values on the leaf page.")
+
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	// Require database path and page id.
+	path := fs.Arg(0)
+	if path == "" {
+		return ErrPathRequired
+	} else if _, err := os.Stat(path); os.IsNotExist(err) {
+		return ErrFileNotFound
+	}
+
+	if !*all {
+		// Read page ids.
+		pageIDs, err := stringToPages(fs.Args()[1:])
+		if err != nil {
+			return err
+		} else if len(pageIDs) == 0 {
+			return ErrPageIDRequired
+		}
+		cmd.printPages(pageIDs, path, formatValue)
+	} else {
+		cmd.printAllPages(path, formatValue)
+	}
+	return nil
+}
+
+func (cmd *pageCommand) printPages(pageIDs []uint64, path string, formatValue *string) {
+	// Print each page listed.
+	for i, pageID := range pageIDs {
+		// Print a separator.
+		if i > 0 {
+			fmt.Fprintln(cmd.Stdout, "===============================================")
+		}
+		_, err2 := cmd.printPage(path, pageID, *formatValue)
+		if err2 != nil {
+			fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuuing...\n", pageID, err2)
+		}
+	}
+}
+
+func (cmd *pageCommand) printAllPages(path string, formatValue *string) {
+	_, hwm, err := guts_cli.ReadPageAndHWMSize(path)
+	if err != nil {
+		fmt.Fprintf(cmd.Stdout, "cannot read number of pages: %v", err)
+	}
+
+	// Print each page listed.
+	for pageID := uint64(0); pageID < uint64(hwm); {
+		// Print a separator.
+		if pageID > 0 {
+			fmt.Fprintln(cmd.Stdout, "===============================================")
+		}
+		overflow, err2 := cmd.printPage(path, pageID, *formatValue)
+		if err2 != nil {
+			fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuuing...\n", pageID, err2)
+			pageID++
+		} else {
+			pageID += uint64(overflow) + 1
+		}
+	}
+}
+
 +// printPage prints the given page to cmd.Stdout and returns its overflow page count, or an error.
+func (cmd *pageCommand) printPage(path string, pageID uint64, formatValue string) (numPages uint32, reterr error) {
+	defer func() {
+		if err := recover(); err != nil {
+			reterr = fmt.Errorf("%s", err)
+		}
+	}()
+
+	// Retrieve page info and page size.
+	p, buf, err := guts_cli.ReadPage(path, pageID)
+	if err != nil {
+		return 0, err
+	}
+
+	// Print basic page info.
+	fmt.Fprintf(cmd.Stdout, "Page ID:    %d\n", p.Id())
+	fmt.Fprintf(cmd.Stdout, "Page Type:  %s\n", p.Type())
+	fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf))
+	fmt.Fprintf(cmd.Stdout, "Overflow pages: %d\n", p.Overflow())
+
+	// Print type-specific data.
+	switch p.Type() {
+	case "meta":
+		err = cmd.PrintMeta(cmd.Stdout, buf)
+	case "leaf":
+		err = cmd.PrintLeaf(cmd.Stdout, buf, formatValue)
+	case "branch":
+		err = cmd.PrintBranch(cmd.Stdout, buf)
+	case "freelist":
+		err = cmd.PrintFreelist(cmd.Stdout, buf)
+	}
+	if err != nil {
+		return 0, err
+	}
+	return p.Overflow(), nil
+}
+
+// PrintMeta prints the data from the meta page.
+func (cmd *pageCommand) PrintMeta(w io.Writer, buf []byte) error {
+	m := guts_cli.LoadPageMeta(buf)
+	m.Print(w)
+	return nil
+}
+
+// PrintLeaf prints the data for a leaf page.
+func (cmd *pageCommand) PrintLeaf(w io.Writer, buf []byte, formatValue string) error {
+	p := guts_cli.LoadPage(buf)
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.Count())
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.Count(); i++ {
+		e := p.LeafPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.Key())) {
+			k = fmt.Sprintf("%q", string(e.Key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.Key()))
+		}
+
+		// Format value as string.
+		var v string
+		if e.IsBucketEntry() {
+			b := e.Bucket()
+			v = b.String()
+		} else {
+			var err error
+			v, err = formatBytes(e.Value(), formatValue)
+			if err != nil {
+				return err
+			}
+		}
+
+		fmt.Fprintf(w, "%s: %s\n", k, v)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
 +// PrintBranch prints the data for a branch page.
+func (cmd *pageCommand) PrintBranch(w io.Writer, buf []byte) error {
+	p := guts_cli.LoadPage(buf)
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.Count())
+	fmt.Fprintf(w, "\n")
+
+	// Print each key/value.
+	for i := uint16(0); i < p.Count(); i++ {
+		e := p.BranchPageElement(i)
+
+		// Format key as string.
+		var k string
+		if isPrintable(string(e.Key())) {
+			k = fmt.Sprintf("%q", string(e.Key()))
+		} else {
+			k = fmt.Sprintf("%x", string(e.Key()))
+		}
+
+		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.PgId())
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintFreelist prints the data for a freelist page.
+func (cmd *pageCommand) PrintFreelist(w io.Writer, buf []byte) error {
+	p := guts_cli.LoadPage(buf)
+
+	// Print number of items.
+	fmt.Fprintf(w, "Item Count: %d\n", p.FreelistPageCount())
+	fmt.Fprintf(w, "Overflow: %d\n", p.Overflow())
+
+	fmt.Fprintf(w, "\n")
+
+	// Print each page in the freelist.
+	ids := p.FreelistPagePages()
 +	for _, id := range ids {
 +		fmt.Fprintf(w, "%d\n", id)
+	}
+	fmt.Fprintf(w, "\n")
+	return nil
+}
+
+// PrintPage prints a given page as hexadecimal.
+func (cmd *pageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
+	const bytesPerLineN = 16
+
+	// Read page into buffer.
+	buf := make([]byte, pageSize)
+	addr := pageID * pageSize
+	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
+		return err
+	} else if n != pageSize {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Write out to writer in 16-byte lines.
+	var prev []byte
+	var skipped bool
+	for offset := 0; offset < pageSize; offset += bytesPerLineN {
+		// Retrieve current 16-byte line.
+		line := buf[offset : offset+bytesPerLineN]
+		isLastLine := (offset == (pageSize - bytesPerLineN))
+
+		// If it's the same as the previous line then print a skip.
+		if bytes.Equal(line, prev) && !isLastLine {
+			if !skipped {
+				fmt.Fprintf(w, "%07x *\n", addr+offset)
+				skipped = true
+			}
+		} else {
+			// Print line as hexadecimal in 2-byte groups.
+			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset,
+				line[0:2], line[2:4], line[4:6], line[6:8],
+				line[8:10], line[10:12], line[12:14], line[14:16],
+			)
+
+			skipped = false
+		}
+
+		// Save the previous line.
+		prev = line
+	}
+	fmt.Fprint(w, "\n")
+
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *pageCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt page PATH pageid [pageid...]
+   or: bolt page --all PATH
+
+Additional options include:
+
+	--all
 +		prints all pages (skips only pages already covered as overflow of a preceding page)
+	--format-value=`+FORMAT_MODES+` (default: auto)
+		prints values (on the leaf page) using the given format.
+
+Page prints one or more pages in human readable format.
+`, "\n")
+}
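The --all mode above advances its cursor by each page's overflow count so continuation pages are not printed twice. A condensed sketch of just that iteration, using the guts_cli helpers this file relies on (walkPages is our name):

func walkPages(path string, visit func(id uint64, p *guts_cli.Page)) error {
	_, hwm, err := guts_cli.ReadPageAndHWMSize(path)
	if err != nil {
		return err
	}
	for id := uint64(0); id < uint64(hwm); {
		p, _, err := guts_cli.ReadPage(path, id)
		if err != nil {
			return err
		}
		visit(id, p)
		// Jump past this page and any overflow pages chained to it.
		id += uint64(p.Overflow()) + 1
	}
	return nil
}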
diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go
new file mode 100644
index 0000000..5553c8f
--- /dev/null
+++ b/cmd/bbolt/surgery_commands.go
@@ -0,0 +1,300 @@
+package main
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"go.etcd.io/bbolt/internal/guts_cli"
+	"go.etcd.io/bbolt/internal/surgeon"
+)
+
+// surgeryCommand represents the "surgery" command execution.
+type surgeryCommand struct {
+	baseCommand
+
+	srcPath string
+	dstPath string
+}
+
 +// newSurgeryCommand returns a surgeryCommand.
+func newSurgeryCommand(m *Main) *surgeryCommand {
+	c := &surgeryCommand{}
+	c.baseCommand = m.baseCommand
+	return c
+}
+
+// Run executes the `surgery` program.
+func (cmd *surgeryCommand) Run(args ...string) error {
+	// Require a command at the beginning.
+	if len(args) == 0 || strings.HasPrefix(args[0], "-") {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	// Execute command.
+	switch args[0] {
+	case "help":
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	case "revert-meta-page":
+		return newRevertMetaPageCommand(cmd).Run(args[1:]...)
+	case "copy-page":
+		return newCopyPageCommand(cmd).Run(args[1:]...)
+	case "clear-page":
+		return newClearPageCommand(cmd).Run(args[1:]...)
+	default:
+		return ErrUnknownCommand
+	}
+}
+
+func (cmd *surgeryCommand) parsePathsAndCopyFile(fs *flag.FlagSet) error {
+	// Require database paths.
+	cmd.srcPath = fs.Arg(0)
+	if cmd.srcPath == "" {
+		return ErrPathRequired
+	}
+
+	cmd.dstPath = fs.Arg(1)
+	if cmd.dstPath == "" {
+		return errors.New("output file required")
+	}
+
+	// Ensure source file exists.
+	_, err := os.Stat(cmd.srcPath)
+	if os.IsNotExist(err) {
+		return ErrFileNotFound
+	} else if err != nil {
+		return err
+	}
+
+	// Ensure the output file does not exist.
+	_, err = os.Stat(cmd.dstPath)
+	if err == nil {
+		return fmt.Errorf("output file %q already exists", cmd.dstPath)
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+
+	// Copy the database file from srcPath to dstPath.
+	if err := copyFile(cmd.srcPath, cmd.dstPath); err != nil {
+		return fmt.Errorf("failed to copy file: %w", err)
+	}
+
+	return nil
+}
+
+func copyFile(srcPath, dstPath string) error {
+	srcDB, err := os.Open(srcPath)
+	if err != nil {
+		return fmt.Errorf("failed to open source file %q: %w", srcPath, err)
+	}
+	defer srcDB.Close()
+	dstDB, err := os.Create(dstPath)
+	if err != nil {
+		return fmt.Errorf("failed to create output file %q: %w", dstPath, err)
+	}
+	defer dstDB.Close()
+	written, err := io.Copy(dstDB, srcDB)
+	if err != nil {
+		return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err)
+	}
+
+	srcFi, err := srcDB.Stat()
+	if err != nil {
+		return fmt.Errorf("failed to get source file info %q: %w", srcPath, err)
+	}
+	initialSize := srcFi.Size()
+	if initialSize != written {
+		return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize)
+	}
+
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *surgeryCommand) Usage() string {
+	return strings.TrimLeft(`
+Surgery is a command for performing low-level updates on bbolt databases.
+
+Usage:
+
+	bbolt surgery command [arguments]
+
+The commands are:
+    clear-page             clear all elements in the page at the given pageid
+    copy-page              copy page from source pageid to target pageid
+    help                   print this screen
+    revert-meta-page       revert the meta page change made by the last transaction
+
+Use "bbolt surgery [command] -h" for more information about a command.
+`, "\n")
+}
+
+// revertMetaPageCommand represents the "surgery revert-meta-page" command execution.
+type revertMetaPageCommand struct {
+	*surgeryCommand
+}
+
+// newRevertMetaPageCommand returns a revertMetaPageCommand.
+func newRevertMetaPageCommand(m *surgeryCommand) *revertMetaPageCommand {
+	c := &revertMetaPageCommand{}
+	c.surgeryCommand = m
+	return c
+}
+
+// Run executes the command.
+func (cmd *revertMetaPageCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	if err := cmd.parsePathsAndCopyFile(fs); err != nil {
+		return fmt.Errorf("revertMetaPageCommand failed to parse paths and copy file: %w", err)
+	}
+
+	// revert the meta page
+	if err := surgeon.RevertMetaPage(cmd.dstPath); err != nil {
+		return fmt.Errorf("revertMetaPageCommand failed: %w", err)
+	}
+
+	fmt.Fprintln(cmd.Stdout, "The meta page is reverted.")
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *revertMetaPageCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt surgery revert-meta-page SRC DST
+
+RevertMetaPage copies the database file at SRC to a newly created database
+file at DST. Afterwards, it reverts the meta page on the newly created
+database at DST.
+
+The original database is left untouched.
+`, "\n")
+}
+
+// copyPageCommand represents the "surgery copy-page" command execution.
+type copyPageCommand struct {
+	*surgeryCommand
+}
+
+// newCopyPageCommand returns a copyPageCommand.
+func newCopyPageCommand(m *surgeryCommand) *copyPageCommand {
+	c := &copyPageCommand{}
+	c.surgeryCommand = m
+	return c
+}
+
+// Run executes the command.
+func (cmd *copyPageCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	if err := cmd.parsePathsAndCopyFile(fs); err != nil {
+		return fmt.Errorf("copyPageCommand failed to parse paths and copy file: %w", err)
+	}
+
+	// Read page id.
+	srcPageId, err := strconv.ParseUint(fs.Arg(2), 10, 64)
+	if err != nil {
+		return err
+	}
+	dstPageId, err := strconv.ParseUint(fs.Arg(3), 10, 64)
+	if err != nil {
+		return err
+	}
+
+	// copy the page
+	if err := surgeon.CopyPage(cmd.dstPath, guts_cli.Pgid(srcPageId), guts_cli.Pgid(dstPageId)); err != nil {
+		return fmt.Errorf("copyPageCommand failed: %w", err)
+	}
+
+	fmt.Fprintf(cmd.Stdout, "The page %d was copied to page %d\n", srcPageId, dstPageId)
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *copyPageCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt surgery copy-page SRC DST srcPageId dstPageId
+
+CopyPage copies the database file at SRC to a newly created database
+file at DST. Afterwards, it copies the page at srcPageId to the page
+at dstPageId in DST.
+
+The original database is left untouched.
+`, "\n")
+}
+
+// clearPageCommand represents the "surgery clear-page" command execution.
+type clearPageCommand struct {
+	*surgeryCommand
+}
+
+// newClearPageCommand returns a clearPageCommand.
+func newClearPageCommand(m *surgeryCommand) *clearPageCommand {
+	c := &clearPageCommand{}
+	c.surgeryCommand = m
+	return c
+}
+
+// Run executes the command.
+func (cmd *clearPageCommand) Run(args ...string) error {
+	// Parse flags.
+	fs := flag.NewFlagSet("", flag.ContinueOnError)
+	help := fs.Bool("h", false, "")
+	if err := fs.Parse(args); err != nil {
+		return err
+	} else if *help {
+		fmt.Fprintln(cmd.Stderr, cmd.Usage())
+		return ErrUsage
+	}
+
+	if err := cmd.parsePathsAndCopyFile(fs); err != nil {
+		return fmt.Errorf("clearPageCommand failed to parse paths and copy file: %w", err)
+	}
+
+	// Read page id.
+	pageId, err := strconv.ParseUint(fs.Arg(2), 10, 64)
+	if err != nil {
+		return err
+	}
+
+	if err := surgeon.ClearPage(cmd.dstPath, guts_cli.Pgid(pageId)); err != nil {
+		return fmt.Errorf("clearPageCommand failed: %w", err)
+	}
+
+	fmt.Fprintf(cmd.Stdout, "Page (%d) was cleared\n", pageId)
+	return nil
+}
+
+// Usage returns the help message.
+func (cmd *clearPageCommand) Usage() string {
+	return strings.TrimLeft(`
+usage: bolt surgery clear-page SRC DST pageId
+
+ClearPage copies the database file at SRC to a newly created database
+file at DST. Afterwards, it clears all elements in the page at pageId
+in DST.
+
+The original database is left untouched.
+`, "\n")
+}
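
Putting the usage strings above together, typical invocations look like the following (paths and page ids are illustrative only). Each command first copies SRC to DST and then performs the surgery on DST, so the original database is never modified:

bbolt surgery revert-meta-page ./src.db ./dst.db
bbolt surgery copy-page ./src.db ./dst.db 3 2
bbolt surgery clear-page ./src.db ./dst.db 3
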
diff --git a/cmd/bbolt/surgery_commands_test.go b/cmd/bbolt/surgery_commands_test.go
new file mode 100644
index 0000000..9978368
--- /dev/null
+++ b/cmd/bbolt/surgery_commands_test.go
@@ -0,0 +1,142 @@
+package main_test
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
+	"go.etcd.io/bbolt/internal/guts_cli"
+)
+
+func TestSurgery_RevertMetaPage(t *testing.T) {
+	pageSize := 4096
+	db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize})
+	srcPath := db.Path()
+
+	defer requireDBNoChange(t, dbData(t, db.Path()), db.Path())
+
+	srcFile, err := os.Open(srcPath)
+	require.NoError(t, err)
+	defer srcFile.Close()
+
+	// Read both meta0 and meta1 from srcFile
+	srcBuf0 := readPage(t, srcPath, 0, pageSize)
+	srcBuf1 := readPage(t, srcPath, 1, pageSize)
+	meta0Page := guts_cli.LoadPageMeta(srcBuf0)
+	meta1Page := guts_cli.LoadPageMeta(srcBuf1)
+
+	// Get the non-active meta page
+	nonActiveSrcBuf := srcBuf0
+	nonActiveMetaPageId := 0
+	if meta0Page.Txid() > meta1Page.Txid() {
+		nonActiveSrcBuf = srcBuf1
+		nonActiveMetaPageId = 1
+	}
+	t.Logf("non active meta page id: %d", nonActiveMetaPageId)
+
+	// revert the meta page
+	dstPath := filepath.Join(t.TempDir(), "dstdb")
+	m := NewMain()
+	err = m.Run("surgery", "revert-meta-page", srcPath, dstPath)
+	require.NoError(t, err)
+
+	// read both meta0 and meta1 from dst file
+	dstBuf0 := readPage(t, dstPath, 0, pageSize)
+	dstBuf1 := readPage(t, dstPath, 1, pageSize)
+
+	// Check the result. Note that the page ID (the first 8 bytes) is skipped in the comparison.
+	assert.Equal(t, pageDataWithoutPageId(nonActiveSrcBuf), pageDataWithoutPageId(dstBuf0))
+	assert.Equal(t, pageDataWithoutPageId(nonActiveSrcBuf), pageDataWithoutPageId(dstBuf1))
+}
+
+func TestSurgery_CopyPage(t *testing.T) {
+	pageSize := 4096
+	db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize})
+	srcPath := db.Path()
+
+	// Insert some sample data
+	t.Log("Insert some sample data")
+	err := db.Fill([]byte("data"), 1, 20,
+		func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+		func(tx int, k int) []byte { return make([]byte, 10) },
+	)
+	require.NoError(t, err)
+
+	defer requireDBNoChange(t, dbData(t, srcPath), srcPath)
+
+	// copy page 3 to page 2
+	t.Log("copy page 3 to page 2")
+	dstPath := filepath.Join(t.TempDir(), "dstdb")
+	m := NewMain()
+	err = m.Run("surgery", "copy-page", srcPath, dstPath, "3", "2")
+	require.NoError(t, err)
+
+	// Page 2 should have exactly the same data as page 3.
+	t.Log("Verify result")
+	srcPageId3Data := readPage(t, srcPath, 3, pageSize)
+	dstPageId3Data := readPage(t, dstPath, 3, pageSize)
+	dstPageId2Data := readPage(t, dstPath, 2, pageSize)
+
+	assert.Equal(t, srcPageId3Data, dstPageId3Data)
+	assert.Equal(t, pageDataWithoutPageId(srcPageId3Data), pageDataWithoutPageId(dstPageId2Data))
+}
+
+// TODO(ahrtr): add test case below for `surgery clear-page` command:
+//  1. The page is a branch page. All its children should become free pages.
+func TestSurgery_ClearPage(t *testing.T) {
+	pageSize := 4096
+	db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize})
+	srcPath := db.Path()
+
+	// Insert some sample data
+	t.Log("Insert some sample data")
+	err := db.Fill([]byte("data"), 1, 20,
+		func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+		func(tx int, k int) []byte { return make([]byte, 10) },
+	)
+	require.NoError(t, err)
+
+	defer requireDBNoChange(t, dbData(t, srcPath), srcPath)
+
+	// clear page 3
+	t.Log("clear page 3")
+	dstPath := filepath.Join(t.TempDir(), "dstdb")
+	m := NewMain()
+	err = m.Run("surgery", "clear-page", srcPath, dstPath, "3")
+	require.NoError(t, err)
+
+	// Page 3 should now be empty after being cleared.
+	t.Log("Verify result")
+	dstPageId3Data := readPage(t, dstPath, 3, pageSize)
+
+	p := guts_cli.LoadPage(dstPageId3Data)
+	assert.Equal(t, uint16(0), p.Count())
+	assert.Equal(t, uint32(0), p.Overflow())
+}
+
+func readPage(t *testing.T, path string, pageId int, pageSize int) []byte {
+	dbFile, err := os.Open(path)
+	require.NoError(t, err)
+	defer dbFile.Close()
+
+	fi, err := dbFile.Stat()
+	require.NoError(t, err)
+	require.GreaterOrEqual(t, fi.Size(), int64((pageId+1)*pageSize))
+
+	buf := make([]byte, pageSize)
+	byteRead, err := dbFile.ReadAt(buf, int64(pageId*pageSize))
+	require.NoError(t, err)
+	require.Equal(t, pageSize, byteRead)
+
+	return buf
+}
+
+func pageDataWithoutPageId(buf []byte) []byte {
+	return buf[8:]
+}
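
The pageDataWithoutPageId helper above works because the page id occupies the first 8 bytes of bbolt's 16-byte page header (the tests define pageHeaderSize = 16), so comparing buf[8:] compares everything except the id. A sketch of the assumed header layout, mirroring bbolt's internal page struct:

package main

// pageHeader sketches the assumed on-disk layout; field order and sizes
// mirror bbolt's internal page struct (8 + 2 + 2 + 4 = 16 bytes, matching
// pageHeaderSize in the tests).
type pageHeader struct {
	id       uint64 // bytes 0..7  -- skipped by pageDataWithoutPageId
	flags    uint16 // bytes 8..9
	count    uint16 // bytes 10..11
	overflow uint32 // bytes 12..15
}
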
diff --git a/compact.go b/compact.go
index e4fe91b..5f1d4c3 100644
--- a/compact.go
+++ b/compact.go
@@ -12,7 +12,11 @@ func Compact(dst, src *DB, txMaxSize int64) error {
 	if err != nil {
 		return err
 	}
-	defer tx.Rollback()
+	defer func() {
+		if tempErr := tx.Rollback(); tempErr != nil {
+			err = tempErr
+		}
+	}()
 
 	if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
 		// On each key/value, check if we have exceeded tx size.
@@ -73,8 +77,9 @@ func Compact(dst, src *DB, txMaxSize int64) error {
 	}); err != nil {
 		return err
 	}
+	err = tx.Commit()
 
-	return tx.Commit()
+	return err
 }
 
 // walkFunc is the type of the function called for keys (buckets and "normal"
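
One subtlety in the Compact change above: the assignment to err inside the deferred closure only reaches the caller if the function's result is a named return value, and Rollback on an already-committed transaction reports the transaction as closed rather than succeeding silently. A minimal sketch of the complete pattern under those assumptions, not the upstream implementation:

package main

import (
	"errors"

	bolt "go.etcd.io/bbolt"
)

// doInTx sketches the deferred-rollback pattern the Compact change relies
// on. The named return value (err) is what lets the deferred closure
// overwrite the function's result; ErrTxClosed is filtered out because
// Rollback on a committed transaction reports it as already closed.
func doInTx(db *bolt.DB) (err error) {
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	defer func() {
		if rerr := tx.Rollback(); rerr != nil && !errors.Is(rerr, bolt.ErrTxClosed) {
			err = rerr
		}
	}()

	// ... copy keys/values into tx, as Compact does ...

	return tx.Commit()
}
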
diff --git a/cursor.go b/cursor.go
index 98aeb44..5dafb0c 100644
--- a/cursor.go
+++ b/cursor.go
@@ -6,7 +6,8 @@ import (
 	"sort"
 )
 
-// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket
+// in lexicographical order.
 // Cursors see nested buckets with value == nil.
 // Cursors can be obtained from a transaction and are valid as long as the transaction is open.
 //
@@ -30,10 +31,18 @@ func (c *Cursor) Bucket() *Bucket {
 // The returned key and value are only valid for the life of the transaction.
 func (c *Cursor) First() (key []byte, value []byte) {
 	_assert(c.bucket.tx.db != nil, "tx closed")
+	k, v, flags := c.first()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
+}
+
+func (c *Cursor) first() (key []byte, value []byte, flags uint32) {
 	c.stack = c.stack[:0]
 	p, n := c.bucket.pageNode(c.bucket.root)
 	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
-	c.first()
+	c.goToFirstElementOnTheStack()
 
 	// If we land on an empty page then move to the next value.
 	// https://github.com/boltdb/bolt/issues/450
@@ -43,10 +52,9 @@ func (c *Cursor) First() (key []byte, value []byte) {
 
 	k, v, flags := c.keyValue()
 	if (flags & uint32(bucketLeafFlag)) != 0 {
-		return k, nil
+		return k, nil, flags
 	}
-	return k, v
-
+	return k, v, flags
 }
 
 // Last moves the cursor to the last item in the bucket and returns its key and value.
@@ -60,6 +68,17 @@ func (c *Cursor) Last() (key []byte, value []byte) {
 	ref.index = ref.count() - 1
 	c.stack = append(c.stack, ref)
 	c.last()
+
+	// If this is an empty page (calling Delete may result in empty pages)
+	// we call prev to find the last page that is not empty
+	for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 {
+		c.prev()
+	}
+
+	if len(c.stack) == 0 {
+		return nil, nil
+	}
+
 	k, v, flags := c.keyValue()
 	if (flags & uint32(bucketLeafFlag)) != 0 {
 		return k, nil
@@ -84,37 +103,20 @@ func (c *Cursor) Next() (key []byte, value []byte) {
 // The returned key and value are only valid for the life of the transaction.
 func (c *Cursor) Prev() (key []byte, value []byte) {
 	_assert(c.bucket.tx.db != nil, "tx closed")
-
-	// Attempt to move back one element until we're successful.
-	// Move up the stack as we hit the beginning of each page in our stack.
-	for i := len(c.stack) - 1; i >= 0; i-- {
-		elem := &c.stack[i]
-		if elem.index > 0 {
-			elem.index--
-			break
-		}
-		c.stack = c.stack[:i]
-	}
-
-	// If we've hit the end then return nil.
-	if len(c.stack) == 0 {
-		return nil, nil
-	}
-
-	// Move down the stack to find the last element of the last leaf under this branch.
-	c.last()
-	k, v, flags := c.keyValue()
+	k, v, flags := c.prev()
 	if (flags & uint32(bucketLeafFlag)) != 0 {
 		return k, nil
 	}
 	return k, v
 }
 
-// Seek moves the cursor to a given key and returns it.
+// Seek moves the cursor to a given key using a b-tree search and returns it.
 // If the key does not exist then the next key is used. If no keys
 // follow, a nil key is returned.
 // The returned key and value are only valid for the life of the transaction.
 func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+	_assert(c.bucket.tx.db != nil, "tx closed")
+
 	k, v, flags := c.seek(seek)
 
 	// If we ended up after the last element of a page then move to the next one.
@@ -152,8 +154,6 @@ func (c *Cursor) Delete() error {
 // seek moves the cursor to a given key and returns it.
 // If the key does not exist then the next key is used.
 func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
-	_assert(c.bucket.tx.db != nil, "tx closed")
-
 	// Start from root page/node and traverse to correct page.
 	c.stack = c.stack[:0]
 	c.search(seek, c.bucket.root)
@@ -163,7 +163,7 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
 }
 
 // first moves the cursor to the first leaf element under the last page in the stack.
-func (c *Cursor) first() {
+func (c *Cursor) goToFirstElementOnTheStack() {
 	for {
 		// Exit when we hit a leaf page.
 		var ref = &c.stack[len(c.stack)-1]
@@ -172,13 +172,13 @@ func (c *Cursor) first() {
 		}
 
 		// Keep adding pages pointing to the first element to the stack.
-		var pgid pgid
+		var pgId pgid
 		if ref.node != nil {
-			pgid = ref.node.inodes[ref.index].pgid
+			pgId = ref.node.inodes[ref.index].pgid
 		} else {
-			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+			pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
 		}
-		p, n := c.bucket.pageNode(pgid)
+		p, n := c.bucket.pageNode(pgId)
 		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
 	}
 }
@@ -193,13 +193,13 @@ func (c *Cursor) last() {
 		}
 
 		// Keep adding pages pointing to the last element in the stack.
-		var pgid pgid
+		var pgId pgid
 		if ref.node != nil {
-			pgid = ref.node.inodes[ref.index].pgid
+			pgId = ref.node.inodes[ref.index].pgid
 		} else {
-			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+			pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
 		}
-		p, n := c.bucket.pageNode(pgid)
+		p, n := c.bucket.pageNode(pgId)
 
 		var nextRef = elemRef{page: p, node: n}
 		nextRef.index = nextRef.count() - 1
@@ -231,7 +231,7 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
 		// Otherwise start from where we left off in the stack and find the
 		// first element of the first leaf page.
 		c.stack = c.stack[:i+1]
-		c.first()
+		c.goToFirstElementOnTheStack()
 
 		// If this is an empty page then restart and move back up the stack.
 		// https://github.com/boltdb/bolt/issues/450
@@ -243,9 +243,33 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
 	}
 }
 
+// prev moves the cursor to the previous item in the bucket and returns its key and value.
+// If the cursor is at the beginning of the bucket then a nil key and value are returned.
+func (c *Cursor) prev() (key []byte, value []byte, flags uint32) {
+	// Attempt to move back one element until we're successful.
+	// Move up the stack as we hit the beginning of each page in our stack.
+	for i := len(c.stack) - 1; i >= 0; i-- {
+		elem := &c.stack[i]
+		if elem.index > 0 {
+			elem.index--
+			break
+		}
+		c.stack = c.stack[:i]
+	}
+
+	// If we've hit the end then return nil.
+	if len(c.stack) == 0 {
+		return nil, nil, 0
+	}
+
+	// Move down the stack to find the last element of the last leaf under this branch.
+	c.last()
+	return c.keyValue()
+}
+
 // search recursively performs a binary search against a given page/node until it finds a given key.
-func (c *Cursor) search(key []byte, pgid pgid) {
-	p, n := c.bucket.pageNode(pgid)
+func (c *Cursor) search(key []byte, pgId pgid) {
+	p, n := c.bucket.pageNode(pgId)
 	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
 		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
 	}
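
The Last/prev changes above matter for reverse iteration: deleting keys can leave empty leaf pages behind, and Last now walks backwards past them instead of stopping on a bogus element. A minimal reverse scan that exercises this path (the bucket name is illustrative):

package main

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// reverseScan walks a bucket from the last key to the first. With the
// fix above, trailing empty leaf pages left behind by deletes are
// skipped transparently.
func reverseScan(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets")) // illustrative bucket name
		if b == nil {
			return nil
		}
		c := b.Cursor()
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			fmt.Printf("%s=%s\n", k, v)
		}
		return nil
	})
}
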
diff --git a/cursor_test.go b/cursor_test.go
index d2a8bc7..8e112c1 100644
--- a/cursor_test.go
+++ b/cursor_test.go
@@ -12,12 +12,12 @@ import (
 	"testing/quick"
 
 	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
 )
 
 // Ensure that a cursor can return a reference to the bucket that created it.
 func TestCursor_Bucket(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -34,8 +34,7 @@ func TestCursor_Bucket(t *testing.T) {
 
 // Ensure that a Tx cursor can seek to the appropriate keys.
 func TestCursor_Seek(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -104,8 +103,7 @@ func TestCursor_Seek(t *testing.T) {
 }
 
 func TestCursor_Delete(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	const count = 1000
 
@@ -167,8 +165,7 @@ func TestCursor_Delete(t *testing.T) {
 //
 // Related: https://github.com/boltdb/bolt/pull/187
 func TestCursor_Seek_Large(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var count = 10000
 
@@ -231,8 +228,7 @@ func TestCursor_Seek_Large(t *testing.T) {
 
 // Ensure that a cursor can iterate over an empty bucket without error.
 func TestCursor_EmptyBucket(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		return err
@@ -256,8 +252,7 @@ func TestCursor_EmptyBucket(t *testing.T) {
 
 // Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
 func TestCursor_EmptyBucketReverse(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("widgets"))
@@ -281,8 +276,7 @@ func TestCursor_EmptyBucketReverse(t *testing.T) {
 
 // Ensure that a Tx cursor can iterate over a single root with a couple elements.
 func TestCursor_Iterate_Leaf(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -352,8 +346,7 @@ func TestCursor_Iterate_Leaf(t *testing.T) {
 
 // Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
 func TestCursor_LeafRootReverse(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -416,8 +409,7 @@ func TestCursor_LeafRootReverse(t *testing.T) {
 
 // Ensure that a Tx cursor can restart from the beginning.
 func TestCursor_Restart(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -462,8 +454,7 @@ func TestCursor_Restart(t *testing.T) {
 
 // Ensure that a cursor can skip over empty pages that have been deleted.
 func TestCursor_First_EmptyPages(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Create 1000 keys in the "widgets" bucket.
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -507,10 +498,56 @@ func TestCursor_First_EmptyPages(t *testing.T) {
 	}
 }
 
+// Ensure that a cursor can skip over empty pages that have been deleted when iterating in reverse.
+func TestCursor_Last_EmptyPages(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+
+	// Create 1000 keys in the "widgets" bucket.
+	if err := db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		for i := 0; i < 1000; i++ {
+			if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Delete the last 800 elements to ensure the last page is empty
+	if err := db.Update(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		for i := 200; i < 1000; i++ {
+			if err := b.Delete(u64tob(uint64(i))); err != nil {
+				t.Fatal(err)
+			}
+		}
+
+		c := b.Cursor()
+		var n int
+		for k, _ := c.Last(); k != nil; k, _ = c.Prev() {
+			n++
+		}
+		if n != 200 {
+			t.Fatalf("unexpected key count: %d", n)
+		}
+
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+}
+
 // Ensure that a Tx can iterate over all elements in a bucket.
 func TestCursor_QuickCheck(t *testing.T) {
 	f := func(items testdata) bool {
-		db := MustOpenDB()
+		db := btesting.MustCreateDB(t)
 		defer db.MustClose()
 
 		// Bulk insert all values.
@@ -568,7 +605,7 @@ func TestCursor_QuickCheck(t *testing.T) {
 // Ensure that a transaction can iterate over all elements in a bucket in reverse.
 func TestCursor_QuickCheck_Reverse(t *testing.T) {
 	f := func(items testdata) bool {
-		db := MustOpenDB()
+		db := btesting.MustCreateDB(t)
 		defer db.MustClose()
 
 		// Bulk insert all values.
@@ -624,8 +661,7 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
 
 // Ensure that a Tx cursor can iterate over subbuckets.
 func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -666,8 +702,7 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
 
 // Ensure that a Tx cursor can reverse iterate over subbuckets.
 func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
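
The test refactor above replaces the local MustOpenDB/defer db.MustClose() pair with btesting.MustCreateDB(t). The helper's implementation is not part of this diff; a plausible sketch of the pattern — create the DB under a per-test temp dir and register cleanup with the testing framework — could look like this (everything below is an assumption, not the real internal/btesting code):

package btesting

import (
	"path/filepath"
	"testing"

	bolt "go.etcd.io/bbolt"
)

// DB wraps *bolt.DB for tests. Hypothetical sketch only.
type DB struct {
	*bolt.DB
}

// MustCreateDB is a hypothetical sketch: open a DB under t.TempDir()
// and let the testing framework close it, so tests no longer need an
// explicit `defer db.MustClose()`.
func MustCreateDB(t testing.TB) *DB {
	path := filepath.Join(t.TempDir(), "db")
	bdb, err := bolt.Open(path, 0666, nil)
	if err != nil {
		t.Fatal(err)
	}
	db := &DB{DB: bdb}
	t.Cleanup(func() {
		if db.DB != nil {
			_ = db.DB.Close()
		}
	})
	return db
}
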
diff --git a/db.go b/db.go
index a798c39..c942212 100644
--- a/db.go
+++ b/db.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"hash/fnv"
-	"log"
+	"io"
 	"os"
 	"runtime"
 	"sort"
@@ -81,7 +81,7 @@ type DB struct {
 	NoFreelistSync bool
 
 	// FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
-	// dramatic performance degradation if database is large and framentation in freelist is common.
+	// dramatic performance degradation if database is large and fragmentation in freelist is common.
 	// The alternative one is using hashmap, it is faster in almost all circumstances
 	// but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
 	// The default type is array
@@ -95,6 +95,11 @@ type DB struct {
 	// https://github.com/boltdb/bolt/issues/284
 	NoGrowSync bool
 
+	// When `true`, bbolt will always load the free pages when opening the DB.
+	// When opening the db in write mode, this flag will always be
+	// automatically set to `true`.
+	PreLoadFreelist bool
+
 	// If you want to read the entire database fast, you can set MmapFlag to
 	// syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
 	MmapFlags int
@@ -129,6 +134,9 @@ type DB struct {
 	path     string
 	openFile func(string, int, os.FileMode) (*os.File, error)
 	file     *os.File
+	// `dataref` isn't used at all on Windows, so golangci-lint
+	// always fails on the Windows platform.
+	//nolint
 	dataref  []byte // mmap'ed readonly, write throws SEGV
 	data     *[maxMapSize]byte
 	datasz   int
@@ -193,6 +201,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 	db.NoGrowSync = options.NoGrowSync
 	db.MmapFlags = options.MmapFlags
 	db.NoFreelistSync = options.NoFreelistSync
+	db.PreLoadFreelist = options.PreLoadFreelist
 	db.FreelistType = options.FreelistType
 	db.Mlock = options.Mlock
 
@@ -205,6 +214,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 	if options.ReadOnly {
 		flag = os.O_RDONLY
 		db.readOnly = true
+	} else {
+		// always load free pages in write mode
+		db.PreLoadFreelist = true
 	}
 
 	db.openFile = options.OpenFile
@@ -252,21 +264,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 			return nil, err
 		}
 	} else {
-		// Read the first meta page to determine the page size.
-		var buf [0x1000]byte
-		// If we can't read the page size, but can read a page, assume
-		// it's the same as the OS or one given -- since that's how the
-		// page size was chosen in the first place.
-		//
-		// If the first page is invalid and this OS uses a different
-		// page size than what the database was created with then we
-		// are out of luck and cannot access the database.
-		//
-		// TODO: scan for next page
-		if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
-			if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
-				db.pageSize = int(m.pageSize)
-			}
+		// try to get the page size from the metadata pages
+		if pgSize, err := db.getPageSize(); err == nil {
+			db.pageSize = pgSize
 		} else {
 			_ = db.close()
 			return nil, ErrInvalid
@@ -286,12 +286,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 		return nil, err
 	}
 
+	if db.PreLoadFreelist {
+		db.loadFreelist()
+	}
+
 	if db.readOnly {
 		return db, nil
 	}
 
-	db.loadFreelist()
-
 	// Flush freelist when transitioning from no sync to sync so
 	// NoFreelistSync unaware boltdb can open the db later.
 	if !db.NoFreelistSync && !db.hasSyncedFreelist() {
@@ -309,6 +311,96 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 	return db, nil
 }
 
+// getPageSize reads the pageSize from the meta pages. It tries the
+// first meta page first. If the first page is invalid, it then tries
+// to read the second meta page using the default page size.
+func (db *DB) getPageSize() (int, error) {
+	var (
+		meta0CanRead, meta1CanRead bool
+	)
+
+	// Read the first meta page to determine the page size.
+	if pgSize, canRead, err := db.getPageSizeFromFirstMeta(); err != nil {
+		// We cannot parse the page size from page 0, but page 0 may still be readable.
+		meta0CanRead = canRead
+	} else {
+		return pgSize, nil
+	}
+
+	// Read the second meta page to determine the page size.
+	if pgSize, canRead, err := db.getPageSizeFromSecondMeta(); err != nil {
+		// We cannot parse the page size from page 1, but page 1 may still be readable.
+		meta1CanRead = canRead
+	} else {
+		return pgSize, nil
+	}
+
+	// If we can't read the page size from both pages, but can read
+	// either page, then we assume it's the same as the OS or the one
+	// given, since that's how the page size was chosen in the first place.
+	//
+	// If both pages are invalid, and (this OS uses a different page size
+	// from what the database was created with or the given page size is
+	// different from what the database was created with), then we are out
+	// of luck and cannot access the database.
+	if meta0CanRead || meta1CanRead {
+		return db.pageSize, nil
+	}
+
+	return 0, ErrInvalid
+}
+
+// getPageSizeFromFirstMeta reads the pageSize from the first meta page
+func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) {
+	var buf [0x1000]byte
+	var metaCanRead bool
+	if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
+		metaCanRead = true
+		if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
+			return int(m.pageSize), metaCanRead, nil
+		}
+	}
+	return 0, metaCanRead, ErrInvalid
+}
+
+// getPageSizeFromSecondMeta reads the pageSize from the second meta page
+func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) {
+	var (
+		fileSize    int64
+		metaCanRead bool
+	)
+
+	// get the db file size
+	if info, err := db.file.Stat(); err != nil {
+		return 0, metaCanRead, err
+	} else {
+		fileSize = info.Size()
+	}
+
+	// We need to read the second meta page, so we have to skip the first page;
+	// but we don't know the exact page size yet, which is a chicken-and-egg
+	// problem. The solution is to try all possible page sizes, from 1KB
+	// up to 16MB (1024<<14), or until the end of the db file.
+	//
+	// TODO: should we support larger page size?
+	for i := 0; i <= 14; i++ {
+		var buf [0x1000]byte
+		var pos int64 = 1024 << uint(i)
+		if pos >= fileSize-1024 {
+			break
+		}
+		bw, err := db.file.ReadAt(buf[:], pos)
+		if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) {
+			metaCanRead = true
+			if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
+				return int(m.pageSize), metaCanRead, nil
+			}
+		}
+	}
+
+	return 0, metaCanRead, ErrInvalid
+}
+
 // loadFreelist reads the freelist if it is synced, or reconstructs it
 // by scanning the DB if it is not synced. It assumes there are no
 // concurrent accesses being made to the freelist.
@@ -372,6 +464,8 @@ func (db *DB) mmap(minsz int) error {
 	}
 
 	// Memory-map the data file as a byte slice.
+	// gofail: var mapError string
+	// return errors.New(mapError)
 	if err := mmap(db, size); err != nil {
 		return err
 	}
@@ -399,11 +493,25 @@ func (db *DB) mmap(minsz int) error {
 	return nil
 }
 
+func (db *DB) invalidate() {
+	db.dataref = nil
+	db.data = nil
+	db.datasz = 0
+
+	db.meta0 = nil
+	db.meta1 = nil
+}
+
 // munmap unmaps the data file from memory.
 func (db *DB) munmap() error {
+	defer db.invalidate()
+
+	// gofail: var unmapError string
+	// return errors.New(unmapError)
 	if err := munmap(db); err != nil {
 		return fmt.Errorf("unmap error: " + err.Error())
 	}
+
 	return nil
 }
 
@@ -552,7 +660,7 @@ func (db *DB) close() error {
 		if !db.readOnly {
 			// Unlock the file.
 			if err := funlock(db); err != nil {
-				log.Printf("bolt.Close(): funlock error: %s", err)
+				return fmt.Errorf("bolt.Close(): funlock error: %w", err)
 			}
 		}
 
@@ -609,6 +717,13 @@ func (db *DB) beginTx() (*Tx, error) {
 		return nil, ErrDatabaseNotOpen
 	}
 
+	// Exit if the database is not correctly mapped.
+	if db.data == nil {
+		db.mmaplock.RUnlock()
+		db.metalock.Unlock()
+		return nil, ErrInvalidMapping
+	}
+
 	// Create a transaction associated with the database.
 	t := &Tx{}
 	t.init(db)
@@ -650,6 +765,12 @@ func (db *DB) beginRWTx() (*Tx, error) {
 		return nil, ErrDatabaseNotOpen
 	}
 
+	// Exit if the database is not correctly mapped.
+	if db.data == nil {
+		db.rwlock.Unlock()
+		return nil, ErrInvalidMapping
+	}
+
 	// Create a transaction associated with the database.
 	t := &Tx{writable: true}
 	t.init(db)
@@ -924,6 +1045,7 @@ func (db *DB) Stats() Stats {
 // This is for internal access to the raw data bytes from the C cursor, use
 // carefully, or not at all.
 func (db *DB) Info() *Info {
+	_assert(db.data != nil, "database file isn't correctly mapped")
 	return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
 }
 
@@ -950,7 +1072,7 @@ func (db *DB) meta() *meta {
 		metaB = db.meta0
 	}
 
-	// Use higher meta page if valid. Otherwise fallback to previous, if valid.
+	// Use higher meta page if valid. Otherwise, fallback to previous, if valid.
 	if err := metaA.validate(); err == nil {
 		return metaA
 	} else if err := metaB.validate(); err == nil {
@@ -1003,7 +1125,7 @@ func (db *DB) grow(sz int) error {
 
 	// If the data is smaller than the alloc size then only allocate what's needed.
 	// Once it goes over the allocation size then allocate in chunks.
-	if db.datasz < db.AllocSize {
+	if db.datasz <= db.AllocSize {
 		sz = db.datasz
 	} else {
 		sz += db.AllocSize
@@ -1056,9 +1178,11 @@ func (db *DB) freepages() []pgid {
 			panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
 		}
 	}()
-	tx.checkBucket(&tx.root, reachable, nofreed, ech)
+	tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech)
 	close(ech)
 
+	// TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages.
+
 	var fids []pgid
 	for i := pgid(2); i < db.meta().pgid; i++ {
 		if _, ok := reachable[i]; !ok {
@@ -1082,8 +1206,13 @@ type Options struct {
 	// under normal operation, but requires a full database re-sync during recovery.
 	NoFreelistSync bool
 
+	// PreLoadFreelist sets whether to load the free pages when opening
+	// the db file. Note when opening db in write mode, bbolt will always
+	// load the free pages.
+	PreLoadFreelist bool
+
 	// FreelistType sets the backend freelist type. There are two options. Array which is simple but endures
-	// dramatic performance degradation if database is large and framentation in freelist is common.
+	// dramatic performance degradation if database is large and fragmentation in freelist is common.
 	// The alternative one is using hashmap, it is faster in almost all circumstances
 	// but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe.
 	// The default type is array
@@ -1187,7 +1316,7 @@ func (m *meta) validate() error {
 		return ErrInvalid
 	} else if m.version != version {
 		return ErrVersionMismatch
-	} else if m.checksum != 0 && m.checksum != m.sum64() {
+	} else if m.checksum != m.sum64() {
 		return ErrChecksum
 	}
 	return nil
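
As the Open changes above show, PreLoadFreelist only changes behavior for read-only opens; write mode force-sets it to true. A minimal sketch of opening an existing database read-only with the freelist preloaded (path and file mode are illustrative):

package main

import (
	bolt "go.etcd.io/bbolt"
)

// openReadOnlyPreloaded opens a database read-only and loads the free
// pages up front, paying the cost at open time instead of later.
func openReadOnlyPreloaded(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0400, &bolt.Options{
		ReadOnly:        true,
		PreLoadFreelist: true,
	})
}
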
diff --git a/db_test.go b/db_test.go
index 7af780b..9f1076f 100644
--- a/db_test.go
+++ b/db_test.go
@@ -4,25 +4,25 @@ import (
 	"bytes"
 	"encoding/binary"
 	"errors"
-	"flag"
 	"fmt"
 	"hash/fnv"
-	"io/ioutil"
 	"log"
 	"math/rand"
 	"os"
 	"path/filepath"
-	"regexp"
+	"reflect"
 	"sync"
 	"testing"
 	"time"
 	"unsafe"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
 	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
 )
 
-var statsFlag = flag.Bool("stats", false, "show performance stats")
-
 // pageSize is the size of one page in the data file.
 const pageSize = 4096
 
@@ -31,15 +31,15 @@ const pageHeaderSize = 16
 
 // meta represents a simplified version of a database meta page for testing.
 type meta struct {
-	magic    uint32
-	version  uint32
-	_        uint32
-	_        uint32
-	_        [16]byte
-	_        uint64
-	pgid     uint64
-	_        uint64
-	checksum uint64
+	_       uint32
+	version uint32
+	_       uint32
+	_       uint32
+	_       [16]byte
+	_       uint64
+	pgid    uint64
+	_       uint64
+	_       uint64
 }
 
 // Ensure that a database can be opened without error.
@@ -148,17 +148,16 @@ func TestOpen_ErrVersionMismatch(t *testing.T) {
 	}
 
 	// Create empty database.
-	db := MustOpenDB()
+	db := btesting.MustCreateDB(t)
 	path := db.Path()
-	defer db.MustClose()
 
 	// Close database.
-	if err := db.DB.Close(); err != nil {
+	if err := db.Close(); err != nil {
 		t.Fatal(err)
 	}
 
 	// Read data file.
-	buf, err := ioutil.ReadFile(path)
+	buf, err := os.ReadFile(path)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -168,7 +167,7 @@ func TestOpen_ErrVersionMismatch(t *testing.T) {
 	meta0.version++
 	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
 	meta1.version++
-	if err := ioutil.WriteFile(path, buf, 0666); err != nil {
+	if err := os.WriteFile(path, buf, 0666); err != nil {
 		t.Fatal(err)
 	}
 
@@ -185,17 +184,16 @@ func TestOpen_ErrChecksum(t *testing.T) {
 	}
 
 	// Create empty database.
-	db := MustOpenDB()
+	db := btesting.MustCreateDB(t)
 	path := db.Path()
-	defer db.MustClose()
 
 	// Close database.
-	if err := db.DB.Close(); err != nil {
+	if err := db.Close(); err != nil {
 		t.Fatal(err)
 	}
 
 	// Read data file.
-	buf, err := ioutil.ReadFile(path)
+	buf, err := os.ReadFile(path)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -205,7 +203,7 @@ func TestOpen_ErrChecksum(t *testing.T) {
 	meta0.pgid++
 	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
 	meta1.pgid++
-	if err := ioutil.WriteFile(path, buf, 0666); err != nil {
+	if err := os.WriteFile(path, buf, 0666); err != nil {
 		t.Fatal(err)
 	}
 
@@ -215,44 +213,93 @@ func TestOpen_ErrChecksum(t *testing.T) {
 	}
 }
 
+// Ensure that it can read the page size from the second meta page if the first one is invalid.
+// The page size is expected to be the OS's page size in this case.
+func TestOpen_ReadPageSize_FromMeta1_OS(t *testing.T) {
+	// Create empty database.
+	db := btesting.MustCreateDB(t)
+	path := db.Path()
+	// Close the database
+	db.MustClose()
+
+	// Read data file.
+	buf, err := os.ReadFile(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Rewrite first meta page.
+	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
+	meta0.pgid++
+	if err := os.WriteFile(path, buf, 0666); err != nil {
+		t.Fatal(err)
+	}
+
+	// Reopen data file.
+	db = btesting.MustOpenDBWithOption(t, path, nil)
+	require.Equalf(t, os.Getpagesize(), db.Info().PageSize, "check page size failed")
+}
+
+// Ensure that it can read the page size from the second meta page if the first one is invalid.
+// The page size is expected to be the given page size in this case.
+func TestOpen_ReadPageSize_FromMeta1_Given(t *testing.T) {
+	// test page size from 1KB (1024<<0) to 16MB(1024<<14)
+	for i := 0; i <= 14; i++ {
+		givenPageSize := 1024 << uint(i)
+		t.Logf("Testing page size %d", givenPageSize)
+		// Create empty database.
+		db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: givenPageSize})
+		path := db.Path()
+		// Close the database
+		db.MustClose()
+
+		// Read data file.
+		buf, err := os.ReadFile(path)
+		require.NoError(t, err)
+
+		// Rewrite meta pages.
+		if i%3 == 0 {
+			t.Logf("#%d: Intentionally corrupt the first meta page for pageSize %d", i, givenPageSize)
+			meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
+			meta0.pgid++
+			err = os.WriteFile(path, buf, 0666)
+			require.NoError(t, err)
+		}
+
+		// Reopen data file.
+		db = btesting.MustOpenDBWithOption(t, path, nil)
+		require.Equalf(t, givenPageSize, db.Info().PageSize, "check page size failed")
+		db.MustClose()
+	}
+}
+
 // Ensure that opening a database does not increase its size.
 // https://github.com/boltdb/bolt/issues/291
 func TestOpen_Size(t *testing.T) {
 	// Open a data file.
-	db := MustOpenDB()
-	path := db.Path()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	pagesize := db.Info().PageSize
 
 	// Insert until we get above the minimum 4MB size.
-	if err := db.Update(func(tx *bolt.Tx) error {
-		b, _ := tx.CreateBucketIfNotExists([]byte("data"))
-		for i := 0; i < 10000; i++ {
-			if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil {
-				t.Fatal(err)
-			}
-		}
-		return nil
-	}); err != nil {
+	err := db.Fill([]byte("data"), 1, 10000,
+		func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+		func(tx int, k int) []byte { return make([]byte, 1000) },
+	)
+	if err != nil {
 		t.Fatal(err)
 	}
 
-	// Close database and grab the size.
-	if err := db.DB.Close(); err != nil {
-		t.Fatal(err)
-	}
+	path := db.Path()
+	db.MustClose()
+
 	sz := fileSize(path)
 	if sz == 0 {
 		t.Fatalf("unexpected new file size: %d", sz)
 	}
 
-	// Reopen database, update, and check size again.
-	db0, err := bolt.Open(path, 0666, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := db0.Update(func(tx *bolt.Tx) error {
+	db.MustReopen()
+	if err := db.Update(func(tx *bolt.Tx) error {
 		if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil {
 			t.Fatal(err)
 		}
@@ -260,7 +307,7 @@ func TestOpen_Size(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	if err := db0.Close(); err != nil {
+	if err := db.Close(); err != nil {
 		t.Fatal(err)
 	}
 	newSz := fileSize(path)
@@ -283,9 +330,8 @@ func TestOpen_Size_Large(t *testing.T) {
 	}
 
 	// Open a data file.
-	db := MustOpenDB()
+	db := btesting.MustCreateDB(t)
 	path := db.Path()
-	defer db.MustClose()
 
 	pagesize := db.Info().PageSize
 
@@ -307,7 +353,7 @@ func TestOpen_Size_Large(t *testing.T) {
 	}
 
 	// Close database and grab the size.
-	if err := db.DB.Close(); err != nil {
+	if err := db.Close(); err != nil {
 		t.Fatal(err)
 	}
 	sz := fileSize(path)
@@ -465,8 +511,7 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
 // TestDB_Open_ReadOnly checks a database in read only mode can read but not write.
 func TestDB_Open_ReadOnly(t *testing.T) {
 	// Create a writable db, write k-v and close it.
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
@@ -480,11 +525,11 @@ func TestDB_Open_ReadOnly(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	if err := db.DB.Close(); err != nil {
+	if err := db.Close(); err != nil {
 		t.Fatal(err)
 	}
 
-	f := db.f
+	f := db.Path()
 	o := &bolt.Options{ReadOnly: true}
 	readOnlyDB, err := bolt.Open(f, 0666, o)
 	if err != nil {
@@ -521,13 +566,11 @@ func TestDB_Open_ReadOnly(t *testing.T) {
 func TestOpen_BigPage(t *testing.T) {
 	pageSize := os.Getpagesize()
 
-	db1 := MustOpenWithOption(&bolt.Options{PageSize: pageSize * 2})
-	defer db1.MustClose()
+	db1 := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize * 2})
 
-	db2 := MustOpenWithOption(&bolt.Options{PageSize: pageSize * 4})
-	defer db2.MustClose()
+	db2 := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize * 4})
 
-	if db1sz, db2sz := fileSize(db1.f), fileSize(db2.f); db1sz >= db2sz {
+	if db1sz, db2sz := fileSize(db1.Path()), fileSize(db2.Path()); db1sz >= db2sz {
 		t.Errorf("expected %d < %d", db1sz, db2sz)
 	}
 }
@@ -536,8 +579,7 @@ func TestOpen_BigPage(t *testing.T) {
 // write-out after no free list sync will recover the free list
 // and write it out.
 func TestOpen_RecoverFreeList(t *testing.T) {
-	db := MustOpenWithOption(&bolt.Options{NoFreelistSync: true})
-	defer db.MustClose()
+	db := btesting.MustCreateDBWithOption(t, &bolt.Options{NoFreelistSync: true})
 
 	// Write some pages.
 	tx, err := db.Begin(true)
@@ -576,9 +618,7 @@ func TestOpen_RecoverFreeList(t *testing.T) {
 	if err := tx.Commit(); err != nil {
 		t.Fatal(err)
 	}
-	if err := db.DB.Close(); err != nil {
-		t.Fatal(err)
-	}
+	db.MustClose()
 
 	// Record freelist count from opening with NoFreelistSync.
 	db.MustReopen()
@@ -586,12 +626,10 @@ func TestOpen_RecoverFreeList(t *testing.T) {
 	if freepages == 0 {
 		t.Fatalf("no free pages on NoFreelistSync reopen")
 	}
-	if err := db.DB.Close(); err != nil {
-		t.Fatal(err)
-	}
+	db.MustClose()
 
 	// Check free page count is reconstructed when opened with freelist sync.
-	db.o = &bolt.Options{}
+	db.SetOptions(&bolt.Options{})
 	db.MustReopen()
 	// One less free page for syncing the free list on open.
 	freepages--
@@ -610,49 +648,45 @@ func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) {
 
 // Ensure that a read-write transaction can be retrieved.
 func TestDB_BeginRW(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	tx, err := db.Begin(true)
-	if err != nil {
-		t.Fatal(err)
-	} else if tx == nil {
-		t.Fatal("expected tx")
-	}
+	require.NoError(t, err)
+	require.NotNil(t, tx, "expected tx")
+	defer func() { require.NoError(t, tx.Commit()) }()
 
-	if tx.DB() != db.DB {
-		t.Fatal("unexpected tx database")
-	} else if !tx.Writable() {
-		t.Fatal("expected writable tx")
-	}
-
-	if err := tx.Commit(); err != nil {
-		t.Fatal(err)
-	}
+	require.True(t, tx.Writable(), "expected writable tx")
+	require.Same(t, db.DB, tx.DB())
 }
 
 // TestDB_Concurrent_WriteTo checks that issuing WriteTo operations concurrently
 // with commits does not produce corrupted db files.
 func TestDB_Concurrent_WriteTo(t *testing.T) {
 	o := &bolt.Options{NoFreelistSync: false}
-	db := MustOpenWithOption(o)
-	defer db.MustClose()
+	db := btesting.MustCreateDBWithOption(t, o)
 
 	var wg sync.WaitGroup
 	wtxs, rtxs := 5, 5
 	wg.Add(wtxs * rtxs)
 	f := func(tx *bolt.Tx) {
 		defer wg.Done()
-		f, err := ioutil.TempFile("", "bolt-")
+		f, err := os.CreateTemp("", "bolt-")
 		if err != nil {
 			panic(err)
 		}
 		time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond)
-		tx.WriteTo(f)
-		tx.Rollback()
+		_, err = tx.WriteTo(f)
+		if err != nil {
+			panic(err)
+		}
+		err = tx.Rollback()
+		if err != nil {
+			panic(err)
+		}
 		f.Close()
-		snap := &DB{nil, f.Name(), o}
-		snap.MustReopen()
+
+		copyOpt := *o
+		snap := btesting.MustOpenDBWithOption(t, f.Name(), &copyOpt)
 		defer snap.MustClose()
 		snap.MustCheck()
 	}
@@ -703,7 +737,7 @@ func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false)
 
 // Ensure that a database cannot close while transactions are open.
 func testDB_Close_PendingTx(t *testing.T, writable bool) {
-	db := MustOpenDB()
+	db := btesting.MustCreateDB(t)
 
 	// Start transaction.
 	tx, err := db.Begin(writable)
@@ -753,8 +787,7 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) {
 
 // Ensure a database can provide a transactional block.
 func TestDB_Update(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -802,8 +835,7 @@ func TestDB_Update_Closed(t *testing.T) {
 
 // Ensure a panic occurs while trying to commit a managed transaction.
 func TestDB_Update_ManualCommit(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var panicked bool
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -828,8 +860,7 @@ func TestDB_Update_ManualCommit(t *testing.T) {
 
 // Ensure a panic occurs while trying to rollback a managed transaction.
 func TestDB_Update_ManualRollback(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var panicked bool
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -854,8 +885,7 @@ func TestDB_Update_ManualRollback(t *testing.T) {
 
 // Ensure a panic occurs while trying to commit a managed transaction.
 func TestDB_View_ManualCommit(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var panicked bool
 	if err := db.View(func(tx *bolt.Tx) error {
@@ -880,8 +910,7 @@ func TestDB_View_ManualCommit(t *testing.T) {
 
 // Ensure a panic occurs while trying to rollback a managed transaction.
 func TestDB_View_ManualRollback(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var panicked bool
 	if err := db.View(func(tx *bolt.Tx) error {
@@ -906,8 +935,7 @@ func TestDB_View_ManualRollback(t *testing.T) {
 
 // Ensure a write transaction that panics does not hold open locks.
 func TestDB_Update_Panic(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Panic during update but recover.
 	func() {
@@ -950,8 +978,7 @@ func TestDB_Update_Panic(t *testing.T) {
 
 // Ensure a database can return an error through a read-only transactional block.
 func TestDB_View_Error(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.View(func(tx *bolt.Tx) error {
 		return errors.New("xxx")
@@ -962,8 +989,7 @@ func TestDB_View_Error(t *testing.T) {
 
 // Ensure a read transaction that panics does not hold open locks.
 func TestDB_View_Panic(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
@@ -1005,8 +1031,7 @@ func TestDB_View_Panic(t *testing.T) {
 
 // Ensure that DB stats can be returned.
 func TestDB_Stats(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		return err
@@ -1015,8 +1040,8 @@ func TestDB_Stats(t *testing.T) {
 	}
 
 	stats := db.Stats()
-	if stats.TxStats.PageCount != 2 {
-		t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount)
+	if stats.TxStats.GetPageCount() != 2 {
+		t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.GetPageCount())
 	} else if stats.FreePageN != 0 {
 		t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN)
 	} else if stats.PendingPageN != 2 {
@@ -1026,8 +1051,7 @@ func TestDB_Stats(t *testing.T) {
 
 // Ensure that database pages are in expected order and type.
 func TestDB_Consistency(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		return err
@@ -1100,8 +1124,8 @@ func TestDBStats_Sub(t *testing.T) {
 	b.TxStats.PageCount = 10
 	b.FreePageN = 14
 	diff := b.Sub(&a)
-	if diff.TxStats.PageCount != 7 {
-		t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount)
+	if diff.TxStats.GetPageCount() != 7 {
+		t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.GetPageCount())
 	}
 
 	// free page stats are copied from the receiver and not subtracted
@@ -1112,8 +1136,7 @@ func TestDBStats_Sub(t *testing.T) {
 
 // Ensure two functions can perform updates in a single batch.
 func TestDB_Batch(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
@@ -1157,8 +1180,7 @@ func TestDB_Batch(t *testing.T) {
 }
 
 func TestDB_Batch_Panic(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var sentinel int
 	var bork = &sentinel
@@ -1188,8 +1210,7 @@ func TestDB_Batch_Panic(t *testing.T) {
 }
 
 func TestDB_BatchFull(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		return err
@@ -1247,8 +1268,7 @@ func TestDB_BatchFull(t *testing.T) {
 }
 
 func TestDB_BatchTime(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		return err
@@ -1293,6 +1313,28 @@ func TestDB_BatchTime(t *testing.T) {
 	}
 }
 
+// TestDBUnmap verifies that `dataref`, `data` and `datasz` are reset
+// to their zero values after unmapping the db.
+func TestDBUnmap(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+
+	require.NoError(t, db.DB.Close())
+
+	// Ignore the following error:
+	// Error: copylocks: call of reflect.ValueOf copies lock value: go.etcd.io/bbolt.DB contains sync.Once contains sync.Mutex (govet)
+	//nolint:govet
+	v := reflect.ValueOf(*db.DB)
+	dataref := v.FieldByName("dataref")
+	data := v.FieldByName("data")
+	datasz := v.FieldByName("datasz")
+	assert.True(t, dataref.IsNil())
+	assert.True(t, data.IsNil())
+	assert.True(t, datasz.IsZero())
+
+	// Set db.DB to nil to prevent MustCheck from panicking.
+	db.DB = nil
+}
+
 func ExampleDB_Update() {
 	// Open the database.
 	db, err := bolt.Open(tempfile(), 0666, nil)
@@ -1436,8 +1478,8 @@ func ExampleDB_Begin() {
 }
 
 func BenchmarkDBBatchAutomatic(b *testing.B) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(b)
+
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("bench"))
 		return err
@@ -1481,8 +1523,7 @@ func BenchmarkDBBatchAutomatic(b *testing.B) {
 }
 
 func BenchmarkDBBatchSingle(b *testing.B) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(b)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("bench"))
 		return err
@@ -1525,8 +1566,7 @@ func BenchmarkDBBatchSingle(b *testing.B) {
 }
 
 func BenchmarkDBBatchManual10x100(b *testing.B) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(b)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("bench"))
 		return err
@@ -1579,7 +1619,7 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
 	validateBatchBench(b, db)
 }
 
-func validateBatchBench(b *testing.B, db *DB) {
+func validateBatchBench(b *testing.B, db *btesting.DB) {
 	var rollback = errors.New("sentinel error to cause rollback")
 	validate := func(tx *bolt.Tx) error {
 		bucket := tx.Bucket([]byte("bench"))
@@ -1614,140 +1654,9 @@ func validateBatchBench(b *testing.B, db *DB) {
 	}
 }
 
-// DB is a test wrapper for bolt.DB.
-type DB struct {
-	*bolt.DB
-	f string
-	o *bolt.Options
-}
-
-// MustOpenDB returns a new, open DB at a temporary location.
-func MustOpenDB() *DB {
-	return MustOpenWithOption(nil)
-}
-
-// MustOpenDBWithOption returns a new, open DB at a temporary location with given options.
-func MustOpenWithOption(o *bolt.Options) *DB {
-	f := tempfile()
-	if o == nil {
-		o = bolt.DefaultOptions
-	}
-
-	freelistType := bolt.FreelistArrayType
-	if env := os.Getenv(bolt.TestFreelistType); env == string(bolt.FreelistMapType) {
-		freelistType = bolt.FreelistMapType
-	}
-	o.FreelistType = freelistType
-
-	db, err := bolt.Open(f, 0666, o)
-	if err != nil {
-		panic(err)
-	}
-	return &DB{
-		DB: db,
-		f:  f,
-		o:  o,
-	}
-}
-
-// Close closes the database and deletes the underlying file.
-func (db *DB) Close() error {
-	// Log statistics.
-	if *statsFlag {
-		db.PrintStats()
-	}
-
-	// Check database consistency after every test.
-	db.MustCheck()
-
-	// Close database and remove file.
-	defer os.Remove(db.Path())
-	return db.DB.Close()
-}
-
-// MustClose closes the database and deletes the underlying file. Panic on error.
-func (db *DB) MustClose() {
-	if err := db.Close(); err != nil {
-		panic(err)
-	}
-}
-
-// MustReopen reopen the database. Panic on error.
-func (db *DB) MustReopen() {
-	indb, err := bolt.Open(db.f, 0666, db.o)
-	if err != nil {
-		panic(err)
-	}
-	db.DB = indb
-}
-
-// PrintStats prints the database stats
-func (db *DB) PrintStats() {
-	var stats = db.Stats()
-	fmt.Printf("[db] %-20s %-20s %-20s\n",
-		fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
-		fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
-		fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
-	)
-	fmt.Printf("     %-20s %-20s %-20s\n",
-		fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
-		fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
-		fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
-	)
-}
-
-// MustCheck runs a consistency check on the database and panics if any errors are found.
-func (db *DB) MustCheck() {
-	if err := db.Update(func(tx *bolt.Tx) error {
-		// Collect all the errors.
-		var errors []error
-		for err := range tx.Check() {
-			errors = append(errors, err)
-			if len(errors) > 10 {
-				break
-			}
-		}
-
-		// If errors occurred, copy the DB and print the errors.
-		if len(errors) > 0 {
-			var path = tempfile()
-			if err := tx.CopyFile(path, 0600); err != nil {
-				panic(err)
-			}
-
-			// Print errors.
-			fmt.Print("\n\n")
-			fmt.Printf("consistency check failed (%d errors)\n", len(errors))
-			for _, err := range errors {
-				fmt.Println(err)
-			}
-			fmt.Println("")
-			fmt.Println("db saved to:")
-			fmt.Println(path)
-			fmt.Print("\n\n")
-			os.Exit(-1)
-		}
-
-		return nil
-	}); err != nil && err != bolt.ErrDatabaseNotOpen {
-		panic(err)
-	}
-}
-
-// CopyTempFile copies a database to a temporary file.
-func (db *DB) CopyTempFile() {
-	path := tempfile()
-	if err := db.View(func(tx *bolt.Tx) error {
-		return tx.CopyFile(path, 0600)
-	}); err != nil {
-		panic(err)
-	}
-	fmt.Println("db copied to: ", path)
-}
-
 // tempfile returns a temporary file path.
 func tempfile() string {
-	f, err := ioutil.TempFile("", "bolt-")
+	f, err := os.CreateTemp("", "bolt-")
 	if err != nil {
 		panic(err)
 	}
@@ -1767,10 +1676,6 @@ func trunc(b []byte, length int) []byte {
 	return b
 }
 
-func truncDuration(d time.Duration) string {
-	return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
-}
-
 func fileSize(path string) int64 {
 	fi, err := os.Stat(path)
 	if err != nil {
diff --git a/db_whitebox_test.go b/db_whitebox_test.go
new file mode 100644
index 0000000..eb95155
--- /dev/null
+++ b/db_whitebox_test.go
@@ -0,0 +1,124 @@
+package bbolt
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestOpenWithPreLoadFreelist(t *testing.T) {
+	testCases := []struct {
+		name                    string
+		readonly                bool
+		preLoadFreePage         bool
+		expectedFreePagesLoaded bool
+	}{
+		{
+			name:                    "write mode always load free pages",
+			readonly:                false,
+			preLoadFreePage:         false,
+			expectedFreePagesLoaded: true,
+		},
+		{
+			name:                    "readonly mode load free pages when flag set",
+			readonly:                true,
+			preLoadFreePage:         true,
+			expectedFreePagesLoaded: true,
+		},
+		{
+			name:                    "readonly mode doesn't load free pages when flag not set",
+			readonly:                true,
+			preLoadFreePage:         false,
+			expectedFreePagesLoaded: false,
+		},
+	}
+
+	fileName, err := prepareData(t)
+	require.NoError(t, err)
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			db, err := Open(fileName, 0666, &Options{
+				ReadOnly:        tc.readonly,
+				PreLoadFreelist: tc.preLoadFreePage,
+			})
+			require.NoError(t, err)
+
+			assert.Equal(t, tc.expectedFreePagesLoaded, db.freelist != nil)
+
+			assert.NoError(t, db.Close())
+		})
+	}
+}
+
+func TestMethodPage(t *testing.T) {
+	testCases := []struct {
+		name            string
+		readonly        bool
+		preLoadFreePage bool
+		expectedError   error
+	}{
+		{
+			name:            "write mode",
+			readonly:        false,
+			preLoadFreePage: false,
+			expectedError:   nil,
+		},
+		{
+			name:            "readonly mode with preloading free pages",
+			readonly:        true,
+			preLoadFreePage: true,
+			expectedError:   nil,
+		},
+		{
+			name:            "readonly mode without preloading free pages",
+			readonly:        true,
+			preLoadFreePage: false,
+			expectedError:   ErrFreePagesNotLoaded,
+		},
+	}
+
+	fileName, err := prepareData(t)
+	require.NoError(t, err)
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			db, err := Open(fileName, 0666, &Options{
+				ReadOnly:        tc.readonly,
+				PreLoadFreelist: tc.preLoadFreePage,
+			})
+			require.NoError(t, err)
+			defer db.Close()
+
+			tx, err := db.Begin(!tc.readonly)
+			require.NoError(t, err)
+
+			_, err = tx.Page(0)
+			require.Equal(t, tc.expectedError, err)
+
+			if tc.readonly {
+				require.NoError(t, tx.Rollback())
+			} else {
+				require.NoError(t, tx.Commit())
+			}
+
+			require.NoError(t, db.Close())
+		})
+	}
+}
+
+func prepareData(t *testing.T) (string, error) {
+	fileName := filepath.Join(t.TempDir(), "db")
+	db, err := Open(fileName, 0666, nil)
+	if err != nil {
+		return "", err
+	}
+	if err := db.Close(); err != nil {
+		return "", err
+	}
+
+	return fileName, nil
+}
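The `PreLoadFreelist` option exercised by `TestOpenWithPreLoadFreelist` above is new in this release: a read-only open no longer loads the freelist unless asked to. A minimal sketch of how a consumer might opt in (the file path is a placeholder, not from this diff):

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Read-only open; PreLoadFreelist makes free-page info (e.g. Tx.Page)
	// available without taking a write transaction.
	db, err := bolt.Open("my.db", 0666, &bolt.Options{
		ReadOnly:        true,
		PreLoadFreelist: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```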
diff --git a/debian/changelog b/debian/changelog
index 5eb3881..5fb9cb2 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+golang-github-coreos-bbolt (1.3.7-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sat, 06 May 2023 17:55:24 -0000
+
 golang-github-coreos-bbolt (1.3.6-2) unstable; urgency=medium
 
   * Team upload
diff --git a/debian/patches/0001-Increase-write-waiting-time-when-testing.patch b/debian/patches/0001-Increase-write-waiting-time-when-testing.patch
index e52c72f..4ed9432 100644
--- a/debian/patches/0001-Increase-write-waiting-time-when-testing.patch
+++ b/debian/patches/0001-Increase-write-waiting-time-when-testing.patch
@@ -8,11 +8,11 @@ Bug-Debian: https://bugs.debian.org/832834
  db_test.go | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)
 
-diff --git a/db_test.go b/db_test.go
-index 7af780b..aa7ad5e 100644
---- a/db_test.go
-+++ b/db_test.go
-@@ -449,7 +449,7 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
+Index: golang-github-coreos-bbolt.git/db_test.go
+===================================================================
+--- golang-github-coreos-bbolt.git.orig/db_test.go
++++ golang-github-coreos-bbolt.git/db_test.go
+@@ -495,7 +495,7 @@ func TestDB_Open_InitialMmapSize(t *test
  	}()
  
  	select {
diff --git a/debian/patches/0002-Skip-OOM-tests.patch b/debian/patches/0002-Skip-OOM-tests.patch
index 82b6c47..44cb0b0 100644
--- a/debian/patches/0002-Skip-OOM-tests.patch
+++ b/debian/patches/0002-Skip-OOM-tests.patch
@@ -7,11 +7,11 @@ Subject: Skip OOM tests
  db_test.go     | 1 +
  2 files changed, 2 insertions(+)
 
-diff --git a/bucket_test.go b/bucket_test.go
-index 2ac9263..1c00fe5 100644
---- a/bucket_test.go
-+++ b/bucket_test.go
-@@ -1161,6 +1161,7 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) {
+Index: golang-github-coreos-bbolt.git/bucket_test.go
+===================================================================
+--- golang-github-coreos-bbolt.git.orig/bucket_test.go
++++ golang-github-coreos-bbolt.git/bucket_test.go
+@@ -1203,6 +1203,7 @@ func TestBucket_Put_KeyTooLarge(t *testi
  
  // Ensure that an error is returned when inserting a value that's too large.
  func TestBucket_Put_ValueTooLarge(t *testing.T) {
@@ -19,11 +19,11 @@ index 2ac9263..1c00fe5 100644
  	// Skip this test on DroneCI because the machine is resource constrained.
  	if os.Getenv("DRONE") == "true" {
  		t.Skip("not enough RAM for test")
-diff --git a/db_test.go b/db_test.go
-index aa7ad5e..47ecec4 100644
---- a/db_test.go
-+++ b/db_test.go
-@@ -406,6 +406,7 @@ func TestOpen_FileTooSmall(t *testing.T) {
+Index: golang-github-coreos-bbolt.git/db_test.go
+===================================================================
+--- golang-github-coreos-bbolt.git.orig/db_test.go
++++ golang-github-coreos-bbolt.git/db_test.go
+@@ -452,6 +452,7 @@ func TestOpen_FileTooSmall(t *testing.T)
  // read transaction blocks the write transaction and causes deadlock.
  // This is a very hacky test since the mmap size is not exposed.
  func TestDB_Open_InitialMmapSize(t *testing.T) {
diff --git a/doc.go b/doc.go
index 95f25f0..d1007e4 100644
--- a/doc.go
+++ b/doc.go
@@ -14,8 +14,7 @@ The design of Bolt is based on Howard Chu's LMDB database project.
 
 Bolt currently works on Windows, Mac OS X, and Linux.
 
-
-Basics
+# Basics
 
 There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
 a collection of buckets and is represented by a single file on disk. A bucket is
@@ -27,8 +26,7 @@ iterate over the dataset sequentially. Read-write transactions can create and
 delete buckets and can insert and remove keys. Only one read-write transaction
 is allowed at a time.
 
-
-Caveats
+# Caveats
 
 The database uses a read-only, memory-mapped data file to ensure that
 applications cannot corrupt the database, however, this means that keys and
@@ -38,7 +36,5 @@ will cause Go to panic.
 Keys and values retrieved from the database are only valid for the life of
 the transaction. When used outside the transaction, these byte slices can
 point to different data or can point to invalid memory which will cause a panic.
-
-
 */
 package bbolt
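The `# Basics` / `# Caveats` change above adopts the Go 1.19 doc comment syntax, where a line starting with `# ` renders as a heading; the old convention was a bare line set off by blank lines. A tiny illustration (not from this package):

```go
// Package example demonstrates Go 1.19 doc comment headings.
//
// # Usage
//
// Lines beginning with "# " are rendered as headings by go doc and pkgsite.
package example
```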
diff --git a/errors.go b/errors.go
index 48758ca..f2c3b20 100644
--- a/errors.go
+++ b/errors.go
@@ -16,6 +16,9 @@ var (
 	// This typically occurs when a file is not a bolt database.
 	ErrInvalid = errors.New("invalid database")
 
+	// ErrInvalidMapping is returned when the database file fails to get mapped.
+	ErrInvalidMapping = errors.New("database isn't correctly mapped")
+
 	// ErrVersionMismatch is returned when the data file was created with a
 	// different version of Bolt.
 	ErrVersionMismatch = errors.New("version mismatch")
@@ -41,6 +44,10 @@ var (
 	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
 	// read-only database.
 	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+
+	// ErrFreePagesNotLoaded is returned when a readonly transaction that did
+	// not preload the free pages tries to access the free pages.
+	ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded")
 )
 
 // These errors can occur when putting or deleting a value or a bucket.
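`ErrFreePagesNotLoaded` is returned by `Tx.Page` (see the tx.go hunk below) when a read-only transaction was opened without `PreLoadFreelist`. A hedged sketch of handling it, assuming `db` is an open `*bolt.DB` and the usual `errors`/`log` imports:

```go
tx, err := db.Begin(false) // read-only transaction
if err != nil {
	log.Fatal(err)
}
defer func() { _ = tx.Rollback() }()

if _, err := tx.Page(0); errors.Is(err, bolt.ErrFreePagesNotLoaded) {
	// Reopen with Options{PreLoadFreelist: true} if page info is required.
	log.Println("free pages not pre-loaded; page info unavailable")
}
```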
diff --git a/freelist.go b/freelist.go
index 697a469..50f2d0e 100644
--- a/freelist.go
+++ b/freelist.go
@@ -24,7 +24,7 @@ type freelist struct {
 	ids            []pgid                      // all free and available free page ids.
 	allocs         map[pgid]txid               // mapping of txid that allocated a pgid.
 	pending        map[txid]*txPending         // mapping of soon-to-be free page ids by tx.
-	cache          map[pgid]bool               // fast lookup of all free and pending page ids.
+	cache          map[pgid]struct{}           // fast lookup of all free and pending page ids.
 	freemaps       map[uint64]pidSet           // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
 	forwardMap     map[pgid]uint64             // key is start pgid, value is its span size
 	backwardMap    map[pgid]uint64             // key is end pgid, value is its span size
@@ -41,7 +41,7 @@ func newFreelist(freelistType FreelistType) *freelist {
 		freelistType: freelistType,
 		allocs:       make(map[pgid]txid),
 		pending:      make(map[txid]*txPending),
-		cache:        make(map[pgid]bool),
+		cache:        make(map[pgid]struct{}),
 		freemaps:     make(map[uint64]pidSet),
 		forwardMap:   make(map[pgid]uint64),
 		backwardMap:  make(map[pgid]uint64),
@@ -171,13 +171,13 @@ func (f *freelist) free(txid txid, p *page) {
 
 	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
 		// Verify that page is not already free.
-		if f.cache[id] {
+		if _, ok := f.cache[id]; ok {
 			panic(fmt.Sprintf("page %d already freed", id))
 		}
 		// Add to the freelist and cache.
 		txp.ids = append(txp.ids, id)
 		txp.alloctx = append(txp.alloctx, allocTxid)
-		f.cache[id] = true
+		f.cache[id] = struct{}{}
 	}
 }
 
@@ -256,8 +256,9 @@ func (f *freelist) rollback(txid txid) {
 }
 
 // freed returns whether a given page is in the free list.
-func (f *freelist) freed(pgid pgid) bool {
-	return f.cache[pgid]
+func (f *freelist) freed(pgId pgid) bool {
+	_, ok := f.cache[pgId]
+	return ok
 }
 
 // read initializes the freelist from a freelist page.
@@ -386,13 +387,13 @@ func (f *freelist) noSyncReload(pgids []pgid) {
 // reindex rebuilds the free cache based on available and pending free lists.
 func (f *freelist) reindex() {
 	ids := f.getFreePageIDs()
-	f.cache = make(map[pgid]bool, len(ids))
+	f.cache = make(map[pgid]struct{}, len(ids))
 	for _, id := range ids {
-		f.cache[id] = true
+		f.cache[id] = struct{}{}
 	}
 	for _, txp := range f.pending {
 		for _, pendingID := range txp.ids {
-			f.cache[pendingID] = true
+			f.cache[pendingID] = struct{}{}
 		}
 	}
 }
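Switching `cache` from `map[pgid]bool` to `map[pgid]struct{}` is the standard Go set idiom: the empty struct occupies zero bytes, so the map stores keys only and membership is tested with the comma-ok form. A generic illustration (not bbolt code):

```go
set := make(map[uint64]struct{})
set[42] = struct{}{} // add
if _, ok := set[42]; ok {
	// 42 is a member
}
delete(set, 42) // remove
```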
diff --git a/go.mod b/go.mod
index 96355a6..511a392 100644
--- a/go.mod
+++ b/go.mod
@@ -1,5 +1,15 @@
 module go.etcd.io/bbolt
 
-go 1.12
+go 1.17
 
-require golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d
+require (
+	github.com/stretchr/testify v1.8.1
+	go.etcd.io/gofail v0.1.0
+	golang.org/x/sys v0.4.0
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/go.sum b/go.sum
index c13f8f4..f0f96bf 100644
--- a/go.sum
+++ b/go.sum
@@ -1,2 +1,21 @@
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d h1:L/IKR6COd7ubZrs2oTnTi73IhgqJ71c9s80WsQnh0Es=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg=
+go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
+golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go
new file mode 100644
index 0000000..b305072
--- /dev/null
+++ b/internal/btesting/btesting.go
@@ -0,0 +1,205 @@
+package btesting
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	bolt "go.etcd.io/bbolt"
+)
+
+var statsFlag = flag.Bool("stats", false, "show performance stats")
+
+// TestFreelistType is used as an env variable for tests to indicate the backend type.
+const TestFreelistType = "TEST_FREELIST_TYPE"
+
+// DB is a test wrapper for bolt.DB.
+type DB struct {
+	*bolt.DB
+	f string
+	o *bolt.Options
+	t testing.TB
+}
+
+// MustCreateDB returns a new, open DB at a temporary location.
+func MustCreateDB(t testing.TB) *DB {
+	return MustCreateDBWithOption(t, nil)
+}
+
+// MustCreateDBWithOption returns a new, open DB at a temporary location with given options.
+func MustCreateDBWithOption(t testing.TB, o *bolt.Options) *DB {
+	f := filepath.Join(t.TempDir(), "db")
+	return MustOpenDBWithOption(t, f, o)
+}
+
+func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB {
+	t.Logf("Opening bbolt DB at: %s", f)
+	if o == nil {
+		o = bolt.DefaultOptions
+	}
+
+	freelistType := bolt.FreelistArrayType
+	if env := os.Getenv(TestFreelistType); env == string(bolt.FreelistMapType) {
+		freelistType = bolt.FreelistMapType
+	}
+
+	o.FreelistType = freelistType
+
+	db, err := bolt.Open(f, 0666, o)
+	require.NoError(t, err)
+	resDB := &DB{
+		DB: db,
+		f:  f,
+		o:  o,
+		t:  t,
+	}
+	t.Cleanup(resDB.PostTestCleanup)
+	return resDB
+}
+
+func (db *DB) PostTestCleanup() {
+	// Check database consistency after every test.
+	if db.DB != nil {
+		db.MustCheck()
+		db.MustClose()
+	}
+}
+
+// Close closes the database but does NOT delete the underlying file.
+func (db *DB) Close() error {
+	if db.DB != nil {
+		// Log statistics.
+		if *statsFlag {
+			db.PrintStats()
+		}
+		db.t.Logf("Closing bbolt DB at: %s", db.f)
+		err := db.DB.Close()
+		if err != nil {
+			return err
+		}
+		db.DB = nil
+	}
+	return nil
+}
+
+// MustClose closes the database but does NOT delete the underlying file.
+func (db *DB) MustClose() {
+	err := db.Close()
+	require.NoError(db.t, err)
+}
+
+func (db *DB) MustDeleteFile() {
+	err := os.Remove(db.Path())
+	require.NoError(db.t, err)
+}
+
+func (db *DB) SetOptions(o *bolt.Options) {
+	db.o = o
+}
+
+// MustReopen reopens the database. Fails the test on error.
+func (db *DB) MustReopen() {
+	if db.DB != nil {
+		panic("Please call Close() before MustReopen()")
+	}
+	db.t.Logf("Reopening bbolt DB at: %s", db.f)
+	indb, err := bolt.Open(db.Path(), 0666, db.o)
+	require.NoError(db.t, err)
+	db.DB = indb
+}
+
+// MustCheck runs a consistency check on the database and aborts the test run if any errors are found.
+func (db *DB) MustCheck() {
+	err := db.Update(func(tx *bolt.Tx) error {
+		// Collect all the errors.
+		var errors []error
+		for err := range tx.Check() {
+			errors = append(errors, err)
+			if len(errors) > 10 {
+				break
+			}
+		}
+
+		// If errors occurred, copy the DB and print the errors.
+		if len(errors) > 0 {
+			var path = filepath.Join(db.t.TempDir(), "db.backup")
+			err := tx.CopyFile(path, 0600)
+			require.NoError(db.t, err)
+
+			// Print errors.
+			fmt.Print("\n\n")
+			fmt.Printf("consistency check failed (%d errors)\n", len(errors))
+			for _, err := range errors {
+				fmt.Println(err)
+			}
+			fmt.Println("")
+			fmt.Println("db saved to:")
+			fmt.Println(path)
+			fmt.Print("\n\n")
+			os.Exit(-1)
+		}
+
+		return nil
+	})
+	require.NoError(db.t, err)
+}
+
+// Fill fills the DB using numTx transactions, writing numKeysPerTx keys in each.
+func (db *DB) Fill(bucket []byte, numTx int, numKeysPerTx int,
+	keyGen func(tx int, key int) []byte,
+	valueGen func(tx int, key int) []byte) error {
+	for tr := 0; tr < numTx; tr++ {
+		err := db.Update(func(tx *bolt.Tx) error {
+			b, _ := tx.CreateBucketIfNotExists(bucket)
+			for i := 0; i < numKeysPerTx; i++ {
+				if err := b.Put(keyGen(tr, i), valueGen(tr, i)); err != nil {
+					return err
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (db *DB) Path() string {
+	return db.f
+}
+
+// CopyTempFile copies a database to a temporary file.
+func (db *DB) CopyTempFile() {
+	path := filepath.Join(db.t.TempDir(), "db.copy")
+	err := db.View(func(tx *bolt.Tx) error {
+		return tx.CopyFile(path, 0600)
+	})
+	require.NoError(db.t, err)
+	fmt.Println("db copied to: ", path)
+}
+
+// PrintStats prints the database stats
+func (db *DB) PrintStats() {
+	var stats = db.Stats()
+	fmt.Printf("[db] %-20s %-20s %-20s\n",
+		fmt.Sprintf("pg(%d/%d)", stats.TxStats.GetPageCount(), stats.TxStats.GetPageAlloc()),
+		fmt.Sprintf("cur(%d)", stats.TxStats.GetCursorCount()),
+		fmt.Sprintf("node(%d/%d)", stats.TxStats.GetNodeCount(), stats.TxStats.GetNodeDeref()),
+	)
+	fmt.Printf("     %-20s %-20s %-20s\n",
+		fmt.Sprintf("rebal(%d/%v)", stats.TxStats.GetRebalance(), truncDuration(stats.TxStats.GetRebalanceTime())),
+		fmt.Sprintf("spill(%d/%v)", stats.TxStats.GetSpill(), truncDuration(stats.TxStats.GetSpillTime())),
+		fmt.Sprintf("w(%d/%v)", stats.TxStats.GetWrite(), truncDuration(stats.TxStats.GetWriteTime())),
+	)
+}
+
+func truncDuration(d time.Duration) string {
+	return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
+}
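The new `internal/btesting` package replaces the ad-hoc test wrapper deleted from db_test.go above; databases are now consistency-checked and closed automatically via `t.Cleanup` instead of `defer db.MustClose()`. A minimal sketch of a test in the new style (only bbolt's own tests can import it, since the package is internal):

```go
package example_test

import (
	"testing"

	bolt "go.etcd.io/bbolt"
	"go.etcd.io/bbolt/internal/btesting"
)

func TestExample(t *testing.T) {
	db := btesting.MustCreateDB(t) // checked and closed via t.Cleanup
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	}); err != nil {
		t.Fatal(err)
	}
}
```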
diff --git a/internal/guts_cli/guts_cli.go b/internal/guts_cli/guts_cli.go
new file mode 100644
index 0000000..30e5566
--- /dev/null
+++ b/internal/guts_cli/guts_cli.go
@@ -0,0 +1,351 @@
+package guts_cli
+
+// Low-level access to pages / data structures of the bbolt file.
+
+// TODO(ptab): Merge with bbolt/page file that should get ported to internal.
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"unsafe"
+)
+
+var (
+	// ErrCorrupt is returned when checking a data file finds errors.
+	ErrCorrupt = errors.New("invalid value")
+)
+
+// PageHeaderSize represents the size of the bolt.Page header.
+const PageHeaderSize = 16
+
+// Represents a marker value to indicate that a file (Meta Page) is a Bolt DB.
+const magic uint32 = 0xED0CDAED
+
+// DO NOT EDIT. Copied from the "bolt" package.
+const maxAllocSize = 0xFFFFFFF
+
+// DO NOT EDIT. Copied from the "bolt" package.
+const (
+	branchPageFlag   = 0x01
+	leafPageFlag     = 0x02
+	metaPageFlag     = 0x04
+	freelistPageFlag = 0x10
+)
+
+// DO NOT EDIT. Copied from the "bolt" package.
+const bucketLeafFlag = 0x01
+
+// DO NOT EDIT. Copied from the "bolt" package.
+type Pgid uint64
+
+// DO NOT EDIT. Copied from the "bolt" package.
+type txid uint64
+
+// DO NOT EDIT. Copied from the "bolt" package.
+type Meta struct {
+	magic    uint32
+	version  uint32
+	pageSize uint32
+	flags    uint32
+	root     Bucket
+	freelist Pgid
+	pgid     Pgid // High Water Mark (id of the next Page to be added if the file grows)
+	txid     txid
+	checksum uint64
+}
+
+func LoadPageMeta(buf []byte) *Meta {
+	return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+}
+
+func (m *Meta) RootBucket() *Bucket {
+	return &m.root
+}
+
+func (m *Meta) Txid() uint64 {
+	return uint64(m.txid)
+}
+
+func (m *Meta) Print(w io.Writer) {
+	fmt.Fprintf(w, "Version:    %d\n", m.version)
+	fmt.Fprintf(w, "Page Size:  %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags:      %08x\n", m.flags)
+	fmt.Fprintf(w, "Root:       <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist:   <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM:        <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID:     %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum:   %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+type Bucket struct {
+	root     Pgid
+	sequence uint64
+}
+
+const bucketHeaderSize = int(unsafe.Sizeof(Bucket{}))
+
+func LoadBucket(buf []byte) *Bucket {
+	return (*Bucket)(unsafe.Pointer(&buf[0]))
+}
+
+func (b *Bucket) String() string {
+	return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+}
+
+func (b *Bucket) RootPage() Pgid {
+	return b.root
+}
+
+func (b *Bucket) InlinePage(v []byte) *Page {
+	return (*Page)(unsafe.Pointer(&v[bucketHeaderSize]))
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+type Page struct {
+	id       Pgid
+	flags    uint16
+	count    uint16
+	overflow uint32
+	ptr      uintptr
+}
+
+func LoadPage(buf []byte) *Page {
+	return (*Page)(unsafe.Pointer(&buf[0]))
+}
+
+func (p *Page) FreelistPageCount() int {
+	// Check for overflow and, if present, adjust actual element count.
+	if p.count == 0xFFFF {
+		return int(((*[maxAllocSize]Pgid)(unsafe.Pointer(&p.ptr)))[0])
+	} else {
+		return int(p.count)
+	}
+}
+
+func (p *Page) FreelistPagePages() []Pgid {
+	// Check for overflow and, if present, adjust starting index.
+	idx := 0
+	if p.count == 0xFFFF {
+		idx = 1
+	}
+	return (*[maxAllocSize]Pgid)(unsafe.Pointer(&p.ptr))[idx:p.FreelistPageCount()]
+}
+
+func (p *Page) Overflow() uint32 {
+	return p.overflow
+}
+
+func (p *Page) String() string {
+	return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Type(), p.count, p.overflow)
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+
+// TODO(ptabor): Make the page-types an enum.
+func (p *Page) Type() string {
+	if (p.flags & branchPageFlag) != 0 {
+		return "branch"
+	} else if (p.flags & leafPageFlag) != 0 {
+		return "leaf"
+	} else if (p.flags & metaPageFlag) != 0 {
+		return "meta"
+	} else if (p.flags & freelistPageFlag) != 0 {
+		return "freelist"
+	}
+	return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+func (p *Page) Count() uint16 {
+	return p.count
+}
+
+func (p *Page) Id() Pgid {
+	return p.id
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+func (p *Page) LeafPageElement(index uint16) *LeafPageElement {
+	n := &((*[0x7FFFFFF]LeafPageElement)(unsafe.Pointer(&p.ptr)))[index]
+	return n
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+func (p *Page) BranchPageElement(index uint16) *BranchPageElement {
+	return &((*[0x7FFFFFF]BranchPageElement)(unsafe.Pointer(&p.ptr)))[index]
+}
+
+func (p *Page) SetId(target Pgid) {
+	p.id = target
+}
+
+func (p *Page) SetCount(target uint16) {
+	p.count = target
+}
+
+func (p *Page) SetOverflow(target uint32) {
+	p.overflow = target
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+type BranchPageElement struct {
+	pos   uint32
+	ksize uint32
+	pgid  Pgid
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+func (n *BranchPageElement) Key() []byte {
+	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+	return buf[n.pos : n.pos+n.ksize]
+}
+
+func (n *BranchPageElement) PgId() Pgid {
+	return n.pgid
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+type LeafPageElement struct {
+	flags uint32
+	pos   uint32
+	ksize uint32
+	vsize uint32
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+func (n *LeafPageElement) Key() []byte {
+	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+	return buf[n.pos : n.pos+n.ksize]
+}
+
+// DO NOT EDIT. Copied from the "bolt" package.
+func (n *LeafPageElement) Value() []byte {
+	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+	return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]
+}
+
+func (n *LeafPageElement) IsBucketEntry() bool {
+	return n.flags&uint32(bucketLeafFlag) != 0
+}
+
+func (n *LeafPageElement) Bucket() *Bucket {
+	if n.IsBucketEntry() {
+		return LoadBucket(n.Value())
+	} else {
+		return nil
+	}
+}
+
+// ReadPage reads Page info & full Page data from a path.
+// This is not transactionally safe.
+func ReadPage(path string, pageID uint64) (*Page, []byte, error) {
+	// Find Page size.
+	pageSize, hwm, err := ReadPageAndHWMSize(path)
+	if err != nil {
+		return nil, nil, fmt.Errorf("read Page size: %s", err)
+	}
+
+	// Open database file.
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer f.Close()
+
+	// Read one block into buffer.
+	buf := make([]byte, pageSize)
+	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
+		return nil, nil, err
+	} else if n != len(buf) {
+		return nil, nil, io.ErrUnexpectedEOF
+	}
+
+	// Determine total number of blocks.
+	p := LoadPage(buf)
+	if p.id != Pgid(pageID) {
+		return nil, nil, fmt.Errorf("error: %w due to unexpected Page id: %d != %d", ErrCorrupt, p.id, pageID)
+	}
+	overflowN := p.overflow
+	if overflowN >= uint32(hwm)-3 { // we exclude 2 Meta pages and the current Page.
+		return nil, nil, fmt.Errorf("error: %w, Page claims to have %d overflow pages (>=hwm=%d). Interrupting to avoid risky OOM", ErrCorrupt, overflowN, hwm)
+	}
+
+	// Re-read entire Page (with overflow) into buffer.
+	buf = make([]byte, (uint64(overflowN)+1)*pageSize)
+	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
+		return nil, nil, err
+	} else if n != len(buf) {
+		return nil, nil, io.ErrUnexpectedEOF
+	}
+	p = LoadPage(buf)
+	if p.id != Pgid(pageID) {
+		return nil, nil, fmt.Errorf("error: %w due to unexpected Page id: %d != %d", ErrCorrupt, p.id, pageID)
+	}
+
+	return p, buf, nil
+}
+
+func WritePage(path string, pageBuf []byte) error {
+	page := LoadPage(pageBuf)
+	pageSize, _, err := ReadPageAndHWMSize(path)
+	if err != nil {
+		return err
+	}
+	expectedLen := pageSize * (uint64(page.Overflow()) + 1)
+	if expectedLen != uint64(len(pageBuf)) {
+		return fmt.Errorf("WritePage: len(buf):%d != pageSize*(overflow+1):%d", len(pageBuf), expectedLen)
+	}
+	f, err := os.OpenFile(path, os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = f.WriteAt(pageBuf, int64(page.Id())*int64(pageSize))
+	return err
+}
+
+// ReadPageAndHWMSize reads the Page size and the HWM (id of the next Page to be added).
+// This is not transactionally safe.
+func ReadPageAndHWMSize(path string) (uint64, Pgid, error) {
+	// Open database file.
+	f, err := os.Open(path)
+	if err != nil {
+		return 0, 0, err
+	}
+	defer f.Close()
+
+	// Read 4KB chunk.
+	buf := make([]byte, 4096)
+	if _, err := io.ReadFull(f, buf); err != nil {
+		return 0, 0, err
+	}
+
+	// Read Page size from metadata.
+	m := LoadPageMeta(buf)
+	if m.magic != magic {
+		return 0, 0, fmt.Errorf("the Meta Page has wrong (unexpected) magic")
+	}
+	return uint64(m.pageSize), Pgid(m.pgid), nil
+}
+
+// GetRootPage returns the root-page (according to the most recent transaction).
+func GetRootPage(path string) (root Pgid, activeMeta Pgid, err error) {
+	_, buf0, err0 := ReadPage(path, 0)
+	if err0 != nil {
+		return 0, 0, err0
+	}
+	m0 := LoadPageMeta(buf0)
+	_, buf1, err1 := ReadPage(path, 1)
+	if err1 != nil {
+		return 0, 1, err1
+	}
+	m1 := LoadPageMeta(buf1)
+	if m0.txid < m1.txid {
+		return m1.root.root, 1, nil
+	} else {
+		return m0.root.root, 0, nil
+	}
+}
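`guts_cli` reads pages straight from the file, bypassing transactions, which is what makes the corruption tests below possible. A hedged sketch of dumping one page header (the path and page id are placeholders):

```go
p, _, err := guts_cli.ReadPage("my.db", 3) // not transactionally safe
if err != nil {
	log.Fatal(err)
}
fmt.Println(p.String()) // e.g. "ID: 3, Type: leaf, count: 12, overflow: 0"
```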
diff --git a/internal/surgeon/surgeon.go b/internal/surgeon/surgeon.go
new file mode 100644
index 0000000..7635837
--- /dev/null
+++ b/internal/surgeon/surgeon.go
@@ -0,0 +1,48 @@
+package surgeon
+
+import (
+	"fmt"
+	"go.etcd.io/bbolt/internal/guts_cli"
+)
+
+func CopyPage(path string, srcPage guts_cli.Pgid, target guts_cli.Pgid) error {
+	p1, d1, err1 := guts_cli.ReadPage(path, uint64(srcPage))
+	if err1 != nil {
+		return err1
+	}
+	p1.SetId(target)
+	return guts_cli.WritePage(path, d1)
+}
+
+func ClearPage(path string, pgId guts_cli.Pgid) error {
+	// Read the page
+	p, buf, err := guts_cli.ReadPage(path, uint64(pgId))
+	if err != nil {
+		return fmt.Errorf("ReadPage failed: %w", err)
+	}
+
+	// Update and rewrite the page
+	p.SetCount(0)
+	p.SetOverflow(0)
+	if err := guts_cli.WritePage(path, buf); err != nil {
+		return fmt.Errorf("WritePage failed: %w", err)
+	}
+
+	return nil
+}
+
+// RevertMetaPage replaces the newer meta page with the older one.
+// This usually means that one transaction is lost. But frequently
+// data corruption happens on the pages of the last transaction, while the
+// previous state is still consistent.
+func RevertMetaPage(path string) error {
+	_, activeMetaPage, err := guts_cli.GetRootPage(path)
+	if err != nil {
+		return err
+	}
+	if activeMetaPage == 0 {
+		return CopyPage(path, 1, 0)
+	} else {
+		return CopyPage(path, 0, 1)
+	}
+}
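bbolt keeps two alternating meta pages, so replacing the newer one with the older rolls the database back by exactly one transaction; the test that follows exercises this end to end. A one-line usage sketch (the database must not be open while operating on the file):

```go
if err := surgeon.RevertMetaPage("corrupted.db"); err != nil {
	log.Fatal(err)
}
```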
diff --git a/internal/surgeon/surgeon_test.go b/internal/surgeon/surgeon_test.go
new file mode 100644
index 0000000..3d50988
--- /dev/null
+++ b/internal/surgeon/surgeon_test.go
@@ -0,0 +1,57 @@
+package surgeon_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
+	"go.etcd.io/bbolt/internal/surgeon"
+)
+
+func TestRevertMetaPage(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+	assert.NoError(t,
+		db.Fill([]byte("data"), 1, 500,
+			func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+			func(tx int, k int) []byte { return make([]byte, 100) },
+		))
+	assert.NoError(t,
+		db.Update(
+			func(tx *bolt.Tx) error {
+				b := tx.Bucket([]byte("data"))
+				assert.NoError(t, b.Put([]byte("0123"), []byte("new Value for 123")))
+				assert.NoError(t, b.Put([]byte("1234b"), []byte("additional object")))
+				assert.NoError(t, b.Delete([]byte("0246")))
+				return nil
+			}))
+
+	assert.NoError(t,
+		db.View(
+			func(tx *bolt.Tx) error {
+				b := tx.Bucket([]byte("data"))
+				assert.Equal(t, []byte("new Value for 123"), b.Get([]byte("0123")))
+				assert.Equal(t, []byte("additional object"), b.Get([]byte("1234b")))
+				assert.Nil(t, b.Get([]byte("0246")))
+				return nil
+			}))
+
+	db.Close()
+
+	// This causes the whole tree to be linked to the previous state
+	assert.NoError(t, surgeon.RevertMetaPage(db.Path()))
+
+	db.MustReopen()
+	db.MustCheck()
+	assert.NoError(t,
+		db.View(
+			func(tx *bolt.Tx) error {
+				b := tx.Bucket([]byte("data"))
+				assert.Equal(t, make([]byte, 100), b.Get([]byte("0123")))
+				assert.Nil(t, b.Get([]byte("1234b")))
+				assert.Equal(t, make([]byte, 100), b.Get([]byte("0246")))
+				return nil
+			}))
+}
diff --git a/internal/surgeon/xray.go b/internal/surgeon/xray.go
new file mode 100644
index 0000000..4469341
--- /dev/null
+++ b/internal/surgeon/xray.go
@@ -0,0 +1,101 @@
+package surgeon
+
+// Library contains raw access to bbolt files for the sake of testing or fixing corrupted files.
+//
+// The library must not be used by the bbolt btree itself - just by the CLI or tests.
+// It's not optimized for performance.
+
+import (
+	"bytes"
+	"fmt"
+
+	"go.etcd.io/bbolt/internal/guts_cli"
+)
+
+type XRay struct {
+	path string
+}
+
+func NewXRay(path string) XRay {
+	return XRay{path}
+}
+
+func (n XRay) traverse(stack []guts_cli.Pgid, callback func(page *guts_cli.Page, stack []guts_cli.Pgid) error) error {
+	p, data, err := guts_cli.ReadPage(n.path, uint64(stack[len(stack)-1]))
+	if err != nil {
+		return fmt.Errorf("failed reading page (stack %v): %w", stack, err)
+	}
+	err = callback(p, stack)
+	if err != nil {
+		return fmt.Errorf("failed callback for page (stack %v): %w", stack, err)
+	}
+	switch p.Type() {
+	case "meta":
+		{
+			m := guts_cli.LoadPageMeta(data)
+			r := m.RootBucket().RootPage()
+			return n.traverse(append(stack, r), callback)
+		}
+	case "branch":
+		{
+			for i := uint16(0); i < p.Count(); i++ {
+				bpe := p.BranchPageElement(i)
+				if err := n.traverse(append(stack, bpe.PgId()), callback); err != nil {
+					return err
+				}
+			}
+		}
+	case "leaf":
+		for i := uint16(0); i < p.Count(); i++ {
+			lpe := p.LeafPageElement(i)
+			if lpe.IsBucketEntry() {
+				pgid := lpe.Bucket().RootPage()
+				if pgid > 0 {
+					if err := n.traverse(append(stack, pgid), callback); err != nil {
+						return err
+					}
+				} else {
+					inlinePage := lpe.Bucket().InlinePage(lpe.Value())
+					if err := callback(inlinePage, stack); err != nil {
+						return fmt.Errorf("failed callback for inline page  (stack %v): %w", stack, err)
+					}
+				}
+			}
+		}
+	case "freelist":
+		return nil
+		// Free does not have children.
+	}
+	return nil
+}
+
+// FindPathsToKey finds all paths from root to the page that contains the given key.
+// It traverses multiple buckets, so in theory there might be multiple keys with the given name.
+// Note: for simplicity it is currently implemented as a traversal of the whole reachable tree.
+// If key is a bucket name, a page-path referencing the key will be returned as well.
+func (n XRay) FindPathsToKey(key []byte) ([][]guts_cli.Pgid, error) {
+	var found [][]guts_cli.Pgid
+
+	rootPage, _, err := guts_cli.GetRootPage(n.path)
+	if err != nil {
+		return nil, err
+	}
+	err = n.traverse([]guts_cli.Pgid{rootPage},
+		func(page *guts_cli.Page, stack []guts_cli.Pgid) error {
+			if page.Type() == "leaf" {
+				for i := uint16(0); i < page.Count(); i++ {
+					if bytes.Equal(page.LeafPageElement(i).Key(), key) {
+						var copyPath []guts_cli.Pgid
+						copyPath = append(copyPath, stack...)
+						found = append(found, copyPath)
+					}
+				}
+			}
+			return nil
+		})
+	if err != nil {
+		return nil, err
+	} else {
+		return found, nil
+	}
+}
diff --git a/internal/surgeon/xray_test.go b/internal/surgeon/xray_test.go
new file mode 100644
index 0000000..5f38d96
--- /dev/null
+++ b/internal/surgeon/xray_test.go
@@ -0,0 +1,66 @@
+package surgeon_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
+	"go.etcd.io/bbolt/internal/guts_cli"
+	"go.etcd.io/bbolt/internal/surgeon"
+)
+
+func TestFindPathsToKey(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+	assert.NoError(t,
+		db.Fill([]byte("data"), 1, 500,
+			func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+			func(tx int, k int) []byte { return make([]byte, 100) },
+		))
+	assert.NoError(t, db.Close())
+
+	navigator := surgeon.NewXRay(db.Path())
+	path1, err := navigator.FindPathsToKey([]byte("0451"))
+	assert.NoError(t, err)
+	assert.NotEmpty(t, path1)
+
+	page := path1[0][len(path1[0])-1]
+	p, _, err := guts_cli.ReadPage(db.Path(), uint64(page))
+	assert.NoError(t, err)
+	assert.GreaterOrEqual(t, []byte("0451"), p.LeafPageElement(0).Key())
+	assert.LessOrEqual(t, []byte("0451"), p.LeafPageElement(p.Count()-1).Key())
+}
+
+func TestFindPathsToKey_Bucket(t *testing.T) {
+	rootBucket := []byte("data")
+	subBucket := []byte("0451A")
+
+	db := btesting.MustCreateDB(t)
+	assert.NoError(t,
+		db.Fill(rootBucket, 1, 500,
+			func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+			func(tx int, k int) []byte { return make([]byte, 100) },
+		))
+	require.NoError(t, db.Update(func(tx *bbolt.Tx) error {
+		sb, err := tx.Bucket(rootBucket).CreateBucket(subBucket)
+		require.NoError(t, err)
+		require.NoError(t, sb.Put([]byte("foo"), []byte("bar")))
+		return nil
+	}))
+
+	assert.NoError(t, db.Close())
+
+	navigator := surgeon.NewXRay(db.Path())
+	path1, err := navigator.FindPathsToKey(subBucket)
+	assert.NoError(t, err)
+	assert.NotEmpty(t, path1)
+
+	page := path1[0][len(path1[0])-1]
+	p, _, err := guts_cli.ReadPage(db.Path(), uint64(page))
+	assert.NoError(t, err)
+	assert.GreaterOrEqual(t, subBucket, p.LeafPageElement(0).Key())
+	assert.LessOrEqual(t, subBucket, p.LeafPageElement(p.Count()-1).Key())
+}
diff --git a/internal/tests/tx_check_test.go b/internal/tests/tx_check_test.go
new file mode 100644
index 0000000..0476d01
--- /dev/null
+++ b/internal/tests/tx_check_test.go
@@ -0,0 +1,87 @@
+package tests_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
+	"go.etcd.io/bbolt/internal/guts_cli"
+	"go.etcd.io/bbolt/internal/surgeon"
+)
+
+func TestTx_RecursivelyCheckPages_MisplacedPage(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+	require.NoError(t,
+		db.Fill([]byte("data"), 1, 10000,
+			func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+			func(tx int, k int) []byte { return make([]byte, 100) },
+		))
+	require.NoError(t, db.Close())
+
+	xRay := surgeon.NewXRay(db.Path())
+
+	path1, err := xRay.FindPathsToKey([]byte("0451"))
+	require.NoError(t, err, "cannot find page that contains key:'0451'")
+	require.Len(t, path1, 1, "Expected only one page that contains key:'0451'")
+
+	path2, err := xRay.FindPathsToKey([]byte("7563"))
+	require.NoError(t, err, "cannot find page that contains key:'7563'")
+	require.Len(t, path2, 1, "Expected only one page that contains key:'7563'")
+
+	srcPage := path1[0][len(path1[0])-1]
+	targetPage := path2[0][len(path2[0])-1]
+	require.NoError(t, surgeon.CopyPage(db.Path(), srcPage, targetPage))
+
+	db.MustReopen()
+	require.NoError(t, db.Update(func(tx *bolt.Tx) error {
+		// Collect all the errors.
+		var errors []error
+		for err := range tx.Check() {
+			errors = append(errors, err)
+		}
+		require.Len(t, errors, 1)
+		require.ErrorContains(t, errors[0], fmt.Sprintf("leaf page(%v) needs to be >= the key in the ancestor", targetPage))
+		return nil
+	}))
+	require.NoError(t, db.Close())
+}
+
+func TestTx_RecursivelyCheckPages_CorruptedLeaf(t *testing.T) {
+	db := btesting.MustCreateDB(t)
+	require.NoError(t,
+		db.Fill([]byte("data"), 1, 10000,
+			func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
+			func(tx int, k int) []byte { return make([]byte, 100) },
+		))
+	require.NoError(t, db.Close())
+
+	xray := surgeon.NewXRay(db.Path())
+
+	path1, err := xray.FindPathsToKey([]byte("0451"))
+	require.NoError(t, err, "cannot find page that contains key:'0451'")
+	require.Len(t, path1, 1, "Expected only one page that contains key:'0451'")
+
+	srcPage := path1[0][len(path1[0])-1]
+	p, pbuf, err := guts_cli.ReadPage(db.Path(), uint64(srcPage))
+	require.NoError(t, err)
+	require.Positive(t, p.Count(), "page must be not empty")
+	p.LeafPageElement(p.Count() / 2).Key()[0] = 'z'
+	require.NoError(t, guts_cli.WritePage(db.Path(), pbuf))
+
+	db.MustReopen()
+	require.NoError(t, db.Update(func(tx *bolt.Tx) error {
+		// Collect all the errors.
+		var errors []error
+		for err := range tx.Check() {
+			errors = append(errors, err)
+		}
+		require.Len(t, errors, 2)
+		require.ErrorContains(t, errors[0], fmt.Sprintf("leaf page(%v) needs to be < than key of the next element in ancestor", srcPage))
+		require.ErrorContains(t, errors[1], fmt.Sprintf("leaf page(%v) needs to be > (found <) than previous element", srcPage))
+		return nil
+	}))
+	require.NoError(t, db.Close())
+}
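The `tx.Check()` channel these tests drain is the same public API applications can use for offline consistency checks; the channel is closed once the check finishes. A hedged sketch, assuming an open `*bolt.DB` and no concurrent writers:

```go
err := db.View(func(tx *bolt.Tx) error {
	for cerr := range tx.Check() {
		log.Printf("consistency error: %v", cerr)
	}
	return nil
})
if err != nil {
	log.Fatal(err)
}
```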
diff --git a/manydbs_test.go b/manydbs_test.go
index b58fc1b..48bc211 100644
--- a/manydbs_test.go
+++ b/manydbs_test.go
@@ -2,7 +2,6 @@ package bbolt
 
 import (
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"path/filepath"
@@ -12,7 +11,7 @@ import (
 func createDb(t *testing.T) (*DB, func()) {
 	// First, create a temporary directory to be used for the duration of
 	// this test.
-	tempDirName, err := ioutil.TempDir("", "bboltmemtest")
+	tempDirName, err := os.MkdirTemp("", "bboltmemtest")
 	if err != nil {
 		t.Fatalf("error creating temp dir: %v", err)
 	}
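The `ioutil.TempDir` to `os.MkdirTemp` change here (and `ioutil.TempFile` to `os.CreateTemp` in db_test.go above) follows the Go 1.16 deprecation of `io/ioutil`, whose helpers moved unchanged into `os` and `io`:

```go
// Before (deprecated)            // After
// ioutil.TempDir(dir, pattern)   os.MkdirTemp(dir, pattern)
// ioutil.TempFile(dir, pattern)  os.CreateTemp(dir, pattern)
// ioutil.ReadFile(name)          os.ReadFile(name)
// ioutil.ReadAll(r)              io.ReadAll(r)
```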
diff --git a/mlock_unix.go b/mlock_unix.go
index 6a6c7b3..744a972 100644
--- a/mlock_unix.go
+++ b/mlock_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package bbolt
@@ -17,7 +18,7 @@ func mlock(db *DB, fileSize int) error {
 	return nil
 }
 
-//munlock unlocks memory of db file
+// munlock unlocks memory of db file
 func munlock(db *DB, fileSize int) error {
 	if db.dataref == nil {
 		return nil
diff --git a/mlock_windows.go b/mlock_windows.go
index b4a36a4..00b0fb4 100644
--- a/mlock_windows.go
+++ b/mlock_windows.go
@@ -5,7 +5,7 @@ func mlock(_ *DB, _ int) error {
 	panic("mlock is supported only on UNIX systems")
 }
 
-//munlock unlocks memory of db file
+// munlock unlocks memory of db file
 func munlock(_ *DB, _ int) error {
 	panic("munlock is supported only on UNIX systems")
 }
diff --git a/node.go b/node.go
index 73988b5..9c56150 100644
--- a/node.go
+++ b/node.go
@@ -113,9 +113,9 @@ func (n *node) prevSibling() *node {
 }
 
 // put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
-	if pgid >= n.bucket.tx.meta.pgid {
-		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) {
+	if pgId >= n.bucket.tx.meta.pgid {
+		panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid))
 	} else if len(oldKey) <= 0 {
 		panic("put: zero-length old key")
 	} else if len(newKey) <= 0 {
@@ -136,7 +136,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
 	inode.flags = flags
 	inode.key = newKey
 	inode.value = value
-	inode.pgid = pgid
+	inode.pgid = pgId
 	_assert(len(inode.key) > 0, "put: zero-length inode key")
 }
 
@@ -188,12 +188,16 @@ func (n *node) read(p *page) {
 }
 
 // write writes the items onto one or more pages.
+// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set
+// and the rest should be zeroed.
 func (n *node) write(p *page) {
+	_assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page")
+
 	// Initialize page.
 	if n.isLeaf {
-		p.flags |= leafPageFlag
+		p.flags = leafPageFlag
 	} else {
-		p.flags |= branchPageFlag
+		p.flags = branchPageFlag
 	}
 
 	if len(n.inodes) >= 0xFFFF {
@@ -300,7 +304,7 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
 	n.inodes = n.inodes[:splitIndex]
 
 	// Update the statistics.
-	n.bucket.tx.stats.Split++
+	n.bucket.tx.stats.IncSplit(1)
 
 	return n, next
 }
@@ -387,7 +391,7 @@ func (n *node) spill() error {
 		}
 
 		// Update the statistics.
-		tx.stats.Spill++
+		tx.stats.IncSpill(1)
 	}
 
 	// If the root node split and created a new root then we need to spill that
@@ -409,7 +413,7 @@ func (n *node) rebalance() {
 	n.unbalanced = false
 
 	// Update statistics.
-	n.bucket.tx.stats.Rebalance++
+	n.bucket.tx.stats.IncRebalance(1)
 
 	// Ignore if node is above threshold (25%) and has enough keys.
 	var threshold = n.bucket.tx.db.pageSize / 4
@@ -543,7 +547,7 @@ func (n *node) dereference() {
 	}
 
 	// Update statistics.
-	n.bucket.tx.stats.NodeDeref++
+	n.bucket.tx.stats.IncNodeDeref(1)
 }
 
 // free adds the node's underlying page to the freelist.
@@ -581,6 +585,10 @@ func (n *node) dump() {
 }
 */
 
+func compareKeys(left, right []byte) int {
+	return bytes.Compare(left, right)
+}
+
 type nodes []*node
 
 func (s nodes) Len() int      { return len(s) }
diff --git a/page.go b/page.go
index c9a158f..379645c 100644
--- a/page.go
+++ b/page.go
@@ -53,6 +53,16 @@ func (p *page) meta() *meta {
 	return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
 }
 
+func (p *page) fastCheck(id pgid) {
+	_assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
+	// Only one flag of page-type can be set.
+	_assert(p.flags == branchPageFlag ||
+		p.flags == leafPageFlag ||
+		p.flags == metaPageFlag ||
+		p.flags == freelistPageFlag,
+		"page %v: has unexpected type/flags: %x", p.id, p.flags)
+}
+
 // leafPageElement retrieves the leaf node by index
 func (p *page) leafPageElement(index uint16) *leafPageElement {
 	return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
diff --git a/scripts/fix.sh b/scripts/fix.sh
new file mode 100755
index 0000000..6b933c9
--- /dev/null
+++ b/scripts/fix.sh
@@ -0,0 +1,13 @@
+GO_CMD="go"
+
+# TODO(ptabor): Expand to cover different architectures (GOOS GOARCH), or just list go files.
+
+GOFILES=$(${GO_CMD} list -f "{{with \$d:=.}}{{range .GoFiles}}{{\$d.Dir}}/{{.}}{{\"\n\"}}{{end}}{{end}}" ./...)
+TESTGOFILES=$(${GO_CMD} list -f "{{with \$d:=.}}{{range .TestGoFiles}}{{\$d.Dir}}/{{.}}{{\"\n\"}}{{end}}{{end}}" ./...)
+XTESTGOFILES=$(${GO_CMD} list -f "{{with \$d:=.}}{{range .XTestGoFiles}}{{\$d.Dir}}/{{.}}{{\"\n\"}}{{end}}{{end}}" ./...)
+
+
+echo "${GOFILES}" "${TESTGOFILES}" "${XTESTGOFILES}"| xargs -n 100 go run golang.org/x/tools/cmd/goimports@latest -w -local go.etcd.io
+
+go fmt ./...
+go mod tidy
diff --git a/simulation_test.go b/simulation_test.go
index a96a241..037b718 100644
--- a/simulation_test.go
+++ b/simulation_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
 )
 
 func TestSimulate_1op_1p(t *testing.T)     { testSimulate(t, nil, 1, 1, 1) }
@@ -43,8 +44,7 @@ func testSimulate(t *testing.T, openOption *bolt.Options, round, threadCount, pa
 	var versions = make(map[int]*QuickDB)
 	versions[1] = NewQuickDB()
 
-	db := MustOpenWithOption(openOption)
-	defer db.MustClose()
+	db := btesting.MustCreateDBWithOption(t, openOption)
 
 	var mutex sync.Mutex
 
@@ -146,6 +146,9 @@ func testSimulate(t *testing.T, openOption *bolt.Options, round, threadCount, pa
 		}
 
 		db.MustClose()
+		// I have doubts the DB drop is intended here (as 'versions' is not being reset).
+		// But I'm preserving the original behavior for now.
+		db.MustDeleteFile()
 		db.MustReopen()
 	}
 
diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go
new file mode 100644
index 0000000..798c6b9
--- /dev/null
+++ b/tests/failpoint/db_failpoint_test.go
@@ -0,0 +1,25 @@
+package failpoint
+
+import (
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	bolt "go.etcd.io/bbolt"
+	gofail "go.etcd.io/gofail/runtime"
+)
+
+func TestFailpoint_MapFail(t *testing.T) {
+	err := gofail.Enable("mapError", `return("map somehow failed")`)
+	require.NoError(t, err)
+	defer func() {
+		err = gofail.Disable("mapError")
+		require.NoError(t, err)
+	}()
+
+	f := filepath.Join(t.TempDir(), "db")
+	_, err = bolt.Open(f, 0666, nil)
+	require.Error(t, err)
+	require.ErrorContains(t, err, "map somehow failed")
+}
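This test drives a gofail failpoint that is only compiled in when the build is transformed by the gofail tool (the `make gofail-enable` target). Failpoints are declared as magic comments in the source; a hedged sketch of what such a declaration looks like (illustrative, not necessarily bbolt's actual mmap code):

```go
func mmap(db *DB, sz int) error {
	// gofail: var mapError string
	// return errors.New(mapError)

	// ... real mmap logic runs when the failpoint is disabled ...
	return nil
}
```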
diff --git a/tx.go b/tx.go
index 869d412..2fac8c0 100644
--- a/tx.go
+++ b/tx.go
@@ -6,6 +6,7 @@ import (
 	"os"
 	"sort"
 	"strings"
+	"sync/atomic"
 	"time"
 	"unsafe"
 )
@@ -151,17 +152,19 @@ func (tx *Tx) Commit() error {
 	// Rebalance nodes which have had deletions.
 	var startTime = time.Now()
 	tx.root.rebalance()
-	if tx.stats.Rebalance > 0 {
-		tx.stats.RebalanceTime += time.Since(startTime)
+	if tx.stats.GetRebalance() > 0 {
+		tx.stats.IncRebalanceTime(time.Since(startTime))
 	}
 
+	opgid := tx.meta.pgid
+
 	// spill data onto dirty pages.
 	startTime = time.Now()
 	if err := tx.root.spill(); err != nil {
 		tx.rollback()
 		return err
 	}
-	tx.stats.SpillTime += time.Since(startTime)
+	tx.stats.IncSpillTime(time.Since(startTime))
 
 	// Free the old root bucket.
 	tx.meta.root.root = tx.root.root
@@ -180,6 +183,14 @@ func (tx *Tx) Commit() error {
 		tx.meta.freelist = pgidNoFreelist
 	}
 
+	// If the high water mark has moved up then attempt to grow the database.
+	if tx.meta.pgid > opgid {
+		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+			tx.rollback()
+			return err
+		}
+	}
+
 	// Write dirty pages to disk.
 	startTime = time.Now()
 	if err := tx.write(); err != nil {
@@ -208,7 +219,7 @@ func (tx *Tx) Commit() error {
 		tx.rollback()
 		return err
 	}
-	tx.stats.WriteTime += time.Since(startTime)
+	tx.stats.IncWriteTime(time.Since(startTime))
 
 	// Finalize the transaction.
 	tx.close()
@@ -224,7 +235,6 @@ func (tx *Tx) Commit() error {
 func (tx *Tx) commitFreelist() error {
 	// Allocate new pages for the new free list. This will overestimate
 	// the size of the freelist but not underestimate the size (which would be bad).
-	opgid := tx.meta.pgid
 	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
 	if err != nil {
 		tx.rollback()
@@ -235,13 +245,6 @@ func (tx *Tx) commitFreelist() error {
 		return err
 	}
 	tx.meta.freelist = p.id
-	// If the high water mark has moved up then attempt to grow the database.
-	if tx.meta.pgid > opgid {
-		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
-			tx.rollback()
-			return err
-		}
-	}
 
 	return nil
 }
@@ -275,13 +278,17 @@ func (tx *Tx) rollback() {
 	}
 	if tx.writable {
 		tx.db.freelist.rollback(tx.meta.txid)
-		if !tx.db.hasSyncedFreelist() {
-			// Reconstruct free page list by scanning the DB to get the whole free page list.
-			// Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode.
-			tx.db.freelist.noSyncReload(tx.db.freepages())
-		} else {
-			// Read free page list from freelist page.
-			tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+		// When mmap fails, the `data`, `dataref` and `datasz` may be reset to
+		// zero values, and there is no way to reload free page IDs in this case.
+		if tx.db.data != nil {
+			if !tx.db.hasSyncedFreelist() {
+				// Reconstruct free page list by scanning the DB to get the whole free page list.
+				// Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
+				tx.db.freelist.noSyncReload(tx.db.freepages())
+			} else {
+				// Read free page list from freelist page.
+				tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+			}
 		}
 	}
 	tx.close()
@@ -400,98 +407,6 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
 	return f.Close()
 }
 
-// Check performs several consistency checks on the database for this transaction.
-// An error is returned if any inconsistency is found.
-//
-// It can be safely run concurrently on a writable transaction. However, this
-// incurs a high cost for large databases and databases with a lot of subbuckets
-// because of caching. This overhead can be removed if running on a read-only
-// transaction, however, it is not safe to execute other writer transactions at
-// the same time.
-func (tx *Tx) Check() <-chan error {
-	ch := make(chan error)
-	go tx.check(ch)
-	return ch
-}
-
-func (tx *Tx) check(ch chan error) {
-	// Force loading free list if opened in ReadOnly mode.
-	tx.db.loadFreelist()
-
-	// Check if any pages are double freed.
-	freed := make(map[pgid]bool)
-	all := make([]pgid, tx.db.freelist.count())
-	tx.db.freelist.copyall(all)
-	for _, id := range all {
-		if freed[id] {
-			ch <- fmt.Errorf("page %d: already freed", id)
-		}
-		freed[id] = true
-	}
-
-	// Track every reachable page.
-	reachable := make(map[pgid]*page)
-	reachable[0] = tx.page(0) // meta0
-	reachable[1] = tx.page(1) // meta1
-	if tx.meta.freelist != pgidNoFreelist {
-		for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
-			reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
-		}
-	}
-
-	// Recursively check buckets.
-	tx.checkBucket(&tx.root, reachable, freed, ch)
-
-	// Ensure all pages below high water mark are either reachable or freed.
-	for i := pgid(0); i < tx.meta.pgid; i++ {
-		_, isReachable := reachable[i]
-		if !isReachable && !freed[i] {
-			ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
-		}
-	}
-
-	// Close the channel to signal completion.
-	close(ch)
-}
-
-func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
-	// Ignore inline buckets.
-	if b.root == 0 {
-		return
-	}
-
-	// Check every page used by this bucket.
-	b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
-		if p.id > tx.meta.pgid {
-			ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
-		}
-
-		// Ensure each page is only referenced once.
-		for i := pgid(0); i <= pgid(p.overflow); i++ {
-			var id = p.id + i
-			if _, ok := reachable[id]; ok {
-				ch <- fmt.Errorf("page %d: multiple references", int(id))
-			}
-			reachable[id] = p
-		}
-
-		// We should only encounter un-freed leaf and branch pages.
-		if freed[p.id] {
-			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
-		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
-			ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
-		}
-	})
-
-	// Check each bucket within this bucket.
-	_ = b.ForEach(func(k, v []byte) error {
-		if child := b.Bucket(k); child != nil {
-			tx.checkBucket(child, reachable, freed, ch)
-		}
-		return nil
-	})
-}
-
 // allocate returns a contiguous block of memory starting at a given page.
 func (tx *Tx) allocate(count int) (*page, error) {
 	p, err := tx.db.allocate(tx.meta.txid, count)
@@ -503,8 +418,8 @@ func (tx *Tx) allocate(count int) (*page, error) {
 	tx.pages[p.id] = p
 
 	// Update statistics.
-	tx.stats.PageCount += count
-	tx.stats.PageAlloc += count * tx.db.pageSize
+	tx.stats.IncPageCount(int64(count))
+	tx.stats.IncPageAlloc(int64(count * tx.db.pageSize))
 
 	return p, nil
 }
@@ -539,7 +454,7 @@ func (tx *Tx) write() error {
 			}
 
 			// Update statistics.
-			tx.stats.Write++
+			tx.stats.IncWrite(1)
 
 			// Exit inner for loop if we've written all the chunks.
 			rem -= sz
@@ -574,7 +489,7 @@ func (tx *Tx) write() error {
 		for i := range buf {
 			buf[i] = 0
 		}
-		tx.db.pagePool.Put(buf)
+		tx.db.pagePool.Put(buf) //nolint:staticcheck
 	}
 
 	return nil
@@ -598,7 +513,7 @@ func (tx *Tx) writeMeta() error {
 	}
 
 	// Update statistics.
-	tx.stats.Write++
+	tx.stats.IncWrite(1)
 
 	return nil
 }
@@ -609,26 +524,35 @@ func (tx *Tx) page(id pgid) *page {
 	// Check the dirty pages first.
 	if tx.pages != nil {
 		if p, ok := tx.pages[id]; ok {
+			p.fastCheck(id)
 			return p
 		}
 	}
 
 	// Otherwise return directly from the mmap.
-	return tx.db.page(id)
+	p := tx.db.page(id)
+	p.fastCheck(id)
+	return p
 }
 
 // forEachPage iterates over every page within a given page and executes a function.
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
-	p := tx.page(pgid)
+func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) {
+	stack := make([]pgid, 10)
+	stack[0] = pgidnum
+	tx.forEachPageInternal(stack[:1], fn)
+}
+
+func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) {
+	p := tx.page(pgidstack[len(pgidstack)-1])
 
 	// Execute function.
-	fn(p, depth)
+	fn(p, len(pgidstack)-1, pgidstack)
 
 	// Recursively loop over children.
 	if (p.flags & branchPageFlag) != 0 {
 		for i := 0; i < int(p.count); i++ {
 			elem := p.branchPageElement(uint16(i))
-			tx.forEachPage(elem.pgid, depth+1, fn)
+			tx.forEachPageInternal(append(pgidstack, elem.pgid), fn)
 		}
 	}
 }
@@ -642,6 +566,10 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
 		return nil, nil
 	}
 
+	if tx.db.freelist == nil {
+		return nil, ErrFreePagesNotLoaded
+	}
+
 	// Build the page info.
 	p := tx.db.page(pgid(id))
 	info := &PageInfo{
@@ -663,43 +591,61 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
 // TxStats represents statistics about the actions performed by the transaction.
 type TxStats struct {
 	// Page statistics.
-	PageCount int // number of page allocations
-	PageAlloc int // total bytes allocated
+	//
+	// DEPRECATED: Use GetPageCount() or IncPageCount()
+	PageCount int64 // number of page allocations
+	// DEPRECATED: Use GetPageAlloc() or IncPageAlloc()
+	PageAlloc int64 // total bytes allocated
 
 	// Cursor statistics.
-	CursorCount int // number of cursors created
+	//
+	// DEPRECATED: Use GetCursorCount() or IncCursorCount()
+	CursorCount int64 // number of cursors created
 
 	// Node statistics
-	NodeCount int // number of node allocations
-	NodeDeref int // number of node dereferences
+	//
+	// DEPRECATED: Use GetNodeCount() or IncNodeCount()
+	NodeCount int64 // number of node allocations
+	// DEPRECATED: Use GetNodeDeref() or IncNodeDeref()
+	NodeDeref int64 // number of node dereferences
 
 	// Rebalance statistics.
-	Rebalance     int           // number of node rebalances
+	//
+	// DEPRECATED: Use GetRebalance() or IncRebalance()
+	Rebalance int64 // number of node rebalances
+	// DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime()
 	RebalanceTime time.Duration // total time spent rebalancing
 
 	// Split/Spill statistics.
-	Split     int           // number of nodes split
-	Spill     int           // number of nodes spilled
+	//
+	// DEPRECATED: Use GetSplit() or IncSplit()
+	Split int64 // number of nodes split
+	// DEPRECATED: Use GetSpill() or IncSpill()
+	Spill int64 // number of nodes spilled
+	// DEPRECATED: Use GetSpillTime() or IncSpillTime()
 	SpillTime time.Duration // total time spent spilling
 
 	// Write statistics.
-	Write     int           // number of writes performed
+	//
+	// DEPRECATED: Use GetWrite() or IncWrite()
+	Write int64 // number of writes performed
+	// DEPRECATED: Use GetWriteTime() or IncWriteTime()
 	WriteTime time.Duration // total time spent writing to disk
 }
 
 func (s *TxStats) add(other *TxStats) {
-	s.PageCount += other.PageCount
-	s.PageAlloc += other.PageAlloc
-	s.CursorCount += other.CursorCount
-	s.NodeCount += other.NodeCount
-	s.NodeDeref += other.NodeDeref
-	s.Rebalance += other.Rebalance
-	s.RebalanceTime += other.RebalanceTime
-	s.Split += other.Split
-	s.Spill += other.Spill
-	s.SpillTime += other.SpillTime
-	s.Write += other.Write
-	s.WriteTime += other.WriteTime
+	s.IncPageCount(other.GetPageCount())
+	s.IncPageAlloc(other.GetPageAlloc())
+	s.IncCursorCount(other.GetCursorCount())
+	s.IncNodeCount(other.GetNodeCount())
+	s.IncNodeDeref(other.GetNodeDeref())
+	s.IncRebalance(other.GetRebalance())
+	s.IncRebalanceTime(other.GetRebalanceTime())
+	s.IncSplit(other.GetSplit())
+	s.IncSpill(other.GetSpill())
+	s.IncSpillTime(other.GetSpillTime())
+	s.IncWrite(other.GetWrite())
+	s.IncWriteTime(other.GetWriteTime())
 }
 
 // Sub calculates and returns the difference between two sets of transaction stats.
@@ -707,17 +653,145 @@ func (s *TxStats) add(other *TxStats) {
 // you need the performance counters that occurred within that time span.
 func (s *TxStats) Sub(other *TxStats) TxStats {
 	var diff TxStats
-	diff.PageCount = s.PageCount - other.PageCount
-	diff.PageAlloc = s.PageAlloc - other.PageAlloc
-	diff.CursorCount = s.CursorCount - other.CursorCount
-	diff.NodeCount = s.NodeCount - other.NodeCount
-	diff.NodeDeref = s.NodeDeref - other.NodeDeref
-	diff.Rebalance = s.Rebalance - other.Rebalance
-	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
-	diff.Split = s.Split - other.Split
-	diff.Spill = s.Spill - other.Spill
-	diff.SpillTime = s.SpillTime - other.SpillTime
-	diff.Write = s.Write - other.Write
-	diff.WriteTime = s.WriteTime - other.WriteTime
+	diff.PageCount = s.GetPageCount() - other.GetPageCount()
+	diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc()
+	diff.CursorCount = s.GetCursorCount() - other.GetCursorCount()
+	diff.NodeCount = s.GetNodeCount() - other.GetNodeCount()
+	diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref()
+	diff.Rebalance = s.GetRebalance() - other.GetRebalance()
+	diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime()
+	diff.Split = s.GetSplit() - other.GetSplit()
+	diff.Spill = s.GetSpill() - other.GetSpill()
+	diff.SpillTime = s.GetSpillTime() - other.GetSpillTime()
+	diff.Write = s.GetWrite() - other.GetWrite()
+	diff.WriteTime = s.GetWriteTime() - other.GetWriteTime()
 	return diff
 }
+
+// GetPageCount returns PageCount atomically.
+func (s *TxStats) GetPageCount() int64 {
+	return atomic.LoadInt64(&s.PageCount)
+}
+
+// IncPageCount increases PageCount atomically and returns the new value.
+func (s *TxStats) IncPageCount(delta int64) int64 {
+	return atomic.AddInt64(&s.PageCount, delta)
+}
+
+// GetPageAlloc returns PageAlloc atomically.
+func (s *TxStats) GetPageAlloc() int64 {
+	return atomic.LoadInt64(&s.PageAlloc)
+}
+
+// IncPageAlloc increases PageAlloc atomically and returns the new value.
+func (s *TxStats) IncPageAlloc(delta int64) int64 {
+	return atomic.AddInt64(&s.PageAlloc, delta)
+}
+
+// GetCursorCount returns CursorCount atomically.
+func (s *TxStats) GetCursorCount() int64 {
+	return atomic.LoadInt64(&s.CursorCount)
+}
+
+// IncCursorCount increases CursorCount atomically and returns the new value.
+func (s *TxStats) IncCursorCount(delta int64) int64 {
+	return atomic.AddInt64(&s.CursorCount, delta)
+}
+
+// GetNodeCount returns NodeCount atomically.
+func (s *TxStats) GetNodeCount() int64 {
+	return atomic.LoadInt64(&s.NodeCount)
+}
+
+// IncNodeCount increases NodeCount atomically and returns the new value.
+func (s *TxStats) IncNodeCount(delta int64) int64 {
+	return atomic.AddInt64(&s.NodeCount, delta)
+}
+
+// GetNodeDeref returns NodeDeref atomically.
+func (s *TxStats) GetNodeDeref() int64 {
+	return atomic.LoadInt64(&s.NodeDeref)
+}
+
+// IncNodeDeref increases NodeDeref atomically and returns the new value.
+func (s *TxStats) IncNodeDeref(delta int64) int64 {
+	return atomic.AddInt64(&s.NodeDeref, delta)
+}
+
+// GetRebalance returns Rebalance atomically.
+func (s *TxStats) GetRebalance() int64 {
+	return atomic.LoadInt64(&s.Rebalance)
+}
+
+// IncRebalance increases Rebalance atomically and returns the new value.
+func (s *TxStats) IncRebalance(delta int64) int64 {
+	return atomic.AddInt64(&s.Rebalance, delta)
+}
+
+// GetRebalanceTime returns RebalanceTime atomically.
+func (s *TxStats) GetRebalanceTime() time.Duration {
+	return atomicLoadDuration(&s.RebalanceTime)
+}
+
+// IncRebalanceTime increases RebalanceTime atomically and returns the new value.
+func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration {
+	return atomicAddDuration(&s.RebalanceTime, delta)
+}
+
+// GetSplit returns Split atomically.
+func (s *TxStats) GetSplit() int64 {
+	return atomic.LoadInt64(&s.Split)
+}
+
+// IncSplit increases Split atomically and returns the new value.
+func (s *TxStats) IncSplit(delta int64) int64 {
+	return atomic.AddInt64(&s.Split, delta)
+}
+
+// GetSpill returns Spill atomically.
+func (s *TxStats) GetSpill() int64 {
+	return atomic.LoadInt64(&s.Spill)
+}
+
+// IncSpill increases Spill atomically and returns the new value.
+func (s *TxStats) IncSpill(delta int64) int64 {
+	return atomic.AddInt64(&s.Spill, delta)
+}
+
+// GetSpillTime returns SpillTime atomically.
+func (s *TxStats) GetSpillTime() time.Duration {
+	return atomicLoadDuration(&s.SpillTime)
+}
+
+// IncSpillTime increases SpillTime atomically and returns the new value.
+func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration {
+	return atomicAddDuration(&s.SpillTime, delta)
+}
+
+// GetWrite returns Write atomically.
+func (s *TxStats) GetWrite() int64 {
+	return atomic.LoadInt64(&s.Write)
+}
+
+// IncWrite increases Write atomically and returns the new value.
+func (s *TxStats) IncWrite(delta int64) int64 {
+	return atomic.AddInt64(&s.Write, delta)
+}
+
+// GetWriteTime returns WriteTime atomically.
+func (s *TxStats) GetWriteTime() time.Duration {
+	return atomicLoadDuration(&s.WriteTime)
+}
+
+// IncWriteTime increases WriteTime atomically and returns the new value.
+func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration {
+	return atomicAddDuration(&s.WriteTime, delta)
+}
+
+func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration {
+	return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du)))
+}
+
+func atomicLoadDuration(ptr *time.Duration) time.Duration {
+	return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr))))
+}
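
For downstream code, the practical effect of the TxStats changes above is that the counters are now int64 and updated atomically, with direct field access deprecated in favour of the Get*/Inc* accessors. A minimal sketch of the new read path (the "stats.db" path is illustrative, not from the diff):

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open (or create) a database; path and mode are placeholders.
	db, err := bolt.Open("stats.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// db.Stats() returns a snapshot; read the transaction counters through
	// the atomic accessors rather than the deprecated raw fields.
	txStats := db.Stats().TxStats
	fmt.Printf("pages allocated: %d (%d bytes)\n",
		txStats.GetPageCount(), txStats.GetPageAlloc())
	fmt.Printf("writes: %d taking %v\n",
		txStats.GetWrite(), txStats.GetWriteTime())
}
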
diff --git a/tx_check.go b/tx_check.go
new file mode 100644
index 0000000..75c7c08
--- /dev/null
+++ b/tx_check.go
@@ -0,0 +1,226 @@
+package bbolt
+
+import (
+	"encoding/hex"
+	"fmt"
+)
+
+// Check performs several consistency checks on the database for this transaction.
+// An error is returned if any inconsistency is found.
+//
+// It can be safely run concurrently on a writable transaction. However, this
+// incurs a high cost for large databases and databases with a lot of subbuckets
+// because of caching. This overhead can be removed if running on a read-only
+// transaction; however, it is not safe to execute other writer transactions at
+// the same time.
+func (tx *Tx) Check() <-chan error {
+	return tx.CheckWithOptions()
+}
+
+// CheckWithOptions allows users to provide a customized `KVStringer` implementation,
+// so that bolt can generate human-readable diagnostic messages.
+func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error {
+	chkConfig := checkConfig{
+		kvStringer: HexKVStringer(),
+	}
+	for _, op := range options {
+		op(&chkConfig)
+	}
+
+	ch := make(chan error)
+	go tx.check(chkConfig.kvStringer, ch)
+	return ch
+}
+
+func (tx *Tx) check(kvStringer KVStringer, ch chan error) {
+	// Force loading free list if opened in ReadOnly mode.
+	tx.db.loadFreelist()
+
+	// Check if any pages are double freed.
+	freed := make(map[pgid]bool)
+	all := make([]pgid, tx.db.freelist.count())
+	tx.db.freelist.copyall(all)
+	for _, id := range all {
+		if freed[id] {
+			ch <- fmt.Errorf("page %d: already freed", id)
+		}
+		freed[id] = true
+	}
+
+	// Track every reachable page.
+	reachable := make(map[pgid]*page)
+	reachable[0] = tx.page(0) // meta0
+	reachable[1] = tx.page(1) // meta1
+	if tx.meta.freelist != pgidNoFreelist {
+		for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
+			reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+		}
+	}
+
+	// Recursively check buckets.
+	tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch)
+
+	// Ensure all pages below high water mark are either reachable or freed.
+	for i := pgid(0); i < tx.meta.pgid; i++ {
+		_, isReachable := reachable[i]
+		if !isReachable && !freed[i] {
+			ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
+		}
+	}
+
+	// Close the channel to signal completion.
+	close(ch)
+}
+
+func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool,
+	kvStringer KVStringer, ch chan error) {
+	// Ignore inline buckets.
+	if b.root == 0 {
+		return
+	}
+
+	// Check every page used by this bucket.
+	b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) {
+		if p.id > tx.meta.pgid {
+			ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack)
+		}
+
+		// Ensure each page is only referenced once.
+		for i := pgid(0); i <= pgid(p.overflow); i++ {
+			var id = p.id + i
+			if _, ok := reachable[id]; ok {
+				ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack)
+			}
+			reachable[id] = p
+		}
+
+		// We should only encounter un-freed leaf and branch pages.
+		if freed[p.id] {
+			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
+		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
+			ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack)
+		}
+	})
+
+	tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch)
+
+	// Check each bucket within this bucket.
+	_ = b.ForEachBucket(func(k []byte) error {
+		if child := b.Bucket(k); child != nil {
+			tx.checkBucket(child, reachable, freed, kvStringer, ch)
+		}
+		return nil
+	})
+}
+
+// recursivelyCheckPages confirms database consistency with respect to b-tree
+// key order constraints:
+//   - keys on pages must be sorted
+//   - keys on child pages are between 2 consecutive keys on the parent's branch page.
+func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) {
+	tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch)
+}
+
+// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are:
+//   - >=`minKeyClosed` (can be nil)
+//   - <`maxKeyOpen` (can be nil)
+//   - in the right ordering relationship to their parents.
+//     `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for a clean debugging message.
+func (tx *Tx) recursivelyCheckPagesInternal(
+	pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid,
+	keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {
+
+	p := tx.page(pgId)
+	pagesStack = append(pagesStack, pgId)
+	switch {
+	case p.flags&branchPageFlag != 0:
+		// For branch page we navigate ranges of all subpages.
+		runningMin := minKeyClosed
+		for i := range p.branchPageElements() {
+			elem := p.branchPageElement(uint16(i))
+			verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
+
+			maxKey := maxKeyOpen
+			if i < len(p.branchPageElements())-1 {
+				maxKey = p.branchPageElement(uint16(i + 1)).key()
+			}
+			maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch)
+			runningMin = maxKeyInSubtree
+		}
+		return maxKeyInSubtree
+	case p.flags&leafPageFlag != 0:
+		runningMin := minKeyClosed
+		for i := range p.leafPageElements() {
+			elem := p.leafPageElement(uint16(i))
+			verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
+			runningMin = elem.key()
+		}
+		if p.count > 0 {
+			return p.leafPageElement(p.count - 1).key()
+		}
+	default:
+		ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId)
+	}
+	return maxKeyInSubtree
+}
+
+/***
+ * verifyKeyOrder checks whether the entry at the given index on page pgId (pageType: "branch|leaf"), with the given key,
+ * is within the range determined by (previousKey..maxKeyOpen) and reports any violations to the channel (ch).
+ */
+func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) {
+	if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 {
+		ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
+			index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
+	}
+	if index > 0 {
+		cmpRet := compareKeys(previousKey, key)
+		if cmpRet > 0 {
+			ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v",
+				index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
+		}
+		if cmpRet == 0 {
+			ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v",
+				index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
+		}
+	}
+	if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 {
+		ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v",
+			index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
+	}
+}
+
+// ===========================================================================================
+
+type checkConfig struct {
+	kvStringer KVStringer
+}
+
+type CheckOption func(options *checkConfig)
+
+func WithKVStringer(kvStringer KVStringer) CheckOption {
+	return func(c *checkConfig) {
+		c.kvStringer = kvStringer
+	}
+}
+
+// KVStringer allows callers to prepare human-readable diagnostic messages.
+type KVStringer interface {
+	KeyToString([]byte) string
+	ValueToString([]byte) string
+}
+
+// HexKVStringer serializes both key & value to hex representation.
+func HexKVStringer() KVStringer {
+	return hexKvStringer{}
+}
+
+type hexKvStringer struct{}
+
+func (_ hexKvStringer) KeyToString(key []byte) string {
+	return hex.EncodeToString(key)
+}
+
+func (_ hexKvStringer) ValueToString(value []byte) string {
+	return hex.EncodeToString(value)
+}
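
The new tx_check.go also exports a small options API around the checker: CheckWithOptions accepts CheckOption values such as WithKVStringer, with HexKVStringer as the default. A hedged sketch of plugging in a custom stringer; the rawKVStringer type and "check.db" path below are hypothetical:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

// rawKVStringer is a hypothetical KVStringer that renders keys and values as
// raw strings instead of the default hex encoding from HexKVStringer.
type rawKVStringer struct{}

func (rawKVStringer) KeyToString(key []byte) string     { return string(key) }
func (rawKVStringer) ValueToString(value []byte) string { return string(value) }

func main() {
	// Open an existing database read-only; per the Check doc comment, the
	// consistency check is cheapest on a read-only transaction.
	db, err := bolt.Open("check.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.View(func(tx *bolt.Tx) error {
		// Drain the channel; it is closed when the check completes.
		for cErr := range tx.CheckWithOptions(bolt.WithKVStringer(rawKVStringer{})) {
			log.Printf("consistency error: %v", cErr)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
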
diff --git a/tx_stats_test.go b/tx_stats_test.go
new file mode 100644
index 0000000..e0cbbd4
--- /dev/null
+++ b/tx_stats_test.go
@@ -0,0 +1,54 @@
+package bbolt
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestTxStats_add(t *testing.T) {
+	statsA := TxStats{
+		PageCount:     1,
+		PageAlloc:     2,
+		CursorCount:   3,
+		NodeCount:     100,
+		NodeDeref:     101,
+		Rebalance:     1000,
+		RebalanceTime: 1001 * time.Second,
+		Split:         10000,
+		Spill:         10001,
+		SpillTime:     10001 * time.Second,
+		Write:         100000,
+		WriteTime:     100001 * time.Second,
+	}
+
+	statsB := TxStats{
+		PageCount:     2,
+		PageAlloc:     3,
+		CursorCount:   4,
+		NodeCount:     101,
+		NodeDeref:     102,
+		Rebalance:     1001,
+		RebalanceTime: 1002 * time.Second,
+		Split:         11001,
+		Spill:         11002,
+		SpillTime:     11002 * time.Second,
+		Write:         110001,
+		WriteTime:     110010 * time.Second,
+	}
+
+	statsB.add(&statsA)
+	assert.Equal(t, int64(3), statsB.GetPageCount())
+	assert.Equal(t, int64(5), statsB.GetPageAlloc())
+	assert.Equal(t, int64(7), statsB.GetCursorCount())
+	assert.Equal(t, int64(201), statsB.GetNodeCount())
+	assert.Equal(t, int64(203), statsB.GetNodeDeref())
+	assert.Equal(t, int64(2001), statsB.GetRebalance())
+	assert.Equal(t, 2003*time.Second, statsB.GetRebalanceTime())
+	assert.Equal(t, int64(21001), statsB.GetSplit())
+	assert.Equal(t, int64(21003), statsB.GetSpill())
+	assert.Equal(t, 21003*time.Second, statsB.GetSpillTime())
+	assert.Equal(t, int64(210001), statsB.GetWrite())
+	assert.Equal(t, 210011*time.Second, statsB.GetWriteTime())
+}
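
As the Sub doc comment in tx.go notes, the intended pattern is to snapshot TxStats at two points in time and diff them; the test above exercises exactly that arithmetic. A small sketch of interval-based monitoring built on Sub (the ticker period and "app.db" path are arbitrary choices, not from the diff):

package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	prev := db.Stats().TxStats
	for range time.Tick(10 * time.Second) { // time.Tick leaks its ticker; fine for a sketch
		cur := db.Stats().TxStats
		delta := cur.Sub(&prev) // counters accumulated during this interval only
		log.Printf("splits=%d spills=%d spill-time=%v",
			delta.GetSplit(), delta.GetSpill(), delta.GetSpillTime())
		prev = cur
	}
}
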
diff --git a/tx_test.go b/tx_test.go
index 14345d0..fa8302d 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -6,15 +6,20 @@ import (
 	"fmt"
 	"log"
 	"os"
+	"runtime"
 	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 
 	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
 )
 
 // TestTx_Check_ReadOnly tests consistency checking on a ReadOnly database.
 func TestTx_Check_ReadOnly(t *testing.T) {
-	db := MustOpenDB()
-	defer db.Close()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -27,11 +32,11 @@ func TestTx_Check_ReadOnly(t *testing.T) {
 	}); err != nil {
 		t.Fatal(err)
 	}
-	if err := db.DB.Close(); err != nil {
+	if err := db.Close(); err != nil {
 		t.Fatal(err)
 	}
 
-	readOnlyDB, err := bolt.Open(db.f, 0666, &bolt.Options{ReadOnly: true})
+	readOnlyDB, err := bolt.Open(db.Path(), 0666, &bolt.Options{ReadOnly: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -57,13 +62,15 @@ func TestTx_Check_ReadOnly(t *testing.T) {
 		}
 	}
 	// Close the view transaction
-	tx.Rollback()
+	err = tx.Rollback()
+	if err != nil {
+		t.Fatal(err)
+	}
 }
 
 // Ensure that committing a closed transaction returns an error.
 func TestTx_Commit_ErrTxClosed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	tx, err := db.Begin(true)
 	if err != nil {
 		t.Fatal(err)
@@ -84,8 +91,7 @@ func TestTx_Commit_ErrTxClosed(t *testing.T) {
 
 // Ensure that rolling back a closed transaction returns an error.
 func TestTx_Rollback_ErrTxClosed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	tx, err := db.Begin(true)
 	if err != nil {
@@ -102,8 +108,7 @@ func TestTx_Rollback_ErrTxClosed(t *testing.T) {
 
 // Ensure that committing a read-only transaction returns an error.
 func TestTx_Commit_ErrTxNotWritable(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	tx, err := db.Begin(false)
 	if err != nil {
 		t.Fatal(err)
@@ -112,13 +117,15 @@ func TestTx_Commit_ErrTxNotWritable(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Close the view transaction
-	tx.Rollback()
+	err = tx.Rollback()
+	if err != nil {
+		t.Fatal(err)
+	}
 }
 
 // Ensure that a transaction can retrieve a cursor on the root bucket.
 func TestTx_Cursor(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
 			t.Fatal(err)
@@ -155,8 +162,7 @@ func TestTx_Cursor(t *testing.T) {
 
 // Ensure that creating a bucket with a read-only transaction returns an error.
 func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.View(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucket([]byte("foo"))
 		if err != bolt.ErrTxNotWritable {
@@ -170,8 +176,7 @@ func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) {
 
 // Ensure that creating a bucket on a closed transaction returns an error.
 func TestTx_CreateBucket_ErrTxClosed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	tx, err := db.Begin(true)
 	if err != nil {
 		t.Fatal(err)
@@ -187,8 +192,7 @@ func TestTx_CreateBucket_ErrTxClosed(t *testing.T) {
 
 // Ensure that a Tx can retrieve a bucket.
 func TestTx_Bucket(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
 			t.Fatal(err)
@@ -204,8 +208,7 @@ func TestTx_Bucket(t *testing.T) {
 
 // Ensure that a Tx retrieving a non-existent key returns nil.
 func TestTx_Get_NotFound(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -226,8 +229,7 @@ func TestTx_Get_NotFound(t *testing.T) {
 
 // Ensure that a bucket can be created and retrieved.
 func TestTx_CreateBucket(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Create a bucket.
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -255,8 +257,7 @@ func TestTx_CreateBucket(t *testing.T) {
 
 // Ensure that a bucket can be created if it doesn't already exist.
 func TestTx_CreateBucketIfNotExists(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		// Create bucket.
 		if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
@@ -290,8 +291,7 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) {
 
 // Ensure transaction returns an error if creating an unnamed bucket.
 func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired {
 			t.Fatalf("unexpected error: %s", err)
@@ -309,8 +309,7 @@ func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) {
 
 // Ensure that a bucket cannot be created twice.
 func TestTx_CreateBucket_ErrBucketExists(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Create a bucket.
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -335,8 +334,7 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) {
 
 // Ensure that a bucket is created with a non-blank name.
 func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired {
 			t.Fatalf("unexpected error: %s", err)
@@ -349,8 +347,7 @@ func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) {
 
 // Ensure that a bucket can be deleted.
 func TestTx_DeleteBucket(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	// Create a bucket and add a value.
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -396,8 +393,7 @@ func TestTx_DeleteBucket(t *testing.T) {
 
 // Ensure that deleting a bucket on a closed transaction returns an error.
 func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	tx, err := db.Begin(true)
 	if err != nil {
 		t.Fatal(err)
@@ -412,8 +408,7 @@ func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) {
 
 // Ensure that deleting a bucket with a read-only transaction returns an error.
 func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.View(func(tx *bolt.Tx) error {
 		if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable {
 			t.Fatalf("unexpected error: %s", err)
@@ -426,8 +421,7 @@ func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
 
 // Ensure that nothing happens when deleting a bucket that doesn't exist.
 func TestTx_DeleteBucket_NotFound(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound {
 			t.Fatalf("unexpected error: %s", err)
@@ -441,8 +435,7 @@ func TestTx_DeleteBucket_NotFound(t *testing.T) {
 // Ensure that no error is returned when a tx.ForEach function does not return
 // an error.
 func TestTx_ForEach_NoError(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -465,8 +458,7 @@ func TestTx_ForEach_NoError(t *testing.T) {
 
 // Ensure that an error is returned when a tx.ForEach function returns an error.
 func TestTx_ForEach_WithError(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -490,8 +482,7 @@ func TestTx_ForEach_WithError(t *testing.T) {
 
 // Ensure that Tx commit handlers are called after a transaction successfully commits.
 func TestTx_OnCommit(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var x int
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -510,8 +501,7 @@ func TestTx_OnCommit(t *testing.T) {
 
 // Ensure that Tx commit handlers are NOT called after a transaction rolls back.
 func TestTx_OnCommit_Rollback(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	var x int
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -530,8 +520,7 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
 
 // Ensure that the database can be copied to a file path.
 func TestTx_CopyFile(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 
 	path := tempfile()
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -601,8 +590,7 @@ func (f *failWriter) Write(p []byte) (n int, err error) {
 
 // Ensure that Copy handles write errors right.
 func TestTx_CopyFile_Error_Meta(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -628,8 +616,7 @@ func TestTx_CopyFile_Error_Meta(t *testing.T) {
 
 // Ensure that Copy handles write errors right.
 func TestTx_CopyFile_Error_Normal(t *testing.T) {
-	db := MustOpenDB()
-	defer db.MustClose()
+	db := btesting.MustCreateDB(t)
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
 		if err != nil {
@@ -713,8 +700,7 @@ func TestTx_releaseRange(t *testing.T) {
 	// Set initial mmap size well beyond the limit we will hit in this
 	// test, since we are testing with long running read transactions
 	// and will deadlock if db.grow is triggered.
-	db := MustOpenWithOption(&bolt.Options{InitialMmapSize: os.Getpagesize() * 100})
-	defer db.MustClose()
+	db := btesting.MustCreateDBWithOption(t, &bolt.Options{InitialMmapSize: os.Getpagesize() * 100})
 
 	bucket := "bucket"
 
@@ -921,3 +907,149 @@ func ExampleTx_CopyFile() {
 	// Output:
 	// The value for 'foo' in the clone is: bar
 }
+
+func TestTxStats_GetAndIncAtomically(t *testing.T) {
+	var stats bolt.TxStats
+
+	stats.IncPageCount(1)
+	assert.Equal(t, int64(1), stats.GetPageCount())
+
+	stats.IncPageAlloc(2)
+	assert.Equal(t, int64(2), stats.GetPageAlloc())
+
+	stats.IncCursorCount(3)
+	assert.Equal(t, int64(3), stats.GetCursorCount())
+
+	stats.IncNodeCount(100)
+	assert.Equal(t, int64(100), stats.GetNodeCount())
+
+	stats.IncNodeDeref(101)
+	assert.Equal(t, int64(101), stats.GetNodeDeref())
+
+	stats.IncRebalance(1000)
+	assert.Equal(t, int64(1000), stats.GetRebalance())
+
+	stats.IncRebalanceTime(1001 * time.Second)
+	assert.Equal(t, 1001*time.Second, stats.GetRebalanceTime())
+
+	stats.IncSplit(10000)
+	assert.Equal(t, int64(10000), stats.GetSplit())
+
+	stats.IncSpill(10001)
+	assert.Equal(t, int64(10001), stats.GetSpill())
+
+	stats.IncSpillTime(10001 * time.Second)
+	assert.Equal(t, 10001*time.Second, stats.GetSpillTime())
+
+	stats.IncWrite(100000)
+	assert.Equal(t, int64(100000), stats.GetWrite())
+
+	stats.IncWriteTime(100001 * time.Second)
+	assert.Equal(t, 100001*time.Second, stats.GetWriteTime())
+
+	assert.Equal(t,
+		bolt.TxStats{
+			PageCount:     1,
+			PageAlloc:     2,
+			CursorCount:   3,
+			NodeCount:     100,
+			NodeDeref:     101,
+			Rebalance:     1000,
+			RebalanceTime: 1001 * time.Second,
+			Split:         10000,
+			Spill:         10001,
+			SpillTime:     10001 * time.Second,
+			Write:         100000,
+			WriteTime:     100001 * time.Second,
+		},
+		stats,
+	)
+}
+
+func TestTxStats_Sub(t *testing.T) {
+	statsA := bolt.TxStats{
+		PageCount:     1,
+		PageAlloc:     2,
+		CursorCount:   3,
+		NodeCount:     100,
+		NodeDeref:     101,
+		Rebalance:     1000,
+		RebalanceTime: 1001 * time.Second,
+		Split:         10000,
+		Spill:         10001,
+		SpillTime:     10001 * time.Second,
+		Write:         100000,
+		WriteTime:     100001 * time.Second,
+	}
+
+	statsB := bolt.TxStats{
+		PageCount:     2,
+		PageAlloc:     3,
+		CursorCount:   4,
+		NodeCount:     101,
+		NodeDeref:     102,
+		Rebalance:     1001,
+		RebalanceTime: 1002 * time.Second,
+		Split:         11001,
+		Spill:         11002,
+		SpillTime:     11002 * time.Second,
+		Write:         110001,
+		WriteTime:     110010 * time.Second,
+	}
+
+	diff := statsB.Sub(&statsA)
+	assert.Equal(t, int64(1), diff.GetPageCount())
+	assert.Equal(t, int64(1), diff.GetPageAlloc())
+	assert.Equal(t, int64(1), diff.GetCursorCount())
+	assert.Equal(t, int64(1), diff.GetNodeCount())
+	assert.Equal(t, int64(1), diff.GetNodeDeref())
+	assert.Equal(t, int64(1), diff.GetRebalance())
+	assert.Equal(t, time.Second, diff.GetRebalanceTime())
+	assert.Equal(t, int64(1001), diff.GetSplit())
+	assert.Equal(t, int64(1001), diff.GetSpill())
+	assert.Equal(t, 1001*time.Second, diff.GetSpillTime())
+	assert.Equal(t, int64(10001), diff.GetWrite())
+	assert.Equal(t, 10009*time.Second, diff.GetWriteTime())
+}
+
+// TestTx_TruncateBeforeWrite ensures the file is truncated (grown) ahead of writes, whether or not we sync the freelist.
+func TestTx_TruncateBeforeWrite(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		return
+	}
+	for _, isSyncFreelist := range []bool{false, true} {
+		t.Run(fmt.Sprintf("isSyncFreelist:%v", isSyncFreelist), func(t *testing.T) {
+			// Open the database.
+			db := btesting.MustCreateDBWithOption(t, &bolt.Options{
+				NoFreelistSync: isSyncFreelist,
+			})
+
+			bigvalue := make([]byte, db.AllocSize/100)
+			count := 0
+			for {
+				count++
+				tx, err := db.Begin(true)
+				require.NoError(t, err)
+				b, err := tx.CreateBucketIfNotExists([]byte("bucket"))
+				require.NoError(t, err)
+				err = b.Put([]byte{byte(count)}, bigvalue)
+				require.NoError(t, err)
+				err = tx.Commit()
+				require.NoError(t, err)
+
+				size := fileSize(db.Path())
+
+				if size > int64(db.AllocSize) && size < int64(db.AllocSize)*2 {
+				// db.grow expands the file aggressively: it doubles the size while the file is smaller
+				// than db.AllocSize, and grows in steps of db.AllocSize once larger, which lets us test
+				// whether db.grow has run.
+					t.Fatalf("db.grow doesn't run when file size changes. file size: %d", size)
+				}
+				if size > int64(db.AllocSize) {
+					break
+				}
+			}
+			db.MustClose()
+			db.MustDeleteFile()
+		})
+	}
+}
diff --git a/unix_test.go b/unix_test.go
index 267eab1..8924abf 100644
--- a/unix_test.go
+++ b/unix_test.go
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package bbolt_test
@@ -6,16 +7,17 @@ import (
 	"fmt"
 	"testing"
 
-	bolt "go.etcd.io/bbolt"
 	"golang.org/x/sys/unix"
+
+	bolt "go.etcd.io/bbolt"
+	"go.etcd.io/bbolt/internal/btesting"
 )
 
 func TestMlock_DbOpen(t *testing.T) {
 	// 32KB
 	skipOnMemlockLimitBelow(t, 32*1024)
 
-	db := MustOpenWithOption(&bolt.Options{Mlock: true})
-	defer db.MustClose()
+	btesting.MustCreateDBWithOption(t, &bolt.Options{Mlock: true})
 }
 
 // Test change between "empty" (16KB) and "non-empty" db
@@ -23,8 +25,7 @@ func TestMlock_DbCanGrow_Small(t *testing.T) {
 	// 32KB
 	skipOnMemlockLimitBelow(t, 32*1024)
 
-	db := MustOpenWithOption(&bolt.Options{Mlock: true})
-	defer db.MustClose()
+	db := btesting.MustCreateDBWithOption(t, &bolt.Options{Mlock: true})
 
 	if err := db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucketIfNotExists([]byte("bucket"))
@@ -57,25 +58,24 @@ func TestMlock_DbCanGrow_Big(t *testing.T) {
 	chunksBefore := 64
 	chunksAfter := 64
 
-	db := MustOpenWithOption(&bolt.Options{Mlock: true})
-	defer db.MustClose()
+	db := btesting.MustCreateDBWithOption(t, &bolt.Options{Mlock: true})
 
 	for chunk := 0; chunk < chunksBefore; chunk++ {
 		insertChunk(t, db, chunk)
 	}
-	dbSize := fileSize(db.f)
+	dbSize := fileSize(db.Path())
 
 	for chunk := 0; chunk < chunksAfter; chunk++ {
 		insertChunk(t, db, chunksBefore+chunk)
 	}
-	newDbSize := fileSize(db.f)
+	newDbSize := fileSize(db.Path())
 
 	if newDbSize <= dbSize {
 		t.Errorf("db didn't grow: %v <= %v", newDbSize, dbSize)
 	}
 }
 
-func insertChunk(t *testing.T, db *DB, chunkId int) {
+func insertChunk(t *testing.T, db *btesting.DB, chunkId int) {
 	chunkSize := 1024
 
 	if err := db.Update(func(tx *bolt.Tx) error {
@@ -107,10 +107,10 @@ func skipOnMemlockLimitBelow(t *testing.T, memlockLimitRequest uint64) {
 	}
 
 	if info.Cur < memlockLimitRequest {
-		t.Skip(fmt.Sprintf(
-			"skipping as RLIMIT_MEMLOCK is unsufficient: %v < %v",
+		t.Skipf(
+			"skipping as RLIMIT_MEMLOCK is insufficient: %v < %v",
 			info.Cur,
 			memlockLimitRequest,
-		))
+		)
 	}
 }
