New Upstream Release - golang-github-peterbourgon-diskv

Ready changes

Summary

Merged new upstream version: 3.0.1 (was: 3.0.0).

Resulting package

Built on 2023-04-30T05:41 (took 11m53s)

The resulting binary package can be installed (if you have the apt repository enabled) by running:

apt install -t fresh-releases golang-github-peterbourgon-diskv-dev
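
For orientation, here is a minimal, hypothetical program using the library under its new v3 import path. The store options and the flat transform mirror the README and test changes shown in the diff below; the base path, key, and value are illustrative only.

package main

import (
    "fmt"

    "github.com/peterbourgon/diskv/v3"
)

func main() {
    // Flat transform: keep every data file directly under the base path.
    d := diskv.New(diskv.Options{
        BasePath:     "example-data",
        Transform:    func(s string) []string { return []string{} },
        CacheSizeMax: 1024 * 1024, // 1 MB in-memory cache
    })
    defer d.EraseAll() // clean up the example store; error deliberately ignored

    if err := d.Write("alpha", []byte{'a', 'b', 'c'}); err != nil {
        panic(err)
    }
    value, err := d.Read("alpha")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", value)
}

With the flat transform, keys written this way land as plain files under the base path, the same layout the new TestIssue40 test in the diff relies on.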

Diff

diff --git a/README.md b/README.md
index c6581e0..2a2ed23 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
 Then,
 
 ```bash
-$ go get github.com/peterbourgon/diskv
+$ go get github.com/peterbourgon/diskv/v3
 ```
 
 [3]: http://golang.org
@@ -33,7 +33,7 @@ package main
 
 import (
 	"fmt"
-	"github.com/peterbourgon/diskv"
+	"github.com/peterbourgon/diskv/v3"
 )
 
 func main() {
diff --git a/debian/changelog b/debian/changelog
index 08f81c3..d48afa1 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+golang-github-peterbourgon-diskv (3.0.1-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sun, 30 Apr 2023 05:30:35 -0000
+
 golang-github-peterbourgon-diskv (3.0.0-2) unstable; urgency=medium
 
   [ Debian Janitor ]
diff --git a/diskv.go b/diskv.go
index 0716da3..9f07b85 100644
--- a/diskv.go
+++ b/diskv.go
@@ -208,7 +208,7 @@ func (d *Diskv) createKeyFileWithLock(pathKey *PathKey) (*os.File, error) {
 			return nil, fmt.Errorf("temp file: %s", err)
 		}
 
-		if err := f.Chmod(d.FilePerm); err != nil {
+		if err := os.Chmod(f.Name(), d.FilePerm); err != nil {
 			f.Close()           // error deliberately ignored
 			os.Remove(f.Name()) // error deliberately ignored
 			return nil, fmt.Errorf("chmod: %s", err)
@@ -630,6 +630,9 @@ func (d *Diskv) completeFilename(pathKey *PathKey) string {
 // cacheWithLock attempts to cache the given key-value pair in the store's
 // cache. It can fail if the value is larger than the cache's maximum size.
 func (d *Diskv) cacheWithLock(key string, val []byte) error {
+	// If the key already exists, delete it.
+	d.bustCacheWithLock(key)
+
 	valueSize := uint64(len(val))
 	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
 		return fmt.Errorf("%s; not caching", err)
diff --git a/issues_test.go b/issues_test.go
index 914269c..5bf9e35 100644
--- a/issues_test.go
+++ b/issues_test.go
@@ -3,6 +3,7 @@ package diskv
 import (
 	"bytes"
 	"io/ioutil"
+	"math/rand"
 	"sync"
 	"testing"
 	"time"
@@ -118,3 +119,73 @@ func TestIssue17(t *testing.T) {
 	close(start)
 	wg.Wait()
 }
+
+// Test for issue #40, where acquiring two stream readers on the same k/v pair
+// caused the value to be written into the cache twice, messing up the
+// size calculations.
+func TestIssue40(t *testing.T) {
+	var (
+		basePath = "test-data"
+	)
+	// Simplest transform function: put all the data files into the base dir.
+	flatTransform := func(s string) []string { return []string{} }
+
+	// Initialize a new diskv store, rooted at "my-data-dir",
+	// with a 100 byte cache.
+	d := New(Options{
+		BasePath:     basePath,
+		Transform:    flatTransform,
+		CacheSizeMax: 100,
+	})
+
+	defer d.EraseAll()
+
+	// Write a 50 byte value, filling the cache half-way
+	k1 := "key1"
+	d1 := make([]byte, 50)
+	rand.Read(d1)
+	d.Write(k1, d1)
+
+	// Get *two* read streams on it. Because the key is not yet in the cache,
+	// and will not be in the cache until a stream is fully read, both
+	// readers use the 'siphon' object, which always writes to the cache
+	// after reading.
+	s1, err := d.ReadStream(k1, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	s2, err := d.ReadStream(k1, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// When each stream is drained, the underlying siphon will write
+	// the value into the cache's map and increment the cache size.
+	// This means we will have 1 entry in the cache map
+	// ("key1" mapping to a 50 byte slice) but the cache size will be 100,
+	// because the buggy code does not check if an entry already exists
+	// in the map.
+	// s1 drains:
+	//   cache[k] = v
+	//   cacheSize += len(v)
+	// s2 drains:
+	//   cache[k] = v /* overwrites existing */
+	//   cacheSize += len(v) /* blindly adds to the cache size */
+	ioutil.ReadAll(s1)
+	ioutil.ReadAll(s2)
+
+	// Now write a different k/v pair, with a 60 byte array.
+	k2 := "key2"
+	d2 := make([]byte, 60)
+	rand.Read(d2)
+	d.Write(k2, d2)
+	// The act of reading the k/v pair back out causes it to be cached.
+	// Because the cache is only 100 bytes, it needs to delete existing
+	// entries to make room.
+	// If the cache is buggy, it will delete the single 50-byte entry
+	// from the cache map & decrement cacheSize by 50... but because
+	// cacheSize was improperly incremented twice earlier, this will
+	// leave us with no entries in the cacheMap but with cacheSize==50.
+	// Since CacheSizeMax-cacheSize (100-50) is less than 60, there
+	// is no room in the cache for this entry and it panics.
+	d.Read(k2)
+}
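
The cacheWithLock change in diskv.go is the substantive fix in this release: any existing cache entry for the key is busted before the value is re-cached, so the cache's byte count is not incremented twice for the same key, which is exactly the failure TestIssue40 above exercises. The following standalone sketch, using hypothetical names and none of diskv's internals, shows the same delete-before-insert accounting rule.

package main

import "fmt"

type byteCache struct {
    max     uint64
    size    uint64
    entries map[string][]byte
}

func (c *byteCache) set(key string, val []byte) error {
    // Drop any existing entry first, mirroring the bustCacheWithLock call
    // added in cacheWithLock; otherwise size would double-count the key.
    if old, ok := c.entries[key]; ok {
        c.size -= uint64(len(old))
        delete(c.entries, key)
    }
    if c.size+uint64(len(val)) > c.max {
        return fmt.Errorf("value of %d bytes does not fit (used %d of %d)", len(val), c.size, c.max)
    }
    c.entries[key] = val
    c.size += uint64(len(val))
    return nil
}

func main() {
    c := &byteCache{max: 100, entries: map[string][]byte{}}
    v := make([]byte, 50)
    c.set("key1", v) // size == 50
    c.set("key1", v) // still 50: the old entry is removed before re-adding
    fmt.Println(c.size, len(c.entries))
}

In the patched code, d.bustCacheWithLock(key) plays the role of the delete step, so the subsequent ensureCacheSpaceWithLock call works from an accurate size.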

Debdiff

File lists identical (after any substitutions)

No differences were encountered in the control files
