Import upstream version 1.0.1, md5 79c0cec91a4cae056732d079c428ca56
Debian Janitor
4 years ago
0 | github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= | |
1 | github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= |
0 | language: go | |
1 | ||
2 | go: | |
3 | - 1.6 | |
4 | ||
5 | script: | |
6 | # build test for supported platforms | |
7 | - GOOS=linux go build | |
8 | - GOOS=darwin go build | |
9 | - GOOS=freebsd go build | |
10 | - GOOS=windows go build | |
11 | - GOARCH=386 go build | |
12 | ||
13 | # run tests on a standard platform | |
14 | - go test -v ./... |
0 | The MIT License (MIT) | |
1 | ||
2 | Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia | |
3 | ||
4 | Permission is hereby granted, free of charge, to any person obtaining a copy | |
5 | of this software and associated documentation files (the "Software"), to deal | |
6 | in the Software without restriction, including without limitation the rights | |
7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
8 | copies of the Software, and to permit persons to whom the Software is | |
9 | furnished to do so, subject to the following conditions: | |
10 | ||
11 | The above copyright notice and this permission notice shall be included in all | |
12 | copies or substantial portions of the Software. | |
13 | ||
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
20 | SOFTWARE. | |
21 |
0 | [![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool) | |
1 | [![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool) | |
2 | [![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool) | |
3 | ||
4 | # bytebufferpool | |
5 | ||
6 | An implementation of a pool of byte buffers with anti-memory-waste protection. | |
7 | ||
The pool may waste a limited amount of memory due to fragmentation.
This amount equals the maximum total size of the byte buffers
in concurrent use.
11 | ||
12 | # Benchmark results | |
Currently bytebufferpool is the fastest and most effective buffer pool written in Go.
14 | ||
15 | You can find results [here](https://omgnull.github.io/go-benchmark/buffer/). | |
16 | ||
17 | # bytebufferpool users | |
18 | ||
19 | * [fasthttp](https://github.com/valyala/fasthttp) | |
20 | * [quicktemplate](https://github.com/valyala/quicktemplate) |
0 | package bytebufferpool | |
1 | ||
2 | import "io" | |
3 | ||
// ByteBuffer provides a byte buffer, which can be used for minimizing
// memory allocations.
//
// ByteBuffer may be used with functions appending data to the given []byte
// slice. See example code for details.
//
// Use Get for obtaining an empty byte buffer.
type ByteBuffer struct {

	// B is a byte buffer to use in append-like workloads.
	// See example code for details.
	//
	// B is exported so callers can append to it directly;
	// the ByteBuffer methods below all operate on this slice.
	B []byte
}
17 | ||
// Len returns the size of the byte buffer, i.e. the number of bytes
// accumulated in b.B.
func (b *ByteBuffer) Len() int {
	return len(b.B)
}
22 | ||
// ReadFrom implements io.ReaderFrom.
//
// The function appends all the data read from r to b.
// It returns the number of bytes read during this call (not the total
// buffer length); io.EOF is reported as a nil error.
//
// NOTE(review): a Reader that keeps returning (0, nil) will spin here
// forever, since the loop only exits on a non-nil error.
func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
	p := b.B
	nStart := int64(len(p))
	nMax := int64(cap(p))
	n := nStart
	if nMax == 0 {
		// Empty buffer: start with a small initial allocation.
		nMax = 64
		p = make([]byte, nMax)
	} else {
		// Expose the full capacity so reads can fill the spare room
		// beyond the current length.
		p = p[:nMax]
	}
	for {
		if n == nMax {
			// Out of room: double the buffer and copy the data over.
			nMax *= 2
			bNew := make([]byte, nMax)
			copy(bNew, p)
			p = bNew
		}
		nn, err := r.Read(p[n:])
		n += int64(nn)
		if err != nil {
			// Trim to the bytes actually filled before publishing.
			b.B = p[:n]
			n -= nStart // report only bytes read in this call
			if err == io.EOF {
				return n, nil
			}
			return n, err
		}
	}
}
56 | ||
57 | // WriteTo implements io.WriterTo. | |
58 | func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) { | |
59 | n, err := w.Write(b.B) | |
60 | return int64(n), err | |
61 | } | |
62 | ||
// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
//
// The returned slice aliases the buffer's storage: it is valid only
// until the buffer is modified or returned to the pool.
func (b *ByteBuffer) Bytes() []byte {
	return b.B
}
69 | ||
70 | // Write implements io.Writer - it appends p to ByteBuffer.B | |
71 | func (b *ByteBuffer) Write(p []byte) (int, error) { | |
72 | b.B = append(b.B, p...) | |
73 | return len(p), nil | |
74 | } | |
75 | ||
// WriteByte appends the byte c to the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
//
// The function always returns nil.
func (b *ByteBuffer) WriteByte(c byte) error {
	b.B = append(b.B, c)
	return nil
}
85 | ||
86 | // WriteString appends s to ByteBuffer.B. | |
87 | func (b *ByteBuffer) WriteString(s string) (int, error) { | |
88 | b.B = append(b.B, s...) | |
89 | return len(s), nil | |
90 | } | |
91 | ||
92 | // Set sets ByteBuffer.B to p. | |
93 | func (b *ByteBuffer) Set(p []byte) { | |
94 | b.B = append(b.B[:0], p...) | |
95 | } | |
96 | ||
97 | // SetString sets ByteBuffer.B to s. | |
98 | func (b *ByteBuffer) SetString(s string) { | |
99 | b.B = append(b.B[:0], s...) | |
100 | } | |
101 | ||
// String returns string representation of ByteBuffer.B.
//
// The conversion copies the bytes, so the returned string stays valid
// after the buffer is modified or returned to the pool.
func (b *ByteBuffer) String() string {
	return string(b.B)
}
106 | ||
// Reset makes ByteBuffer.B empty.
//
// The underlying capacity is kept, so subsequent appends reuse the
// already-allocated storage.
func (b *ByteBuffer) Reset() {
	b.B = b.B[:0]
}
0 | package bytebufferpool_test | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | ||
5 | "github.com/valyala/bytebufferpool" | |
6 | ) | |
7 | ||
8 | func ExampleByteBuffer() { | |
9 | bb := bytebufferpool.Get() | |
10 | ||
11 | bb.WriteString("first line\n") | |
12 | bb.Write([]byte("second line\n")) | |
13 | bb.B = append(bb.B, "third line\n"...) | |
14 | ||
15 | fmt.Printf("bytebuffer contents=%q", bb.B) | |
16 | ||
17 | // It is safe to release byte buffer now, since it is | |
18 | // no longer used. | |
19 | bytebufferpool.Put(bb) | |
20 | } |
0 | package bytebufferpool | |
1 | ||
2 | import ( | |
3 | "bytes" | |
4 | "fmt" | |
5 | "io" | |
6 | "testing" | |
7 | "time" | |
8 | ) | |
9 | ||
10 | func TestByteBufferReadFrom(t *testing.T) { | |
11 | prefix := "foobar" | |
12 | expectedS := "asadfsdafsadfasdfisdsdfa" | |
13 | prefixLen := int64(len(prefix)) | |
14 | expectedN := int64(len(expectedS)) | |
15 | ||
16 | var bb ByteBuffer | |
17 | bb.WriteString(prefix) | |
18 | ||
19 | rf := (io.ReaderFrom)(&bb) | |
20 | for i := 0; i < 20; i++ { | |
21 | r := bytes.NewBufferString(expectedS) | |
22 | n, err := rf.ReadFrom(r) | |
23 | if n != expectedN { | |
24 | t.Fatalf("unexpected n=%d. Expecting %d. iteration %d", n, expectedN, i) | |
25 | } | |
26 | if err != nil { | |
27 | t.Fatalf("unexpected error: %s", err) | |
28 | } | |
29 | bbLen := int64(bb.Len()) | |
30 | expectedLen := prefixLen + int64(i+1)*expectedN | |
31 | if bbLen != expectedLen { | |
32 | t.Fatalf("unexpected byteBuffer length: %d. Expecting %d", bbLen, expectedLen) | |
33 | } | |
34 | for j := 0; j < i; j++ { | |
35 | start := prefixLen + int64(j)*expectedN | |
36 | b := bb.B[start : start+expectedN] | |
37 | if string(b) != expectedS { | |
38 | t.Fatalf("unexpected byteBuffer contents: %q. Expecting %q", b, expectedS) | |
39 | } | |
40 | } | |
41 | } | |
42 | } | |
43 | ||
44 | func TestByteBufferWriteTo(t *testing.T) { | |
45 | expectedS := "foobarbaz" | |
46 | var bb ByteBuffer | |
47 | bb.WriteString(expectedS[:3]) | |
48 | bb.WriteString(expectedS[3:]) | |
49 | ||
50 | wt := (io.WriterTo)(&bb) | |
51 | var w bytes.Buffer | |
52 | for i := 0; i < 10; i++ { | |
53 | n, err := wt.WriteTo(&w) | |
54 | if n != int64(len(expectedS)) { | |
55 | t.Fatalf("unexpected n returned from WriteTo: %d. Expecting %d", n, len(expectedS)) | |
56 | } | |
57 | if err != nil { | |
58 | t.Fatalf("unexpected error: %s", err) | |
59 | } | |
60 | s := string(w.Bytes()) | |
61 | if s != expectedS { | |
62 | t.Fatalf("unexpected string written %q. Expecting %q", s, expectedS) | |
63 | } | |
64 | w.Reset() | |
65 | } | |
66 | } | |
67 | ||
// TestByteBufferGetPutSerial runs the Get/Put round-trip helper from a
// single goroutine.
func TestByteBufferGetPutSerial(t *testing.T) {
	testByteBufferGetPut(t)
}
71 | ||
72 | func TestByteBufferGetPutConcurrent(t *testing.T) { | |
73 | concurrency := 10 | |
74 | ch := make(chan struct{}, concurrency) | |
75 | for i := 0; i < concurrency; i++ { | |
76 | go func() { | |
77 | testByteBufferGetPut(t) | |
78 | ch <- struct{}{} | |
79 | }() | |
80 | } | |
81 | ||
82 | for i := 0; i < concurrency; i++ { | |
83 | select { | |
84 | case <-ch: | |
85 | case <-time.After(time.Second): | |
86 | t.Fatalf("timeout!") | |
87 | } | |
88 | } | |
89 | } | |
90 | ||
91 | func testByteBufferGetPut(t *testing.T) { | |
92 | for i := 0; i < 10; i++ { | |
93 | expectedS := fmt.Sprintf("num %d", i) | |
94 | b := Get() | |
95 | b.B = append(b.B, "num "...) | |
96 | b.B = append(b.B, fmt.Sprintf("%d", i)...) | |
97 | if string(b.B) != expectedS { | |
98 | t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS) | |
99 | } | |
100 | Put(b) | |
101 | } | |
102 | } | |
103 | ||
104 | func testByteBufferGetString(t *testing.T) { | |
105 | for i := 0; i < 10; i++ { | |
106 | expectedS := fmt.Sprintf("num %d", i) | |
107 | b := Get() | |
108 | b.SetString(expectedS) | |
109 | if b.String() != expectedS { | |
110 | t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS) | |
111 | } | |
112 | Put(b) | |
113 | } | |
114 | } | |
115 | ||
// TestByteBufferGetStringSerial runs the SetString/String round-trip
// helper from a single goroutine.
func TestByteBufferGetStringSerial(t *testing.T) {
	testByteBufferGetString(t)
}
119 | ||
120 | func TestByteBufferGetStringConcurrent(t *testing.T) { | |
121 | concurrency := 10 | |
122 | ch := make(chan struct{}, concurrency) | |
123 | for i := 0; i < concurrency; i++ { | |
124 | go func() { | |
125 | testByteBufferGetString(t) | |
126 | ch <- struct{}{} | |
127 | }() | |
128 | } | |
129 | ||
130 | for i := 0; i < concurrency; i++ { | |
131 | select { | |
132 | case <-ch: | |
133 | case <-time.After(time.Second): | |
134 | t.Fatalf("timeout!") | |
135 | } | |
136 | } | |
137 | } |
0 | package bytebufferpool | |
1 | ||
2 | import ( | |
3 | "bytes" | |
4 | "testing" | |
5 | ) | |
6 | ||
7 | func BenchmarkByteBufferWrite(b *testing.B) { | |
8 | s := []byte("foobarbaz") | |
9 | b.RunParallel(func(pb *testing.PB) { | |
10 | var buf ByteBuffer | |
11 | for pb.Next() { | |
12 | for i := 0; i < 100; i++ { | |
13 | buf.Write(s) | |
14 | } | |
15 | buf.Reset() | |
16 | } | |
17 | }) | |
18 | } | |
19 | ||
// BenchmarkBytesBufferWrite is the bytes.Buffer baseline for
// BenchmarkByteBufferWrite: same workload, standard-library buffer.
func BenchmarkBytesBufferWrite(b *testing.B) {
	payload := []byte("foobarbaz")
	b.RunParallel(func(pb *testing.PB) {
		var buf bytes.Buffer
		for pb.Next() {
			for n := 0; n < 100; n++ {
				buf.Write(payload)
			}
			buf.Reset()
		}
	})
}
0 | // Package bytebufferpool implements a pool of byte buffers | |
1 | // with anti-fragmentation protection. | |
2 | // | |
// The pool may waste a limited amount of memory due to fragmentation.
// This amount equals the maximum total size of the byte buffers
5 | // in concurrent use. | |
6 | package bytebufferpool |
0 | package bytebufferpool | |
1 | ||
2 | import ( | |
3 | "sort" | |
4 | "sync" | |
5 | "sync/atomic" | |
6 | ) | |
7 | ||
const (
	minBitSize = 6 // 2**6=64 is a CPU cache line size
	steps      = 20 // number of size classes tracked per Pool

	minSize = 1 << minBitSize // smallest tracked buffer size (64 bytes)
	maxSize = 1 << (minBitSize + steps - 1) // largest tracked buffer size

	// calibrateCallsThreshold is how many Put calls one size class must
	// record before the pool re-calibrates defaultSize/maxSize.
	calibrateCallsThreshold = 42000
	// maxPercentile: calibrate keeps sizes covering this fraction of
	// all recorded calls when choosing the retention cap.
	maxPercentile = 0.95
)
18 | ||
// Pool represents byte buffer pool.
//
// Distinct pools may be used for distinct types of byte buffers.
// Properly determined byte buffer types with their own pools may help reducing
// memory waste.
type Pool struct {
	calls       [steps]uint64 // per-size-class Put counters; see index()
	calibrating uint64        // CAS flag: non-zero while calibrate() runs

	defaultSize uint64 // capacity given to buffers freshly allocated by Get
	maxSize     uint64 // largest capacity kept on Put; 0 = not calibrated yet

	pool sync.Pool
}
33 | ||
// defaultPool backs the package-level Get and Put helpers.
var defaultPool Pool

// Get returns an empty byte buffer from the pool.
//
// Got byte buffer may be returned to the pool via Put call.
// This reduces the number of memory allocations required for byte buffer
// management.
func Get() *ByteBuffer { return defaultPool.Get() }
42 | ||
43 | // Get returns new byte buffer with zero length. | |
44 | // | |
45 | // The byte buffer may be returned to the pool via Put after the use | |
46 | // in order to minimize GC overhead. | |
47 | func (p *Pool) Get() *ByteBuffer { | |
48 | v := p.pool.Get() | |
49 | if v != nil { | |
50 | return v.(*ByteBuffer) | |
51 | } | |
52 | return &ByteBuffer{ | |
53 | B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)), | |
54 | } | |
55 | } | |
56 | ||
// Put returns byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races will occur.
func Put(b *ByteBuffer) { defaultPool.Put(b) }
62 | ||
63 | // Put releases byte buffer obtained via Get to the pool. | |
64 | // | |
65 | // The buffer mustn't be accessed after returning to the pool. | |
66 | func (p *Pool) Put(b *ByteBuffer) { | |
67 | idx := index(len(b.B)) | |
68 | ||
69 | if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold { | |
70 | p.calibrate() | |
71 | } | |
72 | ||
73 | maxSize := int(atomic.LoadUint64(&p.maxSize)) | |
74 | if maxSize == 0 || cap(b.B) <= maxSize { | |
75 | b.Reset() | |
76 | p.pool.Put(b) | |
77 | } | |
78 | } | |
79 | ||
// calibrate recomputes defaultSize and maxSize from the per-size-class
// Put counters, then clears those counters.
//
// Only one goroutine calibrates at a time; others bail out on the CAS.
func (p *Pool) calibrate() {
	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
		return
	}

	// Snapshot and reset the per-size-class call counters.
	a := make(callSizes, 0, steps)
	var callsSum uint64
	for i := uint64(0); i < steps; i++ {
		calls := atomic.SwapUint64(&p.calls[i], 0)
		callsSum += calls
		a = append(a, callSize{
			calls: calls,
			size:  minSize << i,
		})
	}
	// Most-called size classes first.
	sort.Sort(a)

	// The most popular size becomes the default allocation size.
	defaultSize := a[0].size
	maxSize := defaultSize

	// Walk the popular sizes until maxPercentile of all calls is
	// covered; the largest size seen becomes the retention cap.
	maxSum := uint64(float64(callsSum) * maxPercentile)
	callsSum = 0
	for i := 0; i < steps; i++ {
		if callsSum > maxSum {
			break
		}
		callsSum += a[i].calls
		size := a[i].size
		if size > maxSize {
			maxSize = size
		}
	}

	atomic.StoreUint64(&p.defaultSize, defaultSize)
	atomic.StoreUint64(&p.maxSize, maxSize)

	atomic.StoreUint64(&p.calibrating, 0)
}
118 | ||
// callSize records how many Put calls were observed for one buffer size
// class during a calibration window.
type callSize struct {
	calls uint64
	size  uint64
}

// callSizes implements sort.Interface, ordering entries by descending
// call count so the most popular sizes come first.
type callSizes []callSize

func (cs callSizes) Len() int { return len(cs) }

func (cs callSizes) Less(i, j int) bool { return cs[i].calls > cs[j].calls }

func (cs callSizes) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] }
137 | ||
138 | func index(n int) int { | |
139 | n-- | |
140 | n >>= minBitSize | |
141 | idx := 0 | |
142 | for n > 0 { | |
143 | n >>= 1 | |
144 | idx++ | |
145 | } | |
146 | if idx >= steps { | |
147 | idx = steps - 1 | |
148 | } | |
149 | return idx | |
150 | } |
0 | package bytebufferpool | |
1 | ||
2 | import ( | |
3 | "math/rand" | |
4 | "testing" | |
5 | "time" | |
6 | ) | |
7 | ||
8 | func TestIndex(t *testing.T) { | |
9 | testIndex(t, 0, 0) | |
10 | testIndex(t, 1, 0) | |
11 | ||
12 | testIndex(t, minSize-1, 0) | |
13 | testIndex(t, minSize, 0) | |
14 | testIndex(t, minSize+1, 1) | |
15 | ||
16 | testIndex(t, 2*minSize-1, 1) | |
17 | testIndex(t, 2*minSize, 1) | |
18 | testIndex(t, 2*minSize+1, 2) | |
19 | ||
20 | testIndex(t, maxSize-1, steps-1) | |
21 | testIndex(t, maxSize, steps-1) | |
22 | testIndex(t, maxSize+1, steps-1) | |
23 | } | |
24 | ||
25 | func testIndex(t *testing.T, n, expectedIdx int) { | |
26 | idx := index(n) | |
27 | if idx != expectedIdx { | |
28 | t.Fatalf("unexpected idx for n=%d: %d. Expecting %d", n, idx, expectedIdx) | |
29 | } | |
30 | } | |
31 | ||
32 | func TestPoolCalibrate(t *testing.T) { | |
33 | for i := 0; i < steps*calibrateCallsThreshold; i++ { | |
34 | n := 1004 | |
35 | if i%15 == 0 { | |
36 | n = rand.Intn(15234) | |
37 | } | |
38 | testGetPut(t, n) | |
39 | } | |
40 | } | |
41 | ||
// TestPoolVariousSizesSerial runs the various-sizes workload from a
// single goroutine.
func TestPoolVariousSizesSerial(t *testing.T) {
	testPoolVariousSizes(t)
}
45 | ||
46 | func TestPoolVariousSizesConcurrent(t *testing.T) { | |
47 | concurrency := 5 | |
48 | ch := make(chan struct{}) | |
49 | for i := 0; i < concurrency; i++ { | |
50 | go func() { | |
51 | testPoolVariousSizes(t) | |
52 | ch <- struct{}{} | |
53 | }() | |
54 | } | |
55 | for i := 0; i < concurrency; i++ { | |
56 | select { | |
57 | case <-ch: | |
58 | case <-time.After(3 * time.Second): | |
59 | t.Fatalf("timeout") | |
60 | } | |
61 | } | |
62 | } | |
63 | ||
64 | func testPoolVariousSizes(t *testing.T) { | |
65 | for i := 0; i < steps+1; i++ { | |
66 | n := (1 << uint32(i)) | |
67 | ||
68 | testGetPut(t, n) | |
69 | testGetPut(t, n+1) | |
70 | testGetPut(t, n-1) | |
71 | ||
72 | for j := 0; j < 10; j++ { | |
73 | testGetPut(t, j+n) | |
74 | } | |
75 | } | |
76 | } | |
77 | ||
78 | func testGetPut(t *testing.T, n int) { | |
79 | bb := Get() | |
80 | if len(bb.B) > 0 { | |
81 | t.Fatalf("non-empty byte buffer returned from acquire") | |
82 | } | |
83 | bb.B = allocNBytes(bb.B, n) | |
84 | Put(bb) | |
85 | } | |
86 | ||
// allocNBytes returns dst resized to exactly n bytes, growing it with
// zero bytes when n exceeds its capacity.
func allocNBytes(dst []byte, n int) []byte {
	diff := n - cap(dst)
	if diff <= 0 {
		return dst[:n]
	}
	// Extend from the full capacity so the result has length n.
	// Appending to the (possibly shorter) original length would return
	// a slice of length len(dst)+diff, not n.
	return append(dst[:cap(dst)], make([]byte, diff)...)
}