diff --git a/README.md b/README.md
index 5203b92..79f3851 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,8 @@
-queue
+Queue
 =====
-Fast golang queue using ring-buffer
+A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is in part because it is *not* thread-safe.
 
diff --git a/queue.go b/queue.go
new file mode 100644
index 0000000..56218bb
--- /dev/null
+++ b/queue.go
@@ -0,0 +1,73 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for two additional reasons: it is *not* thread-safe, and it
+intentionally does not follow Go best practices regarding errors - if you make a mistake with this
+queue (such as trying to remove an element from an empty queue) then who knows what will happen.
+*/
+package queue
+
+const minQueueLen = 16
+
+type Queue struct {
+	buf               []interface{}
+	head, tail, count int
+}
+
+// New constructs and returns a new Queue
+func New() *Queue {
+	return &Queue{buf: make([]interface{}, minQueueLen)}
+}
+
+// Length returns the number of elements currently stored in the queue
+func (q *Queue) Length() int {
+	return q.count
+}
+
+// resize moves the queue into a new buffer sized at twice the current element
+// count, linearizing any wrapped contents so that head is 0 and tail is count.
+func (q *Queue) resize() {
+	newBuf := make([]interface{}, q.count*2)
+
+	if q.tail > q.head {
+		copy(newBuf, q.buf[q.head:q.tail])
+	} else {
+		copy(newBuf, q.buf[q.head:len(q.buf)])
+		copy(newBuf[len(q.buf)-q.head:], q.buf[:q.tail])
+	}
+
+	q.head = 0
+	q.tail = q.count
+	q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue
+func (q *Queue) Add(elem interface{}) {
+	if q.count == len(q.buf) {
+		q.resize()
+	}
+
+	q.buf[q.tail] = elem
+	q.tail = (q.tail + 1) % len(q.buf)
+	q.count++
+}
+
+// Peek returns the element at the head of the queue. If the queue is empty (Length == 0),
+// Peek does not panic; it simply returns garbage.
+func (q *Queue) Peek() interface{} {
+	return q.buf[q.head]
+}
+
+// Remove removes the element from the front of the queue. If you actually want the element,
+// call Peek first. If the queue is empty (Length == 0), Remove will put the queue in a bad
+// state and all further operations will be undefined.
+func (q *Queue) Remove() {
+	q.buf[q.head] = nil
+	q.head = (q.head + 1) % len(q.buf)
+	q.count--
+	if len(q.buf) > minQueueLen && q.count*4 <= len(q.buf) {
+		q.resize()
+	}
+}
diff --git a/queue_test.go b/queue_test.go
new file mode 100644
index 0000000..a8fb824
--- /dev/null
+++ b/queue_test.go
@@ -0,0 +1,26 @@
+package queue
+
+import "testing"
+
+// General warning: Go's benchmark utility (go test -bench .) increases the number of
+// iterations until the benchmarks take a reasonable amount of time to run; memory usage
+// is *NOT* considered. On my machine, these benchmarks hit ~1GB before they've had
+// enough, but if you have less than that available and start swapping, then all bets are off.
+
+func BenchmarkQueueSerial(b *testing.B) {
+	q := New()
+	for i := 0; i < b.N; i++ {
+		q.Add(nil)
+	}
+	for i := 0; i < b.N; i++ {
+		q.Remove()
+	}
+}
+
+func BenchmarkQueueTickTock(b *testing.B) {
+	q := New()
+	for i := 0; i < b.N; i++ {
+		q.Add(nil)
+		q.Remove()
+	}
+}
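
For context on how the API in this diff fits together, here is a minimal usage sketch (not part of the patch; the import path is hypothetical, substitute the repository's real path):

```go
package main

import (
	"fmt"

	"example.com/queue" // hypothetical import path; use the repository's actual path
)

func main() {
	q := queue.New()

	// Add appends to the tail of the ring buffer, growing it as needed.
	for i := 1; i <= 3; i++ {
		q.Add(i)
	}
	fmt.Println(q.Length()) // 3

	// Per the doc comments, Peek and Remove are only safe while Length() > 0:
	// Peek reads the head without removing it, Remove discards it.
	for q.Length() > 0 {
		fmt.Println(q.Peek()) // 1, then 2, then 3
		q.Remove()
	}
}
```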
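
The one subtle piece of queue.go is the two-copy "unwrap" in resize(): when the live elements wrap past the end of the buffer (tail <= head), they must be copied out in two segments to linearize them. Below is a standalone sketch of that case, with the same copy arithmetic inlined on a toy buffer (illustrative only, not part of the patch):

```go
package main

import "fmt"

func main() {
	// An 8-slot ring holding 4 elements that wrap around the end:
	// head=6 and tail=2, so "a","b" sit at indices 6,7 and "c","d" at 0,1.
	buf := []interface{}{"c", "d", nil, nil, nil, nil, "a", "b"}
	head, tail, count := 6, 2, 4

	// Same arithmetic as resize(): a new buffer of 2*count, filled in two copies.
	newBuf := make([]interface{}, count*2)
	copy(newBuf, buf[head:len(buf)])         // oldest segment: "a", "b"
	copy(newBuf[len(buf)-head:], buf[:tail]) // wrapped segment: "c", "d"

	fmt.Println(newBuf[:count]) // [a b c d] -- head becomes 0, tail becomes count
}
```

After the two copies the contents are contiguous, which is why resize() can simply set head to 0 and tail to count.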