This repository was archived by the owner on Aug 30, 2019. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathbench.go
More file actions
170 lines (143 loc) · 4.08 KB
/
bench.go
File metadata and controls
170 lines (143 loc) · 4.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
// Package bench makes it easy to write benchmarking apps.
package bench
import (
	"bytes"
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/codahale/hdrhistogram"
	"golang.org/x/net/context"
)
// benchmarkFn is the user-supplied function invoked once per benchmark
// iteration. It receives the per-goroutine *Context so it can record
// timers and counters for later aggregation.
type benchmarkFn func(t *Context)
// Bench holds data to be used for benchmarking.
// It can run aggressively, repeatedly invoking the benchmarking function
// as fast as possible, or it can perform a uniform bench in which the
// benchmarking function is invoked at a fixed overall rate (rps).
type Bench struct {
	// values configured by the user
	concurrentRuns int           // number of worker goroutines
	duration       time.Duration // wall-clock length of the whole run
	fn             benchmarkFn   // function under benchmark
	rps            int           // target calls/sec; <= 0 means unthrottled
	// context for individual runs, one *Context per worker goroutine
	runContexts []*Context
	// aggregated metrics, merged from all runContexts after the run completes
	timers   map[string]*hdrhistogram.Histogram
	counters map[string]int64
	// total calls made to each task (sum of per-context iteration counts)
	calls int64
	// time taken for the full run
	timeTaken time.Duration
}
// NewBench creates a new instance of Bench that will invoke fn from
// `concurrency` goroutines for `duration`. If rps > 0 the invocations
// are rate-limited via a token stream; otherwise they run continuously.
func NewBench(concurrency int, duration time.Duration, rps int, fn benchmarkFn) *Bench {
	// Initialize the maps and pre-size the context slice in the literal
	// itself: the final length is known, so avoid repeated append growth.
	b := &Bench{
		concurrentRuns: concurrency,
		duration:       duration,
		fn:             fn,
		rps:            rps,
		timers:         make(map[string]*hdrhistogram.Histogram),
		counters:       make(map[string]int64),
		runContexts:    make([]*Context, 0, concurrency),
	}
	// One run context per worker goroutine; IDs start at 1.
	for i := 0; i < concurrency; i++ {
		b.runContexts = append(b.runContexts, newContext(i+1))
	}
	return b
}
// Run executes the benchmark: it starts concurrentRuns goroutines that
// invoke the benchmarking function until the configured duration elapses,
// then aggregates the per-goroutine metrics. Calling Run more than once
// is a no-op.
func (b *Bench) Run() {
	// Only run it once: calls is non-zero after a completed run.
	if b.calls != 0 {
		return
	}
	var wg sync.WaitGroup
	var ts *TokenStream
	// FIX: the cancel function was previously discarded (ctx, _ := ...).
	// go vet (lostcancel) flags this; failing to call it leaks the
	// resources associated with the timeout context.
	ctx, cancel := context.WithTimeout(context.Background(), b.duration)
	defer cancel()
	if b.rps > 0 {
		ts = NewTokenStream(b.rps)
		defer ts.Stop()
	}
	// start the runs
	start := time.Now()
	wg.Add(b.concurrentRuns) // hoisted out of the per-branch duplication
	for i := 0; i < b.concurrentRuns; i++ {
		if b.rps > 0 {
			go onToken(ctx, b.runContexts[i], &wg, ts, b.fn)
		} else {
			go continuous(ctx, b.runContexts[i], &wg, b.fn)
		}
	}
	wg.Wait()
	b.timeTaken = time.Since(start)
	b.aggregate()
}
// aggregate merges the per-goroutine run contexts into the Bench-level
// timers, counters, and total call count. The set of metric names is
// taken from the first run context; all contexts record the same names.
func (b *Bench) aggregate() {
	first := b.runContexts[0]
	// Merge timer histograms across every worker.
	for name := range first.timers {
		merged := hdrhistogram.New(min, max, precision)
		for _, rc := range b.runContexts {
			merged.Merge(rc.timers[name])
		}
		b.timers[name] = merged
	}
	// Sum counters across every worker.
	for name := range first.counters {
		var total int64
		for _, rc := range b.runContexts {
			total += rc.counters[name]
		}
		b.counters[name] += total
	}
	// Total iterations executed across all workers.
	for _, rc := range b.runContexts {
		b.calls += rc.Iteration
	}
}
// String converts the output of the bench into a printable form: a
// header line with the run parameters, then percentile breakdowns for
// each timer and the final value of each counter.
func (b *Bench) String() string {
	prefix := " "
	var buf bytes.Buffer
	percentiles := []float64{5, 50, 99.9, 100}
	if b.rps <= 0 {
		fmt.Fprintf(&buf, "Duration: %2.2fs, Concurrency: %d, Total runs: %d\n", b.timeTaken.Seconds(), b.concurrentRuns, b.calls)
	} else {
		fmt.Fprintf(&buf, "Rate: %d calls/sec, Duration: %2.2fs, Concurrency: %d, Total runs: %d\n", b.rps, b.timeTaken.Seconds(), b.concurrentRuns, b.calls)
	}
	// FIX: map iteration order is randomized in Go, so the report used to
	// print timers/counters in a different order on every call. Sort the
	// names for deterministic, diffable output.
	timerNames := make([]string, 0, len(b.timers))
	for n := range b.timers {
		timerNames = append(timerNames, n)
	}
	sort.Strings(timerNames)
	for _, n := range timerNames {
		h := b.timers[n]
		fmt.Fprintf(&buf, "%s>>Timer: %s \n", prefix, n)
		for _, p := range percentiles {
			// Histogram values are recorded in nanoseconds; report ms.
			fmt.Fprintf(&buf, "%s%s%2.1fth percentile: %.2fms\n", prefix, prefix, p, float64(h.ValueAtQuantile(p))/1000000)
		}
	}
	counterNames := make([]string, 0, len(b.counters))
	for n := range b.counters {
		counterNames = append(counterNames, n)
	}
	sort.Strings(counterNames)
	for _, n := range counterNames {
		fmt.Fprintf(&buf, "%s>>Counter: %s\n", prefix, n)
		fmt.Fprintf(&buf, "%s%sValue: %d \n", prefix, prefix, b.counters[n])
	}
	return buf.String()
}
// continuous invokes the benchmarking function in a tight loop until the
// context is cancelled. Unlike onToken, it does not read from a token
// stream, avoiding the overhead of creating and draining a channel.
func continuous(ctx context.Context, runContext *Context, wg *sync.WaitGroup, fn benchmarkFn) {
	defer wg.Done()
	var iteration int64
	for {
		// Non-blocking cancellation check before each invocation.
		select {
		case <-ctx.Done():
			return
		default:
		}
		iteration++
		runContext.Iteration = iteration
		fn(runContext)
	}
}
// onToken invokes the benchmarking function once for every token received
// from the token stream, stopping when the context is cancelled.
func onToken(ctx context.Context, runContext *Context, wg *sync.WaitGroup, ts *TokenStream, fn benchmarkFn) {
	defer wg.Done()
	var iteration int64
	for {
		select {
		case <-ctx.Done():
			return
		case <-ts.S:
			// A token arrived: run one more iteration.
			iteration++
			runContext.Iteration = iteration
			fn(runContext)
		}
	}
}