benchmark: add latency/MTU/bandwidth into testcases (#1304)

ZhouyihaiDing authored on 2017-06-23 14:22:51 -07:00; committed by Menghan Li
parent 2f3320d9d6, commit f0c566b827
6 changed files with 82 additions and 52 deletions

View File

@@ -9,9 +9,6 @@ updatedeps:
 testdeps:
 	go get -d -v -t google.golang.org/grpc/...
-benchdeps: testdeps
-	go get -d -v golang.org/x/perf/cmd/benchstat
 updatetestdeps:
 	go get -d -v -t -u -f google.golang.org/grpc/...
@@ -35,9 +32,6 @@ test: testdeps
 testrace: testdeps
 	go test -v -race -cpu 1,4 google.golang.org/grpc/...
-benchmark: benchdeps
-	go test google.golang.org/grpc/benchmark/... -benchmem -bench=. | tee /tmp/tmp.result && benchstat /tmp/tmp.result && rm /tmp/tmp.result
 clean:
 	go clean -i google.golang.org/grpc/...
@@ -55,6 +49,4 @@ coverage: testdeps
 	test \
 	testrace \
 	clean \
-	coverage \
-	benchdeps \
-	benchmark
+	coverage

View File

@@ -32,6 +32,7 @@ import (
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	testpb "google.golang.org/grpc/benchmark/grpc_testing"
+	"google.golang.org/grpc/benchmark/latency"
 	"google.golang.org/grpc/benchmark/stats"
 	"google.golang.org/grpc/grpclog"
 )
@@ -133,6 +134,9 @@ type ServerInfo struct {
 	// For "protobuf", it's ignored.
 	// For "bytebuf", it should be an int representing response size.
 	Metadata interface{}
+
+	// Network can simulate latency, bandwidth and MTU constraints.
+	Network *latency.Network
 }
 
 // StartServer starts a gRPC server serving a benchmark service according to info.
@@ -142,6 +146,10 @@ func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) {
 	if err != nil {
 		grpclog.Fatalf("Failed to listen: %v", err)
 	}
+	nw := info.Network
+	if nw != nil {
+		lis = nw.Listener(lis)
+	}
 	s := grpc.NewServer(opts...)
 	switch info.Type {
 	case "protobuf":
@@ -222,12 +230,18 @@ func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn {
 	return conn
 }
 
-func runUnary(b *testing.B, maxConcurrentCalls, reqSize, respSize int) {
+func runUnary(b *testing.B, maxConcurrentCalls, reqSize, respSize, kbps, mtu int, ltc time.Duration) {
 	s := stats.AddStats(b, 38)
+	nw := &latency.Network{Kbps: kbps, Latency: ltc, MTU: mtu}
 	b.StopTimer()
-	target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}, grpc.MaxConcurrentStreams(uint32(maxConcurrentCalls+1)))
+	target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(maxConcurrentCalls+1)))
 	defer stopper()
-	conn := NewClientConn(target, grpc.WithInsecure())
+	conn := NewClientConn(
+		target, grpc.WithInsecure(),
+		grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) {
+			return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout)
+		}),
+	)
 	tc := testpb.NewBenchmarkServiceClient(conn)
 
 	// Warm up connection.
@@ -265,12 +279,18 @@ func runUnary(b *testing.B, maxConcurrentCalls, reqSize, respSize int) {
 	conn.Close()
 }
 
-func runStream(b *testing.B, maxConcurrentCalls, reqSize, respSize int) {
+func runStream(b *testing.B, maxConcurrentCalls, reqSize, respSize, kbps, mtu int, ltc time.Duration) {
 	s := stats.AddStats(b, 38)
+	nw := &latency.Network{Kbps: kbps, Latency: ltc, MTU: mtu}
 	b.StopTimer()
-	target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}, grpc.MaxConcurrentStreams(uint32(maxConcurrentCalls+1)))
+	target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(maxConcurrentCalls+1)))
 	defer stopper()
-	conn := NewClientConn(target, grpc.WithInsecure())
+	conn := NewClientConn(
+		target, grpc.WithInsecure(),
+		grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) {
+			return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout)
+		}),
+	)
 	tc := testpb.NewBenchmarkServiceClient(conn)
 
 	// Warm up connection.
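The client side mirrors the server change: instead of dialing the network directly, runUnary and runStream hand gRPC a dialer that funnels the connection through the same latency.Network. The sketch below shows just that dial outside the benchmark harness; grpc.WithDialer and nw.TimeoutDialer(net.DialTimeout) are copied from the diff, while dialSimulated and the target address are hypothetical names used only for illustration.

package main

import (
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark/latency"
)

// dialSimulated is a hypothetical helper showing the dialer wiring used above:
// wrapping net.DialTimeout with nw.TimeoutDialer subjects every byte on the
// resulting connection to the configured Kbps/Latency/MTU.
func dialSimulated(target string, nw *latency.Network) (*grpc.ClientConn, error) {
	return grpc.Dial(
		target,
		grpc.WithInsecure(),
		grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) {
			return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout)
		}),
	)
}

func main() {
	nw := &latency.Network{Kbps: 10240, Latency: 40 * time.Millisecond, MTU: 1500}
	conn, err := dialSimulated("localhost:50051", nw) // illustrative target
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}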

View File

@@ -30,80 +30,80 @@ import (
 func BenchmarkClientStreamc1(b *testing.B) {
 	grpc.EnableTracing = true
-	runStream(b, 1, 1, 1)
+	runStream(b, 1, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientStreamc8(b *testing.B) {
 	grpc.EnableTracing = true
-	runStream(b, 8, 1, 1)
+	runStream(b, 8, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientStreamc64(b *testing.B) {
 	grpc.EnableTracing = true
-	runStream(b, 64, 1, 1)
+	runStream(b, 64, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientStreamc512(b *testing.B) {
 	grpc.EnableTracing = true
-	runStream(b, 512, 1, 1)
+	runStream(b, 512, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryc1(b *testing.B) {
 	grpc.EnableTracing = true
-	runUnary(b, 1, 1, 1)
+	runUnary(b, 1, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryc8(b *testing.B) {
 	grpc.EnableTracing = true
-	runUnary(b, 8, 1, 1)
+	runUnary(b, 8, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryc64(b *testing.B) {
 	grpc.EnableTracing = true
-	runUnary(b, 64, 1, 1)
+	runUnary(b, 64, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryc512(b *testing.B) {
 	grpc.EnableTracing = true
-	runUnary(b, 512, 1, 1)
+	runUnary(b, 512, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientStreamNoTracec1(b *testing.B) {
 	grpc.EnableTracing = false
-	runStream(b, 1, 1, 1)
+	runStream(b, 1, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientStreamNoTracec8(b *testing.B) {
 	grpc.EnableTracing = false
-	runStream(b, 8, 1, 1)
+	runStream(b, 8, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientStreamNoTracec64(b *testing.B) {
 	grpc.EnableTracing = false
-	runStream(b, 64, 1, 1)
+	runStream(b, 64, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientStreamNoTracec512(b *testing.B) {
 	grpc.EnableTracing = false
-	runStream(b, 512, 1, 1)
+	runStream(b, 512, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryNoTracec1(b *testing.B) {
 	grpc.EnableTracing = false
-	runUnary(b, 1, 1, 1)
+	runUnary(b, 1, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryNoTracec8(b *testing.B) {
 	grpc.EnableTracing = false
-	runUnary(b, 8, 1, 1)
+	runUnary(b, 8, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryNoTracec64(b *testing.B) {
 	grpc.EnableTracing = false
-	runUnary(b, 64, 1, 1)
+	runUnary(b, 64, 1, 1, 0, 0, 0)
 }
 
 func BenchmarkClientUnaryNoTracec512(b *testing.B) {
 	grpc.EnableTracing = false
-	runUnary(b, 512, 1, 1)
+	runUnary(b, 512, 1, 1, 0, 0, 0)
 }
 
 func TestMain(m *testing.M) {
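All of these fixed-name benchmarks keep their previous behaviour by passing 0 for the three new trailing arguments, i.e. unlimited bandwidth, unlimited MTU and no added delay. As a hedged illustration only (not part of this commit, and assuming the file also imports "time"), a case that turns the simulation on would look like:

// Hypothetical benchmark exercising the new arguments: 64 concurrent calls,
// 1-byte request/response, ~10 Mbit/s bandwidth, 1500-byte MTU, 40 ms latency.
func BenchmarkClientUnaryc64SimulatedNet(b *testing.B) {
	grpc.EnableTracing = false
	runUnary(b, 64, 1, 1, 10240, 1500, 40*time.Millisecond)
}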

View File

@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"os"
 	"testing"
+	"time"
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/benchmark/stats"
@@ -31,25 +32,39 @@ import (
 func BenchmarkClient(b *testing.B) {
 	maxConcurrentCalls := []int{1, 8, 64, 512}
-	reqSizeBytes := []int{1, 1024}
-	reqspSizeBytes := []int{1, 1024}
+	reqSizeBytes := []int{1, 1024, 1024 * 1024}
+	reqspSizeBytes := []int{1, 1024, 1024 * 1024}
+	kbps := []int{0, 10240} // if non-positive, infinite
+	MTU := []int{0, 10}     // if non-positive, infinite
+	// Even with latency set to 0 (no delay), the result is slower than a run without
+	// the simulation, because the latency-simulation path adds extra operations.
+	latency := []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay.
 
 	for _, enableTracing := range []bool{true, false} {
 		grpc.EnableTracing = enableTracing
 		tracing := "Tracing"
 		if !enableTracing {
 			tracing = "noTrace"
 		}
-		for _, maxC := range maxConcurrentCalls {
-			for _, reqS := range reqSizeBytes {
-				for _, respS := range reqspSizeBytes {
-					b.Run(fmt.Sprintf("Unary-%s-maxConcurrentCalls_"+
-						"%#v-reqSize_%#vB-respSize_%#vB", tracing, maxC, reqS, respS), func(b *testing.B) {
-						runUnary(b, maxC, reqS, respS)
-					})
-					b.Run(fmt.Sprintf("Stream-%s-maxConcurrentCalls_"+
-						"%#v-reqSize_%#vB-respSize_%#vB", tracing, maxC, reqS, respS), func(b *testing.B) {
-						runStream(b, maxC, reqS, respS)
-					})
+		for _, ltc := range latency {
+			for _, k := range kbps {
+				for _, mtu := range MTU {
+					for _, maxC := range maxConcurrentCalls {
+						for _, reqS := range reqSizeBytes {
+							for _, respS := range reqspSizeBytes {
+								b.Run(fmt.Sprintf("Unary-%s-kbps_%#v-MTU_%#v-maxConcurrentCalls_"+
+									"%#v-reqSize_%#vB-respSize_%#vB-latency_%s",
+									tracing, k, mtu, maxC, reqS, respS, ltc.String()), func(b *testing.B) {
+									runUnary(b, maxC, reqS, respS, k, mtu, ltc)
+								})
+								b.Run(fmt.Sprintf("Stream-%s-kbps_%#v-MTU_%#v-maxConcurrentCalls_"+
+									"%#v-reqSize_%#vB-respSize_%#vB-latency_%s",
+									tracing, k, mtu, maxC, reqS, respS, ltc.String()), func(b *testing.B) {
+									runStream(b, maxC, reqS, respS, k, mtu, ltc)
+								})
+							}
+						}
+					}
 				}
 			}
 		}
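Every network parameter is folded into the sub-benchmark name, so individual cases can be selected with go test's -bench regular expression (the parent is BenchmarkClient, sub-benchmarks are joined with a slash). The self-contained snippet below only illustrates the name format; the format string is copied from the diff and the sample values are arbitrary.

package main

import (
	"fmt"
	"time"
)

func main() {
	tracing := "Tracing"
	k, mtu, maxC, reqS, respS := 10240, 10, 8, 1024, 1
	ltc := 40 * time.Millisecond

	// Prints:
	// Unary-Tracing-kbps_10240-MTU_10-maxConcurrentCalls_8-reqSize_1024B-respSize_1B-latency_40ms
	fmt.Printf("Unary-%s-kbps_%#v-MTU_%#v-maxConcurrentCalls_%#v-reqSize_%#vB-respSize_%#vB-latency_%s\n",
		tracing, k, mtu, maxC, reqS, respS, ltc.String())
}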

View File

@@ -104,8 +104,14 @@ func NewHistogram(opts HistogramOptions) *Histogram {
 // Print writes textual output of the histogram values.
 func (h *Histogram) Print(w io.Writer) {
+	h.PrintWithUnit(w, 1)
+}
+
+// PrintWithUnit writes textual output of the histogram values.
+// Data in the histogram is divided by unit before being printed.
+func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) {
 	avg := float64(h.Sum) / float64(h.Count)
-	fmt.Fprintf(w, "Count: %d Min: %d Max: %d Avg: %.2f\n", h.Count, h.Min, h.Max, avg)
+	fmt.Fprintf(w, "Count: %d Min: %5.1f Max: %5.1f Avg: %.2f\n", h.Count, float64(h.Min)/unit, float64(h.Max)/unit, avg/unit)
 	fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60))
 	if h.Count <= 0 {
 		return
@@ -121,9 +127,9 @@ func (h *Histogram) Print(w io.Writer) {
 	accCount := int64(0)
 	for i, b := range h.Buckets {
-		fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound)
+		fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound/unit)
 		if i+1 < len(h.Buckets) {
-			fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound)
+			fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit)
 		} else {
 			fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf")
 		}
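Print now simply delegates to PrintWithUnit with a unit of 1, so existing callers keep their output, while callers that record raw values can ask for a scaled printout. A hedged usage sketch follows; only NewHistogram, Add, Print and PrintWithUnit appear in this diff, so the HistogramOptions field names and values below are assumptions for illustration.

package main

import (
	"os"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	// Field names assumed from the stats package; values chosen so the
	// samples below land inside the bucket range.
	h := stats.NewHistogram(stats.HistogramOptions{
		NumBuckets:     10,
		GrowthFactor:   0.5,
		BaseBucketSize: float64(time.Millisecond),
		MinValue:       0,
	})
	for _, d := range []time.Duration{2 * time.Millisecond, 5 * time.Millisecond} {
		h.Add(int64(d)) // samples recorded in nanoseconds
	}
	h.Print(os.Stdout)                                    // unit 1: raw nanosecond values
	h.PrintWithUnit(os.Stdout, float64(time.Millisecond)) // same data, shown in milliseconds
}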

View File

@@ -93,9 +93,6 @@ func (stats *Stats) maybeUpdate() {
 		stats.unit = u
 	}
 
-	// Adjust the min/max according to the new unit.
-	stats.min /= int64(stats.unit)
-	stats.max /= int64(stats.unit)
 	numBuckets := stats.numBuckets
 	if n := int(stats.max - stats.min + 1); n < numBuckets {
 		numBuckets = n
@@ -108,7 +105,7 @@ func (stats *Stats) maybeUpdate() {
 		MinValue: stats.min})
 
 	for _, d := range stats.durations {
-		stats.histogram.Add(int64(d / stats.unit))
+		stats.histogram.Add(int64(d))
 	}
 
 	stats.dirty = false
@@ -122,7 +119,7 @@ func (stats *Stats) Print(w io.Writer) {
 		fmt.Fprint(w, "Histogram (empty)\n")
 	} else {
 		fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:])
-		stats.histogram.Print(w)
+		stats.histogram.PrintWithUnit(w, float64(stats.unit))
 	}
 }
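The net effect in this file is that durations are stored in the histogram at full resolution and only divided by the display unit at print time, via Histogram.PrintWithUnit. A hedged end-to-end sketch of the public path; NewStats and Add are assumed from the stats package (only maybeUpdate and Print are touched by this diff), and the sample durations are arbitrary.

package main

import (
	"os"
	"time"

	"google.golang.org/grpc/benchmark/stats"
)

func main() {
	// NewStats(numBuckets) and Add are assumed from the stats package; the
	// benchmarks themselves go through stats.AddStats(b, 38) instead.
	s := stats.NewStats(38)
	for _, d := range []time.Duration{250 * time.Microsecond, 1200 * time.Microsecond, 3 * time.Millisecond} {
		s.Add(d) // stored as a raw time.Duration, no longer pre-divided by the unit
	}
	// Print picks a display unit from the data (e.g. µs) and hands it to
	// Histogram.PrintWithUnit, so buckets are shown scaled while the stored
	// samples keep nanosecond resolution.
	s.Print(os.Stdout)
}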