From 142fdefa83e6522687482f6e5364add2c7faf609 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 7 Apr 2016 14:13:42 -0700 Subject: [PATCH 01/43] Sync proto and update service name in src --- benchmark/benchmark.go | 10 +- benchmark/benchmark_test.go | 8 +- benchmark/client/main.go | 8 +- benchmark/grpc_testing/control.pb.go | 973 ++++++++++++++++++++++++++ benchmark/grpc_testing/control.proto | 201 ++++++ benchmark/grpc_testing/messages.pb.go | 345 +++++++++ benchmark/grpc_testing/messages.proto | 173 +++++ benchmark/grpc_testing/payloads.pb.go | 221 ++++++ benchmark/grpc_testing/payloads.proto | 55 ++ benchmark/grpc_testing/services.pb.go | 439 ++++++++++++ benchmark/grpc_testing/services.proto | 71 ++ benchmark/grpc_testing/stats.pb.go | 109 +++ benchmark/grpc_testing/stats.proto | 70 ++ benchmark/grpc_testing/test.pb.go | 944 ------------------------- benchmark/grpc_testing/test.proto | 148 ---- 15 files changed, 2670 insertions(+), 1105 deletions(-) create mode 100644 benchmark/grpc_testing/control.pb.go create mode 100644 benchmark/grpc_testing/control.proto create mode 100644 benchmark/grpc_testing/messages.pb.go create mode 100644 benchmark/grpc_testing/messages.proto create mode 100644 benchmark/grpc_testing/payloads.pb.go create mode 100644 benchmark/grpc_testing/payloads.proto create mode 100644 benchmark/grpc_testing/services.pb.go create mode 100644 benchmark/grpc_testing/services.proto create mode 100644 benchmark/grpc_testing/stats.pb.go create mode 100644 benchmark/grpc_testing/stats.proto delete mode 100644 benchmark/grpc_testing/test.pb.go delete mode 100644 benchmark/grpc_testing/test.proto diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 7215d35a..e2cd51b3 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -74,7 +74,7 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* }, nil } -func (s *testServer) StreamingCall(stream testpb.TestService_StreamingCallServer) error { +func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { for { in, err := stream.Recv() if err == io.EOF { @@ -101,7 +101,7 @@ func StartServer(addr string) (string, func()) { grpclog.Fatalf("Failed to listen: %v", err) } s := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32)) - testpb.RegisterTestServiceServer(s, &testServer{}) + testpb.RegisterBenchmarkServiceServer(s, &testServer{}) go s.Serve(lis) return lis.Addr().String(), func() { s.Stop() @@ -109,7 +109,7 @@ func StartServer(addr string) (string, func()) { } // DoUnaryCall performs an unary RPC with given stub and request and response sizes. -func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) { +func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, @@ -117,12 +117,12 @@ func DoUnaryCall(tc testpb.TestServiceClient, reqSize, respSize int) { Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { - grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) + grpclog.Fatal("/BenchmarkService/UnaryCall RPC failed: ", err) } } // DoStreamingRoundTrip performs a round trip for a single streaming rpc. 
-func DoStreamingRoundTrip(tc testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient, reqSize, respSize int) { +func DoStreamingRoundTrip(tc testpb.BenchmarkServiceClient, stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index 97779e22..70b3d5db 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -18,7 +18,7 @@ func runUnary(b *testing.B, maxConcurrentCalls int) { target, stopper := StartServer("localhost:0") defer stopper() conn := NewClientConn(target) - tc := testpb.NewTestServiceClient(conn) + tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. for i := 0; i < 10; i++ { @@ -61,7 +61,7 @@ func runStream(b *testing.B, maxConcurrentCalls int) { target, stopper := StartServer("localhost:0") defer stopper() conn := NewClientConn(target) - tc := testpb.NewTestServiceClient(conn) + tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. stream, err := tc.StreamingCall(context.Background()) @@ -106,11 +106,11 @@ func runStream(b *testing.B, maxConcurrentCalls int) { wg.Wait() conn.Close() } -func unaryCaller(client testpb.TestServiceClient) { +func unaryCaller(client testpb.BenchmarkServiceClient) { DoUnaryCall(client, 1, 1) } -func streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) { +func streamCaller(client testpb.BenchmarkServiceClient, stream testpb.BenchmarkService_StreamingCallClient) { DoStreamingRoundTrip(client, stream, 1, 1) } diff --git a/benchmark/client/main.go b/benchmark/client/main.go index e7f0a8fb..27cc1a8b 100644 --- a/benchmark/client/main.go +++ b/benchmark/client/main.go @@ -28,18 +28,18 @@ var ( 1 : streaming call.`) ) -func unaryCaller(client testpb.TestServiceClient) { +func unaryCaller(client testpb.BenchmarkServiceClient) { benchmark.DoUnaryCall(client, 1, 1) } -func streamCaller(client testpb.TestServiceClient, stream testpb.TestService_StreamingCallClient) { +func streamCaller(client testpb.BenchmarkServiceClient, stream testpb.BenchmarkService_StreamingCallClient) { benchmark.DoStreamingRoundTrip(client, stream, 1, 1) } -func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.TestServiceClient) { +func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.BenchmarkServiceClient) { s = stats.NewStats(256) conn = benchmark.NewClientConn(*server) - tc = testpb.NewTestServiceClient(conn) + tc = testpb.NewBenchmarkServiceClient(conn) return s, conn, tc } diff --git a/benchmark/grpc_testing/control.pb.go b/benchmark/grpc_testing/control.pb.go new file mode 100644 index 00000000..fe5fe870 --- /dev/null +++ b/benchmark/grpc_testing/control.pb.go @@ -0,0 +1,973 @@ +// Code generated by protoc-gen-go. +// source: control.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. 
+ +It is generated from these files: + control.proto + messages.proto + payloads.proto + services.proto + stats.proto + +It has these top-level messages: + PoissonParams + UniformParams + DeterministicParams + ParetoParams + ClosedLoopParams + LoadParams + SecurityParams + ClientConfig + ClientStatus + Mark + ClientArgs + ServerConfig + ServerArgs + ServerStatus + CoreRequest + CoreResponse + Void + Scenario + Scenarios + Payload + EchoStatus + SimpleRequest + SimpleResponse + StreamingInputCallRequest + StreamingInputCallResponse + ResponseParameters + StreamingOutputCallRequest + StreamingOutputCallResponse + ReconnectParams + ReconnectInfo + ByteBufferParams + SimpleProtoParams + ComplexProtoParams + PayloadConfig + ServerStats + HistogramParams + HistogramData + ClientStats +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.ProtoPackageIsVersion1 + +type ClientType int32 + +const ( + ClientType_SYNC_CLIENT ClientType = 0 + ClientType_ASYNC_CLIENT ClientType = 1 +) + +var ClientType_name = map[int32]string{ + 0: "SYNC_CLIENT", + 1: "ASYNC_CLIENT", +} +var ClientType_value = map[string]int32{ + "SYNC_CLIENT": 0, + "ASYNC_CLIENT": 1, +} + +func (x ClientType) String() string { + return proto.EnumName(ClientType_name, int32(x)) +} +func (ClientType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type ServerType int32 + +const ( + ServerType_SYNC_SERVER ServerType = 0 + ServerType_ASYNC_SERVER ServerType = 1 + ServerType_ASYNC_GENERIC_SERVER ServerType = 2 +) + +var ServerType_name = map[int32]string{ + 0: "SYNC_SERVER", + 1: "ASYNC_SERVER", + 2: "ASYNC_GENERIC_SERVER", +} +var ServerType_value = map[string]int32{ + "SYNC_SERVER": 0, + "ASYNC_SERVER": 1, + "ASYNC_GENERIC_SERVER": 2, +} + +func (x ServerType) String() string { + return proto.EnumName(ServerType_name, int32(x)) +} +func (ServerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type RpcType int32 + +const ( + RpcType_UNARY RpcType = 0 + RpcType_STREAMING RpcType = 1 +) + +var RpcType_name = map[int32]string{ + 0: "UNARY", + 1: "STREAMING", +} +var RpcType_value = map[string]int32{ + "UNARY": 0, + "STREAMING": 1, +} + +func (x RpcType) String() string { + return proto.EnumName(RpcType_name, int32(x)) +} +func (RpcType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Parameters of poisson process distribution, which is a good representation +// of activity coming in from independent identical stationary sources. +type PoissonParams struct { + // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). 
+ OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` +} + +func (m *PoissonParams) Reset() { *m = PoissonParams{} } +func (m *PoissonParams) String() string { return proto.CompactTextString(m) } +func (*PoissonParams) ProtoMessage() {} +func (*PoissonParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type UniformParams struct { + InterarrivalLo float64 `protobuf:"fixed64,1,opt,name=interarrival_lo,json=interarrivalLo" json:"interarrival_lo,omitempty"` + InterarrivalHi float64 `protobuf:"fixed64,2,opt,name=interarrival_hi,json=interarrivalHi" json:"interarrival_hi,omitempty"` +} + +func (m *UniformParams) Reset() { *m = UniformParams{} } +func (m *UniformParams) String() string { return proto.CompactTextString(m) } +func (*UniformParams) ProtoMessage() {} +func (*UniformParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type DeterministicParams struct { + OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` +} + +func (m *DeterministicParams) Reset() { *m = DeterministicParams{} } +func (m *DeterministicParams) String() string { return proto.CompactTextString(m) } +func (*DeterministicParams) ProtoMessage() {} +func (*DeterministicParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type ParetoParams struct { + InterarrivalBase float64 `protobuf:"fixed64,1,opt,name=interarrival_base,json=interarrivalBase" json:"interarrival_base,omitempty"` + Alpha float64 `protobuf:"fixed64,2,opt,name=alpha" json:"alpha,omitempty"` +} + +func (m *ParetoParams) Reset() { *m = ParetoParams{} } +func (m *ParetoParams) String() string { return proto.CompactTextString(m) } +func (*ParetoParams) ProtoMessage() {} +func (*ParetoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// Once an RPC finishes, immediately start a new one. +// No configuration parameters needed. 
+type ClosedLoopParams struct { +} + +func (m *ClosedLoopParams) Reset() { *m = ClosedLoopParams{} } +func (m *ClosedLoopParams) String() string { return proto.CompactTextString(m) } +func (*ClosedLoopParams) ProtoMessage() {} +func (*ClosedLoopParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type LoadParams struct { + // Types that are valid to be assigned to Load: + // *LoadParams_ClosedLoop + // *LoadParams_Poisson + // *LoadParams_Uniform + // *LoadParams_Determ + // *LoadParams_Pareto + Load isLoadParams_Load `protobuf_oneof:"load"` +} + +func (m *LoadParams) Reset() { *m = LoadParams{} } +func (m *LoadParams) String() string { return proto.CompactTextString(m) } +func (*LoadParams) ProtoMessage() {} +func (*LoadParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type isLoadParams_Load interface { + isLoadParams_Load() +} + +type LoadParams_ClosedLoop struct { + ClosedLoop *ClosedLoopParams `protobuf:"bytes,1,opt,name=closed_loop,json=closedLoop,oneof"` +} +type LoadParams_Poisson struct { + Poisson *PoissonParams `protobuf:"bytes,2,opt,name=poisson,oneof"` +} +type LoadParams_Uniform struct { + Uniform *UniformParams `protobuf:"bytes,3,opt,name=uniform,oneof"` +} +type LoadParams_Determ struct { + Determ *DeterministicParams `protobuf:"bytes,4,opt,name=determ,oneof"` +} +type LoadParams_Pareto struct { + Pareto *ParetoParams `protobuf:"bytes,5,opt,name=pareto,oneof"` +} + +func (*LoadParams_ClosedLoop) isLoadParams_Load() {} +func (*LoadParams_Poisson) isLoadParams_Load() {} +func (*LoadParams_Uniform) isLoadParams_Load() {} +func (*LoadParams_Determ) isLoadParams_Load() {} +func (*LoadParams_Pareto) isLoadParams_Load() {} + +func (m *LoadParams) GetLoad() isLoadParams_Load { + if m != nil { + return m.Load + } + return nil +} + +func (m *LoadParams) GetClosedLoop() *ClosedLoopParams { + if x, ok := m.GetLoad().(*LoadParams_ClosedLoop); ok { + return x.ClosedLoop + } + return nil +} + +func (m *LoadParams) GetPoisson() *PoissonParams { + if x, ok := m.GetLoad().(*LoadParams_Poisson); ok { + return x.Poisson + } + return nil +} + +func (m *LoadParams) GetUniform() *UniformParams { + if x, ok := m.GetLoad().(*LoadParams_Uniform); ok { + return x.Uniform + } + return nil +} + +func (m *LoadParams) GetDeterm() *DeterministicParams { + if x, ok := m.GetLoad().(*LoadParams_Determ); ok { + return x.Determ + } + return nil +} + +func (m *LoadParams) GetPareto() *ParetoParams { + if x, ok := m.GetLoad().(*LoadParams_Pareto); ok { + return x.Pareto + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadParams) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadParams_OneofMarshaler, _LoadParams_OneofUnmarshaler, _LoadParams_OneofSizer, []interface{}{ + (*LoadParams_ClosedLoop)(nil), + (*LoadParams_Poisson)(nil), + (*LoadParams_Uniform)(nil), + (*LoadParams_Determ)(nil), + (*LoadParams_Pareto)(nil), + } +} + +func _LoadParams_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadParams) + // load + switch x := m.Load.(type) { + case *LoadParams_ClosedLoop: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClosedLoop); err != nil { + return err + } + case *LoadParams_Poisson: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Poisson); err != nil { + return err + } + case *LoadParams_Uniform: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Uniform); err != nil { + return err + } + case *LoadParams_Determ: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Determ); err != nil { + return err + } + case *LoadParams_Pareto: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Pareto); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadParams.Load has unexpected type %T", x) + } + return nil +} + +func _LoadParams_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadParams) + switch tag { + case 1: // load.closed_loop + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClosedLoopParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_ClosedLoop{msg} + return true, err + case 2: // load.poisson + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PoissonParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Poisson{msg} + return true, err + case 3: // load.uniform + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UniformParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Uniform{msg} + return true, err + case 4: // load.determ + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeterministicParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Determ{msg} + return true, err + case 5: // load.pareto + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ParetoParams) + err := b.DecodeMessage(msg) + m.Load = &LoadParams_Pareto{msg} + return true, err + default: + return false, nil + } +} + +func _LoadParams_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadParams) + // load + switch x := m.Load.(type) { + case *LoadParams_ClosedLoop: + s := proto.Size(x.ClosedLoop) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Poisson: + s := proto.Size(x.Poisson) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Uniform: + s := proto.Size(x.Uniform) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Determ: + s := proto.Size(x.Determ) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadParams_Pareto: + s := proto.Size(x.Pareto) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// presence of SecurityParams implies use of TLS +type SecurityParams struct { + UseTestCa bool `protobuf:"varint,1,opt,name=use_test_ca,json=useTestCa" json:"use_test_ca,omitempty"` + ServerHostOverride string `protobuf:"bytes,2,opt,name=server_host_override,json=serverHostOverride" json:"server_host_override,omitempty"` +} + +func (m *SecurityParams) Reset() { *m = SecurityParams{} } +func (m *SecurityParams) String() string { return proto.CompactTextString(m) } +func (*SecurityParams) ProtoMessage() {} +func (*SecurityParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type ClientConfig struct { + // List of targets to connect to. At least one target needs to be specified. + ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets,json=serverTargets" json:"server_targets,omitempty"` + ClientType ClientType `protobuf:"varint,2,opt,name=client_type,json=clientType,enum=grpc.testing.ClientType" json:"client_type,omitempty"` + SecurityParams *SecurityParams `protobuf:"bytes,3,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` + // How many concurrent RPCs to start for each channel. + // For synchronous client, use a separate thread for each outstanding RPC. + OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel,json=outstandingRpcsPerChannel" json:"outstanding_rpcs_per_channel,omitempty"` + // Number of independent client channels to create. + // i-th channel will connect to server_target[i % server_targets.size()] + ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels,json=clientChannels" json:"client_channels,omitempty"` + // Only for async client. Number of threads to use to start/manage RPCs. + AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads,json=asyncClientThreads" json:"async_client_threads,omitempty"` + RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,json=rpcType,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` + // The requested load for the entire client (aggregated over all the threads). 
+ LoadParams *LoadParams `protobuf:"bytes,10,opt,name=load_params,json=loadParams" json:"load_params,omitempty"` + PayloadConfig *PayloadConfig `protobuf:"bytes,11,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` + HistogramParams *HistogramParams `protobuf:"bytes,12,opt,name=histogram_params,json=histogramParams" json:"histogram_params,omitempty"` + // Specify the cores we should run the client on, if desired + CoreList []int32 `protobuf:"varint,13,rep,name=core_list,json=coreList" json:"core_list,omitempty"` + CoreLimit int32 `protobuf:"varint,14,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` +} + +func (m *ClientConfig) Reset() { *m = ClientConfig{} } +func (m *ClientConfig) String() string { return proto.CompactTextString(m) } +func (*ClientConfig) ProtoMessage() {} +func (*ClientConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ClientConfig) GetSecurityParams() *SecurityParams { + if m != nil { + return m.SecurityParams + } + return nil +} + +func (m *ClientConfig) GetLoadParams() *LoadParams { + if m != nil { + return m.LoadParams + } + return nil +} + +func (m *ClientConfig) GetPayloadConfig() *PayloadConfig { + if m != nil { + return m.PayloadConfig + } + return nil +} + +func (m *ClientConfig) GetHistogramParams() *HistogramParams { + if m != nil { + return m.HistogramParams + } + return nil +} + +type ClientStatus struct { + Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` +} + +func (m *ClientStatus) Reset() { *m = ClientStatus{} } +func (m *ClientStatus) String() string { return proto.CompactTextString(m) } +func (*ClientStatus) ProtoMessage() {} +func (*ClientStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ClientStatus) GetStats() *ClientStats { + if m != nil { + return m.Stats + } + return nil +} + +// Request current stats +type Mark struct { + // if true, the stats will be reset after taking their snapshot. 
+ Reset_ bool `protobuf:"varint,1,opt,name=reset" json:"reset,omitempty"` +} + +func (m *Mark) Reset() { *m = Mark{} } +func (m *Mark) String() string { return proto.CompactTextString(m) } +func (*Mark) ProtoMessage() {} +func (*Mark) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type ClientArgs struct { + // Types that are valid to be assigned to Argtype: + // *ClientArgs_Setup + // *ClientArgs_Mark + Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"` +} + +func (m *ClientArgs) Reset() { *m = ClientArgs{} } +func (m *ClientArgs) String() string { return proto.CompactTextString(m) } +func (*ClientArgs) ProtoMessage() {} +func (*ClientArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type isClientArgs_Argtype interface { + isClientArgs_Argtype() +} + +type ClientArgs_Setup struct { + Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup,oneof"` +} +type ClientArgs_Mark struct { + Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` +} + +func (*ClientArgs_Setup) isClientArgs_Argtype() {} +func (*ClientArgs_Mark) isClientArgs_Argtype() {} + +func (m *ClientArgs) GetArgtype() isClientArgs_Argtype { + if m != nil { + return m.Argtype + } + return nil +} + +func (m *ClientArgs) GetSetup() *ClientConfig { + if x, ok := m.GetArgtype().(*ClientArgs_Setup); ok { + return x.Setup + } + return nil +} + +func (m *ClientArgs) GetMark() *Mark { + if x, ok := m.GetArgtype().(*ClientArgs_Mark); ok { + return x.Mark + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ClientArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ClientArgs_OneofMarshaler, _ClientArgs_OneofUnmarshaler, _ClientArgs_OneofSizer, []interface{}{ + (*ClientArgs_Setup)(nil), + (*ClientArgs_Mark)(nil), + } +} + +func _ClientArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ClientArgs) + // argtype + switch x := m.Argtype.(type) { + case *ClientArgs_Setup: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Setup); err != nil { + return err + } + case *ClientArgs_Mark: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mark); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ClientArgs.Argtype has unexpected type %T", x) + } + return nil +} + +func _ClientArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ClientArgs) + switch tag { + case 1: // argtype.setup + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientConfig) + err := b.DecodeMessage(msg) + m.Argtype = &ClientArgs_Setup{msg} + return true, err + case 2: // argtype.mark + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mark) + err := b.DecodeMessage(msg) + m.Argtype = &ClientArgs_Mark{msg} + return true, err + default: + return false, nil + } +} + +func _ClientArgs_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ClientArgs) + // argtype + switch x := m.Argtype.(type) { + case *ClientArgs_Setup: + s := proto.Size(x.Setup) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ClientArgs_Mark: + s := proto.Size(x.Mark) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: 
unexpected type %T in oneof", x)) + } + return n +} + +type ServerConfig struct { + ServerType ServerType `protobuf:"varint,1,opt,name=server_type,json=serverType,enum=grpc.testing.ServerType" json:"server_type,omitempty"` + SecurityParams *SecurityParams `protobuf:"bytes,2,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` + // Port on which to listen. Zero means pick unused port. + Port int32 `protobuf:"varint,4,opt,name=port" json:"port,omitempty"` + // Only for async server. Number of threads used to serve the requests. + AsyncServerThreads int32 `protobuf:"varint,7,opt,name=async_server_threads,json=asyncServerThreads" json:"async_server_threads,omitempty"` + // Specify the number of cores to limit server to, if desired + CoreLimit int32 `protobuf:"varint,8,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` + // payload config, used in generic server + PayloadConfig *PayloadConfig `protobuf:"bytes,9,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` + // Specify the cores we should run the server on, if desired + CoreList []int32 `protobuf:"varint,10,rep,name=core_list,json=coreList" json:"core_list,omitempty"` +} + +func (m *ServerConfig) Reset() { *m = ServerConfig{} } +func (m *ServerConfig) String() string { return proto.CompactTextString(m) } +func (*ServerConfig) ProtoMessage() {} +func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ServerConfig) GetSecurityParams() *SecurityParams { + if m != nil { + return m.SecurityParams + } + return nil +} + +func (m *ServerConfig) GetPayloadConfig() *PayloadConfig { + if m != nil { + return m.PayloadConfig + } + return nil +} + +type ServerArgs struct { + // Types that are valid to be assigned to Argtype: + // *ServerArgs_Setup + // *ServerArgs_Mark + Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"` +} + +func (m *ServerArgs) Reset() { *m = ServerArgs{} } +func (m *ServerArgs) String() string { return proto.CompactTextString(m) } +func (*ServerArgs) ProtoMessage() {} +func (*ServerArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +type isServerArgs_Argtype interface { + isServerArgs_Argtype() +} + +type ServerArgs_Setup struct { + Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup,oneof"` +} +type ServerArgs_Mark struct { + Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` +} + +func (*ServerArgs_Setup) isServerArgs_Argtype() {} +func (*ServerArgs_Mark) isServerArgs_Argtype() {} + +func (m *ServerArgs) GetArgtype() isServerArgs_Argtype { + if m != nil { + return m.Argtype + } + return nil +} + +func (m *ServerArgs) GetSetup() *ServerConfig { + if x, ok := m.GetArgtype().(*ServerArgs_Setup); ok { + return x.Setup + } + return nil +} + +func (m *ServerArgs) GetMark() *Mark { + if x, ok := m.GetArgtype().(*ServerArgs_Mark); ok { + return x.Mark + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerArgs_OneofMarshaler, _ServerArgs_OneofUnmarshaler, _ServerArgs_OneofSizer, []interface{}{ + (*ServerArgs_Setup)(nil), + (*ServerArgs_Mark)(nil), + } +} + +func _ServerArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerArgs) + // argtype + switch x := m.Argtype.(type) { + case *ServerArgs_Setup: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Setup); err != nil { + return err + } + case *ServerArgs_Mark: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mark); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServerArgs.Argtype has unexpected type %T", x) + } + return nil +} + +func _ServerArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerArgs) + switch tag { + case 1: // argtype.setup + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerConfig) + err := b.DecodeMessage(msg) + m.Argtype = &ServerArgs_Setup{msg} + return true, err + case 2: // argtype.mark + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Mark) + err := b.DecodeMessage(msg) + m.Argtype = &ServerArgs_Mark{msg} + return true, err + default: + return false, nil + } +} + +func _ServerArgs_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerArgs) + // argtype + switch x := m.Argtype.(type) { + case *ServerArgs_Setup: + s := proto.Size(x.Setup) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerArgs_Mark: + s := proto.Size(x.Mark) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ServerStatus struct { + Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + // the port bound by the server + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + // Number of cores available to the server + Cores int32 `protobuf:"varint,3,opt,name=cores" json:"cores,omitempty"` +} + +func (m *ServerStatus) Reset() { *m = ServerStatus{} } +func (m *ServerStatus) String() string { return proto.CompactTextString(m) } +func (*ServerStatus) ProtoMessage() {} +func (*ServerStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *ServerStatus) GetStats() *ServerStats { + if m != nil { + return m.Stats + } + return nil +} + +type CoreRequest struct { +} + +func (m *CoreRequest) Reset() { *m = CoreRequest{} } +func (m *CoreRequest) String() string { return proto.CompactTextString(m) } +func (*CoreRequest) ProtoMessage() {} +func (*CoreRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type CoreResponse struct { + // Number of cores available on the server + Cores int32 `protobuf:"varint,1,opt,name=cores" json:"cores,omitempty"` +} + +func (m *CoreResponse) Reset() { *m = CoreResponse{} } +func (m *CoreResponse) String() string { return proto.CompactTextString(m) } +func (*CoreResponse) ProtoMessage() {} +func (*CoreResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type Void struct { +} + +func (m *Void) Reset() { *m = Void{} } +func (m *Void) String() string { 
return proto.CompactTextString(m) } +func (*Void) ProtoMessage() {} +func (*Void) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +// A single performance scenario: input to qps_json_driver +type Scenario struct { + // Human readable name for this scenario + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Client configuration + ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig" json:"client_config,omitempty"` + // Number of clients to start for the test + NumClients int32 `protobuf:"varint,3,opt,name=num_clients,json=numClients" json:"num_clients,omitempty"` + // Server configuration + ServerConfig *ServerConfig `protobuf:"bytes,4,opt,name=server_config,json=serverConfig" json:"server_config,omitempty"` + // Number of servers to start for the test + NumServers int32 `protobuf:"varint,5,opt,name=num_servers,json=numServers" json:"num_servers,omitempty"` + // Warmup period, in seconds + WarmupSeconds int32 `protobuf:"varint,6,opt,name=warmup_seconds,json=warmupSeconds" json:"warmup_seconds,omitempty"` + // Benchmark time, in seconds + BenchmarkSeconds int32 `protobuf:"varint,7,opt,name=benchmark_seconds,json=benchmarkSeconds" json:"benchmark_seconds,omitempty"` + // Number of workers to spawn locally (usually zero) + SpawnLocalWorkerCount int32 `protobuf:"varint,8,opt,name=spawn_local_worker_count,json=spawnLocalWorkerCount" json:"spawn_local_worker_count,omitempty"` +} + +func (m *Scenario) Reset() { *m = Scenario{} } +func (m *Scenario) String() string { return proto.CompactTextString(m) } +func (*Scenario) ProtoMessage() {} +func (*Scenario) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *Scenario) GetClientConfig() *ClientConfig { + if m != nil { + return m.ClientConfig + } + return nil +} + +func (m *Scenario) GetServerConfig() *ServerConfig { + if m != nil { + return m.ServerConfig + } + return nil +} + +// A set of scenarios to be run with qps_json_driver +type Scenarios struct { + Scenarios []*Scenario `protobuf:"bytes,1,rep,name=scenarios" json:"scenarios,omitempty"` +} + +func (m *Scenarios) Reset() { *m = Scenarios{} } +func (m *Scenarios) String() string { return proto.CompactTextString(m) } +func (*Scenarios) ProtoMessage() {} +func (*Scenarios) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *Scenarios) GetScenarios() []*Scenario { + if m != nil { + return m.Scenarios + } + return nil +} + +func init() { + proto.RegisterType((*PoissonParams)(nil), "grpc.testing.PoissonParams") + proto.RegisterType((*UniformParams)(nil), "grpc.testing.UniformParams") + proto.RegisterType((*DeterministicParams)(nil), "grpc.testing.DeterministicParams") + proto.RegisterType((*ParetoParams)(nil), "grpc.testing.ParetoParams") + proto.RegisterType((*ClosedLoopParams)(nil), "grpc.testing.ClosedLoopParams") + proto.RegisterType((*LoadParams)(nil), "grpc.testing.LoadParams") + proto.RegisterType((*SecurityParams)(nil), "grpc.testing.SecurityParams") + proto.RegisterType((*ClientConfig)(nil), "grpc.testing.ClientConfig") + proto.RegisterType((*ClientStatus)(nil), "grpc.testing.ClientStatus") + proto.RegisterType((*Mark)(nil), "grpc.testing.Mark") + proto.RegisterType((*ClientArgs)(nil), "grpc.testing.ClientArgs") + proto.RegisterType((*ServerConfig)(nil), "grpc.testing.ServerConfig") + proto.RegisterType((*ServerArgs)(nil), "grpc.testing.ServerArgs") + proto.RegisterType((*ServerStatus)(nil), "grpc.testing.ServerStatus") + proto.RegisterType((*CoreRequest)(nil), 
"grpc.testing.CoreRequest") + proto.RegisterType((*CoreResponse)(nil), "grpc.testing.CoreResponse") + proto.RegisterType((*Void)(nil), "grpc.testing.Void") + proto.RegisterType((*Scenario)(nil), "grpc.testing.Scenario") + proto.RegisterType((*Scenarios)(nil), "grpc.testing.Scenarios") + proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) + proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) + proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) +} + +var fileDescriptor0 = []byte{ + // 1162 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0xdb, 0x46, + 0x13, 0x8d, 0x14, 0xc9, 0x96, 0x86, 0x92, 0xac, 0x6f, 0xbf, 0xa4, 0x60, 0x1c, 0x27, 0x6d, 0xd8, + 0x16, 0x0d, 0x5c, 0xc0, 0x29, 0xd4, 0x02, 0x69, 0xd1, 0x8b, 0x40, 0x56, 0x85, 0xd8, 0x80, 0xe3, + 0xba, 0x2b, 0x27, 0x45, 0xae, 0x08, 0x9a, 0x5a, 0x4b, 0x44, 0x24, 0x2e, 0xbb, 0x4b, 0xc6, 0xf0, + 0x2b, 0xf4, 0x99, 0xfa, 0x1c, 0x7d, 0x8d, 0xbe, 0x42, 0x67, 0xff, 0x64, 0x52, 0x11, 0x10, 0xb7, + 0xbd, 0xe3, 0xce, 0x9c, 0xb3, 0x3b, 0x3b, 0x67, 0x66, 0x96, 0xd0, 0x8d, 0x79, 0x9a, 0x0b, 0xbe, + 0x38, 0xc8, 0x04, 0xcf, 0x39, 0xe9, 0xcc, 0x44, 0x16, 0x1f, 0xe4, 0x4c, 0xe6, 0x49, 0x3a, 0xdb, + 0xed, 0x65, 0xd1, 0xf5, 0x82, 0x47, 0x53, 0x69, 0xbc, 0xbb, 0x9e, 0xcc, 0xa3, 0xdc, 0x2e, 0x82, + 0x01, 0x74, 0xcf, 0x78, 0x22, 0x25, 0x4f, 0xcf, 0x22, 0x11, 0x2d, 0x25, 0x79, 0x02, 0x1d, 0x7e, + 0x79, 0xc9, 0x04, 0x9b, 0x86, 0x8a, 0xe4, 0xd7, 0x3e, 0xab, 0x3d, 0xad, 0x51, 0xcf, 0xda, 0x4e, + 0xd0, 0x14, 0x44, 0xd0, 0x7d, 0x9d, 0x26, 0x97, 0x5c, 0x2c, 0x2d, 0xe7, 0x2b, 0xd8, 0x49, 0xd2, + 0x9c, 0x89, 0x48, 0x88, 0xe4, 0x7d, 0xb4, 0x40, 0xa2, 0xa5, 0xf5, 0xca, 0xe6, 0x13, 0xfe, 0x01, + 0x70, 0x9e, 0xf8, 0xf5, 0x0f, 0x81, 0x47, 0x49, 0xf0, 0x3d, 0xfc, 0xff, 0x27, 0x86, 0x96, 0x65, + 0x92, 0x26, 0x78, 0x8b, 0xf8, 0xf6, 0xc1, 0xfd, 0x02, 0x1d, 0x04, 0xb3, 0x9c, 0x5b, 0xca, 0xd7, + 0xf0, 0xbf, 0xca, 0x91, 0x17, 0x91, 0x64, 0x96, 0xd7, 0x2f, 0x3b, 0x0e, 0xd1, 0x4e, 0xee, 0x41, + 0x33, 0x5a, 0x64, 0xf3, 0xc8, 0x46, 0x65, 0x16, 0x01, 0x81, 0xfe, 0x68, 0xc1, 0xa5, 0x3a, 0x80, + 0x67, 0x66, 0xdb, 0xe0, 0x8f, 0x3a, 0x80, 0x3a, 0xcf, 0x9e, 0x32, 0x04, 0x2f, 0xd6, 0x10, 0x8c, + 0x8b, 0x67, 0x7a, 0x7f, 0x6f, 0xf0, 0xf8, 0xa0, 0xac, 0xc3, 0xc1, 0xfa, 0x1e, 0x47, 0x77, 0x28, + 0xc4, 0x2b, 0x1b, 0x79, 0x0e, 0xdb, 0x99, 0x51, 0x42, 0x9f, 0xee, 0x0d, 0x1e, 0x56, 0xe9, 0x15, + 0x99, 0x90, 0xeb, 0xd0, 0x8a, 0x58, 0x18, 0x39, 0xfc, 0xbb, 0x9b, 0x88, 0x15, 0xad, 0x14, 0xd1, + 0xa2, 0xc9, 0x8f, 0xb0, 0x35, 0xd5, 0x49, 0xf6, 0x1b, 0x9a, 0xf7, 0xa4, 0xca, 0xdb, 0x20, 0x00, + 0xb2, 0x2d, 0x85, 0x7c, 0x07, 0x5b, 0x99, 0xce, 0xb3, 0xdf, 0xd4, 0xe4, 0xdd, 0xb5, 0x68, 0x4b, + 0x1a, 0x28, 0x96, 0xc1, 0x1e, 0x6e, 0x41, 0x43, 0x09, 0x17, 0x5c, 0x40, 0x6f, 0xc2, 0xe2, 0x42, + 0x24, 0xf9, 0xb5, 0xcd, 0xe0, 0x63, 0xf0, 0x0a, 0xc9, 0x42, 0xc5, 0x0f, 0xe3, 0x48, 0x67, 0xb0, + 0x45, 0xdb, 0x68, 0x3a, 0x47, 0xcb, 0x28, 0x22, 0xdf, 0xc0, 0x3d, 0xc9, 0xc4, 0x7b, 0x26, 0xc2, + 0x39, 0x47, 0x08, 0xc7, 0x2f, 0x91, 0x4c, 0x99, 0xce, 0x55, 0x9b, 0x12, 0xe3, 0x3b, 0x42, 0xd7, + 0xcf, 0xd6, 0x13, 0xfc, 0xde, 0x84, 0xce, 0x68, 0x91, 0xb0, 0x34, 0x1f, 0xf1, 0xf4, 0x32, 0x99, + 0x91, 0x2f, 0xa1, 0x67, 0xb7, 0xc8, 0x23, 0x31, 0x63, 0xb9, 0xc4, 0x53, 0xee, 0x22, 0xb9, 0x6b, + 0xac, 0xe7, 0xc6, 0x48, 0x7e, 0x50, 0x5a, 0x2a, 0x5a, 0x98, 0x5f, 0x67, 0xe6, 0x80, 0xde, 0xc0, + 0x5f, 0xd7, 0x52, 0x01, 0xce, 0xd1, 0xaf, 0x34, 0x74, 0xdf, 0x64, 0x0c, 0x3b, 0xd2, 0x5e, 0x2b, + 
0xcc, 0xf4, 0xbd, 0xac, 0x24, 0x7b, 0x55, 0x7a, 0xf5, 0xee, 0xb4, 0x27, 0xab, 0xb9, 0x78, 0x01, + 0x7b, 0xbc, 0xc8, 0xb1, 0x4d, 0xd3, 0x29, 0xa2, 0x43, 0x64, 0xca, 0x30, 0xc3, 0xb0, 0xe3, 0x79, + 0x94, 0xa6, 0x6c, 0xa1, 0xe5, 0x6a, 0xd2, 0x07, 0x25, 0x0c, 0x45, 0xc8, 0x19, 0x13, 0x23, 0x03, + 0x50, 0x7d, 0x66, 0xaf, 0x60, 0x29, 0x52, 0xab, 0xd4, 0xa4, 0x3d, 0x63, 0xb6, 0x38, 0xa9, 0xb2, + 0x1a, 0xc9, 0xeb, 0x34, 0x0e, 0xdd, 0x8d, 0xe7, 0x82, 0xe1, 0xa4, 0xf0, 0xb7, 0x35, 0x9a, 0x68, + 0x9f, 0xbd, 0xab, 0xf1, 0x20, 0xa3, 0x85, 0xf1, 0x98, 0xd4, 0xb4, 0x74, 0x6a, 0xee, 0x57, 0xef, + 0x86, 0xa1, 0xe8, 0xbc, 0x6c, 0x0b, 0xf3, 0xa1, 0xf2, 0xa9, 0x34, 0x77, 0x09, 0x01, 0x9d, 0x90, + 0xb5, 0x7c, 0xde, 0xb4, 0x12, 0x85, 0xc5, 0x4d, 0x5b, 0x1d, 0x82, 0x1b, 0x5e, 0x61, 0xac, 0x35, + 0xf4, 0xbd, 0x8d, 0xad, 0x61, 0x30, 0x46, 0x66, 0xda, 0xcd, 0xca, 0x4b, 0x72, 0x04, 0xfd, 0x39, + 0x96, 0x30, 0x9f, 0xe1, 0x8e, 0x2e, 0x86, 0x8e, 0xde, 0xe5, 0x51, 0x75, 0x97, 0x23, 0x87, 0xb2, + 0x81, 0xec, 0xcc, 0xab, 0x06, 0xf2, 0x10, 0xda, 0x31, 0x17, 0x2c, 0x5c, 0xa0, 0xdd, 0xef, 0x62, + 0xe9, 0x34, 0x69, 0x4b, 0x19, 0x4e, 0x70, 0x4d, 0x1e, 0x01, 0x58, 0xe7, 0x32, 0xc9, 0xfd, 0x9e, + 0xce, 0x5f, 0xdb, 0x78, 0xd1, 0x10, 0xbc, 0x70, 0xb5, 0x38, 0xc1, 0xe1, 0x5b, 0x48, 0xf2, 0x0c, + 0x9a, 0x7a, 0x0c, 0xdb, 0x51, 0xf1, 0x60, 0x53, 0x79, 0x29, 0xa8, 0xa4, 0x06, 0x17, 0xec, 0x41, + 0xe3, 0x55, 0x24, 0xde, 0xa9, 0x11, 0x25, 0x98, 0x64, 0xb9, 0xed, 0x10, 0xb3, 0x08, 0x0a, 0x00, + 0xc3, 0x19, 0x8a, 0x99, 0x24, 0x03, 0xdc, 0x9c, 0xe5, 0x85, 0x9b, 0x43, 0xbb, 0x9b, 0x36, 0x37, + 0xd9, 0xc1, 0xd6, 0x34, 0x50, 0xf2, 0x14, 0x1a, 0x4b, 0xdc, 0xdf, 0xce, 0x1e, 0x52, 0xa5, 0xa8, + 0x93, 0x11, 0xaa, 0x11, 0x87, 0x6d, 0xd8, 0xc6, 0x4e, 0x51, 0x05, 0x10, 0xfc, 0x59, 0x87, 0xce, + 0x44, 0x37, 0x8f, 0x4d, 0x36, 0x6a, 0xed, 0x5a, 0x4c, 0x15, 0x48, 0x6d, 0x53, 0xef, 0x18, 0x82, + 0xe9, 0x1d, 0xb9, 0xfa, 0xde, 0xd4, 0x3b, 0xf5, 0x7f, 0xd1, 0x3b, 0x04, 0x1a, 0x19, 0x17, 0xb9, + 0xed, 0x11, 0xfd, 0x7d, 0x53, 0xe5, 0x2e, 0xb6, 0x0d, 0x55, 0x6e, 0xa3, 0xb2, 0x55, 0x5e, 0x55, + 0xb3, 0xb5, 0xa6, 0xe6, 0x86, 0xba, 0x6c, 0xff, 0xe3, 0xba, 0xac, 0x54, 0x13, 0x54, 0xab, 0x49, + 0xe9, 0x69, 0x02, 0xba, 0x85, 0x9e, 0x65, 0x01, 0xfe, 0xa3, 0x9e, 0x89, 0x93, 0xf3, 0x56, 0x55, + 0x7a, 0x03, 0x75, 0x55, 0xba, 0xca, 0x7e, 0xbd, 0x94, 0x7d, 0xac, 0x58, 0x75, 0x2f, 0x33, 0x0a, + 0x9b, 0xd4, 0x2c, 0x82, 0x2e, 0x78, 0x23, 0xfc, 0xa0, 0xec, 0xb7, 0x02, 0xb7, 0x0b, 0xbe, 0xc0, + 0xfe, 0xd0, 0x4b, 0x99, 0xf1, 0xd4, 0xbc, 0xc4, 0x86, 0x54, 0x2b, 0x93, 0xf0, 0xf9, 0x78, 0xc3, + 0x93, 0x69, 0xf0, 0x57, 0x1d, 0x5a, 0x93, 0x98, 0xa5, 0x91, 0x48, 0xb8, 0x3a, 0x33, 0x8d, 0x96, + 0xa6, 0xd8, 0xda, 0x54, 0x7f, 0xe3, 0x04, 0xed, 0xba, 0x01, 0x68, 0xf4, 0xa9, 0x7f, 0xac, 0x13, + 0x68, 0x27, 0x2e, 0xbf, 0x15, 0x9f, 0x82, 0x97, 0x16, 0x4b, 0x3b, 0x16, 0x5d, 0xe8, 0x80, 0x26, + 0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0xee, 0x84, 0xc6, 0xc7, 0xb4, 0xa1, 0x1d, 0x59, 0x6e, 0x15, + 0x7b, 0x82, 0xb1, 0xb9, 0xf9, 0xac, 0x4e, 0x30, 0x1c, 0xa9, 0x9e, 0xab, 0xab, 0x48, 0x2c, 0x8b, + 0x0c, 0x31, 0x78, 0x06, 0xd6, 0xeb, 0x96, 0xc6, 0x74, 0x8d, 0x75, 0x62, 0x8c, 0xea, 0x07, 0xe7, + 0x82, 0xa5, 0xf1, 0x5c, 0x69, 0xb9, 0x42, 0x9a, 0xca, 0xee, 0xaf, 0x1c, 0x0e, 0xfc, 0x1c, 0x7c, + 0x99, 0x45, 0x57, 0x29, 0xfe, 0xa6, 0xc4, 0xf8, 0x33, 0x74, 0xc5, 0xc5, 0x3b, 0x7d, 0x83, 0x22, + 0x75, 0x55, 0x7e, 0x5f, 0xfb, 0x4f, 0x94, 0xfb, 0x57, 0xed, 0x1d, 0x29, 0x67, 0x30, 0x84, 0xb6, + 0x4b, 0xb8, 0xc4, 0xb7, 0xbf, 0x2d, 0xdd, 0x42, 0xbf, 0xa1, 0xde, 0xe0, 0x93, 0xb5, 0x7b, 0x5b, + 0x37, 0xbd, 0x01, 0xee, 
0x3f, 0x73, 0x33, 0x4a, 0xb7, 0xfb, 0x0e, 0x78, 0x93, 0xb7, 0xa7, 0xa3, + 0x70, 0x74, 0x72, 0x3c, 0x3e, 0x3d, 0xef, 0xdf, 0x21, 0x7d, 0xe8, 0x0c, 0xcb, 0x96, 0xda, 0xfe, + 0xb1, 0x6b, 0x82, 0x0a, 0x61, 0x32, 0xa6, 0x6f, 0xc6, 0xb4, 0x4c, 0xb0, 0x96, 0x1a, 0xf1, 0xe1, + 0x9e, 0xb1, 0xbc, 0x1c, 0x9f, 0x8e, 0xe9, 0xf1, 0xca, 0x53, 0xdf, 0xff, 0x1c, 0xb6, 0xed, 0xbb, + 0x44, 0xda, 0xd0, 0x7c, 0x7d, 0x3a, 0xa4, 0x6f, 0x71, 0x87, 0x2e, 0x5e, 0xea, 0x9c, 0x8e, 0x87, + 0xaf, 0x8e, 0x4f, 0x5f, 0xf6, 0x6b, 0x17, 0x5b, 0xfa, 0x97, 0xf8, 0xdb, 0xbf, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00, +} diff --git a/benchmark/grpc_testing/control.proto b/benchmark/grpc_testing/control.proto new file mode 100644 index 00000000..4913c86a --- /dev/null +++ b/benchmark/grpc_testing/control.proto @@ -0,0 +1,201 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +import "payloads.proto"; +import "stats.proto"; + +package grpc.testing; + +enum ClientType { + SYNC_CLIENT = 0; + ASYNC_CLIENT = 1; +} + +enum ServerType { + SYNC_SERVER = 0; + ASYNC_SERVER = 1; + ASYNC_GENERIC_SERVER = 2; +} + +enum RpcType { + UNARY = 0; + STREAMING = 1; +} + +// Parameters of poisson process distribution, which is a good representation +// of activity coming in from independent identical stationary sources. +message PoissonParams { + // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). + double offered_load = 1; +} + +message UniformParams { + double interarrival_lo = 1; + double interarrival_hi = 2; +} + +message DeterministicParams { + double offered_load = 1; +} + +message ParetoParams { + double interarrival_base = 1; + double alpha = 2; +} + +// Once an RPC finishes, immediately start a new one. +// No configuration parameters needed. 
+message ClosedLoopParams { +} + +message LoadParams { + oneof load { + ClosedLoopParams closed_loop = 1; + PoissonParams poisson = 2; + UniformParams uniform = 3; + DeterministicParams determ = 4; + ParetoParams pareto = 5; + }; +} + +// presence of SecurityParams implies use of TLS +message SecurityParams { + bool use_test_ca = 1; + string server_host_override = 2; +} + +message ClientConfig { + // List of targets to connect to. At least one target needs to be specified. + repeated string server_targets = 1; + ClientType client_type = 2; + SecurityParams security_params = 3; + // How many concurrent RPCs to start for each channel. + // For synchronous client, use a separate thread for each outstanding RPC. + int32 outstanding_rpcs_per_channel = 4; + // Number of independent client channels to create. + // i-th channel will connect to server_target[i % server_targets.size()] + int32 client_channels = 5; + // Only for async client. Number of threads to use to start/manage RPCs. + int32 async_client_threads = 7; + RpcType rpc_type = 8; + // The requested load for the entire client (aggregated over all the threads). + LoadParams load_params = 10; + PayloadConfig payload_config = 11; + HistogramParams histogram_params = 12; + + // Specify the cores we should run the client on, if desired + repeated int32 core_list = 13; + int32 core_limit = 14; +} + +message ClientStatus { + ClientStats stats = 1; +} + +// Request current stats +message Mark { + // if true, the stats will be reset after taking their snapshot. + bool reset = 1; +} + +message ClientArgs { + oneof argtype { + ClientConfig setup = 1; + Mark mark = 2; + } +} + +message ServerConfig { + ServerType server_type = 1; + SecurityParams security_params = 2; + // Port on which to listen. Zero means pick unused port. + int32 port = 4; + // Only for async server. Number of threads used to serve the requests. 
+ int32 async_server_threads = 7; + // Specify the number of cores to limit server to, if desired + int32 core_limit = 8; + // payload config, used in generic server + PayloadConfig payload_config = 9; + + // Specify the cores we should run the server on, if desired + repeated int32 core_list = 10; +} + +message ServerArgs { + oneof argtype { + ServerConfig setup = 1; + Mark mark = 2; + } +} + +message ServerStatus { + ServerStats stats = 1; + // the port bound by the server + int32 port = 2; + // Number of cores available to the server + int32 cores = 3; +} + +message CoreRequest { +} + +message CoreResponse { + // Number of cores available on the server + int32 cores = 1; +} + +message Void { +} + +// A single performance scenario: input to qps_json_driver +message Scenario { + // Human readable name for this scenario + string name = 1; + // Client configuration + ClientConfig client_config = 2; + // Number of clients to start for the test + int32 num_clients = 3; + // Server configuration + ServerConfig server_config = 4; + // Number of servers to start for the test + int32 num_servers = 5; + // Warmup period, in seconds + int32 warmup_seconds = 6; + // Benchmark time, in seconds + int32 benchmark_seconds = 7; + // Number of workers to spawn locally (usually zero) + int32 spawn_local_worker_count = 8; +} + +// A set of scenarios to be run with qps_json_driver +message Scenarios { + repeated Scenario scenarios = 1; +} diff --git a/benchmark/grpc_testing/messages.pb.go b/benchmark/grpc_testing/messages.pb.go new file mode 100644 index 00000000..214d6d0f --- /dev/null +++ b/benchmark/grpc_testing/messages.pb.go @@ -0,0 +1,345 @@ +// Code generated by protoc-gen-go. +// source: messages.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The type of payload that should be returned. +type PayloadType int32 + +const ( + // Compressable text format. + PayloadType_COMPRESSABLE PayloadType = 0 + // Uncompressable binary format. + PayloadType_UNCOMPRESSABLE PayloadType = 1 + // Randomly chosen from all other formats defined in this enum. + PayloadType_RANDOM PayloadType = 2 +) + +var PayloadType_name = map[int32]string{ + 0: "COMPRESSABLE", + 1: "UNCOMPRESSABLE", + 2: "RANDOM", +} +var PayloadType_value = map[string]int32{ + "COMPRESSABLE": 0, + "UNCOMPRESSABLE": 1, + "RANDOM": 2, +} + +func (x PayloadType) String() string { + return proto.EnumName(PayloadType_name, int32(x)) +} +func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Compression algorithms +type CompressionType int32 + +const ( + // No compression + CompressionType_NONE CompressionType = 0 + CompressionType_GZIP CompressionType = 1 + CompressionType_DEFLATE CompressionType = 2 +) + +var CompressionType_name = map[int32]string{ + 0: "NONE", + 1: "GZIP", + 2: "DEFLATE", +} +var CompressionType_value = map[string]int32{ + "NONE": 0, + "GZIP": 1, + "DEFLATE": 2, +} + +func (x CompressionType) String() string { + return proto.EnumName(CompressionType_name, int32(x)) +} +func (CompressionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +// A block of data, to simply increase gRPC message size. +type Payload struct { + // The type of data in body. 
+ Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` + // Primary contents of payload. + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (m *Payload) Reset() { *m = Payload{} } +func (m *Payload) String() string { return proto.CompactTextString(m) } +func (*Payload) ProtoMessage() {} +func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// A protobuf representation for grpc status. This is used by test +// clients to specify a status that the server should attempt to return. +type EchoStatus struct { + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *EchoStatus) Reset() { *m = EchoStatus{} } +func (m *EchoStatus) String() string { return proto.CompactTextString(m) } +func (*EchoStatus) ProtoMessage() {} +func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +// Unary request. +type SimpleRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Whether SimpleResponse should include username. + FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` + // Whether SimpleResponse should include OAuth scope. + FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` + // Compression algorithm to be used by the server for the response (stream) + ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *SimpleRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *SimpleRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + return nil +} + +// Unary response, as configured by the request. +type SimpleResponse struct { + // Payload to increase message size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` + // The user the request came from, for verifying authentication was + // successful when the client expected it. + Username string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` + // OAuth scope. 
+ OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *SimpleResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming request. +type StreamingInputCallRequest struct { + // Optional input payload sent along with the request. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` +} + +func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } +func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallRequest) ProtoMessage() {} +func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *StreamingInputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// Client-streaming response. +type StreamingInputCallResponse struct { + // Aggregated size of payloads received from the client. + AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` +} + +func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } +func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingInputCallResponse) ProtoMessage() {} +func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +// Configuration for a particular response. +type ResponseParameters struct { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + Size int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` + // Desired interval between consecutive responses in the response stream in + // microseconds. + IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` +} + +func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } +func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } +func (*ResponseParameters) ProtoMessage() {} +func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +// Server-streaming request. +type StreamingOutputCallRequest struct { + // Desired payload type in the response from the server. + // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + // Configuration for each expected response message. + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` + // Optional input payload sent along with the request. 
+ Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` + // Compression algorithm to be used by the server for the response (stream) + ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` + // Whether server should return a given status + ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` +} + +func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } +func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallRequest) ProtoMessage() {} +func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { + if m != nil { + return m.ResponseParameters + } + return nil +} + +func (m *StreamingOutputCallRequest) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { + if m != nil { + return m.ResponseStatus + } + return nil +} + +// Server-streaming response, as configured by the request and parameters. +type StreamingOutputCallResponse struct { + // Payload to increase response size. + Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` +} + +func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } +func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } +func (*StreamingOutputCallResponse) ProtoMessage() {} +func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *StreamingOutputCallResponse) GetPayload() *Payload { + if m != nil { + return m.Payload + } + return nil +} + +// For reconnect interop test only. +// Client tells server what reconnection parameters it used. +type ReconnectParams struct { + MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs" json:"max_reconnect_backoff_ms,omitempty"` +} + +func (m *ReconnectParams) Reset() { *m = ReconnectParams{} } +func (m *ReconnectParams) String() string { return proto.CompactTextString(m) } +func (*ReconnectParams) ProtoMessage() {} +func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +// For reconnect interop test only. +// Server tells client whether its reconnects are following the spec and the +// reconnect backoffs it saw. 
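For reference, a minimal sketch of building one of the generated message types above from Go; the testpb import path and the payload size are assumptions for illustration, not part of this patch.

package main

import (
	"fmt"

	testpb "google.golang.org/grpc/benchmark/grpc_testing" // assumed import path for the package added here
)

func main() {
	// Wrap a fixed-size COMPRESSABLE payload in a SimpleRequest, using the
	// generated field names shown above (ResponseType, ResponseSize, Payload).
	body := make([]byte, 32)
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 32,
		Payload: &testpb.Payload{
			Type: testpb.PayloadType_COMPRESSABLE,
			Body: body,
		},
	}
	fmt.Println(req.String())
}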
+type ReconnectInfo struct { + Passed bool `protobuf:"varint,1,opt,name=passed" json:"passed,omitempty"` + BackoffMs []int32 `protobuf:"varint,2,rep,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"` +} + +func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} } +func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) } +func (*ReconnectInfo) ProtoMessage() {} +func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func init() { + proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") + proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") + proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") + proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") + proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") + proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") + proto.RegisterType((*ReconnectParams)(nil), "grpc.testing.ReconnectParams") + proto.RegisterType((*ReconnectInfo)(nil), "grpc.testing.ReconnectInfo") + proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) + proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value) +} + +var fileDescriptor1 = []byte{ + // 645 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x25, 0xdf, 0xe9, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x51, 0x99, 0x4b, 0x55, 0x89, + 0x20, 0x15, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9b, 0x84, 0x75, 0x7b, 0xe1, 0x62, + 0x6d, 0x9c, 0x4d, 0x1a, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x28, 0x07, 0xee, 0xfc, 0x60, 0xee, 0xec, + 0xae, 0xbd, 0x8e, 0xd3, 0xf6, 0xd0, 0xc2, 0x85, 0xdb, 0xce, 0xcc, 0x9b, 0x97, 0x79, 0x33, 0xcf, + 0x0a, 0xb4, 0x7c, 0xca, 0x39, 0x99, 0x51, 0xde, 0x09, 0x23, 0x26, 0x18, 0x6a, 0xce, 0xa2, 0xd0, + 0xeb, 0x08, 0xca, 0xc5, 0x3c, 0x98, 0xd9, 0xa7, 0x50, 0x1b, 0x91, 0xab, 0x05, 0x23, 0x13, 0xf4, + 0x02, 0xca, 0xe2, 0x2a, 0xa4, 0x56, 0x61, 0xb7, 0xb0, 0xd7, 0x3a, 0xd8, 0xea, 0xe4, 0x71, 0x9d, + 0x14, 0x74, 0x2e, 0x01, 0x58, 0xc3, 0x10, 0x82, 0xf2, 0x98, 0x4d, 0xae, 0xac, 0xa2, 0x84, 0x37, + 0xb1, 0x7e, 0xdb, 0x6f, 0x01, 0x7a, 0xde, 0x25, 0x73, 0x04, 0x11, 0x31, 0x57, 0x08, 0x8f, 0x4d, + 0x12, 0xc2, 0x0a, 0xd6, 0x6f, 0x64, 0x41, 0x2d, 0x9d, 0x47, 0x37, 0xae, 0x61, 0x13, 0xda, 0xbf, + 0x4a, 0xb0, 0xee, 0xcc, 0xfd, 0x70, 0x41, 0x31, 0xfd, 0x1a, 0xcb, 0x9f, 0x45, 0xef, 0x60, 0x3d, + 0xa2, 0x3c, 0x64, 0x01, 0xa7, 0xee, 0xdd, 0x26, 0x6b, 0x1a, 0xbc, 0x8a, 0xd0, 0xf3, 0x5c, 0x3f, + 0x9f, 0xff, 0x48, 0x7e, 0xb1, 0xb2, 0x04, 0x39, 0x32, 0x87, 0x5e, 0x42, 0x2d, 0x4c, 0x18, 0xac, + 0x92, 0x2c, 0x37, 0x0e, 0x36, 0x6f, 0xa5, 0xc7, 0x06, 0xa5, 0x58, 0xa7, 0xf3, 0xc5, 0xc2, 0x8d, + 0x39, 0x8d, 0x02, 0xe2, 0x53, 0xab, 0x2c, 0xdb, 0xea, 0xb8, 0xa9, 0x92, 0x17, 0x69, 0x0e, 0xed, + 0x41, 0x5b, 0x83, 0x18, 0x89, 0xc5, 0xa5, 0xcb, 0x3d, 0x26, 0xa7, 0xaf, 0x68, 0x5c, 0x4b, 0xe5, + 0x87, 0x2a, 0xed, 0xa8, 0x2c, 0x1a, 0xc1, 0xa3, 0x6c, 0x48, 0x8f, 0xf9, 0xa1, 0x0c, 0xf8, 0x9c, + 0x05, 0x56, 0x55, 0x6b, 0xdd, 0x59, 0x1d, 0xe6, 0x68, 0x09, 0xd0, 0x7a, 0x1f, 0x9a, 0xd6, 0x5c, + 0x01, 
0x75, 0x61, 0x63, 0x29, 0x5b, 0x5f, 0xc2, 0xaa, 0x69, 0x65, 0xd6, 0x2a, 0xd9, 0xf2, 0x52, + 0xb8, 0x95, 0xad, 0x44, 0xc7, 0xf6, 0x4f, 0x68, 0x99, 0x53, 0x24, 0xf9, 0xfc, 0x9a, 0x0a, 0x77, + 0x5a, 0xd3, 0x36, 0xd4, 0xb3, 0x0d, 0x25, 0x97, 0xce, 0x62, 0xf4, 0x0c, 0x1a, 0xf9, 0xc5, 0x94, + 0x74, 0x19, 0x58, 0xb6, 0x14, 0xe9, 0xca, 0x2d, 0x47, 0x44, 0x94, 0xf8, 0x92, 0xba, 0x1f, 0x84, + 0xb1, 0x38, 0x22, 0x8b, 0x85, 0xb1, 0xc5, 0x7d, 0x47, 0xb1, 0xcf, 0x61, 0xfb, 0x36, 0xb6, 0x54, + 0xd9, 0x6b, 0x78, 0x42, 0x66, 0xb3, 0x88, 0xce, 0x88, 0xa0, 0x13, 0x37, 0xed, 0x49, 0xfc, 0x92, + 0x18, 0x77, 0x73, 0x59, 0x4e, 0xa9, 0x95, 0x71, 0xec, 0x3e, 0x20, 0xc3, 0x31, 0x22, 0x91, 0x94, + 0x25, 0x68, 0xa4, 0x3d, 0x9f, 0x6b, 0xd5, 0x6f, 0x25, 0x77, 0x1e, 0xc8, 0xea, 0x37, 0xa2, 0x5c, + 0x93, 0xba, 0x10, 0x4c, 0xea, 0x82, 0xdb, 0xbf, 0x8b, 0xb9, 0x09, 0x87, 0xb1, 0xb8, 0x26, 0xf8, + 0x5f, 0xbf, 0x83, 0x4f, 0x90, 0xf9, 0x44, 0xea, 0x33, 0xa3, 0xca, 0x39, 0x4a, 0x72, 0x79, 0xbb, + 0xab, 0x2c, 0x37, 0x25, 0x61, 0x14, 0xdd, 0x94, 0x79, 0xef, 0xaf, 0xe6, 0xbf, 0xb4, 0xf9, 0x00, + 0x9e, 0xde, 0xba, 0xf6, 0xbf, 0xf4, 0xbc, 0xfd, 0x11, 0x36, 0x30, 0xf5, 0x58, 0x10, 0x50, 0x4f, + 0xe8, 0x65, 0x71, 0xf4, 0x06, 0x2c, 0x9f, 0x7c, 0x77, 0x23, 0x93, 0x76, 0xc7, 0xc4, 0xfb, 0xc2, + 0xa6, 0x53, 0xd7, 0xe7, 0xc6, 0x5e, 0xb2, 0x9e, 0x75, 0x1d, 0x26, 0xd5, 0x33, 0x6e, 0x9f, 0xc0, + 0x7a, 0x96, 0xed, 0x07, 0x53, 0x86, 0x1e, 0x43, 0x35, 0x24, 0x9c, 0xd3, 0x64, 0x98, 0x3a, 0x4e, + 0x23, 0xb4, 0x03, 0x90, 0xe3, 0x54, 0x47, 0xad, 0xe0, 0xb5, 0xb1, 0xe1, 0xd9, 0x7f, 0x0f, 0x8d, + 0x9c, 0x33, 0x50, 0x1b, 0x9a, 0x47, 0xc3, 0xb3, 0x11, 0xee, 0x39, 0x4e, 0xf7, 0xf0, 0xb4, 0xd7, + 0x7e, 0x20, 0x1d, 0xdb, 0xba, 0x18, 0xac, 0xe4, 0x0a, 0x08, 0xa0, 0x8a, 0xbb, 0x83, 0xe3, 0xe1, + 0x59, 0xbb, 0xb8, 0x7f, 0x00, 0x1b, 0xd7, 0xee, 0x81, 0xea, 0x50, 0x1e, 0x0c, 0x07, 0xaa, 0x59, + 0xbe, 0x3e, 0x7c, 0xee, 0x8f, 0x64, 0x4b, 0x03, 0x6a, 0xc7, 0xbd, 0x93, 0xd3, 0xee, 0x79, 0xaf, + 0x5d, 0x1c, 0x57, 0xf5, 0x5f, 0xcd, 0xab, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, + 0x1e, 0x7c, 0x06, 0x00, 0x00, +} diff --git a/benchmark/grpc_testing/messages.proto b/benchmark/grpc_testing/messages.proto new file mode 100644 index 00000000..a063b470 --- /dev/null +++ b/benchmark/grpc_testing/messages.proto @@ -0,0 +1,173 @@ + +// Copyright 2015-2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Message definitions to be used by integration test service definitions. + +syntax = "proto3"; + +package grpc.testing; + +// The type of payload that should be returned. +enum PayloadType { + // Compressable text format. + COMPRESSABLE = 0; + + // Uncompressable binary format. + UNCOMPRESSABLE = 1; + + // Randomly chosen from all other formats defined in this enum. + RANDOM = 2; +} + +// Compression algorithms +enum CompressionType { + // No compression + NONE = 0; + GZIP = 1; + DEFLATE = 2; +} + +// A block of data, to simply increase gRPC message size. +message Payload { + // The type of data in body. + PayloadType type = 1; + // Primary contents of payload. + bytes body = 2; +} + +// A protobuf representation for grpc status. This is used by test +// clients to specify a status that the server should attempt to return. +message EchoStatus { + int32 code = 1; + string message = 2; +} + +// Unary request. +message SimpleRequest { + // Desired payload type in the response from the server. + // If response_type is RANDOM, server randomly chooses one from other formats. + PayloadType response_type = 1; + + // Desired payload size in the response from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + int32 response_size = 2; + + // Optional input payload sent along with the request. + Payload payload = 3; + + // Whether SimpleResponse should include username. + bool fill_username = 4; + + // Whether SimpleResponse should include OAuth scope. + bool fill_oauth_scope = 5; + + // Compression algorithm to be used by the server for the response (stream) + CompressionType response_compression = 6; + + // Whether server should return a given status + EchoStatus response_status = 7; +} + +// Unary response, as configured by the request. +message SimpleResponse { + // Payload to increase message size. + Payload payload = 1; + // The user the request came from, for verifying authentication was + // successful when the client expected it. + string username = 2; + // OAuth scope. + string oauth_scope = 3; +} + +// Client-streaming request. +message StreamingInputCallRequest { + // Optional input payload sent along with the request. + Payload payload = 1; + + // Not expecting any payload from the response. +} + +// Client-streaming response. +message StreamingInputCallResponse { + // Aggregated size of payloads received from the client. + int32 aggregated_payload_size = 1; +} + +// Configuration for a particular response. +message ResponseParameters { + // Desired payload sizes in responses from the server. + // If response_type is COMPRESSABLE, this denotes the size before compression. + int32 size = 1; + + // Desired interval between consecutive responses in the response stream in + // microseconds. + int32 interval_us = 2; +} + +// Server-streaming request. +message StreamingOutputCallRequest { + // Desired payload type in the response from the server. 
+ // If response_type is RANDOM, the payload from each response in the stream + // might be of different types. This is to simulate a mixed type of payload + // stream. + PayloadType response_type = 1; + + // Configuration for each expected response message. + repeated ResponseParameters response_parameters = 2; + + // Optional input payload sent along with the request. + Payload payload = 3; + + // Compression algorithm to be used by the server for the response (stream) + CompressionType response_compression = 6; + + // Whether server should return a given status + EchoStatus response_status = 7; +} + +// Server-streaming response, as configured by the request and parameters. +message StreamingOutputCallResponse { + // Payload to increase response size. + Payload payload = 1; +} + +// For reconnect interop test only. +// Client tells server what reconnection parameters it used. +message ReconnectParams { + int32 max_reconnect_backoff_ms = 1; +} + +// For reconnect interop test only. +// Server tells client whether its reconnects are following the spec and the +// reconnect backoffs it saw. +message ReconnectInfo { + bool passed = 1; + repeated int32 backoff_ms = 2; +} diff --git a/benchmark/grpc_testing/payloads.pb.go b/benchmark/grpc_testing/payloads.pb.go new file mode 100644 index 00000000..4394d55e --- /dev/null +++ b/benchmark/grpc_testing/payloads.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. +// source: payloads.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ByteBufferParams struct { + ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` + RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` +} + +func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} } +func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) } +func (*ByteBufferParams) ProtoMessage() {} +func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type SimpleProtoParams struct { + ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` + RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` +} + +func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} } +func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) } +func (*SimpleProtoParams) ProtoMessage() {} +func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +type ComplexProtoParams struct { +} + +func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} } +func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) } +func (*ComplexProtoParams) ProtoMessage() {} +func (*ComplexProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +type PayloadConfig struct { + // Types that are valid to be assigned to Payload: + // *PayloadConfig_BytebufParams + // *PayloadConfig_SimpleParams + // *PayloadConfig_ComplexParams + Payload isPayloadConfig_Payload `protobuf_oneof:"payload"` +} + +func (m *PayloadConfig) Reset() { *m = PayloadConfig{} } +func (m *PayloadConfig) String() string { return proto.CompactTextString(m) } +func (*PayloadConfig) ProtoMessage() {} +func (*PayloadConfig) 
Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +type isPayloadConfig_Payload interface { + isPayloadConfig_Payload() +} + +type PayloadConfig_BytebufParams struct { + BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,oneof"` +} +type PayloadConfig_SimpleParams struct { + SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,oneof"` +} +type PayloadConfig_ComplexParams struct { + ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,oneof"` +} + +func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {} +func (*PayloadConfig_SimpleParams) isPayloadConfig_Payload() {} +func (*PayloadConfig_ComplexParams) isPayloadConfig_Payload() {} + +func (m *PayloadConfig) GetPayload() isPayloadConfig_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *PayloadConfig) GetBytebufParams() *ByteBufferParams { + if x, ok := m.GetPayload().(*PayloadConfig_BytebufParams); ok { + return x.BytebufParams + } + return nil +} + +func (m *PayloadConfig) GetSimpleParams() *SimpleProtoParams { + if x, ok := m.GetPayload().(*PayloadConfig_SimpleParams); ok { + return x.SimpleParams + } + return nil +} + +func (m *PayloadConfig) GetComplexParams() *ComplexProtoParams { + if x, ok := m.GetPayload().(*PayloadConfig_ComplexParams); ok { + return x.ComplexParams + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*PayloadConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PayloadConfig_OneofMarshaler, _PayloadConfig_OneofUnmarshaler, _PayloadConfig_OneofSizer, []interface{}{ + (*PayloadConfig_BytebufParams)(nil), + (*PayloadConfig_SimpleParams)(nil), + (*PayloadConfig_ComplexParams)(nil), + } +} + +func _PayloadConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PayloadConfig) + // payload + switch x := m.Payload.(type) { + case *PayloadConfig_BytebufParams: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BytebufParams); err != nil { + return err + } + case *PayloadConfig_SimpleParams: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SimpleParams); err != nil { + return err + } + case *PayloadConfig_ComplexParams: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ComplexParams); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PayloadConfig.Payload has unexpected type %T", x) + } + return nil +} + +func _PayloadConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PayloadConfig) + switch tag { + case 1: // payload.bytebuf_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ByteBufferParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_BytebufParams{msg} + return true, err + case 2: // payload.simple_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SimpleProtoParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_SimpleParams{msg} + return true, err + case 3: // payload.complex_params + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ComplexProtoParams) + err := b.DecodeMessage(msg) + m.Payload = &PayloadConfig_ComplexParams{msg} + 
return true, err + default: + return false, nil + } +} + +func _PayloadConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PayloadConfig) + // payload + switch x := m.Payload.(type) { + case *PayloadConfig_BytebufParams: + s := proto.Size(x.BytebufParams) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PayloadConfig_SimpleParams: + s := proto.Size(x.SimpleParams) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PayloadConfig_ComplexParams: + s := proto.Size(x.ComplexParams) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*ByteBufferParams)(nil), "grpc.testing.ByteBufferParams") + proto.RegisterType((*SimpleProtoParams)(nil), "grpc.testing.SimpleProtoParams") + proto.RegisterType((*ComplexProtoParams)(nil), "grpc.testing.ComplexProtoParams") + proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig") +} + +var fileDescriptor2 = []byte{ + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc, + 0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, + 0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49, + 0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2, + 0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62, + 0x07, 0xf2, 0x83, 0x81, 0x5c, 0x21, 0x69, 0x2e, 0xce, 0xa2, 0xd4, 0xe2, 0x02, 0x88, 0x1c, 0x13, + 0x58, 0x8e, 0x03, 0x24, 0x00, 0x92, 0x54, 0xf2, 0xe6, 0x12, 0x0c, 0xce, 0xcc, 0x2d, 0xc8, 0x49, + 0x0d, 0x00, 0x59, 0x44, 0xa1, 0x61, 0x22, 0x5c, 0x42, 0xce, 0xf9, 0x20, 0xc3, 0x2a, 0x90, 0x4c, + 0x53, 0xfa, 0xc6, 0xc8, 0xc5, 0x1b, 0x00, 0xf1, 0x8f, 0x73, 0x7e, 0x5e, 0x5a, 0x66, 0xba, 0x90, + 0x3b, 0x17, 0x5f, 0x12, 0xd0, 0x03, 0x49, 0xa5, 0x69, 0xf1, 0x05, 0x60, 0x35, 0x60, 0x5b, 0xb8, + 0x8d, 0xe4, 0xf4, 0x90, 0xfd, 0xa9, 0x87, 0xee, 0x49, 0x0f, 0x86, 0x20, 0x5e, 0xa8, 0x3e, 0xa8, + 0x43, 0xdd, 0xb8, 0x78, 0x8b, 0xc1, 0xae, 0x87, 0x99, 0xc3, 0x04, 0x36, 0x47, 0x1e, 0xd5, 0x1c, + 0x0c, 0x0f, 0x02, 0x0d, 0xe2, 0x81, 0xe8, 0x83, 0x9a, 0xe3, 0xc9, 0xc5, 0x97, 0x0c, 0x71, 0x38, + 0xcc, 0x20, 0x66, 0xb0, 0x41, 0x0a, 0xa8, 0x06, 0x61, 0x7a, 0x0e, 0xe4, 0x24, 0xa8, 0x4e, 0x88, + 0x80, 0x13, 0x27, 0x17, 0x3b, 0x34, 0xf2, 0x92, 0xd8, 0xc0, 0x91, 0x67, 0x0c, 0x08, 0x00, 0x00, + 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00, +} diff --git a/benchmark/grpc_testing/payloads.proto b/benchmark/grpc_testing/payloads.proto new file mode 100644 index 00000000..7e5b2c61 --- /dev/null +++ b/benchmark/grpc_testing/payloads.proto @@ -0,0 +1,55 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
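For reference, a minimal sketch of how the PayloadConfig oneof generated above is populated and inspected from Go; testpb is an assumed alias for this package.

package main

import (
	"fmt"

	testpb "google.golang.org/grpc/benchmark/grpc_testing" // assumed import path
)

func main() {
	// Select the simple-proto branch of the oneof via its generated wrapper type.
	cfg := &testpb.PayloadConfig{
		Payload: &testpb.PayloadConfig_SimpleParams{
			SimpleParams: &testpb.SimpleProtoParams{ReqSize: 1, RespSize: 1},
		},
	}

	// A type switch on the oneof interface recovers the concrete branch.
	switch p := cfg.Payload.(type) {
	case *testpb.PayloadConfig_BytebufParams:
		fmt.Println("byte buffer params:", p.BytebufParams.ReqSize, p.BytebufParams.RespSize)
	case *testpb.PayloadConfig_SimpleParams:
		fmt.Println("simple proto params:", p.SimpleParams.ReqSize, p.SimpleParams.RespSize)
	case *testpb.PayloadConfig_ComplexParams:
		fmt.Println("complex proto params")
	default:
		fmt.Println("no payload configured")
	}
}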
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc.testing; + +message ByteBufferParams { + int32 req_size = 1; + int32 resp_size = 2; +} + +message SimpleProtoParams { + int32 req_size = 1; + int32 resp_size = 2; +} + +message ComplexProtoParams { + // TODO (vpai): Fill this in once the details of complex, representative + // protos are decided +} + +message PayloadConfig { + oneof payload { + ByteBufferParams bytebuf_params = 1; + SimpleProtoParams simple_params = 2; + ComplexProtoParams complex_params = 3; + } +} diff --git a/benchmark/grpc_testing/services.pb.go b/benchmark/grpc_testing/services.pb.go new file mode 100644 index 00000000..9f1d8fd2 --- /dev/null +++ b/benchmark/grpc_testing/services.pb.go @@ -0,0 +1,439 @@ +// Code generated by protoc-gen-go. +// source: services.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion2 + +// Client API for BenchmarkService service + +type BenchmarkServiceClient interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. + StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) +} + +type benchmarkServiceClient struct { + cc *grpc.ClientConn +} + +func NewBenchmarkServiceClient(cc *grpc.ClientConn) BenchmarkServiceClient { + return &benchmarkServiceClient{cc} +} + +func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := grpc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { + stream, err := grpc.NewClientStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], c.cc, "/grpc.testing.BenchmarkService/StreamingCall", opts...) + if err != nil { + return nil, err + } + x := &benchmarkServiceStreamingCallClient{stream} + return x, nil +} + +type BenchmarkService_StreamingCallClient interface { + Send(*SimpleRequest) error + Recv() (*SimpleResponse, error) + grpc.ClientStream +} + +type benchmarkServiceStreamingCallClient struct { + grpc.ClientStream +} + +func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for BenchmarkService service + +type BenchmarkServiceServer interface { + // One request followed by one response. + // The server returns the client payload as-is. + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // One request followed by one response. + // The server returns the client payload as-is. + StreamingCall(BenchmarkService_StreamingCallServer) error +} + +func RegisterBenchmarkServiceServer(s *grpc.Server, srv BenchmarkServiceServer) { + s.RegisterService(&_BenchmarkService_serviceDesc, srv) +} + +func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BenchmarkServiceServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.BenchmarkService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream}) +} + +type BenchmarkService_StreamingCallServer interface { + Send(*SimpleResponse) error + Recv() (*SimpleRequest, error) + grpc.ServerStream +} + +type benchmarkServiceStreamingCallServer struct { + grpc.ServerStream +} + +func (x *benchmarkServiceStreamingCallServer) Send(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) { + m := new(SimpleRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _BenchmarkService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.BenchmarkService", + HandlerType: (*BenchmarkServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnaryCall", + Handler: _BenchmarkService_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingCall", + Handler: _BenchmarkService_StreamingCall_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} + +// Client API for WorkerService service + +type WorkerServiceClient interface { + // Start server with specified workload. 
+ // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) + // Just return the core count - unary call + CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) + // Quit this worker + QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) +} + +type workerServiceClient struct { + cc *grpc.ClientConn +} + +func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient { + return &workerServiceClient{cc} +} + +func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { + stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[0], c.cc, "/grpc.testing.WorkerService/RunServer", opts...) + if err != nil { + return nil, err + } + x := &workerServiceRunServerClient{stream} + return x, nil +} + +type WorkerService_RunServerClient interface { + Send(*ServerArgs) error + Recv() (*ServerStatus, error) + grpc.ClientStream +} + +type workerServiceRunServerClient struct { + grpc.ClientStream +} + +func (x *workerServiceRunServerClient) Send(m *ServerArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { + m := new(ServerStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { + stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[1], c.cc, "/grpc.testing.WorkerService/RunClient", opts...) + if err != nil { + return nil, err + } + x := &workerServiceRunClientClient{stream} + return x, nil +} + +type WorkerService_RunClientClient interface { + Send(*ClientArgs) error + Recv() (*ClientStatus, error) + grpc.ClientStream +} + +type workerServiceRunClientClient struct { + grpc.ClientStream +} + +func (x *workerServiceRunClientClient) Send(m *ClientArgs) error { + return x.ClientStream.SendMsg(m) +} + +func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { + m := new(ClientStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { + out := new(CoreResponse) + err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { + out := new(Void) + err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for WorkerService service + +type WorkerServiceServer interface { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunServer(WorkerService_RunServerServer) error + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + RunClient(WorkerService_RunClientServer) error + // Just return the core count - unary call + CoreCount(context.Context, *CoreRequest) (*CoreResponse, error) + // Quit this worker + QuitWorker(context.Context, *Void) (*Void, error) +} + +func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer) { + s.RegisterService(&_WorkerService_serviceDesc, srv) +} + +func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream}) +} + +type WorkerService_RunServerServer interface { + Send(*ServerStatus) error + Recv() (*ServerArgs, error) + grpc.ServerStream +} + +type workerServiceRunServerServer struct { + grpc.ServerStream +} + +func (x *workerServiceRunServerServer) Send(m *ServerStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) { + m := new(ServerArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream}) +} + +type WorkerService_RunClientServer interface { + Send(*ClientStatus) error + Recv() (*ClientArgs, error) + grpc.ServerStream +} + +type workerServiceRunClientServer struct { + grpc.ServerStream +} + +func (x *workerServiceRunClientServer) Send(m *ClientStatus) error { + return x.ServerStream.SendMsg(m) +} + +func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) { + m := new(ClientArgs) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoreRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkerServiceServer).CoreCount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.WorkerService/CoreCount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Void) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkerServiceServer).QuitWorker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.WorkerService/QuitWorker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void)) + } + return interceptor(ctx, in, info, handler) +} + +var _WorkerService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.WorkerService", + HandlerType: (*WorkerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CoreCount", + Handler: _WorkerService_CoreCount_Handler, + }, + { + MethodName: "QuitWorker", + Handler: _WorkerService_QuitWorker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "RunServer", + Handler: _WorkerService_RunServer_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "RunClient", + Handler: _WorkerService_RunClient_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, +} + +var fileDescriptor3 = []byte{ + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 0x53, 0x91, 0xd5, 0x17, + 0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0x4d, 0xea, 0xcc, 0x44, 0xf0, + 0x49, 0x7c, 0x07, 0x9f, 0xd2, 0xee, 0x66, 0x0b, 0xb5, 0xe4, 0xb6, 0xc7, 0xf9, 0xbf, 0xe1, 0x23, + 0x7f, 0x46, 0x2c, 0x08, 0xf0, 0xcb, 0x34, 0x40, 0x65, 0x8f, 0x9e, 0xbd, 0x3c, 0x69, 0xb1, 0x6f, + 0x4a, 0x06, 0x62, 0xe3, 0x5a, 0xb5, 0xe8, 0x80, 0x48, 0xb7, 0x23, 0x55, 0x45, 0xe3, 0x1d, 0xa3, + 0xb7, 0x71, 0x5c, 0xfe, 0x66, 0xe2, 0x74, 0x05, 0xae, 0xf9, 0xe8, 0x34, 0x6e, 0xea, 0x28, 0x92, + 0x0f, 0x22, 0x7f, 0x76, 0x1a, 0xbf, 0x2b, 0x6d, 0xad, 0xbc, 0x28, 0xa7, 0xbe, 0xb2, 0x36, 0x5d, + 0x6f, 0x61, 0x0d, 0x9f, 0x61, 0x08, 0xd4, 0x65, 0x1a, 0x52, 0xef, 0x1d, 0x81, 0x7c, 0x14, 0x45, + 0xcd, 0x08, 0xba, 0x1b, 0xd8, 0x81, 0xae, 0xab, 0xec, 0x3a, 0x5b, 0xfe, 0x1c, 0x89, 0xe2, 0xd5, + 0xe3, 0x06, 0x70, 0x7c, 0xe9, 0xbd, 0xc8, 0xd7, 0xc1, 0x6d, 0x27, 0x40, 0x79, 0x36, 0x13, 0xec, + 0xd2, 0x3b, 0x6c, 0x49, 0xa9, 0x14, 0xa9, 0x59, 0x73, 0xa0, 0xad, 0x78, 0xaf, 0xa9, 0xac, 0x01, + 0xc7, 0x73, 0x4d, 0x4c, 0x53, 0x9a, 0x48, 0x26, 0x9a, 0x95, 0xc8, 0x2b, 0x8f, 0x50, 0xf9, 0x30, + 0x68, 0xce, 0x67, 0xcb, 0x03, 0x18, 0x9b, 0xaa, 0x14, 0xda, 0xff, 0xd9, 0xad, 0x10, 0x4f, 0xc1, + 0x70, 0xac, 0x29, 0xe5, 0xff, 0xcd, 0x17, 0x6f, 0xde, 0x55, 0x22, 0x7b, 0x3b, 0xde, 0x5d, 0xf3, + 0xe6, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00, +} diff --git a/benchmark/grpc_testing/services.proto b/benchmark/grpc_testing/services.proto new file mode 100644 index 00000000..19b55c31 --- /dev/null +++ b/benchmark/grpc_testing/services.proto @@ -0,0 +1,71 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. +syntax = "proto3"; + +import "messages.proto"; +import "control.proto"; + +package grpc.testing; + +service BenchmarkService { + // One request followed by one response. + // The server returns the client payload as-is. + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // One request followed by one response. + // The server returns the client payload as-is. + rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); +} + +service WorkerService { + // Start server with specified workload. + // First request sent specifies the ServerConfig followed by ServerStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test server + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + rpc RunServer(stream ServerArgs) returns (stream ServerStatus); + + // Start client with specified workload. + // First request sent specifies the ClientConfig followed by ClientStatus + // response. After that, a "Mark" can be sent anytime to request the latest + // stats. Closing the stream will initiate shutdown of the test client + // and once the shutdown has finished, the OK status is sent to terminate + // this RPC. + rpc RunClient(stream ClientArgs) returns (stream ClientStatus); + + // Just return the core count - unary call + rpc CoreCount(CoreRequest) returns (CoreResponse); + + // Quit this worker + rpc QuitWorker(Void) returns (Void); +} diff --git a/benchmark/grpc_testing/stats.pb.go b/benchmark/grpc_testing/stats.pb.go new file mode 100644 index 00000000..ef04acc6 --- /dev/null +++ b/benchmark/grpc_testing/stats.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go. +// source: stats.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
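For reference, a minimal sketch of driving the WorkerService defined above through its generated client; the worker address and the insecure dial option are assumptions for illustration, and CoreRequest/Void come from the control.proto added elsewhere in this patch.

package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"

	testpb "google.golang.org/grpc/benchmark/grpc_testing" // assumed import path
)

func main() {
	// Dial a worker; the address and the insecure transport are illustrative only.
	conn, err := grpc.Dial("localhost:10000", grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("grpc.Dial failed: %v", err)
	}
	defer conn.Close()

	wc := testpb.NewWorkerServiceClient(conn)

	// CoreCount is a plain unary call on the worker.
	resp, err := wc.CoreCount(context.Background(), &testpb.CoreRequest{})
	if err != nil {
		grpclog.Fatalf("CoreCount failed: %v", err)
	}
	grpclog.Printf("worker cores: %v", resp)

	// QuitWorker asks the worker process to shut itself down.
	if _, err := wc.QuitWorker(context.Background(), &testpb.Void{}); err != nil {
		grpclog.Fatalf("QuitWorker failed: %v", err)
	}
}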
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ServerStats struct { + // wall clock time change in seconds since last reset + TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` + // change in user time (in seconds) used by the server since last reset + TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` + // change in server time (in seconds) used by the server process and all + // threads since last reset + TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` +} + +func (m *ServerStats) Reset() { *m = ServerStats{} } +func (m *ServerStats) String() string { return proto.CompactTextString(m) } +func (*ServerStats) ProtoMessage() {} +func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +// Histogram params based on grpc/support/histogram.c +type HistogramParams struct { + Resolution float64 `protobuf:"fixed64,1,opt,name=resolution" json:"resolution,omitempty"` + MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible" json:"max_possible,omitempty"` +} + +func (m *HistogramParams) Reset() { *m = HistogramParams{} } +func (m *HistogramParams) String() string { return proto.CompactTextString(m) } +func (*HistogramParams) ProtoMessage() {} +func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } + +// Histogram data based on grpc/support/histogram.c +type HistogramData struct { + Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` + MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen" json:"min_seen,omitempty"` + MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen" json:"max_seen,omitempty"` + Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` + SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares" json:"sum_of_squares,omitempty"` + Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` +} + +func (m *HistogramData) Reset() { *m = HistogramData{} } +func (m *HistogramData) String() string { return proto.CompactTextString(m) } +func (*HistogramData) ProtoMessage() {} +func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } + +type ClientStats struct { + // Latency histogram. Data points are in nanoseconds. + Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` + // See ServerStats for details. 
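For reference, a minimal sketch of summarizing a HistogramData message like the one generated here; deriving mean and standard deviation from Sum, SumOfSquares and Count is standard arithmetic, not something prescribed by this patch.

package main

import (
	"fmt"
	"math"

	testpb "google.golang.org/grpc/benchmark/grpc_testing" // assumed import path
)

// summarize derives mean and standard deviation from the running sums
// carried in HistogramData (Sum, SumOfSquares, Count).
func summarize(h *testpb.HistogramData) (mean, stddev float64) {
	if h == nil || h.Count == 0 {
		return 0, 0
	}
	mean = h.Sum / h.Count
	variance := h.SumOfSquares/h.Count - mean*mean
	if variance < 0 { // guard against floating-point rounding noise
		variance = 0
	}
	return mean, math.Sqrt(variance)
}

func main() {
	// Illustrative values corresponding to the samples 1, 5 and 9.
	h := &testpb.HistogramData{
		MinSeen:      1,
		MaxSeen:      9,
		Sum:          15,
		SumOfSquares: 107,
		Count:        3,
	}
	mean, stddev := summarize(h)
	fmt.Printf("mean=%v stddev=%v\n", mean, stddev)
}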
+ TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` + TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` + TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } + +func (m *ClientStats) GetLatencies() *HistogramData { + if m != nil { + return m.Latencies + } + return nil +} + +func init() { + proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats") + proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams") + proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData") + proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats") +} + +var fileDescriptor4 = []byte{ + // 342 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x4f, 0xe3, 0x30, + 0x10, 0xc5, 0x95, 0xa6, 0xed, 0xb6, 0x93, 0x76, 0x77, 0x65, 0xad, 0x56, 0x41, 0x95, 0xf8, 0x13, + 0x71, 0xe8, 0x29, 0x07, 0x38, 0x71, 0x06, 0x24, 0x6e, 0x54, 0x0d, 0x9c, 0x23, 0x37, 0x4c, 0x2b, + 0x8b, 0xc4, 0x0e, 0x99, 0x09, 0x2a, 0x1f, 0x09, 0xf1, 0x25, 0x71, 0x9c, 0x08, 0x0a, 0x48, 0x70, + 0x49, 0xf2, 0x7e, 0x6f, 0x34, 0xe3, 0xc9, 0x33, 0x04, 0xc4, 0x92, 0x29, 0x2e, 0x2b, 0xc3, 0x46, + 0x4c, 0x36, 0x55, 0x99, 0xc5, 0x8c, 0xc4, 0x4a, 0x6f, 0x22, 0x0d, 0x41, 0x82, 0xd5, 0x23, 0x56, + 0x49, 0x53, 0x22, 0x8e, 0x60, 0xc2, 0xaa, 0xc0, 0x14, 0x73, 0x59, 0x12, 0xde, 0x85, 0xde, 0xa1, + 0x37, 0xf7, 0x96, 0x41, 0xc3, 0x2e, 0x5b, 0x24, 0x66, 0x30, 0x76, 0x25, 0x35, 0x61, 0x15, 0xf6, + 0x9c, 0x3f, 0x6a, 0xc0, 0xad, 0xd5, 0xe2, 0x00, 0x5c, 0x6d, 0x4a, 0x4f, 0xc4, 0x58, 0x84, 0xbe, + 0xb3, 0xa1, 0x41, 0x89, 0x23, 0xd1, 0x0d, 0xfc, 0xb9, 0x52, 0xc4, 0x66, 0x53, 0xc9, 0x62, 0x21, + 0xed, 0x83, 0xc4, 0x3e, 0x40, 0x85, 0x64, 0xf2, 0x9a, 0x95, 0xd1, 0xdd, 0xc4, 0x1d, 0xd2, 0x9c, + 0xa9, 0x90, 0xdb, 0xb4, 0x34, 0x44, 0x6a, 0x95, 0x63, 0x37, 0x33, 0xb0, 0x6c, 0xd1, 0xa1, 0xe8, + 0xc5, 0x83, 0xe9, 0x5b, 0xdb, 0x0b, 0xc9, 0x52, 0xfc, 0x87, 0xe1, 0xaa, 0xce, 0xee, 0x91, 0x6d, + 0x43, 0x7f, 0x3e, 0x5d, 0x76, 0x4a, 0xec, 0xc1, 0xa8, 0x50, 0x3a, 0x25, 0x44, 0xdd, 0x35, 0xfa, + 0x65, 0x75, 0x62, 0xa5, 0xb3, 0xec, 0x1c, 0x67, 0xf9, 0x9d, 0x25, 0xb7, 0xce, 0xfa, 0x0b, 0x3e, + 0xd5, 0x45, 0xd8, 0x77, 0xb4, 0xf9, 0x14, 0xc7, 0xf0, 0xdb, 0xbe, 0x52, 0xb3, 0x4e, 0xe9, 0xa1, + 0x96, 0xf6, 0xb4, 0xe1, 0xc0, 0x99, 0x13, 0x4b, 0xaf, 0xd7, 0x49, 0xcb, 0xc4, 0x3f, 0x18, 0x64, + 0xa6, 0xd6, 0x1c, 0x0e, 0x9d, 0xd9, 0x8a, 0xe8, 0xd9, 0x83, 0xe0, 0x3c, 0x57, 0xa8, 0xb9, 0xfd, + 0xe9, 0x67, 0x30, 0xce, 0x25, 0xa3, 0xce, 0x94, 0x6d, 0xd3, 0xec, 0x1f, 0x9c, 0xcc, 0xe2, 0xdd, + 0x94, 0xe2, 0x0f, 0xbb, 0x2d, 0xdf, 0xab, 0xbf, 0xe4, 0xd5, 0xfb, 0x21, 0x2f, 0xff, 0xfb, 0xbc, + 0xfa, 0x9f, 0xf3, 0x5a, 0x0d, 0xdd, 0xa5, 0x39, 0x7d, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, + 0x34, 0x90, 0x43, 0x02, 0x00, 0x00, +} diff --git a/benchmark/grpc_testing/stats.proto b/benchmark/grpc_testing/stats.proto new file mode 100644 index 00000000..f9d11611 --- /dev/null +++ b/benchmark/grpc_testing/stats.proto @@ -0,0 +1,70 @@ +// Copyright 2015, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc.testing; + +message ServerStats { + // wall clock time change in seconds since last reset + double time_elapsed = 1; + + // change in user time (in seconds) used by the server since last reset + double time_user = 2; + + // change in server time (in seconds) used by the server process and all + // threads since last reset + double time_system = 3; +} + +// Histogram params based on grpc/support/histogram.c +message HistogramParams { + double resolution = 1; // first bucket is [0, 1 + resolution) + double max_possible = 2; // use enough buckets to allow this value +} + +// Histogram data based on grpc/support/histogram.c +message HistogramData { + repeated uint32 bucket = 1; + double min_seen = 2; + double max_seen = 3; + double sum = 4; + double sum_of_squares = 5; + double count = 6; +} + +message ClientStats { + // Latency histogram. Data points are in nanoseconds. + HistogramData latencies = 1; + + // See ServerStats for details. + double time_elapsed = 2; + double time_user = 3; + double time_system = 4; +} diff --git a/benchmark/grpc_testing/test.pb.go b/benchmark/grpc_testing/test.pb.go deleted file mode 100644 index b24dc696..00000000 --- a/benchmark/grpc_testing/test.pb.go +++ /dev/null @@ -1,944 +0,0 @@ -// Code generated by protoc-gen-go. -// source: benchmark/grpc_testing/test.proto -// DO NOT EDIT! - -/* -Package grpc_testing is a generated protocol buffer package. - -It is generated from these files: - benchmark/grpc_testing/test.proto - -It has these top-level messages: - StatsRequest - ServerStats - Payload - HistogramData - ClientConfig - Mark - ClientArgs - ClientStats - ClientStatus - ServerConfig - ServerArgs - ServerStatus - SimpleRequest - SimpleResponse -*/ -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.ProtoPackageIsVersion1 - -type PayloadType int32 - -const ( - // Compressable text format. - PayloadType_COMPRESSABLE PayloadType = 0 - // Uncompressable binary format. - PayloadType_UNCOMPRESSABLE PayloadType = 1 - // Randomly chosen from all other formats defined in this enum. - PayloadType_RANDOM PayloadType = 2 -) - -var PayloadType_name = map[int32]string{ - 0: "COMPRESSABLE", - 1: "UNCOMPRESSABLE", - 2: "RANDOM", -} -var PayloadType_value = map[string]int32{ - "COMPRESSABLE": 0, - "UNCOMPRESSABLE": 1, - "RANDOM": 2, -} - -func (x PayloadType) String() string { - return proto.EnumName(PayloadType_name, int32(x)) -} -func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type ClientType int32 - -const ( - ClientType_SYNCHRONOUS_CLIENT ClientType = 0 - ClientType_ASYNC_CLIENT ClientType = 1 -) - -var ClientType_name = map[int32]string{ - 0: "SYNCHRONOUS_CLIENT", - 1: "ASYNC_CLIENT", -} -var ClientType_value = map[string]int32{ - "SYNCHRONOUS_CLIENT": 0, - "ASYNC_CLIENT": 1, -} - -func (x ClientType) String() string { - return proto.EnumName(ClientType_name, int32(x)) -} -func (ClientType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type ServerType int32 - -const ( - ServerType_SYNCHRONOUS_SERVER ServerType = 0 - ServerType_ASYNC_SERVER ServerType = 1 -) - -var ServerType_name = map[int32]string{ - 0: "SYNCHRONOUS_SERVER", - 1: "ASYNC_SERVER", -} -var ServerType_value = map[string]int32{ - "SYNCHRONOUS_SERVER": 0, - "ASYNC_SERVER": 1, -} - -func (x ServerType) String() string { - return proto.EnumName(ServerType_name, int32(x)) -} -func (ServerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -type RpcType int32 - -const ( - RpcType_UNARY RpcType = 0 - RpcType_STREAMING RpcType = 1 -) - -var RpcType_name = map[int32]string{ - 0: "UNARY", - 1: "STREAMING", -} -var RpcType_value = map[string]int32{ - "UNARY": 0, - "STREAMING": 1, -} - -func (x RpcType) String() string { - return proto.EnumName(RpcType_name, int32(x)) -} -func (RpcType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -type StatsRequest struct { - // run number - TestNum int32 `protobuf:"varint,1,opt,name=test_num" json:"test_num,omitempty"` -} - -func (m *StatsRequest) Reset() { *m = StatsRequest{} } -func (m *StatsRequest) String() string { return proto.CompactTextString(m) } -func (*StatsRequest) ProtoMessage() {} -func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type ServerStats struct { - // wall clock time - TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed" json:"time_elapsed,omitempty"` - // user time used by the server process and threads - TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user" json:"time_user,omitempty"` - // server time used by the server process and all threads - TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system" json:"time_system,omitempty"` -} - -func (m *ServerStats) Reset() { *m = ServerStats{} } -func (m *ServerStats) String() string { return proto.CompactTextString(m) } -func (*ServerStats) ProtoMessage() {} -func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type Payload struct { - // The type of data in body. 
- Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` - // Primary contents of payload. - Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` -} - -func (m *Payload) Reset() { *m = Payload{} } -func (m *Payload) String() string { return proto.CompactTextString(m) } -func (*Payload) ProtoMessage() {} -func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -type HistogramData struct { - Bucket []uint32 `protobuf:"varint,1,rep,name=bucket" json:"bucket,omitempty"` - MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen" json:"min_seen,omitempty"` - MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen" json:"max_seen,omitempty"` - Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` - SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares" json:"sum_of_squares,omitempty"` - Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` -} - -func (m *HistogramData) Reset() { *m = HistogramData{} } -func (m *HistogramData) String() string { return proto.CompactTextString(m) } -func (*HistogramData) ProtoMessage() {} -func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -type ClientConfig struct { - ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets" json:"server_targets,omitempty"` - ClientType ClientType `protobuf:"varint,2,opt,name=client_type,enum=grpc.testing.ClientType" json:"client_type,omitempty"` - EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` - OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel" json:"outstanding_rpcs_per_channel,omitempty"` - ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels" json:"client_channels,omitempty"` - PayloadSize int32 `protobuf:"varint,6,opt,name=payload_size" json:"payload_size,omitempty"` - // only for async client: - AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads" json:"async_client_threads,omitempty"` - RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` -} - -func (m *ClientConfig) Reset() { *m = ClientConfig{} } -func (m *ClientConfig) String() string { return proto.CompactTextString(m) } -func (*ClientConfig) ProtoMessage() {} -func (*ClientConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -// Request current stats -type Mark struct { -} - -func (m *Mark) Reset() { *m = Mark{} } -func (m *Mark) String() string { return proto.CompactTextString(m) } -func (*Mark) ProtoMessage() {} -func (*Mark) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -type ClientArgs struct { - // Types that are valid to be assigned to Argtype: - // *ClientArgs_Setup - // *ClientArgs_Mark - Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"` -} - -func (m *ClientArgs) Reset() { *m = ClientArgs{} } -func (m *ClientArgs) String() string { return proto.CompactTextString(m) } -func (*ClientArgs) ProtoMessage() {} -func (*ClientArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -type isClientArgs_Argtype interface { - isClientArgs_Argtype() -} - -type ClientArgs_Setup struct { - Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup,oneof"` -} -type ClientArgs_Mark struct { - Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` -} - -func (*ClientArgs_Setup) isClientArgs_Argtype() {} -func (*ClientArgs_Mark) isClientArgs_Argtype() {} - -func 
(m *ClientArgs) GetArgtype() isClientArgs_Argtype { - if m != nil { - return m.Argtype - } - return nil -} - -func (m *ClientArgs) GetSetup() *ClientConfig { - if x, ok := m.GetArgtype().(*ClientArgs_Setup); ok { - return x.Setup - } - return nil -} - -func (m *ClientArgs) GetMark() *Mark { - if x, ok := m.GetArgtype().(*ClientArgs_Mark); ok { - return x.Mark - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ClientArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ClientArgs_OneofMarshaler, _ClientArgs_OneofUnmarshaler, _ClientArgs_OneofSizer, []interface{}{ - (*ClientArgs_Setup)(nil), - (*ClientArgs_Mark)(nil), - } -} - -func _ClientArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ClientArgs) - // argtype - switch x := m.Argtype.(type) { - case *ClientArgs_Setup: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Setup); err != nil { - return err - } - case *ClientArgs_Mark: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Mark); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ClientArgs.Argtype has unexpected type %T", x) - } - return nil -} - -func _ClientArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ClientArgs) - switch tag { - case 1: // argtype.setup - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientConfig) - err := b.DecodeMessage(msg) - m.Argtype = &ClientArgs_Setup{msg} - return true, err - case 2: // argtype.mark - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mark) - err := b.DecodeMessage(msg) - m.Argtype = &ClientArgs_Mark{msg} - return true, err - default: - return false, nil - } -} - -func _ClientArgs_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ClientArgs) - // argtype - switch x := m.Argtype.(type) { - case *ClientArgs_Setup: - s := proto.Size(x.Setup) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ClientArgs_Mark: - s := proto.Size(x.Mark) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ClientStats struct { - Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` - TimeElapsed float64 `protobuf:"fixed64,3,opt,name=time_elapsed" json:"time_elapsed,omitempty"` - TimeUser float64 `protobuf:"fixed64,4,opt,name=time_user" json:"time_user,omitempty"` - TimeSystem float64 `protobuf:"fixed64,5,opt,name=time_system" json:"time_system,omitempty"` -} - -func (m *ClientStats) Reset() { *m = ClientStats{} } -func (m *ClientStats) String() string { return proto.CompactTextString(m) } -func (*ClientStats) ProtoMessage() {} -func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *ClientStats) GetLatencies() *HistogramData { - if m != nil { - return m.Latencies - } - return nil -} - -type ClientStatus struct { - Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` -} - -func (m *ClientStatus) Reset() { *m = ClientStatus{} } -func (m *ClientStatus) String() string { return proto.CompactTextString(m) } -func (*ClientStatus) ProtoMessage() {} 
-func (*ClientStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *ClientStatus) GetStats() *ClientStats { - if m != nil { - return m.Stats - } - return nil -} - -type ServerConfig struct { - ServerType ServerType `protobuf:"varint,1,opt,name=server_type,enum=grpc.testing.ServerType" json:"server_type,omitempty"` - Threads int32 `protobuf:"varint,2,opt,name=threads" json:"threads,omitempty"` - EnableSsl bool `protobuf:"varint,3,opt,name=enable_ssl" json:"enable_ssl,omitempty"` -} - -func (m *ServerConfig) Reset() { *m = ServerConfig{} } -func (m *ServerConfig) String() string { return proto.CompactTextString(m) } -func (*ServerConfig) ProtoMessage() {} -func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -type ServerArgs struct { - // Types that are valid to be assigned to Argtype: - // *ServerArgs_Setup - // *ServerArgs_Mark - Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"` -} - -func (m *ServerArgs) Reset() { *m = ServerArgs{} } -func (m *ServerArgs) String() string { return proto.CompactTextString(m) } -func (*ServerArgs) ProtoMessage() {} -func (*ServerArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -type isServerArgs_Argtype interface { - isServerArgs_Argtype() -} - -type ServerArgs_Setup struct { - Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup,oneof"` -} -type ServerArgs_Mark struct { - Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` -} - -func (*ServerArgs_Setup) isServerArgs_Argtype() {} -func (*ServerArgs_Mark) isServerArgs_Argtype() {} - -func (m *ServerArgs) GetArgtype() isServerArgs_Argtype { - if m != nil { - return m.Argtype - } - return nil -} - -func (m *ServerArgs) GetSetup() *ServerConfig { - if x, ok := m.GetArgtype().(*ServerArgs_Setup); ok { - return x.Setup - } - return nil -} - -func (m *ServerArgs) GetMark() *Mark { - if x, ok := m.GetArgtype().(*ServerArgs_Mark); ok { - return x.Mark - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ServerArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ServerArgs_OneofMarshaler, _ServerArgs_OneofUnmarshaler, _ServerArgs_OneofSizer, []interface{}{ - (*ServerArgs_Setup)(nil), - (*ServerArgs_Mark)(nil), - } -} - -func _ServerArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ServerArgs) - // argtype - switch x := m.Argtype.(type) { - case *ServerArgs_Setup: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Setup); err != nil { - return err - } - case *ServerArgs_Mark: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Mark); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ServerArgs.Argtype has unexpected type %T", x) - } - return nil -} - -func _ServerArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ServerArgs) - switch tag { - case 1: // argtype.setup - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerConfig) - err := b.DecodeMessage(msg) - m.Argtype = &ServerArgs_Setup{msg} - return true, err - case 2: // argtype.mark - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mark) - err := b.DecodeMessage(msg) - m.Argtype = &ServerArgs_Mark{msg} - return true, err - default: - return false, nil - } -} - -func _ServerArgs_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ServerArgs) - // argtype - switch x := m.Argtype.(type) { - case *ServerArgs_Setup: - s := proto.Size(x.Setup) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ServerArgs_Mark: - s := proto.Size(x.Mark) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ServerStatus struct { - Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` - Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` -} - -func (m *ServerStatus) Reset() { *m = ServerStatus{} } -func (m *ServerStatus) String() string { return proto.CompactTextString(m) } -func (*ServerStatus) ProtoMessage() {} -func (*ServerStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -func (m *ServerStatus) GetStats() *ServerStats { - if m != nil { - return m.Stats - } - return nil -} - -type SimpleRequest struct { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` - // Optional input payload sent along with the request. 
- Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` -} - -func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } -func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } -func (*SimpleRequest) ProtoMessage() {} -func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *SimpleRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -type SimpleResponse struct { - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` -} - -func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } -func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } -func (*SimpleResponse) ProtoMessage() {} -func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *SimpleResponse) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -func init() { - proto.RegisterType((*StatsRequest)(nil), "grpc.testing.StatsRequest") - proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats") - proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") - proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData") - proto.RegisterType((*ClientConfig)(nil), "grpc.testing.ClientConfig") - proto.RegisterType((*Mark)(nil), "grpc.testing.Mark") - proto.RegisterType((*ClientArgs)(nil), "grpc.testing.ClientArgs") - proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats") - proto.RegisterType((*ClientStatus)(nil), "grpc.testing.ClientStatus") - proto.RegisterType((*ServerConfig)(nil), "grpc.testing.ServerConfig") - proto.RegisterType((*ServerArgs)(nil), "grpc.testing.ServerArgs") - proto.RegisterType((*ServerStatus)(nil), "grpc.testing.ServerStatus") - proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") - proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") - proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) - proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) - proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) - proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion2 - -// Client API for TestService service - -type TestServiceClient interface { - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // One request followed by one response. - // The server returns the client payload as-is. - StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) -} - -type testServiceClient struct { - cc *grpc.ClientConn -} - -func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { - return &testServiceClient{cc} -} - -func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { - out := new(SimpleResponse) - err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceStreamingCallClient{stream} - return x, nil -} - -type TestService_StreamingCallClient interface { - Send(*SimpleRequest) error - Recv() (*SimpleResponse, error) - grpc.ClientStream -} - -type testServiceStreamingCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingCallClient) Send(m *SimpleRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceStreamingCallClient) Recv() (*SimpleResponse, error) { - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for TestService service - -type TestServiceServer interface { - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by one response. - // The server returns the client payload as-is. - StreamingCall(TestService_StreamingCallServer) error -} - -func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { - s.RegisterService(&_TestService_serviceDesc, srv) -} - -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).StreamingCall(&testServiceStreamingCallServer{stream}) -} - -type TestService_StreamingCallServer interface { - Send(*SimpleResponse) error - Recv() (*SimpleRequest, error) - grpc.ServerStream -} - -type testServiceStreamingCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingCallServer) Send(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingCallServer) Recv() (*SimpleRequest, error) { - m := new(SimpleRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _TestService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.TestService", - HandlerType: (*TestServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UnaryCall", - Handler: _TestService_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingCall", - Handler: _TestService_StreamingCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, -} - -// Client API for Worker service - -type WorkerClient interface { - // Start test with specified workload - RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) - // Start test with specified workload - RunServer(ctx context.Context, opts ...grpc.CallOption) 
(Worker_RunServerClient, error) -} - -type workerClient struct { - cc *grpc.ClientConn -} - -func NewWorkerClient(cc *grpc.ClientConn) WorkerClient { - return &workerClient{cc} -} - -func (c *workerClient) RunTest(ctx context.Context, opts ...grpc.CallOption) (Worker_RunTestClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[0], c.cc, "/grpc.testing.Worker/RunTest", opts...) - if err != nil { - return nil, err - } - x := &workerRunTestClient{stream} - return x, nil -} - -type Worker_RunTestClient interface { - Send(*ClientArgs) error - Recv() (*ClientStatus, error) - grpc.ClientStream -} - -type workerRunTestClient struct { - grpc.ClientStream -} - -func (x *workerRunTestClient) Send(m *ClientArgs) error { - return x.ClientStream.SendMsg(m) -} - -func (x *workerRunTestClient) Recv() (*ClientStatus, error) { - m := new(ClientStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *workerClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (Worker_RunServerClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Worker_serviceDesc.Streams[1], c.cc, "/grpc.testing.Worker/RunServer", opts...) - if err != nil { - return nil, err - } - x := &workerRunServerClient{stream} - return x, nil -} - -type Worker_RunServerClient interface { - Send(*ServerArgs) error - Recv() (*ServerStatus, error) - grpc.ClientStream -} - -type workerRunServerClient struct { - grpc.ClientStream -} - -func (x *workerRunServerClient) Send(m *ServerArgs) error { - return x.ClientStream.SendMsg(m) -} - -func (x *workerRunServerClient) Recv() (*ServerStatus, error) { - m := new(ServerStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for Worker service - -type WorkerServer interface { - // Start test with specified workload - RunTest(Worker_RunTestServer) error - // Start test with specified workload - RunServer(Worker_RunServerServer) error -} - -func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { - s.RegisterService(&_Worker_serviceDesc, srv) -} - -func _Worker_RunTest_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WorkerServer).RunTest(&workerRunTestServer{stream}) -} - -type Worker_RunTestServer interface { - Send(*ClientStatus) error - Recv() (*ClientArgs, error) - grpc.ServerStream -} - -type workerRunTestServer struct { - grpc.ServerStream -} - -func (x *workerRunTestServer) Send(m *ClientStatus) error { - return x.ServerStream.SendMsg(m) -} - -func (x *workerRunTestServer) Recv() (*ClientArgs, error) { - m := new(ClientArgs) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Worker_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WorkerServer).RunServer(&workerRunServerServer{stream}) -} - -type Worker_RunServerServer interface { - Send(*ServerStatus) error - Recv() (*ServerArgs, error) - grpc.ServerStream -} - -type workerRunServerServer struct { - grpc.ServerStream -} - -func (x *workerRunServerServer) Send(m *ServerStatus) error { - return x.ServerStream.SendMsg(m) -} - -func (x *workerRunServerServer) Recv() (*ServerArgs, error) { - m := new(ServerArgs) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Worker_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.Worker", - HandlerType: (*WorkerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: 
[]grpc.StreamDesc{ - { - StreamName: "RunTest", - Handler: _Worker_RunTest_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "RunServer", - Handler: _Worker_RunServer_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, -} - -var fileDescriptor0 = []byte{ - // 867 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x8f, 0xdb, 0x44, - 0x10, 0x3e, 0xe7, 0xe2, 0xe4, 0x3c, 0x4e, 0xd2, 0xb0, 0xb4, 0x25, 0x3d, 0xee, 0xa1, 0x18, 0x44, - 0x4f, 0x87, 0x48, 0xab, 0x20, 0x21, 0xde, 0x20, 0x4d, 0x53, 0xae, 0xa2, 0xe7, 0xab, 0xec, 0x3b, - 0x50, 0x9f, 0xac, 0x8d, 0xb3, 0x4d, 0xac, 0x73, 0xd6, 0xae, 0x77, 0x0d, 0x04, 0x5e, 0x78, 0xe1, - 0x1f, 0xf0, 0x17, 0xf8, 0x9f, 0xec, 0x8e, 0x6d, 0xe1, 0x04, 0x83, 0x0e, 0xf5, 0x29, 0xf2, 0xec, - 0xcc, 0xb7, 0xdf, 0x37, 0xf3, 0xcd, 0x06, 0x3e, 0x5a, 0x30, 0x1e, 0xae, 0x37, 0x34, 0xbb, 0x79, - 0xbc, 0xca, 0xd2, 0x30, 0x90, 0x4c, 0xc8, 0x88, 0xaf, 0x1e, 0xeb, 0xdf, 0x71, 0x9a, 0x25, 0x32, - 0x21, 0x3d, 0x7d, 0x30, 0x2e, 0x0f, 0x9c, 0x87, 0xd0, 0xf3, 0x25, 0x95, 0xc2, 0x63, 0x6f, 0x73, - 0x15, 0x22, 0x43, 0x38, 0xd2, 0x47, 0x01, 0xcf, 0x37, 0x23, 0xe3, 0xa1, 0x71, 0x6a, 0x3a, 0xdf, - 0x81, 0xed, 0xb3, 0xec, 0x47, 0x96, 0x61, 0x1e, 0xb9, 0x0b, 0x3d, 0x19, 0x6d, 0x58, 0xc0, 0x62, - 0x9a, 0x0a, 0xb6, 0xc4, 0x24, 0x83, 0xbc, 0x07, 0x16, 0x46, 0x73, 0xc1, 0xb2, 0x51, 0x0b, 0x43, - 0xef, 0x83, 0x8d, 0x21, 0xb1, 0x15, 0x92, 0x6d, 0x46, 0x87, 0x3a, 0xe8, 0x7c, 0x03, 0xdd, 0x57, - 0x74, 0x1b, 0x27, 0x74, 0x49, 0x1e, 0x41, 0x5b, 0x6e, 0x53, 0x86, 0x00, 0x83, 0xc9, 0x83, 0x71, - 0x9d, 0xd6, 0xb8, 0x4c, 0xba, 0x52, 0x09, 0xa4, 0x07, 0xed, 0x45, 0xb2, 0xdc, 0x22, 0x6c, 0xcf, - 0xf9, 0x09, 0xfa, 0xe7, 0x91, 0x90, 0xc9, 0x2a, 0xa3, 0x9b, 0x67, 0x54, 0x52, 0x32, 0x80, 0xce, - 0x22, 0x0f, 0x6f, 0x98, 0x54, 0x48, 0x87, 0xa7, 0x7d, 0xad, 0x60, 0x13, 0xf1, 0x40, 0x30, 0xc6, - 0x4b, 0x26, 0x3a, 0x42, 0x7f, 0x2e, 0x22, 0x48, 0x83, 0xd8, 0x70, 0x28, 0x94, 0xc0, 0x36, 0x7e, - 0xdc, 0x87, 0x81, 0xfa, 0x08, 0x92, 0x37, 0x81, 0x78, 0x9b, 0xd3, 0x8c, 0x89, 0x91, 0x89, 0xf1, - 0x3e, 0x98, 0x61, 0x92, 0x73, 0x39, 0xea, 0x20, 0xf5, 0xdf, 0x5b, 0xd0, 0x9b, 0xc5, 0x11, 0xe3, - 0x72, 0x96, 0xf0, 0x37, 0xd1, 0x0a, 0xeb, 0xb0, 0x31, 0x81, 0xa4, 0xd9, 0x8a, 0x49, 0x81, 0x04, - 0x2c, 0xf2, 0x39, 0xd8, 0x21, 0xe6, 0x05, 0xa8, 0xaf, 0x85, 0xfa, 0x46, 0xbb, 0xfa, 0x0a, 0x20, - 0x94, 0x47, 0x00, 0x18, 0xa7, 0x8b, 0x58, 0x75, 0x4a, 0xc4, 0xc8, 0xef, 0x88, 0x7c, 0x02, 0x27, - 0x49, 0x2e, 0x85, 0xa4, 0x7c, 0xa9, 0xb2, 0x03, 0x55, 0x29, 0x82, 0x54, 0x5d, 0x14, 0xae, 0x29, - 0xe7, 0x2c, 0x46, 0xe2, 0x26, 0xf9, 0x00, 0xee, 0x94, 0x17, 0x95, 0xf1, 0x82, 0xb9, 0xa9, 0x67, - 0x94, 0x16, 0x0d, 0x0c, 0x44, 0xf4, 0x0b, 0x43, 0x01, 0x26, 0x39, 0x81, 0xbb, 0x54, 0x6c, 0x79, - 0x18, 0x54, 0xec, 0xd6, 0x19, 0xa3, 0x4b, 0x31, 0xea, 0xe2, 0xe9, 0x23, 0x38, 0x42, 0xc3, 0x68, - 0xca, 0x47, 0x48, 0xf9, 0xde, 0x2e, 0x65, 0x2f, 0x0d, 0x35, 0x5f, 0xa7, 0x03, 0xed, 0x0b, 0xe5, - 0x2f, 0x67, 0x0d, 0x50, 0xa8, 0x98, 0x66, 0x2b, 0x41, 0x3e, 0x03, 0x53, 0x30, 0x99, 0xa7, 0x38, - 0x4e, 0x7b, 0x72, 0xdc, 0x24, 0xb7, 0xe8, 0xdb, 0xf9, 0x01, 0x71, 0xa0, 0xad, 0x2d, 0x8a, 0xad, - 0xb1, 0x27, 0x64, 0x37, 0x57, 0x83, 0x9f, 0x1f, 0x3c, 0xb5, 0xa0, 0xab, 0xba, 0xaa, 0xe9, 0x38, - 0xbf, 0x82, 0x5d, 0x00, 0x14, 0x0e, 0x1c, 0x83, 0x15, 0x53, 0xa9, 0x7c, 0x1e, 0x31, 0x51, 0x5e, - 0xf7, 0xe1, 0x2e, 0xc4, 0xae, 0x41, 0xf6, 0x1d, 0x7b, 0xf8, 0x4f, 0xc7, 0xb6, 0x9b, 0x1c, 0x8b, - 0x2e, 0x70, 0xbe, 0xaa, 0xa6, 0xae, 0x2f, 0xcf, 0x05, 0x39, 0x55, 0x42, 
0x35, 0x8d, 0xf2, 0xe6, - 0x07, 0x4d, 0x42, 0x91, 0xa7, 0xb3, 0x50, 0xab, 0x85, 0xfe, 0x28, 0xfd, 0xa2, 0x7c, 0x51, 0xf9, - 0xe5, 0x6f, 0xdf, 0xef, 0xf9, 0xa2, 0x28, 0x40, 0x5f, 0xdc, 0x81, 0x6e, 0x35, 0xa1, 0x16, 0x4e, - 0xa8, 0xc1, 0x28, 0x7a, 0x08, 0x45, 0xc9, 0x2d, 0x86, 0x50, 0x27, 0xf3, 0xff, 0x87, 0xf0, 0xbc, - 0x52, 0x73, 0xab, 0x3e, 0xd4, 0x5f, 0x0c, 0xb5, 0xbf, 0x69, 0x92, 0xc9, 0x42, 0x85, 0xf3, 0x9b, - 0x01, 0x7d, 0x3f, 0xda, 0xa4, 0x31, 0xab, 0x9e, 0x9c, 0x27, 0xd0, 0x57, 0x4b, 0x97, 0x26, 0x5c, - 0xb0, 0xe0, 0x76, 0x2f, 0xc2, 0xbd, 0x5a, 0x05, 0x1a, 0xbc, 0x68, 0xd0, 0xa7, 0xd0, 0x2d, 0x6d, - 0x8f, 0xdd, 0xb1, 0xf7, 0x1d, 0x5c, 0x42, 0xa8, 0x91, 0x0e, 0x2a, 0x06, 0x05, 0x48, 0xbd, 0xd2, - 0xf8, 0x8f, 0xca, 0xb3, 0xaf, 0xc1, 0xae, 0xf3, 0x18, 0x2a, 0x6f, 0x5c, 0x5e, 0xbc, 0xf2, 0xe6, - 0xbe, 0x3f, 0x7d, 0xfa, 0x72, 0x3e, 0x3c, 0x50, 0x33, 0x1a, 0x5c, 0xbb, 0x3b, 0x31, 0x43, 0x8d, - 0xad, 0xe3, 0x4d, 0xdd, 0x67, 0x97, 0x17, 0xc3, 0xd6, 0xd9, 0x97, 0xd5, 0xd2, 0x60, 0xfd, 0x7d, - 0x20, 0xfe, 0x6b, 0x77, 0x76, 0xee, 0x5d, 0xba, 0x97, 0xd7, 0x7e, 0x30, 0x7b, 0xf9, 0x62, 0xee, - 0x5e, 0x29, 0x14, 0x85, 0x3b, 0xd5, 0x07, 0x55, 0xc4, 0xd0, 0x75, 0x35, 0x6b, 0xec, 0xd5, 0xf9, - 0x73, 0xef, 0xfb, 0xb9, 0x57, 0xaf, 0x2b, 0x23, 0xc6, 0xd9, 0xc7, 0xd0, 0x2d, 0xf7, 0x96, 0x58, - 0x60, 0x5e, 0xbb, 0x53, 0xef, 0xb5, 0xca, 0xeb, 0x83, 0xe5, 0x5f, 0x79, 0xf3, 0xe9, 0xc5, 0x0b, - 0xf7, 0xdb, 0xa1, 0x31, 0xf9, 0xd3, 0x00, 0xfb, 0x4a, 0x29, 0xd5, 0x37, 0x44, 0x21, 0x23, 0xcf, - 0xc1, 0xba, 0xe6, 0x34, 0xdb, 0xce, 0x68, 0x1c, 0x93, 0xbd, 0xd5, 0xda, 0x19, 0xdd, 0xf1, 0x49, - 0xf3, 0x61, 0xd9, 0x55, 0x57, 0x4d, 0x5a, 0x2a, 0x07, 0xab, 0xe7, 0x78, 0xf5, 0x8e, 0x58, 0xa7, - 0xc6, 0x13, 0x63, 0xf2, 0x87, 0x01, 0x9d, 0x1f, 0x92, 0xec, 0x86, 0x65, 0x64, 0xa6, 0x74, 0xe5, - 0x5c, 0x93, 0x26, 0x8d, 0x2f, 0xab, 0x5e, 0x87, 0xe3, 0xe3, 0x7f, 0xdb, 0xcd, 0x5c, 0x68, 0x3c, - 0x32, 0x07, 0x4b, 0x81, 0x14, 0x7d, 0x25, 0x8d, 0x8b, 0xd8, 0x04, 0x53, 0xdf, 0x02, 0x0d, 0xb3, - 0xe8, 0xe0, 0xff, 0xea, 0x17, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xdf, 0x5b, 0x40, 0x7c, - 0x07, 0x00, 0x00, -} diff --git a/benchmark/grpc_testing/test.proto b/benchmark/grpc_testing/test.proto deleted file mode 100644 index b0b2f80b..00000000 --- a/benchmark/grpc_testing/test.proto +++ /dev/null @@ -1,148 +0,0 @@ -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. -syntax = "proto3"; - -package grpc.testing; - -enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; - - // Uncompressable binary format. - UNCOMPRESSABLE = 1; - - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; -} - -message StatsRequest { - // run number - int32 test_num = 1; -} - -message ServerStats { - // wall clock time - double time_elapsed = 1; - - // user time used by the server process and threads - double time_user = 2; - - // server time used by the server process and all threads - double time_system = 3; -} - -message Payload { - // The type of data in body. - PayloadType type = 1; - // Primary contents of payload. 
- bytes body = 2; -} - -message HistogramData { - repeated uint32 bucket = 1; - double min_seen = 2; - double max_seen = 3; - double sum = 4; - double sum_of_squares = 5; - double count = 6; -} - -enum ClientType { - SYNCHRONOUS_CLIENT = 0; - ASYNC_CLIENT = 1; -} - -enum ServerType { - SYNCHRONOUS_SERVER = 0; - ASYNC_SERVER = 1; -} - -enum RpcType { - UNARY = 0; - STREAMING = 1; -} - -message ClientConfig { - repeated string server_targets = 1; - ClientType client_type = 2; - bool enable_ssl = 3; - int32 outstanding_rpcs_per_channel = 4; - int32 client_channels = 5; - int32 payload_size = 6; - // only for async client: - int32 async_client_threads = 7; - RpcType rpc_type = 8; -} - -// Request current stats -message Mark {} - -message ClientArgs { - oneof argtype { - ClientConfig setup = 1; - Mark mark = 2; - } -} - -message ClientStats { - HistogramData latencies = 1; - double time_elapsed = 3; - double time_user = 4; - double time_system = 5; -} - -message ClientStatus { - ClientStats stats = 1; -} - -message ServerConfig { - ServerType server_type = 1; - int32 threads = 2; - bool enable_ssl = 3; -} - -message ServerArgs { - oneof argtype { - ServerConfig setup = 1; - Mark mark = 2; - } -} - -message ServerStatus { - ServerStats stats = 1; - int32 port = 2; -} - -message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - PayloadType response_type = 1; - - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - int32 response_size = 2; - - // Optional input payload sent along with the request. - Payload payload = 3; -} - -message SimpleResponse { - Payload payload = 1; -} - -service TestService { - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by one response. - // The server returns the client payload as-is. 
- rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); -} - -service Worker { - // Start test with specified workload - rpc RunTest(stream ClientArgs) returns (stream ClientStatus); - // Start test with specified workload - rpc RunServer(stream ServerArgs) returns (stream ServerStatus); -} From 9fd1d5bee86a10d55ecdd45c9d2dff95d250cd72 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 18 Apr 2016 14:48:19 -0700 Subject: [PATCH 02/43] Implement QPS workerServer and Add RunServer() for benchmarkServer --- benchmark/benchmark.go | 7 +- benchmark/server/testdata/server1.key | 16 ++++ benchmark/server/testdata/server1.pem | 16 ++++ benchmark/worker/benchmark_server.go | 109 ++++++++++++++++++++++++++ benchmark/worker/main.go | 108 +++++++++++++++++++++++++ 5 files changed, 252 insertions(+), 4 deletions(-) create mode 100644 benchmark/server/testdata/server1.key create mode 100644 benchmark/server/testdata/server1.pem create mode 100644 benchmark/worker/benchmark_server.go create mode 100644 benchmark/worker/main.go diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index e2cd51b3..c86d96c2 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -38,7 +38,6 @@ package benchmark import ( "io" - "math" "net" "golang.org/x/net/context" @@ -95,15 +94,15 @@ func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallS // StartServer starts a gRPC server serving a benchmark service on the given // address, which may be something like "localhost:0". It returns its listen // address and a function to stop the server. -func StartServer(addr string) (string, func()) { +func StartServer(addr string, opts ...grpc.ServerOption) (int, func()) { lis, err := net.Listen("tcp", addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } - s := grpc.NewServer(grpc.MaxConcurrentStreams(math.MaxUint32)) + s := grpc.NewServer(opts...) 
testpb.RegisterBenchmarkServiceServer(s, &testServer{}) go s.Serve(lis) - return lis.Addr().String(), func() { + return lis.Addr().(*net.TCPAddr).Port, func() { s.Stop() } } diff --git a/benchmark/server/testdata/server1.key b/benchmark/server/testdata/server1.key new file mode 100644 index 00000000..143a5b87 --- /dev/null +++ b/benchmark/server/testdata/server1.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD +M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf +3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY +AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm +V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY +tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p +dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q +K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR +81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff +DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd +aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 +ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 +XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe +F98XJ7tIFfJq +-----END PRIVATE KEY----- diff --git a/benchmark/server/testdata/server1.pem b/benchmark/server/testdata/server1.pem new file mode 100644 index 00000000..f3d43fcc --- /dev/null +++ b/benchmark/server/testdata/server1.pem @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET +MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ +dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx +MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV +BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 +ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco +LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg +zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd +9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw +CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy +em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G +CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 +hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh +y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 +-----END CERTIFICATE----- diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go new file mode 100644 index 00000000..76d4be93 --- /dev/null +++ b/benchmark/worker/benchmark_server.go @@ -0,0 +1,109 @@ +package main + +import ( + "runtime" + "strconv" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +var ( + // TODO change filepath + certFile = "/usr/local/google/home/menghanl/go/src/google.golang.org/grpc/benchmark/server/testdata/server1.pem" + keyFile = "/usr/local/google/home/menghanl/go/src/google.golang.org/grpc/benchmark/server/testdata/server1.key" +) + +type benchmarkServer struct { + port int + close func() + mu sync.RWMutex + lastResetTime time.Time +} + +func startBenchmarkServerWithSetup(setup *testpb.ServerConfig) (*benchmarkServer, error) { + var opts []grpc.ServerOption + + grpclog.Printf(" - server type: 
%v", setup.ServerType) + switch setup.ServerType { + // Ignore server type. + case testpb.ServerType_SYNC_SERVER: + case testpb.ServerType_ASYNC_SERVER: + case testpb.ServerType_ASYNC_GENERIC_SERVER: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow server type: %v", setup.ServerType) + } + + grpclog.Printf(" - security params: %v", setup.SecurityParams) + if setup.SecurityParams != nil { + creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) + if err != nil { + grpclog.Fatalf("failed to generate credentials %v", err) + } + opts = append(opts, grpc.Creds(creds)) + } + + // Ignore async server threads. + + grpclog.Printf(" - core limit: %v", setup.CoreLimit) + + grpclog.Printf(" - payload config: %v", setup.PayloadConfig) + if setup.PayloadConfig != nil { + // TODO payload config + grpclog.Printf("payload config: %v", setup.PayloadConfig) + switch setup.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + case *testpb.PayloadConfig_SimpleParams: + case *testpb.PayloadConfig_ComplexParams: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) + } + } + + grpclog.Printf(" - core list: %v", setup.CoreList) + if len(setup.CoreList) > 0 { + // TODO core list + grpclog.Printf("specifying cores to run server on: %v", setup.CoreList) + } + + if setup.CoreLimit > 0 { + runtime.GOMAXPROCS(int(setup.CoreLimit)) + } else { + // runtime.GOMAXPROCS(runtime.NumCPU()) + runtime.GOMAXPROCS(1) + } + + grpclog.Printf(" - port: %v", setup.Port) + p, close := benchmark.StartServer(":"+strconv.Itoa(int(setup.Port)), opts...) + grpclog.Printf("benchmark server listening at port %v", p) + + // temp := strings.Split(addr, ":") + // if len(temp) <= 0 { + // return nil, grpc.Errorf(codes.Internal, "benchmark test address not valid: %v", addr) + // } + // p, err := strconv.Atoi(temp[len(temp)-1]) + // if err != nil { + // return nil, grpc.Errorf(codes.Internal, "%v", err) + // } + + bs := &benchmarkServer{port: p, close: close, lastResetTime: time.Now()} + return bs, nil +} + +func (bs *benchmarkServer) getStats() *testpb.ServerStats { + bs.mu.RLock() + defer bs.mu.RUnlock() + return &testpb.ServerStats{TimeElapsed: time.Since(bs.lastResetTime).Seconds(), TimeUser: 0, TimeSystem: 0} +} + +func (bs *benchmarkServer) reset() { + bs.mu.Lock() + defer bs.mu.Unlock() + bs.lastResetTime = time.Now() +} diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go new file mode 100644 index 00000000..e31b3c6c --- /dev/null +++ b/benchmark/worker/main.go @@ -0,0 +1,108 @@ +package main + +import ( + "io" + "net" + "runtime" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +var ( + ports = []string{":10000"} + // ports = []string{":10010"} +) + +type workerServer struct { + bs *benchmarkServer +} + +func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + grpclog.Printf("closing benchmark server") + if s.bs != nil { + s.bs.close() + s.bs = nil + } + return nil + } + if err != nil { + return err + } + + switch t := in.Argtype.(type) { + case *testpb.ServerArgs_Setup: + grpclog.Printf("server setup received:") + + bs, err := startBenchmarkServerWithSetup(t.Setup) + if err != nil { + return err + } + s.bs = bs + case *testpb.ServerArgs_Mark: + grpclog.Printf("server mark received:") + 
grpclog.Printf(" - %v", t) + if s.bs == nil { + return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received") + } + if t.Mark.Reset_ { + s.bs.reset() + } + } + + out := &testpb.ServerStatus{ + Stats: s.bs.getStats(), + Port: int32(s.bs.port), + Cores: 1, + } + if err := stream.Send(out); err != nil { + return err + } + } + + return nil +} + +func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { + return nil +} + +func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) { + grpclog.Printf("core count: %v", runtime.NumCPU()) + return &testpb.CoreResponse{int32(runtime.NumCPU())}, nil +} + +func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { + grpclog.Printf("quiting worker") + if s.bs != nil { + s.bs.close() + } + return &testpb.Void{}, nil +} + +func main() { + var wg sync.WaitGroup + wg.Add(len(ports)) + for i := 0; i < len(ports); i++ { + lis, err := net.Listen("tcp", ports[i]) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + grpclog.Printf("worker %d listening at port %v", i, ports[i]) + + s := grpc.NewServer() + testpb.RegisterWorkerServiceServer(s, &workerServer{}) + go func() { + defer wg.Done() + s.Serve(lis) + }() + } + wg.Wait() +} From 643486f0842d9f7904e4b9bdfeace0eeb7c590af Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 20 Apr 2016 11:54:51 -0700 Subject: [PATCH 03/43] Add bytebuf codec and generic benchmark server --- benchmark/benchmark.go | 43 +++++++++++++++++++++++++++- benchmark/worker/benchmark_server.go | 42 +++++++++++++++------------ benchmark/worker/main.go | 16 +++++++++++ 3 files changed, 82 insertions(+), 19 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index c86d96c2..321f251a 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -93,7 +93,7 @@ func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallS // StartServer starts a gRPC server serving a benchmark service on the given // address, which may be something like "localhost:0". It returns its listen -// address and a function to stop the server. +// port number and a function to stop the server. func StartServer(addr string, opts ...grpc.ServerOption) (int, func()) { lis, err := net.Listen("tcp", addr) if err != nil { @@ -107,6 +107,47 @@ func StartServer(addr string, opts ...grpc.ServerOption) (int, func()) { } } +type genericTestServer struct { + reqSize int32 + respSize int32 +} + +func (s *genericTestServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil +} + +func (s *genericTestServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { + for { + m := make([]byte, s.reqSize) + err := stream.(grpc.ServerStream).RecvMsg(m) + if err == io.EOF { + // read done. + return nil + } + if err != nil { + return err + } + if err := stream.(grpc.ServerStream).SendMsg(make([]byte, s.respSize)); err != nil { + return err + } + } +} + +// StartGenericServer starts a gRPC a benchmark service server, which supports custom codec. +// It returns its listen port number and a function to stop the server. +func StartGenericServer(addr string, reqSize, respSize int32, opts ...grpc.ServerOption) (int, func()) { + lis, err := net.Listen("tcp", addr) + if err != nil { + grpclog.Fatalf("Failed to listen: %v", err) + } + s := grpc.NewServer(opts...) 
+ testpb.RegisterBenchmarkServiceServer(s, &genericTestServer{reqSize: reqSize, respSize: respSize}) + go s.Serve(lis) + return lis.Addr().(*net.TCPAddr).Port, func() { + s.Stop() + } +} + // DoUnaryCall performs an unary RPC with given stub and request and response sizes. func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index 76d4be93..41c430a0 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -52,18 +52,11 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig) (*benchmarkServer // Ignore async server threads. grpclog.Printf(" - core limit: %v", setup.CoreLimit) - - grpclog.Printf(" - payload config: %v", setup.PayloadConfig) - if setup.PayloadConfig != nil { - // TODO payload config - grpclog.Printf("payload config: %v", setup.PayloadConfig) - switch setup.PayloadConfig.Payload.(type) { - case *testpb.PayloadConfig_BytebufParams: - case *testpb.PayloadConfig_SimpleParams: - case *testpb.PayloadConfig_ComplexParams: - default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) - } + if setup.CoreLimit > 0 { + runtime.GOMAXPROCS(int(setup.CoreLimit)) + } else { + // runtime.GOMAXPROCS(runtime.NumCPU()) + runtime.GOMAXPROCS(1) } grpclog.Printf(" - core list: %v", setup.CoreList) @@ -72,15 +65,28 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig) (*benchmarkServer grpclog.Printf("specifying cores to run server on: %v", setup.CoreList) } - if setup.CoreLimit > 0 { - runtime.GOMAXPROCS(int(setup.CoreLimit)) + grpclog.Printf(" - port: %v", setup.Port) + grpclog.Printf(" - payload config: %v", setup.PayloadConfig) + var p int + var close func() + if setup.PayloadConfig != nil { + // TODO payload config + grpclog.Printf("payload config: %v", setup.PayloadConfig) + switch payload := setup.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + opts = append(opts, grpc.CustomCodec(byteBufCodec{})) + p, close = benchmark.StartGenericServer(":"+strconv.Itoa(int(setup.Port)), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) + case *testpb.PayloadConfig_SimpleParams: + p, close = benchmark.StartServer(":"+strconv.Itoa(int(setup.Port)), opts...) + case *testpb.PayloadConfig_ComplexParams: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) + } } else { - // runtime.GOMAXPROCS(runtime.NumCPU()) - runtime.GOMAXPROCS(1) + // Start protobuf server is payload config is nil + p, close = benchmark.StartServer(":"+strconv.Itoa(int(setup.Port)), opts...) } - grpclog.Printf(" - port: %v", setup.Port) - p, close := benchmark.StartServer(":"+strconv.Itoa(int(setup.Port)), opts...) 
grpclog.Printf("benchmark server listening at port %v", p) // temp := strings.Split(addr, ":") diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index e31b3c6c..dc4782dc 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -18,6 +18,22 @@ var ( // ports = []string{":10010"} ) +type byteBufCodec struct { +} + +func (byteBufCodec) Marshal(v interface{}) ([]byte, error) { + return v.([]byte), nil +} + +func (byteBufCodec) Unmarshal(data []byte, v interface{}) error { + v = data + return nil +} + +func (byteBufCodec) String() string { + return "byteBufCodec" +} + type workerServer struct { bs *benchmarkServer } From 26b336d4917661a430d121cc48a32a1b21f311c9 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 21 Apr 2016 15:59:24 -0700 Subject: [PATCH 04/43] Add driver command line options --- benchmark/worker/benchmark_server.go | 31 ++++++++----------- benchmark/worker/main.go | 45 +++++++++++++++------------- 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index 41c430a0..4a7072fe 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -27,7 +27,7 @@ type benchmarkServer struct { lastResetTime time.Time } -func startBenchmarkServerWithSetup(setup *testpb.ServerConfig) (*benchmarkServer, error) { +func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { var opts []grpc.ServerOption grpclog.Printf(" - server type: %v", setup.ServerType) @@ -55,49 +55,43 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig) (*benchmarkServer if setup.CoreLimit > 0 { runtime.GOMAXPROCS(int(setup.CoreLimit)) } else { - // runtime.GOMAXPROCS(runtime.NumCPU()) runtime.GOMAXPROCS(1) } grpclog.Printf(" - core list: %v", setup.CoreList) if len(setup.CoreList) > 0 { - // TODO core list - grpclog.Printf("specifying cores to run server on: %v", setup.CoreList) + return nil, grpc.Errorf(codes.InvalidArgument, "specifying core list is not supported") } grpclog.Printf(" - port: %v", setup.Port) + var port int + if setup.Port != 0 { + port = int(setup.Port) + } else if serverPort != 0 { + port = serverPort + } grpclog.Printf(" - payload config: %v", setup.PayloadConfig) var p int var close func() if setup.PayloadConfig != nil { - // TODO payload config - grpclog.Printf("payload config: %v", setup.PayloadConfig) switch payload := setup.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) - p, close = benchmark.StartGenericServer(":"+strconv.Itoa(int(setup.Port)), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) + p, close = benchmark.StartGenericServer(":"+strconv.Itoa(port), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) case *testpb.PayloadConfig_SimpleParams: - p, close = benchmark.StartServer(":"+strconv.Itoa(int(setup.Port)), opts...) + p, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) case *testpb.PayloadConfig_ComplexParams: + return nil, grpc.Errorf(codes.InvalidArgument, "unsupported payload config: %v", setup.PayloadConfig) default: return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) } } else { // Start protobuf server is payload config is nil - p, close = benchmark.StartServer(":"+strconv.Itoa(int(setup.Port)), opts...) + p, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) 
} grpclog.Printf("benchmark server listening at port %v", p) - // temp := strings.Split(addr, ":") - // if len(temp) <= 0 { - // return nil, grpc.Errorf(codes.Internal, "benchmark test address not valid: %v", addr) - // } - // p, err := strconv.Atoi(temp[len(temp)-1]) - // if err != nil { - // return nil, grpc.Errorf(codes.Internal, "%v", err) - // } - bs := &benchmarkServer{port: p, close: close, lastResetTime: time.Now()} return bs, nil } @@ -109,6 +103,7 @@ func (bs *benchmarkServer) getStats() *testpb.ServerStats { } func (bs *benchmarkServer) reset() { + // TODO wall time, sys time, user time bs.mu.Lock() defer bs.mu.Unlock() bs.lastResetTime = time.Now() diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index dc4782dc..006b46d8 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -1,10 +1,11 @@ package main import ( + "flag" "io" "net" "runtime" - "sync" + "strconv" "golang.org/x/net/context" "google.golang.org/grpc" @@ -14,8 +15,8 @@ import ( ) var ( - ports = []string{":10000"} - // ports = []string{":10010"} + driverPort = flag.Int("driver_port", 10000, "port for communication with driver") + serverPort = flag.Int("server_port", 0, "port for operation as a server") ) type byteBufCodec struct { @@ -35,7 +36,9 @@ func (byteBufCodec) String() string { } type workerServer struct { - bs *benchmarkServer + bs *benchmarkServer + stop chan<- bool + serverPort int } func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { @@ -57,7 +60,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er case *testpb.ServerArgs_Setup: grpclog.Printf("server setup received:") - bs, err := startBenchmarkServerWithSetup(t.Setup) + bs, err := startBenchmarkServerWithSetup(t.Setup, s.serverPort) if err != nil { return err } @@ -100,25 +103,25 @@ func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb if s.bs != nil { s.bs.close() } + s.stop <- true return &testpb.Void{}, nil } func main() { - var wg sync.WaitGroup - wg.Add(len(ports)) - for i := 0; i < len(ports); i++ { - lis, err := net.Listen("tcp", ports[i]) - if err != nil { - grpclog.Fatalf("failed to listen: %v", err) - } - grpclog.Printf("worker %d listening at port %v", i, ports[i]) - - s := grpc.NewServer() - testpb.RegisterWorkerServiceServer(s, &workerServer{}) - go func() { - defer wg.Done() - s.Serve(lis) - }() + flag.Parse() + lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort)) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) } - wg.Wait() + grpclog.Printf("worker listening at port %v", *driverPort) + + s := grpc.NewServer() + stop := make(chan bool) + testpb.RegisterWorkerServiceServer(s, &workerServer{ + stop: stop, + serverPort: *serverPort, + }) + go s.Serve(lis) + <-stop + s.Stop() } From bdd0e9ff617a5ac8e099fc15174430fcfc8d25d3 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 21 Apr 2016 16:11:27 -0700 Subject: [PATCH 05/43] Fix error in benchmark_test --- benchmark/benchmark_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index 70b3d5db..e8eaa9bb 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -2,6 +2,7 @@ package benchmark import ( "os" + "strconv" "sync" "testing" "time" @@ -15,9 +16,9 @@ import ( func runUnary(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - target, stopper := StartServer("localhost:0") + targetPort, stopper := 
StartServer("localhost:0") defer stopper() - conn := NewClientConn(target) + conn := NewClientConn(":" + strconv.Itoa(targetPort)) tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. @@ -58,9 +59,9 @@ func runUnary(b *testing.B, maxConcurrentCalls int) { func runStream(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - target, stopper := StartServer("localhost:0") + targetPort, stopper := StartServer("localhost:0") defer stopper() - conn := NewClientConn(target) + conn := NewClientConn(":" + strconv.Itoa(targetPort)) tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. From c2e8421003117be0bbe3992fd27506160b776659 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 22 Apr 2016 11:34:05 -0700 Subject: [PATCH 06/43] Minor changes Change log print Ignore cpu core list and return the actual number of cores used Add copyright Change certfile and keyfile path to relative RunClient() returns "not implemented" error Close existing server when new server setup received Main goroutine will wait for server to stop Move benchmarkServer out of workerServer --- benchmark/benchmark.go | 3 +- benchmark/worker/benchmark_server.go | 74 ++++++++++++++++++------ benchmark/worker/main.go | 86 +++++++++++++++++++++------- benchmark/worker/util.go | 75 ++++++++++++++++++++++++ 4 files changed, 195 insertions(+), 43 deletions(-) create mode 100644 benchmark/worker/util.go diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 321f251a..514c0dd9 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -121,7 +121,6 @@ func (s *genericTestServer) StreamingCall(stream testpb.BenchmarkService_Streami m := make([]byte, s.reqSize) err := stream.(grpc.ServerStream).RecvMsg(m) if err == io.EOF { - // read done. return nil } if err != nil { @@ -133,7 +132,7 @@ func (s *genericTestServer) StreamingCall(stream testpb.BenchmarkService_Streami } } -// StartGenericServer starts a gRPC a benchmark service server, which supports custom codec. +// StartGenericServer starts a benchmark service server that supports custom codec. // It returns its listen port number and a function to stop the server. func StartGenericServer(addr string, reqSize, respSize int32, opts ...grpc.ServerOption) (int, func()) { lis, err := net.Listen("tcp", addr) diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index 4a7072fe..b6c2cd50 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -1,3 +1,36 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + package main import ( @@ -15,13 +48,14 @@ import ( ) var ( - // TODO change filepath - certFile = "/usr/local/google/home/menghanl/go/src/google.golang.org/grpc/benchmark/server/testdata/server1.pem" - keyFile = "/usr/local/google/home/menghanl/go/src/google.golang.org/grpc/benchmark/server/testdata/server1.key" + // File path related to google.golang.org/grpc. + certFile = "benchmark/server/testdata/server1.pem" + keyFile = "benchmark/server/testdata/server1.key" ) type benchmarkServer struct { port int + cores int close func() mu sync.RWMutex lastResetTime time.Time @@ -30,46 +64,48 @@ type benchmarkServer struct { func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { var opts []grpc.ServerOption - grpclog.Printf(" - server type: %v", setup.ServerType) + // Some setup options are ignored: + // - server type: + // will always start sync server + // - async server threads + // - core list + grpclog.Printf(" * server type: %v (ignored, always starts sync server)", setup.ServerType) switch setup.ServerType { - // Ignore server type. case testpb.ServerType_SYNC_SERVER: case testpb.ServerType_ASYNC_SERVER: case testpb.ServerType_ASYNC_GENERIC_SERVER: default: return nil, grpc.Errorf(codes.InvalidArgument, "unknow server type: %v", setup.ServerType) } + grpclog.Printf(" * async server threads: %v (ignored)", setup.AsyncServerThreads) + grpclog.Printf(" * core list: %v (ignored)", setup.CoreList) grpclog.Printf(" - security params: %v", setup.SecurityParams) if setup.SecurityParams != nil { - creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) + creds, err := credentials.NewServerTLSFromFile(Abs(certFile), Abs(keyFile)) if err != nil { grpclog.Fatalf("failed to generate credentials %v", err) } opts = append(opts, grpc.Creds(creds)) } - // Ignore async server threads. - grpclog.Printf(" - core limit: %v", setup.CoreLimit) + // Use one cpu core by default. + numOfCores := 1 if setup.CoreLimit > 0 { - runtime.GOMAXPROCS(int(setup.CoreLimit)) - } else { - runtime.GOMAXPROCS(1) - } - - grpclog.Printf(" - core list: %v", setup.CoreList) - if len(setup.CoreList) > 0 { - return nil, grpc.Errorf(codes.InvalidArgument, "specifying core list is not supported") + numOfCores = int(setup.CoreLimit) } + runtime.GOMAXPROCS(numOfCores) grpclog.Printf(" - port: %v", setup.Port) var port int + // Priority: setup.Port > serverPort > default (0). 
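	// Illustrative example (values assumed): with setup.Port == 0 and the
	// worker started with -server_port=8080, the benchmark server binds to
	// 8080. If both are zero, port stays 0, net.Listen picks a free port,
	// and the actual port is reported back to the driver in ServerStatus.Port.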
if setup.Port != 0 { port = int(setup.Port) } else if serverPort != 0 { port = serverPort } + grpclog.Printf(" - payload config: %v", setup.PayloadConfig) var p int var close func() @@ -86,24 +122,24 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) } } else { - // Start protobuf server is payload config is nil + // Start protobuf server is payload config is nil. p, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) } grpclog.Printf("benchmark server listening at port %v", p) - bs := &benchmarkServer{port: p, close: close, lastResetTime: time.Now()} + bs := &benchmarkServer{port: p, cores: numOfCores, close: close, lastResetTime: time.Now()} return bs, nil } func (bs *benchmarkServer) getStats() *testpb.ServerStats { + // TODO wall time, sys time, user time. bs.mu.RLock() defer bs.mu.RUnlock() return &testpb.ServerStats{TimeElapsed: time.Since(bs.lastResetTime).Seconds(), TimeUser: 0, TimeSystem: 0} } func (bs *benchmarkServer) reset() { - // TODO wall time, sys time, user time bs.mu.Lock() defer bs.mu.Unlock() bs.lastResetTime = time.Now() diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 006b46d8..fb6f3e82 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -1,3 +1,36 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + package main import ( @@ -16,7 +49,7 @@ import ( var ( driverPort = flag.Int("driver_port", 10000, "port for communication with driver") - serverPort = flag.Int("server_port", 0, "port for operation as a server") + serverPort = flag.Int("server_port", 0, "default port for benchmark server") ) type byteBufCodec struct { @@ -36,19 +69,20 @@ func (byteBufCodec) String() string { } type workerServer struct { - bs *benchmarkServer stop chan<- bool serverPort int } func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { + var bs *benchmarkServer for { in, err := stream.Recv() if err == io.EOF { + // Close benchmark server when stream ends. grpclog.Printf("closing benchmark server") - if s.bs != nil { - s.bs.close() - s.bs = nil + if bs != nil { + bs.close() + bs = nil } return nil } @@ -56,30 +90,34 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er return err } - switch t := in.Argtype.(type) { + switch argtype := in.Argtype.(type) { case *testpb.ServerArgs_Setup: grpclog.Printf("server setup received:") - - bs, err := startBenchmarkServerWithSetup(t.Setup, s.serverPort) + newbs, err := startBenchmarkServerWithSetup(argtype.Setup, s.serverPort) if err != nil { return err } - s.bs = bs + if bs != nil { + grpclog.Printf("server setup received when server already exists, closing the existing server") + bs.close() + } + bs = newbs + case *testpb.ServerArgs_Mark: grpclog.Printf("server mark received:") - grpclog.Printf(" - %v", t) - if s.bs == nil { + grpclog.Printf(" - %v", argtype) + if bs == nil { return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received") } - if t.Mark.Reset_ { - s.bs.reset() + if argtype.Mark.Reset_ { + bs.reset() } } out := &testpb.ServerStatus{ - Stats: s.bs.getStats(), - Port: int32(s.bs.port), - Cores: 1, + Stats: bs.getStats(), + Port: int32(bs.port), + Cores: int32(bs.cores), } if err := stream.Send(out); err != nil { return err @@ -90,7 +128,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er } func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { - return nil + return grpc.Errorf(codes.Unimplemented, "RunClient not implemented") } func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) { @@ -100,10 +138,7 @@ func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (* func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { grpclog.Printf("quiting worker") - if s.bs != nil { - s.bs.close() - } - s.stop <- true + defer func() { s.stop <- true }() return &testpb.Void{}, nil } @@ -121,7 +156,14 @@ func main() { stop: stop, serverPort: *serverPort, }) - go s.Serve(lis) + + stopped := make(chan bool) + go func() { + s.Serve(lis) + stopped <- true + }() + <-stop s.Stop() + <-stopped } diff --git a/benchmark/worker/util.go b/benchmark/worker/util.go new file mode 100644 index 00000000..3a652b62 --- /dev/null +++ b/benchmark/worker/util.go @@ -0,0 +1,75 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package main + +import ( + "log" + "os" + "path/filepath" +) + +// Abs returns the absolute path the given relative file or directory path, +// relative to the google.golang.org/grpc directory in the user's GOPATH. +// If rel is already absolute, it is returned unmodified. +func Abs(rel string) string { + if filepath.IsAbs(rel) { + return rel + } + v, err := goPackagePath("google.golang.org/grpc") + if err != nil { + log.Fatalf("Error finding google.golang.org/grpc/testdata directory: %v", err) + } + return filepath.Join(v, rel) +} + +func goPackagePath(pkg string) (path string, err error) { + gp := os.Getenv("GOPATH") + if gp == "" { + return path, os.ErrNotExist + } + for _, p := range filepath.SplitList(gp) { + dir := filepath.Join(p, "src", filepath.FromSlash(pkg)) + fi, err := os.Stat(dir) + if os.IsNotExist(err) { + continue + } + if err != nil { + return "", err + } + if !fi.IsDir() { + continue + } + return dir, nil + } + return path, os.ErrNotExist +} From 2aaff82a6ecfbb37a2593245df8c5144bb24d745 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 18 Apr 2016 14:49:05 -0700 Subject: [PATCH 07/43] Add RunClient() --- benchmark/benchmark.go | 9 +- benchmark/benchmark_test.go | 8 +- benchmark/client/main.go | 8 +- benchmark/server/testdata/ca.pem | 15 ++ benchmark/stats/histogram.go | 9 ++ benchmark/worker/benchmark_client.go | 214 +++++++++++++++++++++++++++ benchmark/worker/main.go | 47 +++++- 7 files changed, 298 insertions(+), 12 deletions(-) create mode 100644 benchmark/server/testdata/ca.pem create mode 100644 benchmark/worker/benchmark_client.go diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 514c0dd9..1a2e0cc7 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -161,7 +161,7 @@ func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) { } // DoStreamingRoundTrip performs a round trip for a single streaming rpc. 
-func DoStreamingRoundTrip(tc testpb.BenchmarkServiceClient, stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { +func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, @@ -177,8 +177,11 @@ func DoStreamingRoundTrip(tc testpb.BenchmarkServiceClient, stream testpb.Benchm } // NewClientConn creates a gRPC client connection to addr. -func NewClientConn(addr string) *grpc.ClientConn { - conn, err := grpc.Dial(addr, grpc.WithInsecure()) +func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { + if len(opts) <= 0 { + opts = append(opts, grpc.WithInsecure()) + } + conn, err := grpc.Dial(addr, opts...) if err != nil { grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) } diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index e8eaa9bb..8057c064 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -70,7 +70,7 @@ func runStream(b *testing.B, maxConcurrentCalls int) { b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) } for i := 0; i < 10; i++ { - streamCaller(tc, stream) + streamCaller(stream) } ch := make(chan int, maxConcurrentCalls*4) @@ -89,7 +89,7 @@ func runStream(b *testing.B, maxConcurrentCalls int) { } for range ch { start := time.Now() - streamCaller(tc, stream) + streamCaller(stream) elapse := time.Since(start) mu.Lock() s.Add(elapse) @@ -111,8 +111,8 @@ func unaryCaller(client testpb.BenchmarkServiceClient) { DoUnaryCall(client, 1, 1) } -func streamCaller(client testpb.BenchmarkServiceClient, stream testpb.BenchmarkService_StreamingCallClient) { - DoStreamingRoundTrip(client, stream, 1, 1) +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { + DoStreamingRoundTrip(stream, 1, 1) } func BenchmarkClientStreamc1(b *testing.B) { diff --git a/benchmark/client/main.go b/benchmark/client/main.go index 27cc1a8b..5dfbe6a3 100644 --- a/benchmark/client/main.go +++ b/benchmark/client/main.go @@ -32,8 +32,8 @@ func unaryCaller(client testpb.BenchmarkServiceClient) { benchmark.DoUnaryCall(client, 1, 1) } -func streamCaller(client testpb.BenchmarkServiceClient, stream testpb.BenchmarkService_StreamingCallClient) { - benchmark.DoStreamingRoundTrip(client, stream, 1, 1) +func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { + benchmark.DoStreamingRoundTrip(stream, 1, 1) } func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.BenchmarkServiceClient) { @@ -107,11 +107,11 @@ func closeLoopStream() { } // Do some warm up. 
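	// (The 100 warm-up round trips below run before any timing starts, so
	// connection setup and stream establishment are not counted in the
	// recorded latencies.)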
for i := 0; i < 100; i++ { - streamCaller(tc, stream) + streamCaller(stream) } for range ch { start := time.Now() - streamCaller(tc, stream) + streamCaller(stream) elapse := time.Since(start) mu.Lock() s.Add(elapse) diff --git a/benchmark/server/testdata/ca.pem b/benchmark/server/testdata/ca.pem new file mode 100644 index 00000000..6c8511a7 --- /dev/null +++ b/benchmark/server/testdata/ca.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla +Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 +YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT +BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 ++L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu +g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd +Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau +sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m +oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG +Dfcog5wrJytaQ6UA0wE= +-----END CERTIFICATE----- diff --git a/benchmark/stats/histogram.go b/benchmark/stats/histogram.go index cfb40c90..60763960 100644 --- a/benchmark/stats/histogram.go +++ b/benchmark/stats/histogram.go @@ -145,6 +145,15 @@ func NewHistogram(opts HistogramOptions) *Histogram { return &h } +func (h *Histogram) Clear() { + h.count = newCounter() + h.sum = newCounter() + h.tracker = newTracker() + for _, v := range h.buckets { + v.count = newCounter() + } +} + // Opts returns a copy of the options used to create the Histogram. 
func (h *Histogram) Opts() HistogramOptions { return h.opts diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go new file mode 100644 index 00000000..94d53c7b --- /dev/null +++ b/benchmark/worker/benchmark_client.go @@ -0,0 +1,214 @@ +package main + +import ( + "math" + "runtime" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/benchmark" + testpb "google.golang.org/grpc/benchmark/grpc_testing" + "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" +) + +var ( + caFile = "/usr/local/google/home/menghanl/go/src/google.golang.org/grpc/benchmark/server/testdata/ca.pem" +) + +type benchmarkClient struct { + conns []*grpc.ClientConn + histogramGrowFactor float64 + histogramMaxPossible float64 + stop chan bool + mu sync.RWMutex + lastResetTime time.Time + histogram *stats.Histogram +} + +func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient, error) { + var opts []grpc.DialOption + + grpclog.Printf(" - client type: %v", setup.ClientType) + switch setup.ClientType { + // Ignore client type + case testpb.ClientType_SYNC_CLIENT: + case testpb.ClientType_ASYNC_CLIENT: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow client type: %v", setup.ClientType) + } + + grpclog.Printf(" - security params: %v", setup.SecurityParams) + if setup.SecurityParams != nil { + creds, err := credentials.NewClientTLSFromFile(caFile, setup.SecurityParams.ServerHostOverride) + if err != nil { + grpclog.Fatalf("failed to create TLS credentials %v", err) + } + opts = append(opts, grpc.WithTransportCredentials(creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + // Ignore async client threads. 
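	// GOMAXPROCS below bounds how many OS threads execute Go code at once
	// for this worker process; with CoreLimit unset (0) the client runs on
	// a single core.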
+ + grpclog.Printf(" - core limit: %v", setup.CoreLimit) + if setup.CoreLimit > 0 { + runtime.GOMAXPROCS(int(setup.CoreLimit)) + } else { + // runtime.GOMAXPROCS(runtime.NumCPU()) + runtime.GOMAXPROCS(1) + } + + // TODO payload config + grpclog.Printf(" - payload config: %v", setup.PayloadConfig) + var payloadReqSize, payloadRespSize int + if setup.PayloadConfig != nil { + // TODO payload config + grpclog.Printf("payload config: %v", setup.PayloadConfig) + switch c := setup.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + opts = append(opts, grpc.WithCodec(byteBufCodec{})) + payloadReqSize = int(c.BytebufParams.ReqSize) + payloadRespSize = int(c.BytebufParams.RespSize) + case *testpb.PayloadConfig_SimpleParams: + payloadReqSize = int(c.SimpleParams.ReqSize) + payloadRespSize = int(c.SimpleParams.RespSize) + case *testpb.PayloadConfig_ComplexParams: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) + } + } + + // TODO core list + grpclog.Printf(" - core list: %v", setup.CoreList) + + grpclog.Printf(" - histogram params: %v", setup.HistogramParams) + grpclog.Printf(" - server targets: %v", setup.ServerTargets) + grpclog.Printf(" - rpcs per chann: %v", setup.OutstandingRpcsPerChannel) + grpclog.Printf(" - channel number: %v", setup.ClientChannels) + + rpcCount, connCount := int(setup.OutstandingRpcsPerChannel), int(setup.ClientChannels) + + bc := &benchmarkClient{ + conns: make([]*grpc.ClientConn, connCount), + histogramGrowFactor: setup.HistogramParams.Resolution, + histogramMaxPossible: setup.HistogramParams.MaxPossible, + } + + for connIndex := 0; connIndex < connCount; connIndex++ { + bc.conns[connIndex] = benchmark.NewClientConn(setup.ServerTargets[connIndex%len(setup.ServerTargets)], opts...) 
+ } + + bc.histogram = stats.NewHistogram(stats.HistogramOptions{ + NumBuckets: int(math.Log(bc.histogramMaxPossible)/math.Log(1+bc.histogramGrowFactor)) + 1, + GrowthFactor: bc.histogramGrowFactor, + MinValue: 0, + }) + + grpclog.Printf(" - rpc type: %v", setup.RpcType) + var rpc func(testpb.BenchmarkServiceClient) + switch setup.RpcType { + case testpb.RpcType_UNARY: + rpc = func(client testpb.BenchmarkServiceClient) { + benchmark.DoUnaryCall(client, payloadReqSize, payloadRespSize) + } + case testpb.RpcType_STREAMING: + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) + } + + grpclog.Printf(" - load params: %v", setup.LoadParams) + bc.stop = make(chan bool) + switch lp := setup.LoadParams.Load.(type) { + case *testpb.LoadParams_ClosedLoop: + grpclog.Printf(" - %v", lp.ClosedLoop) + doCloseLoop(bc.histogram, bc.conns, rpcCount, rpc, bc.stop) + case *testpb.LoadParams_Poisson: + grpclog.Printf(" - %v", lp.Poisson) + case *testpb.LoadParams_Uniform: + grpclog.Printf(" - %v", lp.Uniform) + case *testpb.LoadParams_Determ: + grpclog.Printf(" - %v", lp.Determ) + case *testpb.LoadParams_Pareto: + grpclog.Printf(" - %v", lp.Pareto) + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", setup.LoadParams) + } + + bc.mu.Lock() + defer bc.mu.Unlock() + bc.lastResetTime = time.Now() + return bc, nil +} + +func doCloseLoop(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, rpc func(testpb.BenchmarkServiceClient), stop <-chan bool) { + clients := make([]testpb.BenchmarkServiceClient, len(conns)) + for ic, conn := range conns { + clients[ic] = testpb.NewBenchmarkServiceClient(conn) + for j := 0; j < 100/len(conns); j++ { + rpc(clients[ic]) + } + } + var mu sync.Mutex + for ic, _ := range conns { + for j := 0; j < rpcCount; j++ { + go func() { + for { + select { + case <-stop: + grpclog.Printf("stopped") + return + default: + start := time.Now() + rpc(clients[ic]) + elapse := time.Since(start) + go func() { + mu.Lock() + h.Add(int64(elapse / time.Nanosecond)) + mu.Unlock() + }() + } + } + }() + } + } + grpclog.Printf("close loop done, count: %v", rpcCount) +} + +func (bc *benchmarkClient) getStats() *testpb.ClientStats { + bc.mu.RLock() + // time.Sleep(1 * time.Second) + defer bc.mu.RUnlock() + histogramValue := bc.histogram.Value() + b := make([]uint32, len(histogramValue.Buckets)) + tempCount := make(map[int64]int) + for i, v := range histogramValue.Buckets { + b[i] = uint32(v.Count) + tempCount[v.Count] += 1 + } + grpclog.Printf("+++++\n%v count: %v\n+++++", tempCount, histogramValue.Count) + return &testpb.ClientStats{ + Latencies: &testpb.HistogramData{ + Bucket: b, + MinSeen: float64(histogramValue.Min), + MaxSeen: float64(histogramValue.Max), + Sum: float64(histogramValue.Sum), + // TODO change to squares + SumOfSquares: float64(histogramValue.Sum), + Count: float64(histogramValue.Count), + }, + TimeElapsed: time.Since(bc.lastResetTime).Seconds(), + TimeUser: 0, + TimeSystem: 0, + } +} + +func (bc *benchmarkClient) reset() { + bc.mu.Lock() + defer bc.mu.Unlock() + bc.lastResetTime = time.Now() + bc.histogram.Clear() +} diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index fb6f3e82..04668f16 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -71,6 +71,7 @@ func (byteBufCodec) String() string { type workerServer struct { stop chan<- bool serverPort int + bc *benchmarkClient } func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { @@ -128,7 
+129,48 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er } func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { - return grpc.Errorf(codes.Unimplemented, "RunClient not implemented") + + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var out *testpb.ClientStatus + switch t := in.Argtype.(type) { + case *testpb.ClientArgs_Setup: + grpclog.Printf("client setup received:") + + bc, err := startBenchmarkClientWithSetup(t.Setup) + if err != nil { + return err + } + s.bc = bc + out = &testpb.ClientStatus{ + Stats: s.bc.getStats(), + } + case *testpb.ClientArgs_Mark: + grpclog.Printf("client mark received:") + grpclog.Printf(" - %v", t) + if s.bc == nil { + return grpc.Errorf(codes.InvalidArgument, "client does not exist when mark received") + } + out = &testpb.ClientStatus{ + Stats: s.bc.getStats(), + } + if t.Mark.Reset_ { + s.bc.reset() + } + } + if err := stream.Send(out); err != nil { + return err + } + } + + return nil } func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) { @@ -139,6 +181,9 @@ func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (* func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { grpclog.Printf("quiting worker") defer func() { s.stop <- true }() + if s.bc != nil { + close(s.bc.stop) + } return &testpb.Void{}, nil } From 257710d39c10a553d53773ec536582df07af8709 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 18 Apr 2016 18:30:38 -0700 Subject: [PATCH 08/43] Add close benchmark rpc calls --- benchmark/worker/benchmark_client.go | 87 ++++++++++++++++++++-------- 1 file changed, 64 insertions(+), 23 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 94d53c7b..9ace2bce 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/benchmark" testpb "google.golang.org/grpc/benchmark/grpc_testing" @@ -108,24 +109,10 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient MinValue: 0, }) - grpclog.Printf(" - rpc type: %v", setup.RpcType) - var rpc func(testpb.BenchmarkServiceClient) - switch setup.RpcType { - case testpb.RpcType_UNARY: - rpc = func(client testpb.BenchmarkServiceClient) { - benchmark.DoUnaryCall(client, payloadReqSize, payloadRespSize) - } - case testpb.RpcType_STREAMING: - default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) - } - grpclog.Printf(" - load params: %v", setup.LoadParams) - bc.stop = make(chan bool) switch lp := setup.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: grpclog.Printf(" - %v", lp.ClosedLoop) - doCloseLoop(bc.histogram, bc.conns, rpcCount, rpc, bc.stop) case *testpb.LoadParams_Poisson: grpclog.Printf(" - %v", lp.Poisson) case *testpb.LoadParams_Uniform: @@ -138,18 +125,29 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient return nil, grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", setup.LoadParams) } + grpclog.Printf(" - rpc type: %v", setup.RpcType) + bc.stop = make(chan bool) + switch setup.RpcType { + case testpb.RpcType_UNARY: + doCloseLoopUnaryBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) + case 
testpb.RpcType_STREAMING: + doCloseLoopStreamingBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) + } + bc.mu.Lock() defer bc.mu.Unlock() bc.lastResetTime = time.Now() return bc, nil } -func doCloseLoop(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, rpc func(testpb.BenchmarkServiceClient), stop <-chan bool) { +func doCloseLoopUnaryBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, stop <-chan bool) { clients := make([]testpb.BenchmarkServiceClient, len(conns)) for ic, conn := range conns { clients[ic] = testpb.NewBenchmarkServiceClient(conn) for j := 0; j < 100/len(conns); j++ { - rpc(clients[ic]) + benchmark.DoUnaryCall(clients[ic], reqSize, respSize) } } var mu sync.Mutex @@ -157,19 +155,62 @@ func doCloseLoop(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, rpc for j := 0; j < rpcCount; j++ { go func() { for { + done := make(chan bool) + go func() { + start := time.Now() + benchmark.DoUnaryCall(clients[ic], reqSize, respSize) + elapse := time.Since(start) + mu.Lock() + h.Add(int64(elapse / time.Nanosecond)) + mu.Unlock() + done <- true + }() select { case <-stop: grpclog.Printf("stopped") return - default: + case <-done: + } + } + }() + } + } + grpclog.Printf("close loop done, count: %v", rpcCount) +} + +func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, stop <-chan bool) { + streams := make([]testpb.BenchmarkService_StreamingCallClient, len(conns)) + for ic, conn := range conns { + c := testpb.NewBenchmarkServiceClient(conn) + s, err := c.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) + } + streams[ic] = s + for j := 0; j < 100/len(conns); j++ { + benchmark.DoStreamingRoundTrip(streams[ic], reqSize, respSize) + } + } + var mu sync.Mutex + for ic, _ := range conns { + for j := 0; j < rpcCount; j++ { + go func() { + for { + done := make(chan bool) + go func() { start := time.Now() - rpc(clients[ic]) + benchmark.DoStreamingRoundTrip(streams[ic], reqSize, respSize) elapse := time.Since(start) - go func() { - mu.Lock() - h.Add(int64(elapse / time.Nanosecond)) - mu.Unlock() - }() + mu.Lock() + h.Add(int64(elapse / time.Nanosecond)) + mu.Unlock() + done <- true + }() + select { + case <-stop: + grpclog.Printf("stopped") + return + case <-done: } } }() From b5e39adfb295299493f201df4c24dbc3708ce758 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 21 Apr 2016 12:19:46 -0700 Subject: [PATCH 09/43] Add generic close loop --- benchmark/benchmark.go | 11 +++++++++++ benchmark/worker/benchmark_client.go | 19 +++++++++++++++---- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 1a2e0cc7..9b7b49f4 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -176,6 +176,17 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re } } +// DoGenericStreamingRoundTrip performs a round trip for a single streaming rpc, using custom codec. 
+func DoGenericStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { + if err := stream.(grpc.ClientStream).SendMsg(make([]byte, reqSize)); err != nil { + grpclog.Fatalf("StreamingCall(_).Send: %v", err) + } + m := make([]byte, respSize) + if err := stream.(grpc.ClientStream).RecvMsg(m); err != nil { + grpclog.Fatalf("StreamingCall(_).Recv: %v", err) + } +} + // NewClientConn creates a gRPC client connection to addr. func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { if len(opts) <= 0 { diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 9ace2bce..5e4070dc 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -66,6 +66,7 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient // TODO payload config grpclog.Printf(" - payload config: %v", setup.PayloadConfig) var payloadReqSize, payloadRespSize int + var payloadType string if setup.PayloadConfig != nil { // TODO payload config grpclog.Printf("payload config: %v", setup.PayloadConfig) @@ -74,10 +75,13 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient opts = append(opts, grpc.WithCodec(byteBufCodec{})) payloadReqSize = int(c.BytebufParams.ReqSize) payloadRespSize = int(c.BytebufParams.RespSize) + payloadType = "bytebuf" case *testpb.PayloadConfig_SimpleParams: payloadReqSize = int(c.SimpleParams.ReqSize) payloadRespSize = int(c.SimpleParams.RespSize) + payloadType = "protobuf" case *testpb.PayloadConfig_ComplexParams: + return nil, grpc.Errorf(codes.InvalidArgument, "unsupported payload config: %v", setup.PayloadConfig) default: return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) } @@ -131,7 +135,7 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient case testpb.RpcType_UNARY: doCloseLoopUnaryBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) case testpb.RpcType_STREAMING: - doCloseLoopStreamingBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) + doCloseLoopStreamingBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, payloadType, bc.stop) default: return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) } @@ -143,6 +147,7 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient } func doCloseLoopUnaryBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, stop <-chan bool) { + clients := make([]testpb.BenchmarkServiceClient, len(conns)) for ic, conn := range conns { clients[ic] = testpb.NewBenchmarkServiceClient(conn) @@ -178,7 +183,13 @@ func doCloseLoopUnaryBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpc grpclog.Printf("close loop done, count: %v", rpcCount) } -func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, stop <-chan bool) { +func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, payloadType string, stop <-chan bool) { + var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) + if payloadType == "bytebuf" { + doRPC = benchmark.DoGenericStreamingRoundTrip + } else { + doRPC = benchmark.DoStreamingRoundTrip + } streams := make([]testpb.BenchmarkService_StreamingCallClient, len(conns)) for ic, conn := 
range conns { c := testpb.NewBenchmarkServiceClient(conn) @@ -188,7 +199,7 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, } streams[ic] = s for j := 0; j < 100/len(conns); j++ { - benchmark.DoStreamingRoundTrip(streams[ic], reqSize, respSize) + doRPC(streams[ic], reqSize, respSize) } } var mu sync.Mutex @@ -199,7 +210,7 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, done := make(chan bool) go func() { start := time.Now() - benchmark.DoStreamingRoundTrip(streams[ic], reqSize, respSize) + doRPC(streams[ic], reqSize, respSize) elapse := time.Since(start) mu.Lock() h.Add(int64(elapse / time.Nanosecond)) From fc76af457fd2aca9533c67cc990e733b2d0233c8 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 19 Apr 2016 15:50:32 -0700 Subject: [PATCH 10/43] Add distribution variable to be used by open loop --- benchmark/worker/benchmark_client.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 5e4070dc..44c12541 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -114,17 +114,24 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient }) grpclog.Printf(" - load params: %v", setup.LoadParams) + // TODO distribution + var dist *int switch lp := setup.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: grpclog.Printf(" - %v", lp.ClosedLoop) case *testpb.LoadParams_Poisson: grpclog.Printf(" - %v", lp.Poisson) + return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) + // TODO poisson case *testpb.LoadParams_Uniform: grpclog.Printf(" - %v", lp.Uniform) + return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) case *testpb.LoadParams_Determ: grpclog.Printf(" - %v", lp.Determ) + return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) case *testpb.LoadParams_Pareto: grpclog.Printf(" - %v", lp.Pareto) + return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) default: return nil, grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", setup.LoadParams) } @@ -133,9 +140,15 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient bc.stop = make(chan bool) switch setup.RpcType { case testpb.RpcType_UNARY: - doCloseLoopUnaryBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) + if dist == nil { + doCloseLoopUnaryBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) + } + // TODO else do open loop case testpb.RpcType_STREAMING: - doCloseLoopStreamingBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, payloadType, bc.stop) + if dist == nil { + doCloseLoopStreamingBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, payloadType, bc.stop) + } + // TODO else do open loop default: return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) } From 9a595041db930cdcf325a916d93cac2c3d205565 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 25 Apr 2016 14:51:05 -0700 Subject: [PATCH 11/43] Add benchmarkClient shutdown() --- benchmark/benchmark.go | 24 +++++--- benchmark/benchmark_test.go | 9 ++- benchmark/worker/benchmark_client.go | 86 ++++++++++++++++++++-------- benchmark/worker/main.go | 7 +-- 4 files changed, 88 
insertions(+), 38 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 9b7b49f4..19612542 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -148,7 +148,7 @@ func StartGenericServer(addr string, reqSize, respSize int32, opts ...grpc.Serve } // DoUnaryCall performs an unary RPC with given stub and request and response sizes. -func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) { +func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, @@ -156,12 +156,14 @@ func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) { Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { - grpclog.Fatal("/BenchmarkService/UnaryCall RPC failed: ", err) + // grpclog.Print("/BenchmarkService/UnaryCall RPC failed: ", err) + return grpc.Errorf(grpc.Code(err), "/BenchmarkService/UnaryCall RPC failed: %v", grpc.ErrorDesc(err)) } + return nil } // DoStreamingRoundTrip performs a round trip for a single streaming rpc. -func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { +func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, @@ -169,22 +171,28 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re Payload: pl, } if err := stream.Send(req); err != nil { - grpclog.Fatalf("StreamingCall(_).Send: %v", err) + // grpclog.Printf("StreamingCall(_).Send: %v", err) + return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Send: %v", grpc.ErrorDesc(err)) } if _, err := stream.Recv(); err != nil { - grpclog.Fatalf("StreamingCall(_).Recv: %v", err) + // grpclog.Printf("StreamingCall(_).Recv: %v", err) + return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Recv: %v", grpc.ErrorDesc(err)) } + return nil } // DoGenericStreamingRoundTrip performs a round trip for a single streaming rpc, using custom codec. -func DoGenericStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { +func DoGenericStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { if err := stream.(grpc.ClientStream).SendMsg(make([]byte, reqSize)); err != nil { - grpclog.Fatalf("StreamingCall(_).Send: %v", err) + // grpclog.Printf("StreamingCall(_).(ClientStream).SendMsg: %v", err) + return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).SendMsg: %v", grpc.ErrorDesc(err)) } m := make([]byte, respSize) if err := stream.(grpc.ClientStream).RecvMsg(m); err != nil { - grpclog.Fatalf("StreamingCall(_).Recv: %v", err) + // grpclog.Printf("StreamingCall(_).(ClientStream).RecvMsg: %v", err) + return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).RecvMsg: %v", grpc.ErrorDesc(err)) } + return nil } // NewClientConn creates a gRPC client connection to addr. 
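// Usage sketch for the error-returning helpers above (standalone program;
// the address and message sizes are assumed values, and a benchmark server
// must already be listening there). With no DialOption passed,
// NewClientConn falls back to grpc.WithInsecure().
package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/grpclog"
)

func main() {
	conn := benchmark.NewClientConn("localhost:50051") // assumed server address
	defer conn.Close()
	tc := testpb.NewBenchmarkServiceClient(conn)

	// Unary round trip; errors now propagate to the caller instead of
	// aborting the process inside the helper.
	if err := benchmark.DoUnaryCall(tc, 1, 1); err != nil {
		grpclog.Fatalf("DoUnaryCall failed: %v", err)
	}

	// Streaming round trip on a single long-lived stream.
	stream, err := tc.StreamingCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
	}
	if err := benchmark.DoStreamingRoundTrip(stream, 1, 1); err != nil {
		grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
	}
}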
diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index 8057c064..d7dfa25e 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -11,6 +11,7 @@ import ( "google.golang.org/grpc" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/grpclog" ) func runUnary(b *testing.B, maxConcurrentCalls int) { @@ -108,11 +109,15 @@ func runStream(b *testing.B, maxConcurrentCalls int) { conn.Close() } func unaryCaller(client testpb.BenchmarkServiceClient) { - DoUnaryCall(client, 1, 1) + if err := DoUnaryCall(client, 1, 1); err != nil { + grpclog.Fatalf("DoUnaryCall failed: %v", err) + } } func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { - DoStreamingRoundTrip(stream, 1, 1) + if err := DoStreamingRoundTrip(stream, 1, 1); err != nil { + grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) + } } func BenchmarkClientStreamc1(b *testing.B) { diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 44c12541..3760eab4 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -97,22 +97,6 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient rpcCount, connCount := int(setup.OutstandingRpcsPerChannel), int(setup.ClientChannels) - bc := &benchmarkClient{ - conns: make([]*grpc.ClientConn, connCount), - histogramGrowFactor: setup.HistogramParams.Resolution, - histogramMaxPossible: setup.HistogramParams.MaxPossible, - } - - for connIndex := 0; connIndex < connCount; connIndex++ { - bc.conns[connIndex] = benchmark.NewClientConn(setup.ServerTargets[connIndex%len(setup.ServerTargets)], opts...) - } - - bc.histogram = stats.NewHistogram(stats.HistogramOptions{ - NumBuckets: int(math.Log(bc.histogramMaxPossible)/math.Log(1+bc.histogramGrowFactor)) + 1, - GrowthFactor: bc.histogramGrowFactor, - MinValue: 0, - }) - grpclog.Printf(" - load params: %v", setup.LoadParams) // TODO distribution var dist *int @@ -137,20 +121,44 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient } grpclog.Printf(" - rpc type: %v", setup.RpcType) - bc.stop = make(chan bool) + var rpcType string switch setup.RpcType { case testpb.RpcType_UNARY: + rpcType = "unary" + case testpb.RpcType_STREAMING: + rpcType = "streaming" + default: + return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) + } + + bc := &benchmarkClient{ + conns: make([]*grpc.ClientConn, connCount), + histogramGrowFactor: setup.HistogramParams.Resolution, + histogramMaxPossible: setup.HistogramParams.MaxPossible, + } + + for connIndex := 0; connIndex < connCount; connIndex++ { + bc.conns[connIndex] = benchmark.NewClientConn(setup.ServerTargets[connIndex%len(setup.ServerTargets)], opts...) 
+ } + + bc.histogram = stats.NewHistogram(stats.HistogramOptions{ + NumBuckets: int(math.Log(bc.histogramMaxPossible)/math.Log(1+bc.histogramGrowFactor)) + 1, + GrowthFactor: bc.histogramGrowFactor, + MinValue: 0, + }) + + bc.stop = make(chan bool) + switch rpcType { + case "unary": if dist == nil { doCloseLoopUnaryBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) } // TODO else do open loop - case testpb.RpcType_STREAMING: + case "streaming": if dist == nil { doCloseLoopStreamingBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, payloadType, bc.stop) } // TODO else do open loop - default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) } bc.mu.Lock() @@ -168,15 +176,21 @@ func doCloseLoopUnaryBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpc benchmark.DoUnaryCall(clients[ic], reqSize, respSize) } } + var wg sync.WaitGroup + wg.Add(len(conns) * rpcCount) var mu sync.Mutex for ic, _ := range conns { for j := 0; j < rpcCount; j++ { go func() { + defer wg.Done() for { done := make(chan bool) go func() { start := time.Now() - benchmark.DoUnaryCall(clients[ic], reqSize, respSize) + if err := benchmark.DoUnaryCall(clients[ic], reqSize, respSize); err != nil { + done <- false + return + } elapse := time.Since(start) mu.Lock() h.Add(int64(elapse / time.Nanosecond)) @@ -194,10 +208,17 @@ func doCloseLoopUnaryBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpc } } grpclog.Printf("close loop done, count: %v", rpcCount) + go func() { + wg.Wait() + for _, c := range conns { + c.Close() + } + grpclog.Printf("conns closed") + }() } func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, payloadType string, stop <-chan bool) { - var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) + var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { doRPC = benchmark.DoGenericStreamingRoundTrip } else { @@ -208,22 +229,28 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, c := testpb.NewBenchmarkServiceClient(conn) s, err := c.StreamingCall(context.Background()) if err != nil { - grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) + grpclog.Printf("%v.StreamingCall(_) = _, %v", c, err) } streams[ic] = s for j := 0; j < 100/len(conns); j++ { doRPC(streams[ic], reqSize, respSize) } } + var wg sync.WaitGroup + wg.Add(len(conns) * rpcCount) var mu sync.Mutex for ic, _ := range conns { for j := 0; j < rpcCount; j++ { go func() { + defer wg.Done() for { done := make(chan bool) go func() { start := time.Now() - doRPC(streams[ic], reqSize, respSize) + if err := doRPC(streams[ic], reqSize, respSize); err != nil { + done <- false + return + } elapse := time.Since(start) mu.Lock() h.Add(int64(elapse / time.Nanosecond)) @@ -241,6 +268,13 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, } } grpclog.Printf("close loop done, count: %v", rpcCount) + go func() { + wg.Wait() + for _, c := range conns { + c.Close() + } + grpclog.Printf("conns closed") + }() } func (bc *benchmarkClient) getStats() *testpb.ClientStats { @@ -277,3 +311,7 @@ func (bc *benchmarkClient) reset() { bc.lastResetTime = time.Now() bc.histogram.Clear() } + +func (bc *benchmarkClient) shutdown() { + close(bc.stop) +} diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 04668f16..76606373 100644 --- a/benchmark/worker/main.go +++ 
b/benchmark/worker/main.go @@ -71,7 +71,8 @@ func (byteBufCodec) String() string { type workerServer struct { stop chan<- bool serverPort int - bc *benchmarkClient + // TODO move bc out of workerServer + bc *benchmarkClient } func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { @@ -133,6 +134,7 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er for { in, err := stream.Recv() if err == io.EOF { + s.bc.shutdown() return nil } if err != nil { @@ -181,9 +183,6 @@ func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (* func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { grpclog.Printf("quiting worker") defer func() { s.stop <- true }() - if s.bc != nil { - close(s.bc.stop) - } return &testpb.Void{}, nil } From ad0677d6a91efec5711c80b27e618904a3a19fc9 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 26 Apr 2016 13:46:47 -0700 Subject: [PATCH 12/43] Close loop: Create multiple streams on one connection --- benchmark/worker/benchmark_client.go | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 3760eab4..f1ea2c9b 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -224,30 +224,32 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, } else { doRPC = benchmark.DoStreamingRoundTrip } - streams := make([]testpb.BenchmarkService_StreamingCallClient, len(conns)) + streams := make([]testpb.BenchmarkService_StreamingCallClient, len(conns)*rpcCount) for ic, conn := range conns { - c := testpb.NewBenchmarkServiceClient(conn) - s, err := c.StreamingCall(context.Background()) - if err != nil { - grpclog.Printf("%v.StreamingCall(_) = _, %v", c, err) - } - streams[ic] = s - for j := 0; j < 100/len(conns); j++ { - doRPC(streams[ic], reqSize, respSize) + for is := 0; is < rpcCount; is++ { + c := testpb.NewBenchmarkServiceClient(conn) + s, err := c.StreamingCall(context.Background()) + if err != nil { + grpclog.Printf("%v.StreamingCall(_) = _, %v", c, err) + } + streams[ic*rpcCount+is] = s + for j := 0; j < 100/len(conns); j++ { + doRPC(streams[ic], reqSize, respSize) + } } } var wg sync.WaitGroup wg.Add(len(conns) * rpcCount) var mu sync.Mutex for ic, _ := range conns { - for j := 0; j < rpcCount; j++ { - go func() { + for is := 0; is < rpcCount; is++ { + go func(ic, is int) { defer wg.Done() for { done := make(chan bool) go func() { start := time.Now() - if err := doRPC(streams[ic], reqSize, respSize); err != nil { + if err := doRPC(streams[ic*rpcCount+is], reqSize, respSize); err != nil { done <- false return } @@ -264,7 +266,7 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, case <-done: } } - }() + }(ic, is) } } grpclog.Printf("close loop done, count: %v", rpcCount) From b54a56774d952ade6676d70f8dae8c17654c4446 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 26 Apr 2016 14:08:06 -0700 Subject: [PATCH 13/43] Update histogram creating options --- benchmark/worker/benchmark_client.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index f1ea2c9b..4726d818 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -142,9 +142,10 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) 
(*benchmarkClient } bc.histogram = stats.NewHistogram(stats.HistogramOptions{ - NumBuckets: int(math.Log(bc.histogramMaxPossible)/math.Log(1+bc.histogramGrowFactor)) + 1, - GrowthFactor: bc.histogramGrowFactor, - MinValue: 0, + NumBuckets: int(math.Log(bc.histogramMaxPossible)/math.Log(1+bc.histogramGrowFactor)) + 1, + GrowthFactor: bc.histogramGrowFactor, + BaseBucketSize: (1 + bc.histogramGrowFactor), + MinValue: 0, }) bc.stop = make(chan bool) From 61623241f44d5c3fcddd2f050bfabee52ce2332a Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 26 Apr 2016 16:25:14 -0700 Subject: [PATCH 14/43] Small fixes in worker main.go --- benchmark/worker/main.go | 45 +++++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 76606373..7be3bea7 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -71,8 +71,6 @@ func (byteBufCodec) String() string { type workerServer struct { stop chan<- bool serverPort int - // TODO move bc out of workerServer - bc *benchmarkClient } func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { @@ -92,6 +90,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er return err } + var out *testpb.ServerStatus switch argtype := in.Argtype.(type) { case *testpb.ServerArgs_Setup: grpclog.Printf("server setup received:") @@ -104,6 +103,11 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er bs.close() } bs = newbs + out = &testpb.ServerStatus{ + Stats: bs.getStats(), + Port: int32(bs.port), + Cores: int32(bs.cores), + } case *testpb.ServerArgs_Mark: grpclog.Printf("server mark received:") @@ -111,16 +115,16 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er if bs == nil { return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received") } + out = &testpb.ServerStatus{ + Stats: bs.getStats(), + Port: int32(bs.port), + Cores: int32(bs.cores), + } if argtype.Mark.Reset_ { bs.reset() } } - out := &testpb.ServerStatus{ - Stats: bs.getStats(), - Port: int32(bs.port), - Cores: int32(bs.cores), - } if err := stream.Send(out); err != nil { return err } @@ -130,11 +134,13 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er } func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { - + var bc *benchmarkClient for { in, err := stream.Recv() if err == io.EOF { - s.bc.shutdown() + if bc != nil { + bc.shutdown() + } return nil } if err != nil { @@ -145,28 +151,33 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er switch t := in.Argtype.(type) { case *testpb.ClientArgs_Setup: grpclog.Printf("client setup received:") - - bc, err := startBenchmarkClientWithSetup(t.Setup) + newbc, err := startBenchmarkClientWithSetup(t.Setup) if err != nil { return err } - s.bc = bc - out = &testpb.ClientStatus{ - Stats: s.bc.getStats(), + if bc != nil { + grpclog.Printf("client setup received when client already exists, shuting down the existing client") + bc.shutdown() } + bc = newbc + out = &testpb.ClientStatus{ + Stats: bc.getStats(), + } + case *testpb.ClientArgs_Mark: grpclog.Printf("client mark received:") grpclog.Printf(" - %v", t) - if s.bc == nil { + if bc == nil { return grpc.Errorf(codes.InvalidArgument, "client does not exist when mark received") } out = &testpb.ClientStatus{ - Stats: s.bc.getStats(), + Stats: bc.getStats(), } if t.Mark.Reset_ { - 
s.bc.reset() + bc.reset() } } + if err := stream.Send(out); err != nil { return err } From bc7b50cb4191c3a4567427224ba3034b41eb174d Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 26 Apr 2016 16:25:36 -0700 Subject: [PATCH 15/43] Minor fixes Add license Remove comments and debug logging Change client status timeElapsed Change error code to unimplemented Change warm up rpc size Move conn.Close go routine Defer server and client shutdown Add histogram sumOfSquare clear Change worker stop Change benchmark go routine argument Use same lock inside and out of benchmark --- benchmark/benchmark.go | 5 - benchmark/stats/histogram.go | 1 + benchmark/worker/benchmark_client.go | 233 ++++++++++++++------------- benchmark/worker/benchmark_server.go | 5 +- benchmark/worker/main.go | 32 ++-- 5 files changed, 143 insertions(+), 133 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 19612542..67aa6215 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -156,7 +156,6 @@ func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { - // grpclog.Print("/BenchmarkService/UnaryCall RPC failed: ", err) return grpc.Errorf(grpc.Code(err), "/BenchmarkService/UnaryCall RPC failed: %v", grpc.ErrorDesc(err)) } return nil @@ -171,11 +170,9 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re Payload: pl, } if err := stream.Send(req); err != nil { - // grpclog.Printf("StreamingCall(_).Send: %v", err) return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Send: %v", grpc.ErrorDesc(err)) } if _, err := stream.Recv(); err != nil { - // grpclog.Printf("StreamingCall(_).Recv: %v", err) return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Recv: %v", grpc.ErrorDesc(err)) } return nil @@ -184,12 +181,10 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re // DoGenericStreamingRoundTrip performs a round trip for a single streaming rpc, using custom codec. func DoGenericStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { if err := stream.(grpc.ClientStream).SendMsg(make([]byte, reqSize)); err != nil { - // grpclog.Printf("StreamingCall(_).(ClientStream).SendMsg: %v", err) return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).SendMsg: %v", grpc.ErrorDesc(err)) } m := make([]byte, respSize) if err := stream.(grpc.ClientStream).RecvMsg(m); err != nil { - // grpclog.Printf("StreamingCall(_).(ClientStream).RecvMsg: %v", err) return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).RecvMsg: %v", grpc.ErrorDesc(err)) } return nil diff --git a/benchmark/stats/histogram.go b/benchmark/stats/histogram.go index 60763960..13bde953 100644 --- a/benchmark/stats/histogram.go +++ b/benchmark/stats/histogram.go @@ -148,6 +148,7 @@ func NewHistogram(opts HistogramOptions) *Histogram { func (h *Histogram) Clear() { h.count = newCounter() h.sum = newCounter() + h.sumOfSquares = newCounter() h.tracker = newTracker() for _, v := range h.buckets { v.count = newCounter() diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 4726d818..8c74e1d9 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -1,3 +1,36 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + package main import ( @@ -17,59 +50,57 @@ import ( ) var ( - caFile = "/usr/local/google/home/menghanl/go/src/google.golang.org/grpc/benchmark/server/testdata/ca.pem" + caFile = "benchmark/server/testdata/ca.pem" ) type benchmarkClient struct { - conns []*grpc.ClientConn - histogramGrowFactor float64 - histogramMaxPossible float64 - stop chan bool - mu sync.RWMutex - lastResetTime time.Time - histogram *stats.Histogram + stop chan bool + mu sync.RWMutex + lastResetTime time.Time + histogram *stats.Histogram } func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient, error) { var opts []grpc.DialOption - grpclog.Printf(" - client type: %v", setup.ClientType) + // Some setup options are ignored: + // - client type: + // will always create sync client + // - async client threads. + // - core list + grpclog.Printf(" * client type: %v (ignored, always creates sync client)", setup.ClientType) switch setup.ClientType { - // Ignore client type case testpb.ClientType_SYNC_CLIENT: case testpb.ClientType_ASYNC_CLIENT: default: return nil, grpc.Errorf(codes.InvalidArgument, "unknow client type: %v", setup.ClientType) } + grpclog.Printf(" * async client threads: %v (ignored)", setup.AsyncClientThreads) + grpclog.Printf(" * core list: %v (ignored)", setup.CoreList) grpclog.Printf(" - security params: %v", setup.SecurityParams) if setup.SecurityParams != nil { - creds, err := credentials.NewClientTLSFromFile(caFile, setup.SecurityParams.ServerHostOverride) + creds, err := credentials.NewClientTLSFromFile(Abs(caFile), setup.SecurityParams.ServerHostOverride) if err != nil { - grpclog.Fatalf("failed to create TLS credentials %v", err) + return nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { opts = append(opts, grpc.WithInsecure()) } - // Ignore async client threads. 
- grpclog.Printf(" - core limit: %v", setup.CoreLimit) + // Use one cpu core by default + numOfCores := 1 if setup.CoreLimit > 0 { - runtime.GOMAXPROCS(int(setup.CoreLimit)) - } else { - // runtime.GOMAXPROCS(runtime.NumCPU()) - runtime.GOMAXPROCS(1) + numOfCores = int(setup.CoreLimit) } + runtime.GOMAXPROCS(numOfCores) - // TODO payload config grpclog.Printf(" - payload config: %v", setup.PayloadConfig) var payloadReqSize, payloadRespSize int var payloadType string if setup.PayloadConfig != nil { - // TODO payload config - grpclog.Printf("payload config: %v", setup.PayloadConfig) switch c := setup.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.WithCodec(byteBufCodec{})) @@ -81,41 +112,31 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient payloadRespSize = int(c.SimpleParams.RespSize) payloadType = "protobuf" case *testpb.PayloadConfig_ComplexParams: - return nil, grpc.Errorf(codes.InvalidArgument, "unsupported payload config: %v", setup.PayloadConfig) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", setup.PayloadConfig) default: return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) } } - // TODO core list - grpclog.Printf(" - core list: %v", setup.CoreList) - - grpclog.Printf(" - histogram params: %v", setup.HistogramParams) - grpclog.Printf(" - server targets: %v", setup.ServerTargets) grpclog.Printf(" - rpcs per chann: %v", setup.OutstandingRpcsPerChannel) grpclog.Printf(" - channel number: %v", setup.ClientChannels) - rpcCount, connCount := int(setup.OutstandingRpcsPerChannel), int(setup.ClientChannels) + rpcCountPerConn, connCount := int(setup.OutstandingRpcsPerChannel), int(setup.ClientChannels) grpclog.Printf(" - load params: %v", setup.LoadParams) - // TODO distribution var dist *int switch lp := setup.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: - grpclog.Printf(" - %v", lp.ClosedLoop) case *testpb.LoadParams_Poisson: grpclog.Printf(" - %v", lp.Poisson) - return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) // TODO poisson case *testpb.LoadParams_Uniform: - grpclog.Printf(" - %v", lp.Uniform) - return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) case *testpb.LoadParams_Determ: - grpclog.Printf(" - %v", lp.Determ) - return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) case *testpb.LoadParams_Pareto: - grpclog.Printf(" - %v", lp.Pareto) - return nil, grpc.Errorf(codes.InvalidArgument, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) default: return nil, grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", setup.LoadParams) } @@ -131,64 +152,67 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) } - bc := &benchmarkClient{ - conns: make([]*grpc.ClientConn, connCount), - histogramGrowFactor: setup.HistogramParams.Resolution, - histogramMaxPossible: setup.HistogramParams.MaxPossible, - 
} + grpclog.Printf(" - histogram params: %v", setup.HistogramParams) + grpclog.Printf(" - server targets: %v", setup.ServerTargets) + + conns := make([]*grpc.ClientConn, connCount) for connIndex := 0; connIndex < connCount; connIndex++ { - bc.conns[connIndex] = benchmark.NewClientConn(setup.ServerTargets[connIndex%len(setup.ServerTargets)], opts...) + conns[connIndex] = benchmark.NewClientConn(setup.ServerTargets[connIndex%len(setup.ServerTargets)], opts...) } - bc.histogram = stats.NewHistogram(stats.HistogramOptions{ - NumBuckets: int(math.Log(bc.histogramMaxPossible)/math.Log(1+bc.histogramGrowFactor)) + 1, - GrowthFactor: bc.histogramGrowFactor, - BaseBucketSize: (1 + bc.histogramGrowFactor), + hist := stats.NewHistogram(stats.HistogramOptions{ + NumBuckets: int(math.Log(setup.HistogramParams.MaxPossible)/math.Log(1+setup.HistogramParams.Resolution)) + 1, + GrowthFactor: setup.HistogramParams.Resolution, + BaseBucketSize: (1 + setup.HistogramParams.Resolution), MinValue: 0, }) + stop := make(chan bool) + var mu sync.RWMutex - bc.stop = make(chan bool) switch rpcType { case "unary": if dist == nil { - doCloseLoopUnaryBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, bc.stop) + doCloseLoopUnaryBenchmark(hist, mu, conns, rpcCountPerConn, payloadReqSize, payloadRespSize, stop) } // TODO else do open loop case "streaming": if dist == nil { - doCloseLoopStreamingBenchmark(bc.histogram, bc.conns, rpcCount, payloadReqSize, payloadRespSize, payloadType, bc.stop) + doCloseLoopStreamingBenchmark(hist, mu, conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType, stop) } // TODO else do open loop } - bc.mu.Lock() - defer bc.mu.Unlock() - bc.lastResetTime = time.Now() - return bc, nil + return &benchmarkClient{ + stop: stop, + mu: mu, + lastResetTime: time.Now(), + histogram: hist, + }, nil } -func doCloseLoopUnaryBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, stop <-chan bool) { - +func doCloseLoopUnaryBenchmark(h *stats.Histogram, mu sync.RWMutex, conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, stop <-chan bool) { clients := make([]testpb.BenchmarkServiceClient, len(conns)) for ic, conn := range conns { clients[ic] = testpb.NewBenchmarkServiceClient(conn) - for j := 0; j < 100/len(conns); j++ { - benchmark.DoUnaryCall(clients[ic], reqSize, respSize) + // Do some warm up. + for j := 0; j < 10; j++ { + benchmark.DoUnaryCall(clients[ic], 1, 1) } } - var wg sync.WaitGroup - wg.Add(len(conns) * rpcCount) - var mu sync.Mutex - for ic, _ := range conns { - for j := 0; j < rpcCount; j++ { - go func() { + for ic, conn := range conns { + // For each connection, create rpcCountPerConn goroutines to do rpc. + // Close this connection after all go routines finish. 
+ var wg sync.WaitGroup + wg.Add(rpcCountPerConn) + for j := 0; j < rpcCountPerConn; j++ { + go func(client testpb.BenchmarkServiceClient) { defer wg.Done() for { done := make(chan bool) go func() { start := time.Now() - if err := benchmark.DoUnaryCall(clients[ic], reqSize, respSize); err != nil { + if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { done <- false return } @@ -200,57 +224,54 @@ func doCloseLoopUnaryBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpc }() select { case <-stop: - grpclog.Printf("stopped") return case <-done: } } - }() + }(clients[ic]) } + go func(conn *grpc.ClientConn) { + wg.Wait() + conn.Close() + }(conn) } - grpclog.Printf("close loop done, count: %v", rpcCount) - go func() { - wg.Wait() - for _, c := range conns { - c.Close() - } - grpclog.Printf("conns closed") - }() } -func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, rpcCount int, reqSize int, respSize int, payloadType string, stop <-chan bool) { +func doCloseLoopStreamingBenchmark(h *stats.Histogram, mu sync.RWMutex, conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string, stop <-chan bool) { var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { doRPC = benchmark.DoGenericStreamingRoundTrip } else { doRPC = benchmark.DoStreamingRoundTrip } - streams := make([]testpb.BenchmarkService_StreamingCallClient, len(conns)*rpcCount) + streams := make([]testpb.BenchmarkService_StreamingCallClient, len(conns)*rpcCountPerConn) for ic, conn := range conns { - for is := 0; is < rpcCount; is++ { + for j := 0; j < rpcCountPerConn; j++ { c := testpb.NewBenchmarkServiceClient(conn) s, err := c.StreamingCall(context.Background()) if err != nil { - grpclog.Printf("%v.StreamingCall(_) = _, %v", c, err) + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) } - streams[ic*rpcCount+is] = s - for j := 0; j < 100/len(conns); j++ { - doRPC(streams[ic], reqSize, respSize) + streams[ic*rpcCountPerConn+j] = s + // Do some warm up. + for j := 0; j < 10; j++ { + doRPC(streams[ic], 1, 1) } } } - var wg sync.WaitGroup - wg.Add(len(conns) * rpcCount) - var mu sync.Mutex - for ic, _ := range conns { - for is := 0; is < rpcCount; is++ { - go func(ic, is int) { + for ic, conn := range conns { + // For each connection, create rpcCountPerConn goroutines to do rpc. + // Close this connection after all go routines finish. 
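A minimal standalone sketch of the close-loop pattern used in doCloseLoopUnaryBenchmark above, with gRPC stripped out so it runs on its own: doOneRPC stands in for benchmark.DoUnaryCall, a plain slice stands in for the histogram, and checking the stop channel between calls is a simplification of the per-call done/stop select in the patch.

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		mu        sync.Mutex
		latencies []time.Duration   // stand-in for the stats.Histogram
		stop      = make(chan bool) // closed when the benchmark should shut down
		wg        sync.WaitGroup
	)
	doOneRPC := func() { time.Sleep(time.Millisecond) } // stand-in for benchmark.DoUnaryCall

	const rpcCountPerConn = 4
	wg.Add(rpcCountPerConn)
	for i := 0; i < rpcCountPerConn; i++ {
		go func() {
			defer wg.Done()
			for {
				select {
				case <-stop: // shutdown requested
					return
				default:
				}
				start := time.Now()
				doOneRPC()
				mu.Lock() // record the latency under the shared lock
				latencies = append(latencies, time.Since(start))
				mu.Unlock()
			}
		}()
	}

	time.Sleep(50 * time.Millisecond) // let the workers issue a few calls
	close(stop)                       // ask every worker to stop
	wg.Wait()                         // only then is it safe to close the connection

	fmt.Printf("completed %d RPCs\n", len(latencies))
}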
+ var wg sync.WaitGroup + wg.Add(rpcCountPerConn) + for j := 0; j < rpcCountPerConn; j++ { + go func(stream testpb.BenchmarkService_StreamingCallClient) { defer wg.Done() for { done := make(chan bool) go func() { start := time.Now() - if err := doRPC(streams[ic*rpcCount+is], reqSize, respSize); err != nil { + if err := doRPC(stream, reqSize, respSize); err != nil { done <- false return } @@ -262,47 +283,39 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, conns []*grpc.ClientConn, }() select { case <-stop: - grpclog.Printf("stopped") return case <-done: } } - }(ic, is) + }(streams[ic*rpcCountPerConn+j]) } + go func(conn *grpc.ClientConn) { + wg.Wait() + conn.Close() + }(conn) } - grpclog.Printf("close loop done, count: %v", rpcCount) - go func() { - wg.Wait() - for _, c := range conns { - c.Close() - } - grpclog.Printf("conns closed") - }() } func (bc *benchmarkClient) getStats() *testpb.ClientStats { bc.mu.RLock() - // time.Sleep(1 * time.Second) defer bc.mu.RUnlock() + timeElapsed := time.Since(bc.lastResetTime).Seconds() + histogramValue := bc.histogram.Value() b := make([]uint32, len(histogramValue.Buckets)) - tempCount := make(map[int64]int) for i, v := range histogramValue.Buckets { b[i] = uint32(v.Count) - tempCount[v.Count] += 1 } - grpclog.Printf("+++++\n%v count: %v\n+++++", tempCount, histogramValue.Count) return &testpb.ClientStats{ Latencies: &testpb.HistogramData{ - Bucket: b, - MinSeen: float64(histogramValue.Min), - MaxSeen: float64(histogramValue.Max), - Sum: float64(histogramValue.Sum), - // TODO change to squares - SumOfSquares: float64(histogramValue.Sum), + Bucket: b, + MinSeen: float64(histogramValue.Min), + MaxSeen: float64(histogramValue.Max), + Sum: float64(histogramValue.Sum), + SumOfSquares: float64(histogramValue.SumOfSquares), Count: float64(histogramValue.Count), }, - TimeElapsed: time.Since(bc.lastResetTime).Seconds(), + TimeElapsed: timeElapsed, TimeUser: 0, TimeSystem: 0, } diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index b6c2cd50..b8f90840 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -117,7 +117,7 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( case *testpb.PayloadConfig_SimpleParams: p, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) case *testpb.PayloadConfig_ComplexParams: - return nil, grpc.Errorf(codes.InvalidArgument, "unsupported payload config: %v", setup.PayloadConfig) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", setup.PayloadConfig) default: return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) } @@ -128,8 +128,7 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( grpclog.Printf("benchmark server listening at port %v", p) - bs := &benchmarkServer{port: p, cores: numOfCores, close: close, lastResetTime: time.Now()} - return bs, nil + return &benchmarkServer{port: p, cores: numOfCores, close: close, lastResetTime: time.Now()}, nil } func (bs *benchmarkServer) getStats() *testpb.ServerStats { diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 7be3bea7..52990929 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -75,15 +75,16 @@ type workerServer struct { func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { var bs *benchmarkServer + defer func() { + // Close benchmark server when stream ends. 
+ grpclog.Printf("closing benchmark server") + if bs != nil { + bs.close() + } + }() for { in, err := stream.Recv() if err == io.EOF { - // Close benchmark server when stream ends. - grpclog.Printf("closing benchmark server") - if bs != nil { - bs.close() - bs = nil - } return nil } if err != nil { @@ -135,12 +136,16 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { var bc *benchmarkClient + defer func() { + // Shut down benchmark client when stream ends. + grpclog.Printf("shuting down benchmark client") + if bc != nil { + bc.shutdown() + } + }() for { in, err := stream.Recv() if err == io.EOF { - if bc != nil { - bc.shutdown() - } return nil } if err != nil { @@ -212,13 +217,10 @@ func main() { serverPort: *serverPort, }) - stopped := make(chan bool) go func() { - s.Serve(lis) - stopped <- true + <-stop + s.Stop() }() - <-stop - s.Stop() - <-stopped + s.Serve(lis) } From 63410e345381482c8f4cec67f2f7f71ed6d7da34 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 27 Apr 2016 15:53:45 -0700 Subject: [PATCH 16/43] Change doBenchmark to client struct member function --- benchmark/worker/benchmark_client.go | 49 +++++++++++++--------------- 1 file changed, 23 insertions(+), 26 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 8c74e1d9..703ddd5d 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -161,37 +161,34 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient conns[connIndex] = benchmark.NewClientConn(setup.ServerTargets[connIndex%len(setup.ServerTargets)], opts...) } - hist := stats.NewHistogram(stats.HistogramOptions{ - NumBuckets: int(math.Log(setup.HistogramParams.MaxPossible)/math.Log(1+setup.HistogramParams.Resolution)) + 1, - GrowthFactor: setup.HistogramParams.Resolution, - BaseBucketSize: (1 + setup.HistogramParams.Resolution), - MinValue: 0, - }) - stop := make(chan bool) - var mu sync.RWMutex + bc := benchmarkClient{ + histogram: stats.NewHistogram(stats.HistogramOptions{ + NumBuckets: int(math.Log(setup.HistogramParams.MaxPossible)/math.Log(1+setup.HistogramParams.Resolution)) + 1, + GrowthFactor: setup.HistogramParams.Resolution, + BaseBucketSize: (1 + setup.HistogramParams.Resolution), + MinValue: 0, + }), + stop: make(chan bool), + lastResetTime: time.Now(), + } switch rpcType { case "unary": if dist == nil { - doCloseLoopUnaryBenchmark(hist, mu, conns, rpcCountPerConn, payloadReqSize, payloadRespSize, stop) + bc.doCloseLoopUnaryBenchmark(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) } // TODO else do open loop case "streaming": if dist == nil { - doCloseLoopStreamingBenchmark(hist, mu, conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType, stop) + bc.doCloseLoopStreamingBenchmark(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) } // TODO else do open loop } - return &benchmarkClient{ - stop: stop, - mu: mu, - lastResetTime: time.Now(), - histogram: hist, - }, nil + return &bc, nil } -func doCloseLoopUnaryBenchmark(h *stats.Histogram, mu sync.RWMutex, conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, stop <-chan bool) { +func (bc *benchmarkClient) doCloseLoopUnaryBenchmark(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { clients := make([]testpb.BenchmarkServiceClient, len(conns)) for ic, conn := range conns { clients[ic] = 
testpb.NewBenchmarkServiceClient(conn) @@ -217,13 +214,13 @@ func doCloseLoopUnaryBenchmark(h *stats.Histogram, mu sync.RWMutex, conns []*grp return } elapse := time.Since(start) - mu.Lock() - h.Add(int64(elapse / time.Nanosecond)) - mu.Unlock() + bc.mu.Lock() + bc.histogram.Add(int64(elapse / time.Nanosecond)) + bc.mu.Unlock() done <- true }() select { - case <-stop: + case <-bc.stop: return case <-done: } @@ -237,7 +234,7 @@ func doCloseLoopUnaryBenchmark(h *stats.Histogram, mu sync.RWMutex, conns []*grp } } -func doCloseLoopStreamingBenchmark(h *stats.Histogram, mu sync.RWMutex, conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string, stop <-chan bool) { +func (bc *benchmarkClient) doCloseLoopStreamingBenchmark(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { doRPC = benchmark.DoGenericStreamingRoundTrip @@ -276,13 +273,13 @@ func doCloseLoopStreamingBenchmark(h *stats.Histogram, mu sync.RWMutex, conns [] return } elapse := time.Since(start) - mu.Lock() - h.Add(int64(elapse / time.Nanosecond)) - mu.Unlock() + bc.mu.Lock() + bc.histogram.Add(int64(elapse / time.Nanosecond)) + bc.mu.Unlock() done <- true }() select { - case <-stop: + case <-bc.stop: return case <-done: } From 7a5269acfe4ab305dcd5fc96d50b367acd40028a Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 14:35:25 -0700 Subject: [PATCH 17/43] Change StartServer back to return address rather than port number --- benchmark/benchmark.go | 12 ++++++------ benchmark/benchmark_test.go | 9 ++++----- benchmark/worker/benchmark_server.go | 16 +++++++++++----- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 67aa6215..5a9c733d 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -93,8 +93,8 @@ func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallS // StartServer starts a gRPC server serving a benchmark service on the given // address, which may be something like "localhost:0". It returns its listen -// port number and a function to stop the server. -func StartServer(addr string, opts ...grpc.ServerOption) (int, func()) { +// address and a function to stop the server. +func StartServer(addr string, opts ...grpc.ServerOption) (string, func()) { lis, err := net.Listen("tcp", addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) @@ -102,7 +102,7 @@ func StartServer(addr string, opts ...grpc.ServerOption) (int, func()) { s := grpc.NewServer(opts...) testpb.RegisterBenchmarkServiceServer(s, &testServer{}) go s.Serve(lis) - return lis.Addr().(*net.TCPAddr).Port, func() { + return lis.Addr().String(), func() { s.Stop() } } @@ -133,8 +133,8 @@ func (s *genericTestServer) StreamingCall(stream testpb.BenchmarkService_Streami } // StartGenericServer starts a benchmark service server that supports custom codec. -// It returns its listen port number and a function to stop the server. -func StartGenericServer(addr string, reqSize, respSize int32, opts ...grpc.ServerOption) (int, func()) { +// It returns its listen address and a function to stop the server. 
+func StartGenericServer(addr string, reqSize, respSize int32, opts ...grpc.ServerOption) (string, func()) { lis, err := net.Listen("tcp", addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) @@ -142,7 +142,7 @@ func StartGenericServer(addr string, reqSize, respSize int32, opts ...grpc.Serve s := grpc.NewServer(opts...) testpb.RegisterBenchmarkServiceServer(s, &genericTestServer{reqSize: reqSize, respSize: respSize}) go s.Serve(lis) - return lis.Addr().(*net.TCPAddr).Port, func() { + return lis.Addr().String(), func() { s.Stop() } } diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index d7dfa25e..65e97ad7 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -2,7 +2,6 @@ package benchmark import ( "os" - "strconv" "sync" "testing" "time" @@ -17,9 +16,9 @@ import ( func runUnary(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - targetPort, stopper := StartServer("localhost:0") + target, stopper := StartServer("localhost:0") defer stopper() - conn := NewClientConn(":" + strconv.Itoa(targetPort)) + conn := NewClientConn(target) tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. @@ -60,9 +59,9 @@ func runUnary(b *testing.B, maxConcurrentCalls int) { func runStream(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - targetPort, stopper := StartServer("localhost:0") + target, stopper := StartServer("localhost:0") defer stopper() - conn := NewClientConn(":" + strconv.Itoa(targetPort)) + conn := NewClientConn(target) tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index b8f90840..f6b91412 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -36,6 +36,7 @@ package main import ( "runtime" "strconv" + "strings" "sync" "time" @@ -107,15 +108,15 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( } grpclog.Printf(" - payload config: %v", setup.PayloadConfig) - var p int + var addr string var close func() if setup.PayloadConfig != nil { switch payload := setup.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) - p, close = benchmark.StartGenericServer(":"+strconv.Itoa(port), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) + addr, close = benchmark.StartGenericServer(":"+strconv.Itoa(port), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) case *testpb.PayloadConfig_SimpleParams: - p, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) + addr, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) case *testpb.PayloadConfig_ComplexParams: return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", setup.PayloadConfig) default: @@ -123,10 +124,15 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( } } else { // Start protobuf server is payload config is nil. - p, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) + addr, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) 
} - grpclog.Printf("benchmark server listening at port %v", p) + grpclog.Printf("benchmark server listening at %v", addr) + addrSplitted := strings.Split(addr, ":") + p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1]) + if err != nil { + grpclog.Fatalf("failed to get port number from server address: %v", err) + } return &benchmarkServer{port: p, cores: numOfCores, close: close, lastResetTime: time.Now()}, nil } From add9c0b9f2f28679a94da4c643f74282b932ca23 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 14:38:30 -0700 Subject: [PATCH 18/43] Change NewClientConn so that all options need to be specified by caller --- benchmark/benchmark.go | 3 --- benchmark/benchmark_test.go | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 5a9c733d..5bb87ea0 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -192,9 +192,6 @@ func DoGenericStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallCli // NewClientConn creates a gRPC client connection to addr. func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { - if len(opts) <= 0 { - opts = append(opts, grpc.WithInsecure()) - } conn, err := grpc.Dial(addr, opts...) if err != nil { grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index 65e97ad7..ccf0f457 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -18,7 +18,7 @@ func runUnary(b *testing.B, maxConcurrentCalls int) { b.StopTimer() target, stopper := StartServer("localhost:0") defer stopper() - conn := NewClientConn(target) + conn := NewClientConn(target, grpc.WithInsecure()) tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. @@ -61,7 +61,7 @@ func runStream(b *testing.B, maxConcurrentCalls int) { b.StopTimer() target, stopper := StartServer("localhost:0") defer stopper() - conn := NewClientConn(target) + conn := NewClientConn(target, grpc.WithInsecure()) tc := testpb.NewBenchmarkServiceClient(conn) // Warm up connection. 
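With NewClientConn no longer defaulting to grpc.WithInsecure() (the change just above), every caller now has to state its own dial options. A minimal sketch of such a caller, assuming the standard google.golang.org/grpc import layout and a placeholder address:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
)

func main() {
	// Nothing is defaulted inside NewClientConn anymore, so the caller
	// decides explicitly between WithInsecure and WithTransportCredentials.
	conn := benchmark.NewClientConn("localhost:50051", grpc.WithInsecure())
	defer conn.Close()

	client := testpb.NewBenchmarkServiceClient(conn)
	if _, err := client.UnaryCall(context.Background(), &testpb.SimpleRequest{}); err != nil {
		log.Fatalf("UnaryCall failed: %v", err)
	}
}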
From 988c93472001a686b41fc25647125d342ad055a3 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 14:42:16 -0700 Subject: [PATCH 19/43] Rename genericServer to byteBufServer --- benchmark/benchmark.go | 16 ++++++++-------- benchmark/worker/benchmark_client.go | 2 +- benchmark/worker/benchmark_server.go | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 5bb87ea0..574c4a92 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -107,16 +107,16 @@ func StartServer(addr string, opts ...grpc.ServerOption) (string, func()) { } } -type genericTestServer struct { +type byteBufServer struct { reqSize int32 respSize int32 } -func (s *genericTestServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { +func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil } -func (s *genericTestServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { +func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { for { m := make([]byte, s.reqSize) err := stream.(grpc.ServerStream).RecvMsg(m) @@ -132,15 +132,15 @@ func (s *genericTestServer) StreamingCall(stream testpb.BenchmarkService_Streami } } -// StartGenericServer starts a benchmark service server that supports custom codec. +// StartbyteBufServer starts a benchmark service server that supports custom codec. // It returns its listen address and a function to stop the server. -func StartGenericServer(addr string, reqSize, respSize int32, opts ...grpc.ServerOption) (string, func()) { +func StartByteBufServer(addr string, reqSize, respSize int32, opts ...grpc.ServerOption) (string, func()) { lis, err := net.Listen("tcp", addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } s := grpc.NewServer(opts...) - testpb.RegisterBenchmarkServiceServer(s, &genericTestServer{reqSize: reqSize, respSize: respSize}) + testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{reqSize: reqSize, respSize: respSize}) go s.Serve(lis) return lis.Addr().String(), func() { s.Stop() @@ -178,8 +178,8 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re return nil } -// DoGenericStreamingRoundTrip performs a round trip for a single streaming rpc, using custom codec. -func DoGenericStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { +// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using custom codec. 
+func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { if err := stream.(grpc.ClientStream).SendMsg(make([]byte, reqSize)); err != nil { return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).SendMsg: %v", grpc.ErrorDesc(err)) } diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 703ddd5d..a5b1dce5 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -237,7 +237,7 @@ func (bc *benchmarkClient) doCloseLoopUnaryBenchmark(conns []*grpc.ClientConn, r func (bc *benchmarkClient) doCloseLoopStreamingBenchmark(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { - doRPC = benchmark.DoGenericStreamingRoundTrip + doRPC = benchmark.DoByteBufStreamingRoundTrip } else { doRPC = benchmark.DoStreamingRoundTrip } diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index f6b91412..1b58a854 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -114,7 +114,7 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( switch payload := setup.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) - addr, close = benchmark.StartGenericServer(":"+strconv.Itoa(port), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) + addr, close = benchmark.StartByteBufServer(":"+strconv.Itoa(port), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) case *testpb.PayloadConfig_SimpleParams: addr, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) case *testpb.PayloadConfig_ComplexParams: From 997b80914b4634b9c1cbc9e5ae51cf2d75e42906 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 15:49:24 -0700 Subject: [PATCH 20/43] Modify byteBufCodec to use *[]byte and remove reqSize from bytebufserver --- benchmark/benchmark.go | 19 ++++++++++--------- benchmark/worker/benchmark_server.go | 2 +- benchmark/worker/main.go | 13 +++++++++++-- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 574c4a92..56e2b78f 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -108,7 +108,6 @@ func StartServer(addr string, opts ...grpc.ServerOption) (string, func()) { } type byteBufServer struct { - reqSize int32 respSize int32 } @@ -118,15 +117,16 @@ func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { for { - m := make([]byte, s.reqSize) - err := stream.(grpc.ServerStream).RecvMsg(m) + var in []byte + err := stream.(grpc.ServerStream).RecvMsg(&in) if err == io.EOF { return nil } if err != nil { return err } - if err := stream.(grpc.ServerStream).SendMsg(make([]byte, s.respSize)); err != nil { + out := make([]byte, s.respSize) + if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil { return err } } @@ -134,13 +134,13 @@ func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCa // StartbyteBufServer starts a benchmark service server that supports custom codec. // It returns its listen address and a function to stop the server. 
-func StartByteBufServer(addr string, reqSize, respSize int32, opts ...grpc.ServerOption) (string, func()) { +func StartByteBufServer(addr string, respSize int32, opts ...grpc.ServerOption) (string, func()) { lis, err := net.Listen("tcp", addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } s := grpc.NewServer(opts...) - testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{reqSize: reqSize, respSize: respSize}) + testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) go s.Serve(lis) return lis.Addr().String(), func() { s.Stop() @@ -180,11 +180,12 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re // DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using custom codec. func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { - if err := stream.(grpc.ClientStream).SendMsg(make([]byte, reqSize)); err != nil { + out := make([]byte, reqSize) + if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).SendMsg: %v", grpc.ErrorDesc(err)) } - m := make([]byte, respSize) - if err := stream.(grpc.ClientStream).RecvMsg(m); err != nil { + var in []byte + if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).RecvMsg: %v", grpc.ErrorDesc(err)) } return nil diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index 1b58a854..5e2fd7b3 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -114,7 +114,7 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( switch payload := setup.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) - addr, close = benchmark.StartByteBufServer(":"+strconv.Itoa(port), payload.BytebufParams.ReqSize, payload.BytebufParams.RespSize, opts...) + addr, close = benchmark.StartByteBufServer(":"+strconv.Itoa(port), payload.BytebufParams.RespSize, opts...) case *testpb.PayloadConfig_SimpleParams: addr, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) 
case *testpb.PayloadConfig_ComplexParams: diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 52990929..72ed63d4 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -35,6 +35,7 @@ package main import ( "flag" + "fmt" "io" "net" "runtime" @@ -56,11 +57,19 @@ type byteBufCodec struct { } func (byteBufCodec) Marshal(v interface{}) ([]byte, error) { - return v.([]byte), nil + b, ok := v.(*[]byte) + if !ok { + return nil, fmt.Errorf("Failed to marshal: %v is not type of *[]byte") + } + return *b, nil } func (byteBufCodec) Unmarshal(data []byte, v interface{}) error { - v = data + b, ok := v.(*[]byte) + if !ok { + return fmt.Errorf("Failed to marshal: %v is not type of *[]byte") + } + *b = data return nil } From dad9308fa3c8bce6a801a4723ac2664aca476c5d Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 16:02:27 -0700 Subject: [PATCH 21/43] Address review comments --- benchmark/benchmark.go | 4 ++++ benchmark/grpc_testing/control.proto | 2 +- benchmark/grpc_testing/messages.proto | 3 +-- benchmark/grpc_testing/payloads.proto | 2 +- benchmark/grpc_testing/services.proto | 2 +- benchmark/grpc_testing/stats.proto | 6 +++--- benchmark/stats/histogram.go | 1 + 7 files changed, 12 insertions(+), 8 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 56e2b78f..208a5431 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -173,6 +173,10 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Send: %v", grpc.ErrorDesc(err)) } if _, err := stream.Recv(); err != nil { + // EOF should be a valid error here. + if err == io.EOF { + return nil + } return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Recv: %v", grpc.ErrorDesc(err)) } return nil diff --git a/benchmark/grpc_testing/control.proto b/benchmark/grpc_testing/control.proto index 4913c86a..e0fe0ec7 100644 --- a/benchmark/grpc_testing/control.proto +++ b/benchmark/grpc_testing/control.proto @@ -1,4 +1,4 @@ -// Copyright 2015, Google Inc. +// Copyright 2016, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without diff --git a/benchmark/grpc_testing/messages.proto b/benchmark/grpc_testing/messages.proto index a063b470..b1abc9e8 100644 --- a/benchmark/grpc_testing/messages.proto +++ b/benchmark/grpc_testing/messages.proto @@ -1,5 +1,4 @@ - -// Copyright 2015-2016, Google Inc. +// Copyright 2016, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without diff --git a/benchmark/grpc_testing/payloads.proto b/benchmark/grpc_testing/payloads.proto index 7e5b2c61..056fe0c7 100644 --- a/benchmark/grpc_testing/payloads.proto +++ b/benchmark/grpc_testing/payloads.proto @@ -1,4 +1,4 @@ -// Copyright 2015, Google Inc. +// Copyright 2016, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without diff --git a/benchmark/grpc_testing/services.proto b/benchmark/grpc_testing/services.proto index 19b55c31..c2acca7f 100644 --- a/benchmark/grpc_testing/services.proto +++ b/benchmark/grpc_testing/services.proto @@ -1,4 +1,4 @@ -// Copyright 2015, Google Inc. +// Copyright 2016, Google Inc. // All rights reserved. 
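One note on the byteBufCodec change above: both Errorf format strings use %v without passing an operand, so go vet's printf check flags them and the rendered message contains "%!v(MISSING)". A minimal corrected sketch of the same codec shape, with the missing operand supplied (that fix is not part of the patch):

package main

import "fmt"

type byteBufCodec struct{}

func (byteBufCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("failed to marshal: %v is not type of *[]byte", v)
	}
	return *b, nil
}

func (byteBufCodec) Unmarshal(data []byte, v interface{}) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("failed to unmarshal: %v is not type of *[]byte", v)
	}
	*b = data // hand the received bytes back through the *[]byte argument
	return nil
}

func main() {
	var buf []byte
	if err := (byteBufCodec{}).Unmarshal([]byte("payload"), &buf); err != nil {
		panic(err)
	}
	fmt.Printf("unmarshalled %d bytes\n", len(buf))
}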
// // Redistribution and use in source and binary forms, with or without diff --git a/benchmark/grpc_testing/stats.proto b/benchmark/grpc_testing/stats.proto index f9d11611..9bc3cb21 100644 --- a/benchmark/grpc_testing/stats.proto +++ b/benchmark/grpc_testing/stats.proto @@ -1,4 +1,4 @@ -// Copyright 2015, Google Inc. +// Copyright 2016, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without @@ -45,8 +45,8 @@ message ServerStats { // Histogram params based on grpc/support/histogram.c message HistogramParams { - double resolution = 1; // first bucket is [0, 1 + resolution) - double max_possible = 2; // use enough buckets to allow this value + double resolution = 1; // first bucket is [0, 1 + resolution) + double max_possible = 2; // use enough buckets to allow this value } // Histogram data based on grpc/support/histogram.c diff --git a/benchmark/stats/histogram.go b/benchmark/stats/histogram.go index 13bde953..36dbc4fa 100644 --- a/benchmark/stats/histogram.go +++ b/benchmark/stats/histogram.go @@ -145,6 +145,7 @@ func NewHistogram(opts HistogramOptions) *Histogram { return &h } +// Clear resets all the content of histogram. func (h *Histogram) Clear() { h.count = newCounter() h.sum = newCounter() From 3a13913bbac6ba6aad589b16c615124b55eeebec Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 16:16:54 -0700 Subject: [PATCH 22/43] Address review comments Change startBenchmarkClient and startBenchmarkServer --- benchmark/worker/benchmark_client.go | 74 ++++++++++++++-------------- benchmark/worker/benchmark_server.go | 42 ++++++++-------- benchmark/worker/main.go | 6 +-- 3 files changed, 61 insertions(+), 61 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index a5b1dce5..facb9e7a 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -60,27 +60,27 @@ type benchmarkClient struct { histogram *stats.Histogram } -func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient, error) { +func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) { var opts []grpc.DialOption - // Some setup options are ignored: + // Some config options are ignored: // - client type: // will always create sync client // - async client threads. 
// - core list - grpclog.Printf(" * client type: %v (ignored, always creates sync client)", setup.ClientType) - switch setup.ClientType { + grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType) + switch config.ClientType { case testpb.ClientType_SYNC_CLIENT: case testpb.ClientType_ASYNC_CLIENT: default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknow client type: %v", setup.ClientType) + return nil, grpc.Errorf(codes.InvalidArgument, "unknow client type: %v", config.ClientType) } - grpclog.Printf(" * async client threads: %v (ignored)", setup.AsyncClientThreads) - grpclog.Printf(" * core list: %v (ignored)", setup.CoreList) + grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads) + grpclog.Printf(" * core list: %v (ignored)", config.CoreList) - grpclog.Printf(" - security params: %v", setup.SecurityParams) - if setup.SecurityParams != nil { - creds, err := credentials.NewClientTLSFromFile(Abs(caFile), setup.SecurityParams.ServerHostOverride) + grpclog.Printf(" - security params: %v", config.SecurityParams) + if config.SecurityParams != nil { + creds, err := credentials.NewClientTLSFromFile(Abs(caFile), config.SecurityParams.ServerHostOverride) if err != nil { return nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) } @@ -89,19 +89,19 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient opts = append(opts, grpc.WithInsecure()) } - grpclog.Printf(" - core limit: %v", setup.CoreLimit) + grpclog.Printf(" - core limit: %v", config.CoreLimit) // Use one cpu core by default numOfCores := 1 - if setup.CoreLimit > 0 { - numOfCores = int(setup.CoreLimit) + if config.CoreLimit > 0 { + numOfCores = int(config.CoreLimit) } runtime.GOMAXPROCS(numOfCores) - grpclog.Printf(" - payload config: %v", setup.PayloadConfig) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) var payloadReqSize, payloadRespSize int var payloadType string - if setup.PayloadConfig != nil { - switch c := setup.PayloadConfig.Payload.(type) { + if config.PayloadConfig != nil { + switch c := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.WithCodec(byteBufCodec{})) payloadReqSize = int(c.BytebufParams.ReqSize) @@ -112,60 +112,60 @@ func startBenchmarkClientWithSetup(setup *testpb.ClientConfig) (*benchmarkClient payloadRespSize = int(c.SimpleParams.RespSize) payloadType = "protobuf" case *testpb.PayloadConfig_ComplexParams: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", setup.PayloadConfig) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) + return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) } } - grpclog.Printf(" - rpcs per chann: %v", setup.OutstandingRpcsPerChannel) - grpclog.Printf(" - channel number: %v", setup.ClientChannels) + grpclog.Printf(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel) + grpclog.Printf(" - channel number: %v", config.ClientChannels) - rpcCountPerConn, connCount := int(setup.OutstandingRpcsPerChannel), int(setup.ClientChannels) + rpcCountPerConn, connCount := int(config.OutstandingRpcsPerChannel), int(config.ClientChannels) - grpclog.Printf(" - load params: %v", setup.LoadParams) + grpclog.Printf(" - load params: %v", config.LoadParams) var dist *int - 
switch lp := setup.LoadParams.Load.(type) { + switch lp := config.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: case *testpb.LoadParams_Poisson: grpclog.Printf(" - %v", lp.Poisson) - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) // TODO poisson case *testpb.LoadParams_Uniform: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) case *testpb.LoadParams_Determ: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) case *testpb.LoadParams_Pareto: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", setup.LoadParams) + return nil, grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) } - grpclog.Printf(" - rpc type: %v", setup.RpcType) + grpclog.Printf(" - rpc type: %v", config.RpcType) var rpcType string - switch setup.RpcType { + switch config.RpcType { case testpb.RpcType_UNARY: rpcType = "unary" case testpb.RpcType_STREAMING: rpcType = "streaming" default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", setup.RpcType) + return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) } - grpclog.Printf(" - histogram params: %v", setup.HistogramParams) - grpclog.Printf(" - server targets: %v", setup.ServerTargets) + grpclog.Printf(" - histogram params: %v", config.HistogramParams) + grpclog.Printf(" - server targets: %v", config.ServerTargets) conns := make([]*grpc.ClientConn, connCount) for connIndex := 0; connIndex < connCount; connIndex++ { - conns[connIndex] = benchmark.NewClientConn(setup.ServerTargets[connIndex%len(setup.ServerTargets)], opts...) + conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...) 
} bc := benchmarkClient{ histogram: stats.NewHistogram(stats.HistogramOptions{ - NumBuckets: int(math.Log(setup.HistogramParams.MaxPossible)/math.Log(1+setup.HistogramParams.Resolution)) + 1, - GrowthFactor: setup.HistogramParams.Resolution, - BaseBucketSize: (1 + setup.HistogramParams.Resolution), + NumBuckets: int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1, + GrowthFactor: config.HistogramParams.Resolution, + BaseBucketSize: (1 + config.HistogramParams.Resolution), MinValue: 0, }), stop: make(chan bool), diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index 5e2fd7b3..de937c0c 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -62,27 +62,27 @@ type benchmarkServer struct { lastResetTime time.Time } -func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { +func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { var opts []grpc.ServerOption - // Some setup options are ignored: + // Some config options are ignored: // - server type: // will always start sync server // - async server threads // - core list - grpclog.Printf(" * server type: %v (ignored, always starts sync server)", setup.ServerType) - switch setup.ServerType { + grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType) + switch config.ServerType { case testpb.ServerType_SYNC_SERVER: case testpb.ServerType_ASYNC_SERVER: case testpb.ServerType_ASYNC_GENERIC_SERVER: default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknow server type: %v", setup.ServerType) + return nil, grpc.Errorf(codes.InvalidArgument, "unknow server type: %v", config.ServerType) } - grpclog.Printf(" * async server threads: %v (ignored)", setup.AsyncServerThreads) - grpclog.Printf(" * core list: %v (ignored)", setup.CoreList) + grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads) + grpclog.Printf(" * core list: %v (ignored)", config.CoreList) - grpclog.Printf(" - security params: %v", setup.SecurityParams) - if setup.SecurityParams != nil { + grpclog.Printf(" - security params: %v", config.SecurityParams) + if config.SecurityParams != nil { creds, err := credentials.NewServerTLSFromFile(Abs(certFile), Abs(keyFile)) if err != nil { grpclog.Fatalf("failed to generate credentials %v", err) @@ -90,37 +90,37 @@ func startBenchmarkServerWithSetup(setup *testpb.ServerConfig, serverPort int) ( opts = append(opts, grpc.Creds(creds)) } - grpclog.Printf(" - core limit: %v", setup.CoreLimit) + grpclog.Printf(" - core limit: %v", config.CoreLimit) // Use one cpu core by default. numOfCores := 1 - if setup.CoreLimit > 0 { - numOfCores = int(setup.CoreLimit) + if config.CoreLimit > 0 { + numOfCores = int(config.CoreLimit) } runtime.GOMAXPROCS(numOfCores) - grpclog.Printf(" - port: %v", setup.Port) + grpclog.Printf(" - port: %v", config.Port) var port int - // Priority: setup.Port > serverPort > default (0). - if setup.Port != 0 { - port = int(setup.Port) + // Priority: config.Port > serverPort > default (0). 
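A small worked example of the HistogramOptions assembled from HistogramParams above. Latencies are added in nanoseconds (elapse / time.Nanosecond), so MaxPossible is a nanosecond bound; the concrete resolution and maxPossible values below are only illustrative, not taken from any config in these patches.

package main

import (
	"fmt"
	"math"
)

func main() {
	resolution := 0.01  // HistogramParams.Resolution: each bucket is 1% wider than the previous
	maxPossible := 60e9 // HistogramParams.MaxPossible: largest latency (ns) the histogram must cover

	growthFactor := resolution
	baseBucketSize := 1 + resolution // first bucket is [0, 1+resolution)
	numBuckets := int(math.Log(maxPossible)/math.Log(1+resolution)) + 1

	fmt.Println("growth factor:   ", growthFactor)
	fmt.Println("base bucket size:", baseBucketSize)
	fmt.Println("buckets:         ", numBuckets) // roughly 2,500 buckets for these inputs
}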
+ if config.Port != 0 { + port = int(config.Port) } else if serverPort != 0 { port = serverPort } - grpclog.Printf(" - payload config: %v", setup.PayloadConfig) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) var addr string var close func() - if setup.PayloadConfig != nil { - switch payload := setup.PayloadConfig.Payload.(type) { + if config.PayloadConfig != nil { + switch payload := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) addr, close = benchmark.StartByteBufServer(":"+strconv.Itoa(port), payload.BytebufParams.RespSize, opts...) case *testpb.PayloadConfig_SimpleParams: addr, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) case *testpb.PayloadConfig_ComplexParams: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", setup.PayloadConfig) + return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", setup.PayloadConfig) + return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) } } else { // Start protobuf server is payload config is nil. diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 72ed63d4..e16a6304 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -50,7 +50,7 @@ import ( var ( driverPort = flag.Int("driver_port", 10000, "port for communication with driver") - serverPort = flag.Int("server_port", 0, "default port for benchmark server") + serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message") ) type byteBufCodec struct { @@ -104,7 +104,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er switch argtype := in.Argtype.(type) { case *testpb.ServerArgs_Setup: grpclog.Printf("server setup received:") - newbs, err := startBenchmarkServerWithSetup(argtype.Setup, s.serverPort) + newbs, err := startBenchmarkServer(argtype.Setup, s.serverPort) if err != nil { return err } @@ -165,7 +165,7 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er switch t := in.Argtype.(type) { case *testpb.ClientArgs_Setup: grpclog.Printf("client setup received:") - newbc, err := startBenchmarkClientWithSetup(t.Setup) + newbc, err := startBenchmarkClient(t.Setup) if err != nil { return err } From bb1be7190b02abc5377e52917fed6e578e9f2513 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 16:43:23 -0700 Subject: [PATCH 23/43] Change cpu core set and var declaration --- benchmark/worker/benchmark_client.go | 13 +++++++------ benchmark/worker/benchmark_server.go | 8 +++++--- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index facb9e7a..88d942ee 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -91,15 +91,16 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) grpclog.Printf(" - core limit: %v", config.CoreLimit) // Use one cpu core by default - numOfCores := 1 - if config.CoreLimit > 0 { - numOfCores = int(config.CoreLimit) + // TODO: change default number of cores used if 1 is not fastest. 
+ if config.CoreLimit > 1 { + runtime.GOMAXPROCS(int(config.CoreLimit)) } - runtime.GOMAXPROCS(numOfCores) grpclog.Printf(" - payload config: %v", config.PayloadConfig) - var payloadReqSize, payloadRespSize int - var payloadType string + var ( + payloadReqSize, payloadRespSize int + payloadType string + ) if config.PayloadConfig != nil { switch c := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index de937c0c..de104f1f 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -93,7 +93,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma grpclog.Printf(" - core limit: %v", config.CoreLimit) // Use one cpu core by default. numOfCores := 1 - if config.CoreLimit > 0 { + if config.CoreLimit > 1 { numOfCores = int(config.CoreLimit) } runtime.GOMAXPROCS(numOfCores) @@ -108,8 +108,10 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma } grpclog.Printf(" - payload config: %v", config.PayloadConfig) - var addr string - var close func() + var ( + addr string + close func() + ) if config.PayloadConfig != nil { switch payload := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: From 19d3a3572d2fa06db2ce192ce41e6f515a927635 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 17:34:55 -0700 Subject: [PATCH 24/43] Merge StartServer and StartByteBufServer into a general StartServer --- benchmark/benchmark.go | 47 +++++++++++++++------------- benchmark/benchmark_test.go | 10 ++++-- benchmark/worker/benchmark_server.go | 26 +++++++++++++-- 3 files changed, 57 insertions(+), 26 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 208a5431..63bd41d7 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -37,6 +37,7 @@ Package benchmark implements the building blocks to setup end-to-end gRPC benchm package benchmark import ( + "fmt" "io" "net" @@ -91,22 +92,6 @@ func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallS } } -// StartServer starts a gRPC server serving a benchmark service on the given -// address, which may be something like "localhost:0". It returns its listen -// address and a function to stop the server. -func StartServer(addr string, opts ...grpc.ServerOption) (string, func()) { - lis, err := net.Listen("tcp", addr) - if err != nil { - grpclog.Fatalf("Failed to listen: %v", err) - } - s := grpc.NewServer(opts...) - testpb.RegisterBenchmarkServiceServer(s, &testServer{}) - go s.Serve(lis) - return lis.Addr().String(), func() { - s.Stop() - } -} - type byteBufServer struct { respSize int32 } @@ -132,19 +117,39 @@ func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCa } } -// StartbyteBufServer starts a benchmark service server that supports custom codec. +// ServerInfo is used to create server. +// It contains the address and type of the server to be created, and optional metadata. +type ServerInfo struct { + Addr string + Type string + Metadata interface{} +} + +// StartServer starts a gRPC server serving a benchmark service on the given ServerInfo. +// Different types of servers are created according to Type. // It returns its listen address and a function to stop the server. 
-func StartByteBufServer(addr string, respSize int32, opts ...grpc.ServerOption) (string, func()) { - lis, err := net.Listen("tcp", addr) +func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func(), error) { + lis, err := net.Listen("tcp", info.Addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } s := grpc.NewServer(opts...) - testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) + switch info.Type { + case "protobuf": + testpb.RegisterBenchmarkServiceServer(s, &testServer{}) + case "bytebuf": + respSize, ok := info.Metadata.(int32) + if !ok { + return "", nil, fmt.Errorf("invalid metadata: %v, for Type: %v", info.Metadata, info.Type) + } + testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) + default: + return "", nil, fmt.Errorf("unknown Type: %v", info.Type) + } go s.Serve(lis) return lis.Addr().String(), func() { s.Stop() - } + }, nil } // DoUnaryCall performs an unary RPC with given stub and request and response sizes. diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index ccf0f457..4e47631a 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -16,7 +16,10 @@ import ( func runUnary(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - target, stopper := StartServer("localhost:0") + target, stopper, err := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) + if err != nil { + grpclog.Fatalf("failed to start server: %v", err) + } defer stopper() conn := NewClientConn(target, grpc.WithInsecure()) tc := testpb.NewBenchmarkServiceClient(conn) @@ -59,7 +62,10 @@ func runUnary(b *testing.B, maxConcurrentCalls int) { func runStream(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - target, stopper := StartServer("localhost:0") + target, stopper, err := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) + if err != nil { + grpclog.Fatalf("failed to start server: %v", err) + } defer stopper() conn := NewClientConn(target, grpc.WithInsecure()) tc := testpb.NewBenchmarkServiceClient(conn) diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index de104f1f..7638f75f 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -111,14 +111,28 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma var ( addr string close func() + err error ) if config.PayloadConfig != nil { switch payload := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) - addr, close = benchmark.StartByteBufServer(":"+strconv.Itoa(port), payload.BytebufParams.RespSize, opts...) + addr, close, err = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "bytebuf", + Metadata: payload.BytebufParams.RespSize, + }, opts...) + if err != nil { + grpclog.Fatalf("failed to start server: %v", err) + } case *testpb.PayloadConfig_SimpleParams: - addr, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) + addr, close, err = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "protobuf", + }, opts...) 
+ if err != nil { + grpclog.Fatalf("failed to start server: %v", err) + } case *testpb.PayloadConfig_ComplexParams: return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) default: @@ -126,7 +140,13 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma } } else { // Start protobuf server is payload config is nil. - addr, close = benchmark.StartServer(":"+strconv.Itoa(port), opts...) + addr, close, err = benchmark.StartServer(benchmark.ServerInfo{ + Addr: ":" + strconv.Itoa(port), + Type: "protobuf", + }, opts...) + if err != nil { + grpclog.Fatalf("failed to start server: %v", err) + } } grpclog.Printf("benchmark server listening at %v", addr) From 0ca699c979b787f4aea8eb14757454ff4e29826b Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Thu, 28 Apr 2016 17:48:05 -0700 Subject: [PATCH 25/43] Address review comments Change Abs to abs Remove unimplemented distribution Name change Get server port from config or cmd line option --- benchmark/worker/benchmark_client.go | 28 ++++++++++++---------------- benchmark/worker/benchmark_server.go | 26 ++++++++++++-------------- benchmark/worker/main.go | 12 +++++++----- benchmark/worker/util.go | 4 ++-- 4 files changed, 33 insertions(+), 37 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 88d942ee..7d14007e 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -80,7 +80,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) grpclog.Printf(" - security params: %v", config.SecurityParams) if config.SecurityParams != nil { - creds, err := credentials.NewClientTLSFromFile(Abs(caFile), config.SecurityParams.ServerHostOverride) + creds, err := credentials.NewClientTLSFromFile(abs(caFile), config.SecurityParams.ServerHostOverride) if err != nil { return nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) } @@ -90,7 +90,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) } grpclog.Printf(" - core limit: %v", config.CoreLimit) - // Use one cpu core by default + // Use one cpu core by default. // TODO: change default number of cores used if 1 is not fastest. if config.CoreLimit > 1 { runtime.GOMAXPROCS(int(config.CoreLimit)) @@ -125,13 +125,11 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) rpcCountPerConn, connCount := int(config.OutstandingRpcsPerChannel), int(config.ClientChannels) grpclog.Printf(" - load params: %v", config.LoadParams) - var dist *int - switch lp := config.LoadParams.Load.(type) { + // TODO add open loop distribution. + switch config.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: case *testpb.LoadParams_Poisson: - grpclog.Printf(" - %v", lp.Poisson) return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) - // TODO poisson case *testpb.LoadParams_Uniform: return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) case *testpb.LoadParams_Determ: @@ -175,21 +173,17 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) switch rpcType { case "unary": - if dist == nil { - bc.doCloseLoopUnaryBenchmark(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) - } - // TODO else do open loop + bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) + // TODO open loop. 
case "streaming": - if dist == nil { - bc.doCloseLoopStreamingBenchmark(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) - } - // TODO else do open loop + bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) + // TODO open loop. } return &bc, nil } -func (bc *benchmarkClient) doCloseLoopUnaryBenchmark(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { +func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { clients := make([]testpb.BenchmarkServiceClient, len(conns)) for ic, conn := range conns { clients[ic] = testpb.NewBenchmarkServiceClient(conn) @@ -235,7 +229,7 @@ func (bc *benchmarkClient) doCloseLoopUnaryBenchmark(conns []*grpc.ClientConn, r } } -func (bc *benchmarkClient) doCloseLoopStreamingBenchmark(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { +func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { doRPC = benchmark.DoByteBufStreamingRoundTrip @@ -319,6 +313,8 @@ func (bc *benchmarkClient) getStats() *testpb.ClientStats { } } +// reset clears the contents for histogram and set lastResetTime to Now(). +// It is often called to get ready for benchmark runs. func (bc *benchmarkClient) reset() { bc.mu.Lock() defer bc.mu.Unlock() diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index 7638f75f..af46221a 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -57,7 +57,7 @@ var ( type benchmarkServer struct { port int cores int - close func() + closeFunc func() mu sync.RWMutex lastResetTime time.Time } @@ -83,7 +83,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma grpclog.Printf(" - security params: %v", config.SecurityParams) if config.SecurityParams != nil { - creds, err := credentials.NewServerTLSFromFile(Abs(certFile), Abs(keyFile)) + creds, err := credentials.NewServerTLSFromFile(abs(certFile), abs(keyFile)) if err != nil { grpclog.Fatalf("failed to generate credentials %v", err) } @@ -95,29 +95,27 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma numOfCores := 1 if config.CoreLimit > 1 { numOfCores = int(config.CoreLimit) + runtime.GOMAXPROCS(numOfCores) } - runtime.GOMAXPROCS(numOfCores) grpclog.Printf(" - port: %v", config.Port) - var port int // Priority: config.Port > serverPort > default (0). 
- if config.Port != 0 { - port = int(config.Port) - } else if serverPort != 0 { + port := int(config.Port) + if port == 0 { port = serverPort } grpclog.Printf(" - payload config: %v", config.PayloadConfig) var ( - addr string - close func() - err error + addr string + closeFunc func() + err error ) if config.PayloadConfig != nil { switch payload := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) - addr, close, err = benchmark.StartServer(benchmark.ServerInfo{ + addr, closeFunc, err = benchmark.StartServer(benchmark.ServerInfo{ Addr: ":" + strconv.Itoa(port), Type: "bytebuf", Metadata: payload.BytebufParams.RespSize, @@ -126,7 +124,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma grpclog.Fatalf("failed to start server: %v", err) } case *testpb.PayloadConfig_SimpleParams: - addr, close, err = benchmark.StartServer(benchmark.ServerInfo{ + addr, closeFunc, err = benchmark.StartServer(benchmark.ServerInfo{ Addr: ":" + strconv.Itoa(port), Type: "protobuf", }, opts...) @@ -140,7 +138,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma } } else { // Start protobuf server is payload config is nil. - addr, close, err = benchmark.StartServer(benchmark.ServerInfo{ + addr, closeFunc, err = benchmark.StartServer(benchmark.ServerInfo{ Addr: ":" + strconv.Itoa(port), Type: "protobuf", }, opts...) @@ -156,7 +154,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma grpclog.Fatalf("failed to get port number from server address: %v", err) } - return &benchmarkServer{port: p, cores: numOfCores, close: close, lastResetTime: time.Now()}, nil + return &benchmarkServer{port: p, cores: numOfCores, closeFunc: closeFunc, lastResetTime: time.Now()}, nil } func (bs *benchmarkServer) getStats() *testpb.ServerStats { diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index e16a6304..740188ea 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -59,7 +59,7 @@ type byteBufCodec struct { func (byteBufCodec) Marshal(v interface{}) ([]byte, error) { b, ok := v.(*[]byte) if !ok { - return nil, fmt.Errorf("Failed to marshal: %v is not type of *[]byte") + return nil, fmt.Errorf("failed to marshal: %v is not type of *[]byte") } return *b, nil } @@ -67,16 +67,18 @@ func (byteBufCodec) Marshal(v interface{}) ([]byte, error) { func (byteBufCodec) Unmarshal(data []byte, v interface{}) error { b, ok := v.(*[]byte) if !ok { - return fmt.Errorf("Failed to marshal: %v is not type of *[]byte") + return fmt.Errorf("failed to marshal: %v is not type of *[]byte") } *b = data return nil } func (byteBufCodec) String() string { - return "byteBufCodec" + return "bytebuffer" } +// workerServer implements WorkerService rpc handlers. +// It can create benchmarkServer or benchmarkClient on demand. type workerServer struct { stop chan<- bool serverPort int @@ -88,7 +90,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er // Close benchmark server when stream ends. 
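For context on the byteBufCodec hunks above: the bytebuf payload type exists to measure transport cost without protobuf marshaling, which the worker achieves with a pass-through codec that treats every message as a *[]byte. The sketch below condenses that codec and shows how it is wired in, using grpc.CustomCodec on the server and grpc.WithCodec on the dial path, both of which appear elsewhere in this series. It assumes the grpc.Codec interface of this era (Marshal/Unmarshal/String); the address is arbitrary, and the sketch supplies the %v argument that the worker's error messages omit.

    package main

    import (
        "fmt"

        "google.golang.org/grpc"
        "google.golang.org/grpc/grpclog"
    )

    // byteBufCodec passes raw byte slices through unchanged, so benchmark RPCs
    // measure transport cost rather than protobuf encoding.
    type byteBufCodec struct{}

    func (byteBufCodec) Marshal(v interface{}) ([]byte, error) {
        b, ok := v.(*[]byte)
        if !ok {
            return nil, fmt.Errorf("failed to marshal: %v is not type of *[]byte", v)
        }
        return *b, nil
    }

    func (byteBufCodec) Unmarshal(data []byte, v interface{}) error {
        b, ok := v.(*[]byte)
        if !ok {
            return fmt.Errorf("failed to unmarshal: %v is not type of *[]byte", v)
        }
        *b = data
        return nil
    }

    func (byteBufCodec) String() string { return "bytebuffer" }

    func main() {
        // Server side: install the codec as a grpc.ServerOption.
        s := grpc.NewServer(grpc.CustomCodec(byteBufCodec{}))
        defer s.Stop()

        // Client side: dial with the same codec so both ends skip protobuf.
        conn, err := grpc.Dial("localhost:50051", grpc.WithCodec(byteBufCodec{}), grpc.WithInsecure())
        if err != nil {
            grpclog.Fatalf("Dial failed: %v", err)
        }
        defer conn.Close()
    }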
grpclog.Printf("closing benchmark server") if bs != nil { - bs.close() + bs.closeFunc() } }() for { @@ -110,7 +112,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er } if bs != nil { grpclog.Printf("server setup received when server already exists, closing the existing server") - bs.close() + bs.closeFunc() } bs = newbs out = &testpb.ServerStatus{ diff --git a/benchmark/worker/util.go b/benchmark/worker/util.go index 3a652b62..f0016ce4 100644 --- a/benchmark/worker/util.go +++ b/benchmark/worker/util.go @@ -38,10 +38,10 @@ import ( "path/filepath" ) -// Abs returns the absolute path the given relative file or directory path, +// abs returns the absolute path the given relative file or directory path, // relative to the google.golang.org/grpc directory in the user's GOPATH. // If rel is already absolute, it is returned unmodified. -func Abs(rel string) string { +func abs(rel string) string { if filepath.IsAbs(rel) { return rel } From db85417dd0de6cc6f583672c6175a7237e5b5dd2 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Apr 2016 10:43:27 -0700 Subject: [PATCH 26/43] Check old benchmarkserver before creating new benchmarkserver --- benchmark/worker/main.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 740188ea..063b5dd9 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -106,15 +106,14 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er switch argtype := in.Argtype.(type) { case *testpb.ServerArgs_Setup: grpclog.Printf("server setup received:") - newbs, err := startBenchmarkServer(argtype.Setup, s.serverPort) - if err != nil { - return err - } if bs != nil { grpclog.Printf("server setup received when server already exists, closing the existing server") bs.closeFunc() } - bs = newbs + bs, err = startBenchmarkServer(argtype.Setup, s.serverPort) + if err != nil { + return err + } out = &testpb.ServerStatus{ Stats: bs.getStats(), Port: int32(bs.port), @@ -167,15 +166,14 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er switch t := in.Argtype.(type) { case *testpb.ClientArgs_Setup: grpclog.Printf("client setup received:") - newbc, err := startBenchmarkClient(t.Setup) - if err != nil { - return err - } if bc != nil { grpclog.Printf("client setup received when client already exists, shuting down the existing client") bc.shutdown() } - bc = newbc + bc, err = startBenchmarkClient(t.Setup) + if err != nil { + return err + } out = &testpb.ClientStatus{ Stats: bc.getStats(), } From ceacfbcbc1514e4e677932fd55938ac455d182fb Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Apr 2016 10:58:11 -0700 Subject: [PATCH 27/43] Add selecting on stop chan to avoid goroutine leak --- benchmark/worker/benchmark_client.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 7d14007e..2ee5dbb1 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -200,8 +200,8 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe for j := 0; j < rpcCountPerConn; j++ { go func(client testpb.BenchmarkServiceClient) { defer wg.Done() + done := make(chan bool) for { - done := make(chan bool) go func() { start := time.Now() if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { @@ -212,7 +212,10 @@ func (bc 
*benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe bc.mu.Lock() bc.histogram.Add(int64(elapse / time.Nanosecond)) bc.mu.Unlock() - done <- true + select { + case <-bc.stop: + case done <- true: + } }() select { case <-bc.stop: @@ -259,8 +262,8 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou for j := 0; j < rpcCountPerConn; j++ { go func(stream testpb.BenchmarkService_StreamingCallClient) { defer wg.Done() + done := make(chan bool) for { - done := make(chan bool) go func() { start := time.Now() if err := doRPC(stream, reqSize, respSize); err != nil { @@ -271,7 +274,10 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou bc.mu.Lock() bc.histogram.Add(int64(elapse / time.Nanosecond)) bc.mu.Unlock() - done <- true + select { + case <-bc.stop: + case done <- true: + } }() select { case <-bc.stop: From 0d31c857aeb29dc9520da3a12f9dbbb0d6ee7017 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Apr 2016 11:27:24 -0700 Subject: [PATCH 28/43] Merge stream init and rpc goroutine creation to the same loop, and remove warming up. --- benchmark/worker/benchmark_client.go | 50 ++++++++++++---------------- 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 2ee5dbb1..475acf80 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -184,21 +184,18 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) } func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { - clients := make([]testpb.BenchmarkServiceClient, len(conns)) - for ic, conn := range conns { - clients[ic] = testpb.NewBenchmarkServiceClient(conn) - // Do some warm up. - for j := 0; j < 10; j++ { - benchmark.DoUnaryCall(clients[ic], 1, 1) - } - } - for ic, conn := range conns { + for _, conn := range conns { + client := testpb.NewBenchmarkServiceClient(conn) // For each connection, create rpcCountPerConn goroutines to do rpc. // Close this connection after all go routines finish. var wg sync.WaitGroup wg.Add(rpcCountPerConn) for j := 0; j < rpcCountPerConn; j++ { - go func(client testpb.BenchmarkServiceClient) { + go func() { + // TODO: do warm up if necessary. + // Now relying on driver client to reserve time to do warm up. + // The driver client needs to wait for some time after client is created, + // before starting benchmark. defer wg.Done() done := make(chan bool) for { @@ -223,7 +220,7 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe case <-done: } } - }(clients[ic]) + }() } go func(conn *grpc.ClientConn) { wg.Wait() @@ -239,28 +236,23 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou } else { doRPC = benchmark.DoStreamingRoundTrip } - streams := make([]testpb.BenchmarkService_StreamingCallClient, len(conns)*rpcCountPerConn) - for ic, conn := range conns { - for j := 0; j < rpcCountPerConn; j++ { - c := testpb.NewBenchmarkServiceClient(conn) - s, err := c.StreamingCall(context.Background()) - if err != nil { - grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) - } - streams[ic*rpcCountPerConn+j] = s - // Do some warm up. - for j := 0; j < 10; j++ { - doRPC(streams[ic], 1, 1) - } - } - } - for ic, conn := range conns { + for _, conn := range conns { // For each connection, create rpcCountPerConn goroutines to do rpc. 
// Close this connection after all go routines finish. var wg sync.WaitGroup wg.Add(rpcCountPerConn) for j := 0; j < rpcCountPerConn; j++ { - go func(stream testpb.BenchmarkService_StreamingCallClient) { + c := testpb.NewBenchmarkServiceClient(conn) + stream, err := c.StreamingCall(context.Background()) + if err != nil { + grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) + } + // Create benchmark rpc goroutine. + go func() { + // TODO: do warm up if necessary. + // Now relying on driver client to reserve time to do warm up. + // The driver client needs to wait for some time after client is created, + // before starting benchmark. defer wg.Done() done := make(chan bool) for { @@ -285,7 +277,7 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou case <-done: } } - }(streams[ic*rpcCountPerConn+j]) + }() } go func(conn *grpc.ClientConn) { wg.Wait() From 67497aad6cece9af26acda98442494691563beac Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Apr 2016 11:32:29 -0700 Subject: [PATCH 29/43] Minor fixes --- benchmark/benchmark.go | 4 ++++ benchmark/worker/benchmark_client.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 63bd41d7..a8727c17 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -195,6 +195,10 @@ func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallCli } var in []byte if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { + // EOF should be a valid error here. + if err == io.EOF { + return nil + } return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).RecvMsg: %v", grpc.ErrorDesc(err)) } return nil diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 475acf80..95785294 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -312,7 +312,7 @@ func (bc *benchmarkClient) getStats() *testpb.ClientStats { } // reset clears the contents for histogram and set lastResetTime to Now(). -// It is often called to get ready for benchmark runs. +// It is called to get ready for benchmark runs. 
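For context on the goroutine-leak fixes above: the key change is that every send on the per-RPC done channel now also selects on bc.stop, so an in-flight RPC goroutine can never block on a channel nobody reads after shutdown closes the stop channel. The following is a condensed, generic sketch of the resulting closed-loop shape; closedLoop, doRPC, and record are illustrative names, not the worker's actual functions.

    package main

    import (
        "fmt"
        "time"
    )

    // closedLoop issues one RPC at a time and records its latency until stop is
    // closed. Every channel send also selects on stop, mirroring the fix above,
    // so no goroutine is left blocked on done after stop is closed.
    func closedLoop(stop chan bool, doRPC func() error, record func(time.Duration)) {
        done := make(chan bool)
        for {
            go func() {
                start := time.Now()
                if err := doRPC(); err != nil {
                    select {
                    case <-stop:
                    case done <- false:
                    }
                    return
                }
                record(time.Since(start))
                select {
                case <-stop:
                case done <- true:
                }
            }()
            select {
            case <-stop:
                return
            case <-done:
            }
        }
    }

    func main() {
        stop := make(chan bool)
        go func() {
            time.Sleep(50 * time.Millisecond)
            close(stop)
        }()
        closedLoop(stop,
            func() error { time.Sleep(time.Millisecond); return nil },
            func(d time.Duration) { fmt.Println("rpc latency:", d) },
        )
    }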
func (bc *benchmarkClient) reset() { bc.mu.Lock() defer bc.mu.Unlock() From e7801b635a67714416e8283573b3182a482b72a8 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Apr 2016 14:39:58 -0700 Subject: [PATCH 30/43] Fix error in benchmark/server/main.go --- benchmark/server/main.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/benchmark/server/main.go b/benchmark/server/main.go index 090f002f..aa73ffe9 100644 --- a/benchmark/server/main.go +++ b/benchmark/server/main.go @@ -28,7 +28,10 @@ func main() { grpclog.Fatalf("Failed to serve: %v", err) } }() - addr, stopper := benchmark.StartServer(":0") // listen on all interfaces + addr, stopper, err := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces + if err != nil { + grpclog.Fatalf("failed to start server: %v", err) + } grpclog.Println("Server Address: ", addr) <-time.After(time.Duration(*duration) * time.Second) stopper() From 02bd4c80b5de36ca004c03b8eb2f607b1decc02f Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Fri, 29 Apr 2016 16:43:01 -0700 Subject: [PATCH 31/43] Fix leak goroutine problem --- benchmark/worker/benchmark_client.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 95785294..d25e3ceb 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -202,7 +202,10 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe go func() { start := time.Now() if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { - done <- false + select { + case <-bc.stop: + case done <- false: + } return } elapse := time.Since(start) @@ -259,7 +262,10 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou go func() { start := time.Now() if err := doRPC(stream, reqSize, respSize); err != nil { - done <- false + select { + case <-bc.stop: + case done <- false: + } return } elapse := time.Since(start) From c1cfebc3848177ea9b9806345e977b69f6bc447c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 2 May 2016 10:55:12 -0700 Subject: [PATCH 32/43] Address review comments --- benchmark/benchmark.go | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index a8727c17..90566793 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -92,6 +92,9 @@ func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallS } } +// byteBufServer is a gRPC server that sends and receives byte buffer. +// The purpose is to remove the protobuf overhead from benchmark tests. +// Now only StreamingCall() is tested. type byteBufServer struct { respSize int32 } @@ -118,14 +121,21 @@ func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCa } // ServerInfo is used to create server. -// It contains the address and type of the server to be created, and optional metadata. type ServerInfo struct { - Addr string - Type string + // Addr is the address of the server. + Addr string + + // Type is the type of the server. + // It should be "protobuf" or "bytebuf". + Type string + + // Metadata is an optional configuration. + // For "protobuf", it's ignored. + // For "bytebuf", it should be an int representing response size. Metadata interface{} } -// StartServer starts a gRPC server serving a benchmark service on the given ServerInfo. 
+// StartServer starts a gRPC server serving a benchmark service according to info. // Different types of servers are created according to Type. // It returns its listen address and a function to stop the server. func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func(), error) { @@ -161,7 +171,7 @@ func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { - return grpc.Errorf(grpc.Code(err), "/BenchmarkService/UnaryCall RPC failed: %v", grpc.ErrorDesc(err)) + return fmt.Errorf("/BenchmarkService/UnaryCall(_, _) = _, %v, want _, ", err) } return nil } @@ -175,14 +185,14 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re Payload: pl, } if err := stream.Send(req); err != nil { - return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Send: %v", grpc.ErrorDesc(err)) + return fmt.Errorf("StreamingCall(_).Send(_) = %v, want ", err) } if _, err := stream.Recv(); err != nil { // EOF should be a valid error here. if err == io.EOF { return nil } - return grpc.Errorf(grpc.Code(err), "StreamingCall(_).Recv: %v", grpc.ErrorDesc(err)) + return fmt.Errorf("StreamingCall(_).Recv(_) = %v, want ", err) } return nil } @@ -191,7 +201,7 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { out := make([]byte, reqSize) if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { - return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).SendMsg: %v", grpc.ErrorDesc(err)) + return fmt.Errorf("StreamingCall(_).(ClientStream).SendMsg(_) = %v, want ", err) } var in []byte if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { @@ -199,7 +209,7 @@ func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallCli if err == io.EOF { return nil } - return grpc.Errorf(grpc.Code(err), "StreamingCall(_).(ClientStream).RecvMsg: %v", grpc.ErrorDesc(err)) + return fmt.Errorf("StreamingCall(_).(ClientStream).RecvMsg(_) = %v, want ", err) } return nil } From ad6a5173f88f0923554ea27e1efb2758011f9467 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 2 May 2016 10:57:37 -0700 Subject: [PATCH 33/43] Small fix in error desc --- benchmark/benchmark.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 90566793..7c3b4fd0 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -185,14 +185,14 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re Payload: pl, } if err := stream.Send(req); err != nil { - return fmt.Errorf("StreamingCall(_).Send(_) = %v, want ", err) + return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) } if _, err := stream.Recv(); err != nil { // EOF should be a valid error here. 
if err == io.EOF { return nil } - return fmt.Errorf("StreamingCall(_).Recv(_) = %v, want ", err) + return fmt.Errorf("/BenchmarkService/StreamingCall.Recv(_) = %v, want ", err) } return nil } @@ -201,7 +201,7 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { out := make([]byte, reqSize) if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { - return fmt.Errorf("StreamingCall(_).(ClientStream).SendMsg(_) = %v, want ", err) + return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).SendMsg(_) = %v, want ", err) } var in []byte if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { @@ -209,7 +209,7 @@ func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallCli if err == io.EOF { return nil } - return fmt.Errorf("StreamingCall(_).(ClientStream).RecvMsg(_) = %v, want ", err) + return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).RecvMsg(_) = %v, want ", err) } return nil } From 444ab5553f358552be0036905dabd8d83ff0c4e3 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Mon, 2 May 2016 12:13:01 -0700 Subject: [PATCH 34/43] Fix comments --- benchmark/worker/benchmark_client.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index d25e3ceb..e2f78bb8 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -187,14 +187,14 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe for _, conn := range conns { client := testpb.NewBenchmarkServiceClient(conn) // For each connection, create rpcCountPerConn goroutines to do rpc. - // Close this connection after all go routines finish. + // Close this connection after all goroutines finish. var wg sync.WaitGroup wg.Add(rpcCountPerConn) for j := 0; j < rpcCountPerConn; j++ { go func() { // TODO: do warm up if necessary. - // Now relying on driver client to reserve time to do warm up. - // The driver client needs to wait for some time after client is created, + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, // before starting benchmark. defer wg.Done() done := make(chan bool) @@ -241,7 +241,7 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou } for _, conn := range conns { // For each connection, create rpcCountPerConn goroutines to do rpc. - // Close this connection after all go routines finish. + // Close this connection after all goroutines finish. var wg sync.WaitGroup wg.Add(rpcCountPerConn) for j := 0; j < rpcCountPerConn; j++ { @@ -253,8 +253,8 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou // Create benchmark rpc goroutine. go func() { // TODO: do warm up if necessary. - // Now relying on driver client to reserve time to do warm up. - // The driver client needs to wait for some time after client is created, + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, // before starting benchmark. defer wg.Done() done := make(chan bool) From 020c480810cab338178308913e6ff46ef8e032ec Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 May 2016 11:29:00 -0700 Subject: [PATCH 35/43] Not returning error in StartServer(). 
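With this change StartServer fails fast via grpclog.Fatalf on an invalid ServerInfo instead of returning an error, so callers go back to two return values. For reference, a minimal end-to-end sketch of the benchmark package's API as it looks after this patch, mirroring what benchmark_test.go and benchmark/server/main.go do in this series (insecure connection, 1-byte payloads, ephemeral port; all values are arbitrary):

    package main

    import (
        "golang.org/x/net/context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/benchmark"
        testpb "google.golang.org/grpc/benchmark/grpc_testing"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Start a protobuf benchmark server on an ephemeral port.
        target, stop := benchmark.StartServer(benchmark.ServerInfo{Addr: "localhost:0", Type: "protobuf"})
        defer stop()

        conn := benchmark.NewClientConn(target, grpc.WithInsecure())
        defer conn.Close()
        tc := testpb.NewBenchmarkServiceClient(conn)

        // One unary round trip with 1-byte request and response payloads.
        if err := benchmark.DoUnaryCall(tc, 1, 1); err != nil {
            grpclog.Fatalf("DoUnaryCall failed: %v", err)
        }

        // One streaming round trip on a single stream.
        stream, err := tc.StreamingCall(context.Background())
        if err != nil {
            grpclog.Fatalf("StreamingCall failed: %v", err)
        }
        if err := benchmark.DoStreamingRoundTrip(stream, 1, 1); err != nil {
            grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
        }
    }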
--- benchmark/benchmark.go | 16 ++++++++-------- benchmark/benchmark_test.go | 10 ++-------- benchmark/server/main.go | 5 +---- benchmark/worker/benchmark_server.go | 15 +++------------ 4 files changed, 14 insertions(+), 32 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 7c3b4fd0..46433bc9 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -93,12 +93,13 @@ func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallS } // byteBufServer is a gRPC server that sends and receives byte buffer. -// The purpose is to remove the protobuf overhead from benchmark tests. -// Now only StreamingCall() is tested. +// The purpose is to benchmark the gRPC performance without protobuf serialization/deserialization overhead. type byteBufServer struct { respSize int32 } +// UnaryCall is an empty function and is not used for benchmark. +// If bytebuf UnaryCall benchmark is needed later, the function body needs to be updated. func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil } @@ -120,7 +121,7 @@ func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCa } } -// ServerInfo is used to create server. +// ServerInfo contains the information to create a gRPC benchmark server. type ServerInfo struct { // Addr is the address of the server. Addr string @@ -136,9 +137,8 @@ type ServerInfo struct { } // StartServer starts a gRPC server serving a benchmark service according to info. -// Different types of servers are created according to Type. // It returns its listen address and a function to stop the server. -func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func(), error) { +func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) { lis, err := net.Listen("tcp", info.Addr) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) @@ -150,16 +150,16 @@ func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func(), er case "bytebuf": respSize, ok := info.Metadata.(int32) if !ok { - return "", nil, fmt.Errorf("invalid metadata: %v, for Type: %v", info.Metadata, info.Type) + grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type) } testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) default: - return "", nil, fmt.Errorf("unknown Type: %v", info.Type) + grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type) } go s.Serve(lis) return lis.Addr().String(), func() { s.Stop() - }, nil + } } // DoUnaryCall performs an unary RPC with given stub and request and response sizes. 
diff --git a/benchmark/benchmark_test.go b/benchmark/benchmark_test.go index 4e47631a..8fe3fa15 100644 --- a/benchmark/benchmark_test.go +++ b/benchmark/benchmark_test.go @@ -16,10 +16,7 @@ import ( func runUnary(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - target, stopper, err := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) - if err != nil { - grpclog.Fatalf("failed to start server: %v", err) - } + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) defer stopper() conn := NewClientConn(target, grpc.WithInsecure()) tc := testpb.NewBenchmarkServiceClient(conn) @@ -62,10 +59,7 @@ func runUnary(b *testing.B, maxConcurrentCalls int) { func runStream(b *testing.B, maxConcurrentCalls int) { s := stats.AddStats(b, 38) b.StopTimer() - target, stopper, err := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) - if err != nil { - grpclog.Fatalf("failed to start server: %v", err) - } + target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf"}) defer stopper() conn := NewClientConn(target, grpc.WithInsecure()) tc := testpb.NewBenchmarkServiceClient(conn) diff --git a/benchmark/server/main.go b/benchmark/server/main.go index aa73ffe9..d43aad0b 100644 --- a/benchmark/server/main.go +++ b/benchmark/server/main.go @@ -28,10 +28,7 @@ func main() { grpclog.Fatalf("Failed to serve: %v", err) } }() - addr, stopper, err := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces - if err != nil { - grpclog.Fatalf("failed to start server: %v", err) - } + addr, stopper := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces grpclog.Println("Server Address: ", addr) <-time.After(time.Duration(*duration) * time.Second) stopper() diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index af46221a..75fc1656 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -115,22 +115,16 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma switch payload := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) - addr, closeFunc, err = benchmark.StartServer(benchmark.ServerInfo{ + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ Addr: ":" + strconv.Itoa(port), Type: "bytebuf", Metadata: payload.BytebufParams.RespSize, }, opts...) - if err != nil { - grpclog.Fatalf("failed to start server: %v", err) - } case *testpb.PayloadConfig_SimpleParams: - addr, closeFunc, err = benchmark.StartServer(benchmark.ServerInfo{ + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ Addr: ":" + strconv.Itoa(port), Type: "protobuf", }, opts...) - if err != nil { - grpclog.Fatalf("failed to start server: %v", err) - } case *testpb.PayloadConfig_ComplexParams: return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) default: @@ -138,13 +132,10 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma } } else { // Start protobuf server is payload config is nil. - addr, closeFunc, err = benchmark.StartServer(benchmark.ServerInfo{ + addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{ Addr: ":" + strconv.Itoa(port), Type: "protobuf", }, opts...) 
- if err != nil { - grpclog.Fatalf("failed to start server: %v", err) - } } grpclog.Printf("benchmark server listening at %v", addr) From 14d95fc6323eb5c631c2fae34e7b0734db9de316 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 May 2016 12:03:06 -0700 Subject: [PATCH 36/43] Small fixes in comments. --- benchmark/benchmark.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 46433bc9..d1143270 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -188,7 +188,7 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) } if _, err := stream.Recv(); err != nil { - // EOF should be a valid error here. + // EOF is a valid error here. if err == io.EOF { return nil } @@ -197,7 +197,7 @@ func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, re return nil } -// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using custom codec. +// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using a custom codec for byte buffer. func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { out := make([]byte, reqSize) if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { @@ -205,7 +205,7 @@ func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallCli } var in []byte if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { - // EOF should be a valid error here. + // EOF is a valid error here. if err == io.EOF { return nil } From 47152e8076bea20c214cc9bcf98897282c44ff8c Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 May 2016 16:55:15 -0700 Subject: [PATCH 37/43] Reconstruct startBenchmarkClient --- benchmark/worker/benchmark_client.go | 133 +++++++++++++++++---------- 1 file changed, 82 insertions(+), 51 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index e2f78bb8..a4350661 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -60,43 +60,84 @@ type benchmarkClient struct { histogram *stats.Histogram } -func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) { - var opts []grpc.DialOption - +func printClientConfig(config *testpb.ClientConfig) { // Some config options are ignored: // - client type: // will always create sync client // - async client threads. 
// - core list grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType) - switch config.ClientType { - case testpb.ClientType_SYNC_CLIENT: - case testpb.ClientType_ASYNC_CLIENT: - default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknow client type: %v", config.ClientType) - } grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads) grpclog.Printf(" * core list: %v (ignored)", config.CoreList) grpclog.Printf(" - security params: %v", config.SecurityParams) + grpclog.Printf(" - core limit: %v", config.CoreLimit) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) + grpclog.Printf(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel) + grpclog.Printf(" - channel number: %v", config.ClientChannels) + grpclog.Printf(" - load params: %v", config.LoadParams) + grpclog.Printf(" - rpc type: %v", config.RpcType) + grpclog.Printf(" - histogram params: %v", config.HistogramParams) + grpclog.Printf(" - server targets: %v", config.ServerTargets) +} + +func setupClientEnv(config *testpb.ClientConfig) { + // Use one cpu core by default. + // TODO: change default number of cores used if 1 is not fastest. + if config.CoreLimit > 1 { + runtime.GOMAXPROCS(int(config.CoreLimit)) + } +} + +func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) { + var opts []grpc.DialOption + + // Sanity check for client type. + switch config.ClientType { + case testpb.ClientType_SYNC_CLIENT: + case testpb.ClientType_ASYNC_CLIENT: + default: + return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknow client type: %v", config.ClientType) + } + + // Check and set security options. if config.SecurityParams != nil { creds, err := credentials.NewClientTLSFromFile(abs(caFile), config.SecurityParams.ServerHostOverride) if err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) + return nil, nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { opts = append(opts, grpc.WithInsecure()) } - grpclog.Printf(" - core limit: %v", config.CoreLimit) - // Use one cpu core by default. - // TODO: change default number of cores used if 1 is not fastest. - if config.CoreLimit > 1 { - runtime.GOMAXPROCS(int(config.CoreLimit)) + // Use byteBufCodec is required. + if config.PayloadConfig != nil { + switch config.PayloadConfig.Payload.(type) { + case *testpb.PayloadConfig_BytebufParams: + opts = append(opts, grpc.WithCodec(byteBufCodec{})) + case *testpb.PayloadConfig_SimpleParams: + default: + return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) + } } - grpclog.Printf(" - payload config: %v", config.PayloadConfig) + // Create connestions. + connCount := int(config.ClientChannels) + conns := make([]*grpc.ClientConn, connCount) + for connIndex := 0; connIndex < connCount; connIndex++ { + conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...) + } + + return conns, func() { + for _, conn := range conns { + conn.Close() + } + }, nil +} + +func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error { + // Read payload size and type from config. 
var ( payloadReqSize, payloadRespSize int payloadType string @@ -104,7 +145,6 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) if config.PayloadConfig != nil { switch c := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: - opts = append(opts, grpc.WithCodec(byteBufCodec{})) payloadReqSize = int(c.BytebufParams.ReqSize) payloadRespSize = int(c.BytebufParams.RespSize) payloadType = "bytebuf" @@ -112,52 +152,45 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) payloadReqSize = int(c.SimpleParams.ReqSize) payloadRespSize = int(c.SimpleParams.RespSize) payloadType = "protobuf" - case *testpb.PayloadConfig_ComplexParams: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) + return grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig) } } - grpclog.Printf(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel) - grpclog.Printf(" - channel number: %v", config.ClientChannels) - - rpcCountPerConn, connCount := int(config.OutstandingRpcsPerChannel), int(config.ClientChannels) - - grpclog.Printf(" - load params: %v", config.LoadParams) // TODO add open loop distribution. switch config.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: case *testpb.LoadParams_Poisson: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) - case *testpb.LoadParams_Uniform: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) - case *testpb.LoadParams_Determ: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) - case *testpb.LoadParams_Pareto: - return nil, grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) + return grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) + return grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) } - grpclog.Printf(" - rpc type: %v", config.RpcType) - var rpcType string + rpcCountPerConn := int(config.OutstandingRpcsPerChannel) + switch config.RpcType { case testpb.RpcType_UNARY: - rpcType = "unary" + bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) + // TODO open loop. case testpb.RpcType_STREAMING: - rpcType = "streaming" + bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) + // TODO open loop. default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) + return grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) } - grpclog.Printf(" - histogram params: %v", config.HistogramParams) - grpclog.Printf(" - server targets: %v", config.ServerTargets) + return nil +} - conns := make([]*grpc.ClientConn, connCount) +func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) { + printClientConfig(config) - for connIndex := 0; connIndex < connCount; connIndex++ { - conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...) + // Set running environment like how many cores to use. 
+ setupClientEnv(config) + + conns, closeConns, err := createConns(config) + if err != nil { + return nil, err } bc := benchmarkClient{ @@ -171,13 +204,11 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) lastResetTime: time.Now(), } - switch rpcType { - case "unary": - bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) - // TODO open loop. - case "streaming": - bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) - // TODO open loop. + err = performRPCs(config, conns, &bc) + if err != nil { + // Close all conns if failed to performRPCs. + closeConns() + return nil, err } return &bc, nil From 8963fdb9cb13126fb4e937031555097c3560c454 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 May 2016 17:05:37 -0700 Subject: [PATCH 38/43] Reconstruct startBenchmarkServer --- benchmark/worker/benchmark_server.go | 41 +++++++++++++++++----------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index 75fc1656..6a2fab2b 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -62,15 +62,35 @@ type benchmarkServer struct { lastResetTime time.Time } -func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { - var opts []grpc.ServerOption - +func printServerConfig(config *testpb.ServerConfig) { // Some config options are ignored: // - server type: // will always start sync server // - async server threads // - core list grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType) + grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads) + grpclog.Printf(" * core list: %v (ignored)", config.CoreList) + + grpclog.Printf(" - security params: %v", config.SecurityParams) + grpclog.Printf(" - core limit: %v", config.CoreLimit) + grpclog.Printf(" - port: %v", config.Port) + grpclog.Printf(" - payload config: %v", config.PayloadConfig) +} + +func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { + printServerConfig(config) + + // Use one cpu core by default. + numOfCores := 1 + if config.CoreLimit > 1 { + numOfCores = int(config.CoreLimit) + runtime.GOMAXPROCS(numOfCores) + } + + var opts []grpc.ServerOption + + // Sanity check for server type. switch config.ServerType { case testpb.ServerType_SYNC_SERVER: case testpb.ServerType_ASYNC_SERVER: @@ -78,10 +98,8 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma default: return nil, grpc.Errorf(codes.InvalidArgument, "unknow server type: %v", config.ServerType) } - grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads) - grpclog.Printf(" * core list: %v (ignored)", config.CoreList) - grpclog.Printf(" - security params: %v", config.SecurityParams) + // Set security options. if config.SecurityParams != nil { creds, err := credentials.NewServerTLSFromFile(abs(certFile), abs(keyFile)) if err != nil { @@ -90,22 +108,13 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma opts = append(opts, grpc.Creds(creds)) } - grpclog.Printf(" - core limit: %v", config.CoreLimit) - // Use one cpu core by default. 
- numOfCores := 1 - if config.CoreLimit > 1 { - numOfCores = int(config.CoreLimit) - runtime.GOMAXPROCS(numOfCores) - } - - grpclog.Printf(" - port: %v", config.Port) // Priority: config.Port > serverPort > default (0). port := int(config.Port) if port == 0 { port = serverPort } - grpclog.Printf(" - payload config: %v", config.PayloadConfig) + // Create different benchmark server according to config. var ( addr string closeFunc func() From d34f891713b965c89aca0f396dcd612aac3ae862 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 May 2016 17:26:27 -0700 Subject: [PATCH 39/43] Move closeConns to client shutdown() --- benchmark/worker/benchmark_client.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index a4350661..5acd8e5b 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -54,6 +54,7 @@ var ( ) type benchmarkClient struct { + closeConns func() stop chan bool mu sync.RWMutex lastResetTime time.Time @@ -202,6 +203,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) }), stop: make(chan bool), lastResetTime: time.Now(), + closeConns: closeConns, } err = performRPCs(config, conns, &bc) @@ -256,10 +258,6 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe } }() } - go func(conn *grpc.ClientConn) { - wg.Wait() - conn.Close() - }(conn) } } @@ -316,10 +314,6 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou } }() } - go func(conn *grpc.ClientConn) { - wg.Wait() - conn.Close() - }(conn) } } @@ -359,4 +353,5 @@ func (bc *benchmarkClient) reset() { func (bc *benchmarkClient) shutdown() { close(bc.stop) + bc.closeConns() } From a0ea2d2050c473f28b384ebc9fcb5e724a967adc Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Tue, 3 May 2016 18:28:06 -0700 Subject: [PATCH 40/43] Fix comments and remove useless WaitGroup --- benchmark/worker/benchmark_client.go | 14 +++++--------- benchmark/worker/main.go | 4 +++- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 5acd8e5b..462807f2 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -90,6 +90,8 @@ func setupClientEnv(config *testpb.ClientConfig) { } } +// createConns creates connections according to given config. +// It returns a slice of connections created, the function to close all connections, and errors if any. func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) { var opts []grpc.DialOption @@ -112,7 +114,7 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error opts = append(opts, grpc.WithInsecure()) } - // Use byteBufCodec is required. + // Use byteBufCodec if it is required. if config.PayloadConfig != nil { switch config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: @@ -123,7 +125,7 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error } } - // Create connestions. + // Create connections. connCount := int(config.ClientChannels) conns := make([]*grpc.ClientConn, connCount) for connIndex := 0; connIndex < connCount; connIndex++ { @@ -208,7 +210,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) err = performRPCs(config, conns, &bc) if err != nil { - // Close all conns if failed to performRPCs. 
+ // Close all connections if performRPCs failed. closeConns() return nil, err } @@ -221,15 +223,12 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe client := testpb.NewBenchmarkServiceClient(conn) // For each connection, create rpcCountPerConn goroutines to do rpc. // Close this connection after all goroutines finish. - var wg sync.WaitGroup - wg.Add(rpcCountPerConn) for j := 0; j < rpcCountPerConn; j++ { go func() { // TODO: do warm up if necessary. // Now relying on worker client to reserve time to do warm up. // The worker client needs to wait for some time after client is created, // before starting benchmark. - defer wg.Done() done := make(chan bool) for { go func() { @@ -271,8 +270,6 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou for _, conn := range conns { // For each connection, create rpcCountPerConn goroutines to do rpc. // Close this connection after all goroutines finish. - var wg sync.WaitGroup - wg.Add(rpcCountPerConn) for j := 0; j < rpcCountPerConn; j++ { c := testpb.NewBenchmarkServiceClient(conn) stream, err := c.StreamingCall(context.Background()) @@ -285,7 +282,6 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou // Now relying on worker client to reserve time to do warm up. // The worker client needs to wait for some time after client is created, // before starting benchmark. - defer wg.Done() done := make(chan bool) for { go func() { diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go index 063b5dd9..c7bad9b0 100644 --- a/benchmark/worker/main.go +++ b/benchmark/worker/main.go @@ -40,6 +40,7 @@ import ( "net" "runtime" "strconv" + "time" "golang.org/x/net/context" "google.golang.org/grpc" @@ -207,7 +208,7 @@ func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (* func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { grpclog.Printf("quiting worker") - defer func() { s.stop <- true }() + s.stop <- true return &testpb.Void{}, nil } @@ -228,6 +229,7 @@ func main() { go func() { <-stop + time.Sleep(time.Second) s.Stop() }() From 78994035bcbebd375d98aa7733a487fc54ff90c6 Mon Sep 17 00:00:00 2001 From: Menghan Li Date: Wed, 4 May 2016 10:50:09 -0700 Subject: [PATCH 41/43] Modify comments --- benchmark/worker/benchmark_client.go | 5 ++--- benchmark/worker/benchmark_server.go | 2 +- benchmark/worker/main.go | 2 ++ 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 462807f2..b73d7624 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -91,7 +91,8 @@ func setupClientEnv(config *testpb.ClientConfig) { } // createConns creates connections according to given config. -// It returns a slice of connections created, the function to close all connections, and errors if any. +// It returns the connections and corresponding function to close them. +// It returns non-nil error if there is anything wrong. func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) { var opts []grpc.DialOption @@ -222,7 +223,6 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe for _, conn := range conns { client := testpb.NewBenchmarkServiceClient(conn) // For each connection, create rpcCountPerConn goroutines to do rpc. - // Close this connection after all goroutines finish. 
 		for j := 0; j < rpcCountPerConn; j++ {
 			go func() {
 				// TODO: do warm up if necessary.
@@ -269,7 +269,6 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou
 	}
 	for _, conn := range conns {
 		// For each connection, create rpcCountPerConn goroutines to do rpc.
-		// Close this connection after all goroutines finish.
 		for j := 0; j < rpcCountPerConn; j++ {
 			c := testpb.NewBenchmarkServiceClient(conn)
 			stream, err := c.StreamingCall(context.Background())
diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go
index 6a2fab2b..94dc5b20 100644
--- a/benchmark/worker/benchmark_server.go
+++ b/benchmark/worker/benchmark_server.go
@@ -140,7 +140,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
 			return nil, grpc.Errorf(codes.InvalidArgument, "unknow payload config: %v", config.PayloadConfig)
 		}
 	} else {
-		// Start protobuf server is payload config is nil.
+		// Start protobuf server if payload config is nil.
 		addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
 			Addr: ":" + strconv.Itoa(port),
 			Type: "protobuf",
diff --git a/benchmark/worker/main.go b/benchmark/worker/main.go
index c7bad9b0..236ca8df 100644
--- a/benchmark/worker/main.go
+++ b/benchmark/worker/main.go
@@ -229,6 +229,8 @@ func main() {
 
 	go func() {
 		<-stop
+		// Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client.
+		// TODO revise this once server graceful stop is supported in gRPC.
 		time.Sleep(time.Second)
 		s.Stop()
 	}()
 

From ac8e82e9d0c22e129f60aa7fbf3ec751aaff7344 Mon Sep 17 00:00:00 2001
From: Menghan Li
Date: Wed, 4 May 2016 11:55:36 -0700
Subject: [PATCH 42/43] Explicitly set default cpu number to 1

---
 benchmark/worker/benchmark_client.go | 2 ++
 benchmark/worker/benchmark_server.go | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go
index b73d7624..e592a7c0 100644
--- a/benchmark/worker/benchmark_client.go
+++ b/benchmark/worker/benchmark_client.go
@@ -87,6 +87,8 @@ func setupClientEnv(config *testpb.ClientConfig) {
 	// TODO: change default number of cores used if 1 is not fastest.
 	if config.CoreLimit > 1 {
 		runtime.GOMAXPROCS(int(config.CoreLimit))
+	} else {
+		runtime.GOMAXPROCS(1)
 	}
 }
 
diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go
index 94dc5b20..5d1c1a43 100644
--- a/benchmark/worker/benchmark_server.go
+++ b/benchmark/worker/benchmark_server.go
@@ -85,8 +85,8 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
 	numOfCores := 1
 	if config.CoreLimit > 1 {
 		numOfCores = int(config.CoreLimit)
-		runtime.GOMAXPROCS(numOfCores)
 	}
+	runtime.GOMAXPROCS(numOfCores)
 
 	var opts []grpc.ServerOption
 

From a04636c23633e640e2c9ecc0704415e505f32323 Mon Sep 17 00:00:00 2001
From: Menghan Li
Date: Wed, 4 May 2016 13:40:26 -0700
Subject: [PATCH 43/43] Minor fixes.

Comments
Remove nanosecond, add elapse directly
---
 benchmark/worker/benchmark_client.go | 13 ++++++-------
 benchmark/worker/benchmark_server.go |  1 +
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go
index e592a7c0..61d17d7f 100644
--- a/benchmark/worker/benchmark_client.go
+++ b/benchmark/worker/benchmark_client.go
@@ -84,7 +84,7 @@ func printClientConfig(config *testpb.ClientConfig) {
 func setupClientEnv(config *testpb.ClientConfig) {
 	// Use one cpu core by default.
-	// TODO: change default number of cores used if 1 is not fastest.
+	// TODO: Revisit this for the optimal default setup.
 	if config.CoreLimit > 1 {
 		runtime.GOMAXPROCS(int(config.CoreLimit))
 	} else {
 		runtime.GOMAXPROCS(1)
@@ -199,7 +199,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error)
 		return nil, err
 	}
 
-	bc := benchmarkClient{
+	bc := &benchmarkClient{
 		histogram: stats.NewHistogram(stats.HistogramOptions{
 			NumBuckets:   int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1,
 			GrowthFactor: config.HistogramParams.Resolution,
@@ -211,14 +211,13 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error)
 		closeConns:    closeConns,
 	}
 
-	err = performRPCs(config, conns, &bc)
-	if err != nil {
+	if err = performRPCs(config, conns, bc); err != nil {
 		// Close all connections if performRPCs failed.
 		closeConns()
 		return nil, err
 	}
 
-	return &bc, nil
+	return bc, nil
 }
 
 func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) {
@@ -244,7 +243,7 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe
 				}
 				elapse := time.Since(start)
 				bc.mu.Lock()
-				bc.histogram.Add(int64(elapse / time.Nanosecond))
+				bc.histogram.Add(int64(elapse))
 				bc.mu.Unlock()
 				select {
 				case <-bc.stop:
@@ -296,7 +295,7 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou
 				}
 				elapse := time.Since(start)
 				bc.mu.Lock()
-				bc.histogram.Add(int64(elapse / time.Nanosecond))
+				bc.histogram.Add(int64(elapse))
 				bc.mu.Unlock()
 				select {
 				case <-bc.stop:
diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go
index 5d1c1a43..7ccb069c 100644
--- a/benchmark/worker/benchmark_server.go
+++ b/benchmark/worker/benchmark_server.go
@@ -82,6 +82,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
 	printServerConfig(config)
 
 	// Use one cpu core by default.
+	// TODO: Revisit this for the optimal default setup.
 	numOfCores := 1
 	if config.CoreLimit > 1 {
 		numOfCores = int(config.CoreLimit)