internal: clean up all usages of grpclog, and replace with component (#3719)
This commit is contained in:
@ -100,6 +100,8 @@ var (
|
||||
useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")
|
||||
enableKeepalive = flag.Bool("enable_keepalive", false, "Enable client keepalive. \n"+
|
||||
"Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.")
|
||||
|
||||
logger = grpclog.Component("benchmark")
|
||||
)
|
||||
|
||||
const (
|
||||
@ -314,7 +316,7 @@ func makeClient(bf stats.Features) (testpb.BenchmarkServiceClient, func()) {
|
||||
var err error
|
||||
lis, err = net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
logger.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, address string) (net.Conn, error) {
|
||||
return nw.ContextDialer((&net.Dialer{}).DialContext)(ctx, "tcp", lis.Addr().String())
|
||||
@ -351,7 +353,7 @@ func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) {
|
||||
for i := 0; i < bf.MaxConcurrentCalls; i++ {
|
||||
stream, err := tc.StreamingCall(context.Background())
|
||||
if err != nil {
|
||||
grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
|
||||
logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
|
||||
}
|
||||
streams[i] = stream
|
||||
}
|
||||
@ -377,7 +379,7 @@ func makeFuncUnconstrainedStreamPreloaded(bf stats.Features) (rpcSendFunc, rpcRe
|
||||
preparedMsg[i] = &grpc.PreparedMsg{}
|
||||
err := preparedMsg[i].Encode(stream, req)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[i], req, stream, err)
|
||||
logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[i], req, stream, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -405,7 +407,7 @@ func setupUnconstrainedStream(bf stats.Features) ([]testpb.BenchmarkService_Stre
|
||||
for i := 0; i < bf.MaxConcurrentCalls; i++ {
|
||||
stream, err := tc.UnconstrainedStreamingCall(context.Background())
|
||||
if err != nil {
|
||||
grpclog.Fatalf("%v.UnconstrainedStreamingCall(_) = _, %v", tc, err)
|
||||
logger.Fatalf("%v.UnconstrainedStreamingCall(_) = _, %v", tc, err)
|
||||
}
|
||||
streams[i] = stream
|
||||
}
|
||||
@ -424,13 +426,13 @@ func setupUnconstrainedStream(bf stats.Features) ([]testpb.BenchmarkService_Stre
|
||||
// request and response sizes.
|
||||
func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) {
|
||||
if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil {
|
||||
grpclog.Fatalf("DoUnaryCall failed: %v", err)
|
||||
logger.Fatalf("DoUnaryCall failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) {
|
||||
if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil {
|
||||
grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
|
||||
logger.Fatalf("DoStreamingRoundTrip failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -35,18 +35,20 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("benchmark")
|
||||
|
||||
// Allows reuse of the same testpb.Payload object.
|
||||
func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) {
|
||||
if size < 0 {
|
||||
grpclog.Fatalf("Requested a response with invalid length %d", size)
|
||||
logger.Fatalf("Requested a response with invalid length %d", size)
|
||||
}
|
||||
body := make([]byte, size)
|
||||
switch t {
|
||||
case testpb.PayloadType_COMPRESSABLE:
|
||||
case testpb.PayloadType_UNCOMPRESSABLE:
|
||||
grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
|
||||
logger.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
|
||||
default:
|
||||
grpclog.Fatalf("Unsupported payload type: %d", t)
|
||||
logger.Fatalf("Unsupported payload type: %d", t)
|
||||
}
|
||||
p.Type = t
|
||||
p.Body = body
|
||||
@ -213,11 +215,11 @@ func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() {
|
||||
case "bytebuf":
|
||||
respSize, ok := info.Metadata.(int32)
|
||||
if !ok {
|
||||
grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type)
|
||||
logger.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type)
|
||||
}
|
||||
testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize})
|
||||
default:
|
||||
grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type)
|
||||
logger.Fatalf("failed to StartServer, unknown Type: %v", info.Type)
|
||||
}
|
||||
go s.Serve(info.Listener)
|
||||
return func() {
|
||||
@ -288,7 +290,7 @@ func NewClientConnWithContext(ctx context.Context, addr string, opts ...grpc.Dia
|
||||
opts = append(opts, grpc.WithReadBufferSize(128*1024))
|
||||
conn, err := grpc.DialContext(ctx, addr, opts...)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err)
|
||||
logger.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err)
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
@ -76,12 +76,14 @@ var (
|
||||
}
|
||||
mu sync.Mutex
|
||||
hists []*stats.Histogram
|
||||
|
||||
logger = grpclog.Component("benchmark")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *testName == "" {
|
||||
grpclog.Fatalf("test_name not set")
|
||||
logger.Fatalf("test_name not set")
|
||||
}
|
||||
req := &testpb.SimpleRequest{
|
||||
ResponseType: testpb.PayloadType_COMPRESSABLE,
|
||||
@ -98,7 +100,7 @@ func main() {
|
||||
endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second)
|
||||
cf, err := os.Create("/tmp/" + *testName + ".cpu")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Error creating file: %v", err)
|
||||
logger.Fatalf("Error creating file: %v", err)
|
||||
}
|
||||
defer cf.Close()
|
||||
pprof.StartCPUProfile(cf)
|
||||
@ -111,12 +113,12 @@ func main() {
|
||||
pprof.StopCPUProfile()
|
||||
mf, err := os.Create("/tmp/" + *testName + ".mem")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Error creating file: %v", err)
|
||||
logger.Fatalf("Error creating file: %v", err)
|
||||
}
|
||||
defer mf.Close()
|
||||
runtime.GC() // materialize all statistics
|
||||
if err := pprof.WriteHeapProfile(mf); err != nil {
|
||||
grpclog.Fatalf("Error writing memory profile: %v", err)
|
||||
logger.Fatalf("Error writing memory profile: %v", err)
|
||||
}
|
||||
hist := stats.NewHistogram(hopts)
|
||||
for _, h := range hists {
|
||||
@ -166,20 +168,20 @@ func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() {
|
||||
if *rpcType == "unary" {
|
||||
return func() {
|
||||
if _, err := client.UnaryCall(context.Background(), req); err != nil {
|
||||
grpclog.Fatalf("RPC failed: %v", err)
|
||||
logger.Fatalf("RPC failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
stream, err := client.StreamingCall(context.Background())
|
||||
if err != nil {
|
||||
grpclog.Fatalf("RPC failed: %v", err)
|
||||
logger.Fatalf("RPC failed: %v", err)
|
||||
}
|
||||
return func() {
|
||||
if err := stream.Send(req); err != nil {
|
||||
grpclog.Fatalf("Streaming RPC failed to send: %v", err)
|
||||
logger.Fatalf("Streaming RPC failed to send: %v", err)
|
||||
}
|
||||
if _, err := stream.Recv(); err != nil {
|
||||
grpclog.Fatalf("Streaming RPC failed to read: %v", err)
|
||||
logger.Fatalf("Streaming RPC failed to read: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -46,22 +46,24 @@ import (
|
||||
var (
|
||||
port = flag.String("port", "50051", "Localhost port to listen on.")
|
||||
testName = flag.String("test_name", "", "Name of the test used for creating profiles.")
|
||||
|
||||
logger = grpclog.Component("benchmark")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *testName == "" {
|
||||
grpclog.Fatalf("test name not set")
|
||||
logger.Fatalf("test name not set")
|
||||
}
|
||||
lis, err := net.Listen("tcp", ":"+*port)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
logger.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
defer lis.Close()
|
||||
|
||||
cf, err := os.Create("/tmp/" + *testName + ".cpu")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create file: %v", err)
|
||||
logger.Fatalf("Failed to create file: %v", err)
|
||||
}
|
||||
defer cf.Close()
|
||||
pprof.StartCPUProfile(cf)
|
||||
@ -77,12 +79,12 @@ func main() {
|
||||
pprof.StopCPUProfile()
|
||||
mf, err := os.Create("/tmp/" + *testName + ".mem")
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create file: %v", err)
|
||||
logger.Fatalf("Failed to create file: %v", err)
|
||||
}
|
||||
defer mf.Close()
|
||||
runtime.GC() // materialize all statistics
|
||||
if err := pprof.WriteHeapProfile(mf); err != nil {
|
||||
grpclog.Fatalf("Failed to write memory profile: %v", err)
|
||||
logger.Fatalf("Failed to write memory profile: %v", err)
|
||||
}
|
||||
fmt.Println("Server CPU utilization:", cpu)
|
||||
fmt.Println("Server CPU profile:", cf.Name())
|
||||
|
@ -32,7 +32,6 @@ import (
|
||||
"google.golang.org/grpc/benchmark/stats"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/testdata"
|
||||
@ -81,20 +80,20 @@ func printClientConfig(config *testpb.ClientConfig) {
|
||||
// will always create sync client
|
||||
// - async client threads.
|
||||
// - core list
|
||||
grpclog.Infof(" * client type: %v (ignored, always creates sync client)", config.ClientType)
|
||||
grpclog.Infof(" * async client threads: %v (ignored)", config.AsyncClientThreads)
|
||||
logger.Infof(" * client type: %v (ignored, always creates sync client)", config.ClientType)
|
||||
logger.Infof(" * async client threads: %v (ignored)", config.AsyncClientThreads)
|
||||
// TODO: use cores specified by CoreList when setting list of cores is supported in go.
|
||||
grpclog.Infof(" * core list: %v (ignored)", config.CoreList)
|
||||
logger.Infof(" * core list: %v (ignored)", config.CoreList)
|
||||
|
||||
grpclog.Infof(" - security params: %v", config.SecurityParams)
|
||||
grpclog.Infof(" - core limit: %v", config.CoreLimit)
|
||||
grpclog.Infof(" - payload config: %v", config.PayloadConfig)
|
||||
grpclog.Infof(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel)
|
||||
grpclog.Infof(" - channel number: %v", config.ClientChannels)
|
||||
grpclog.Infof(" - load params: %v", config.LoadParams)
|
||||
grpclog.Infof(" - rpc type: %v", config.RpcType)
|
||||
grpclog.Infof(" - histogram params: %v", config.HistogramParams)
|
||||
grpclog.Infof(" - server targets: %v", config.ServerTargets)
|
||||
logger.Infof(" - security params: %v", config.SecurityParams)
|
||||
logger.Infof(" - core limit: %v", config.CoreLimit)
|
||||
logger.Infof(" - payload config: %v", config.PayloadConfig)
|
||||
logger.Infof(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel)
|
||||
logger.Infof(" - channel number: %v", config.ClientChannels)
|
||||
logger.Infof(" - load params: %v", config.LoadParams)
|
||||
logger.Infof(" - rpc type: %v", config.RpcType)
|
||||
logger.Infof(" - histogram params: %v", config.HistogramParams)
|
||||
logger.Infof(" - server targets: %v", config.ServerTargets)
|
||||
}
|
||||
|
||||
func setupClientEnv(config *testpb.ClientConfig) {
|
||||
@ -298,7 +297,7 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou
|
||||
c := testpb.NewBenchmarkServiceClient(conn)
|
||||
stream, err := c.StreamingCall(context.Background())
|
||||
if err != nil {
|
||||
grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
|
||||
logger.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
|
||||
}
|
||||
// Create histogram for each goroutine.
|
||||
idx := ic*rpcCountPerConn + j
|
||||
|
@ -33,7 +33,6 @@ import (
|
||||
testpb "google.golang.org/grpc/benchmark/grpc_testing"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/grpc/testdata"
|
||||
@ -59,15 +58,15 @@ func printServerConfig(config *testpb.ServerConfig) {
|
||||
// will always start sync server
|
||||
// - async server threads
|
||||
// - core list
|
||||
grpclog.Infof(" * server type: %v (ignored, always starts sync server)", config.ServerType)
|
||||
grpclog.Infof(" * async server threads: %v (ignored)", config.AsyncServerThreads)
|
||||
logger.Infof(" * server type: %v (ignored, always starts sync server)", config.ServerType)
|
||||
logger.Infof(" * async server threads: %v (ignored)", config.AsyncServerThreads)
|
||||
// TODO: use cores specified by CoreList when setting list of cores is supported in go.
|
||||
grpclog.Infof(" * core list: %v (ignored)", config.CoreList)
|
||||
logger.Infof(" * core list: %v (ignored)", config.CoreList)
|
||||
|
||||
grpclog.Infof(" - security params: %v", config.SecurityParams)
|
||||
grpclog.Infof(" - core limit: %v", config.CoreLimit)
|
||||
grpclog.Infof(" - port: %v", config.Port)
|
||||
grpclog.Infof(" - payload config: %v", config.PayloadConfig)
|
||||
logger.Infof(" - security params: %v", config.SecurityParams)
|
||||
logger.Infof(" - core limit: %v", config.CoreLimit)
|
||||
logger.Infof(" - port: %v", config.Port)
|
||||
logger.Infof(" - payload config: %v", config.PayloadConfig)
|
||||
}
|
||||
|
||||
func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) {
|
||||
@ -102,7 +101,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
|
||||
}
|
||||
creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("failed to generate credentials %v", err)
|
||||
logger.Fatalf("failed to generate credentials %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.Creds(creds))
|
||||
}
|
||||
@ -114,7 +113,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
|
||||
}
|
||||
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen: %v", err)
|
||||
logger.Fatalf("Failed to listen: %v", err)
|
||||
}
|
||||
addr := lis.Addr().String()
|
||||
|
||||
@ -147,11 +146,11 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma
|
||||
}, opts...)
|
||||
}
|
||||
|
||||
grpclog.Infof("benchmark server listening at %v", addr)
|
||||
logger.Infof("benchmark server listening at %v", addr)
|
||||
addrSplitted := strings.Split(addr, ":")
|
||||
p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1])
|
||||
if err != nil {
|
||||
grpclog.Fatalf("failed to get port number from server address: %v", err)
|
||||
logger.Fatalf("failed to get port number from server address: %v", err)
|
||||
}
|
||||
|
||||
return &benchmarkServer{
|
||||
|
@ -44,6 +44,8 @@ var (
|
||||
serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message")
|
||||
pprofPort = flag.Int("pprof_port", -1, "Port for pprof debug server to listen on. Pprof server doesn't start if unset")
|
||||
blockProfRate = flag.Int("block_prof_rate", 0, "fraction of goroutine blocking events to report in blocking profile")
|
||||
|
||||
logger = grpclog.Component("benchmark")
|
||||
)
|
||||
|
||||
type byteBufCodec struct {
|
||||
@ -82,7 +84,7 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er
|
||||
var bs *benchmarkServer
|
||||
defer func() {
|
||||
// Close benchmark server when stream ends.
|
||||
grpclog.Infof("closing benchmark server")
|
||||
logger.Infof("closing benchmark server")
|
||||
if bs != nil {
|
||||
bs.closeFunc()
|
||||
}
|
||||
@ -99,9 +101,9 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er
|
||||
var out *testpb.ServerStatus
|
||||
switch argtype := in.Argtype.(type) {
|
||||
case *testpb.ServerArgs_Setup:
|
||||
grpclog.Infof("server setup received:")
|
||||
logger.Infof("server setup received:")
|
||||
if bs != nil {
|
||||
grpclog.Infof("server setup received when server already exists, closing the existing server")
|
||||
logger.Infof("server setup received when server already exists, closing the existing server")
|
||||
bs.closeFunc()
|
||||
}
|
||||
bs, err = startBenchmarkServer(argtype.Setup, s.serverPort)
|
||||
@ -115,8 +117,8 @@ func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) er
|
||||
}
|
||||
|
||||
case *testpb.ServerArgs_Mark:
|
||||
grpclog.Infof("server mark received:")
|
||||
grpclog.Infof(" - %v", argtype)
|
||||
logger.Infof("server mark received:")
|
||||
logger.Infof(" - %v", argtype)
|
||||
if bs == nil {
|
||||
return status.Error(codes.InvalidArgument, "server does not exist when mark received")
|
||||
}
|
||||
@ -137,7 +139,7 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
|
||||
var bc *benchmarkClient
|
||||
defer func() {
|
||||
// Shut down benchmark client when stream ends.
|
||||
grpclog.Infof("shuting down benchmark client")
|
||||
logger.Infof("shuting down benchmark client")
|
||||
if bc != nil {
|
||||
bc.shutdown()
|
||||
}
|
||||
@ -154,9 +156,9 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
|
||||
var out *testpb.ClientStatus
|
||||
switch t := in.Argtype.(type) {
|
||||
case *testpb.ClientArgs_Setup:
|
||||
grpclog.Infof("client setup received:")
|
||||
logger.Infof("client setup received:")
|
||||
if bc != nil {
|
||||
grpclog.Infof("client setup received when client already exists, shuting down the existing client")
|
||||
logger.Infof("client setup received when client already exists, shuting down the existing client")
|
||||
bc.shutdown()
|
||||
}
|
||||
bc, err = startBenchmarkClient(t.Setup)
|
||||
@ -168,8 +170,8 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
|
||||
}
|
||||
|
||||
case *testpb.ClientArgs_Mark:
|
||||
grpclog.Infof("client mark received:")
|
||||
grpclog.Infof(" - %v", t)
|
||||
logger.Infof("client mark received:")
|
||||
logger.Infof(" - %v", t)
|
||||
if bc == nil {
|
||||
return status.Error(codes.InvalidArgument, "client does not exist when mark received")
|
||||
}
|
||||
@ -185,12 +187,12 @@ func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) er
|
||||
}
|
||||
|
||||
func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) {
|
||||
grpclog.Infof("core count: %v", runtime.NumCPU())
|
||||
logger.Infof("core count: %v", runtime.NumCPU())
|
||||
return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil
|
||||
}
|
||||
|
||||
func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) {
|
||||
grpclog.Infof("quitting worker")
|
||||
logger.Infof("quitting worker")
|
||||
s.stop <- true
|
||||
return &testpb.Void{}, nil
|
||||
}
|
||||
@ -201,9 +203,9 @@ func main() {
|
||||
flag.Parse()
|
||||
lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort))
|
||||
if err != nil {
|
||||
grpclog.Fatalf("failed to listen: %v", err)
|
||||
logger.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
grpclog.Infof("worker listening at port %v", *driverPort)
|
||||
logger.Infof("worker listening at port %v", *driverPort)
|
||||
|
||||
s := grpc.NewServer()
|
||||
stop := make(chan bool)
|
||||
@ -224,8 +226,8 @@ func main() {
|
||||
|
||||
if *pprofPort >= 0 {
|
||||
go func() {
|
||||
grpclog.Infoln("Starting pprof server on port " + strconv.Itoa(*pprofPort))
|
||||
grpclog.Infoln(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil))
|
||||
logger.Infoln("Starting pprof server on port " + strconv.Itoa(*pprofPort))
|
||||
logger.Infoln(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil))
|
||||
}()
|
||||
}
|
||||
|
||||
|
@ -27,6 +27,8 @@ import (
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
// State indicates the state of connectivity.
|
||||
// It can be the state of a ClientConn or SubConn.
|
||||
type State int
|
||||
@ -44,7 +46,7 @@ func (s State) String() string {
|
||||
case Shutdown:
|
||||
return "SHUTDOWN"
|
||||
default:
|
||||
grpclog.Errorf("unknown connectivity state: %d", s)
|
||||
logger.Errorf("unknown connectivity state: %d", s)
|
||||
return "Invalid-State"
|
||||
}
|
||||
}
|
||||
|
@ -33,6 +33,8 @@ import (
|
||||
|
||||
const tokenRequestTimeout = 30 * time.Second
|
||||
|
||||
var logger = grpclog.Component("credentials")
|
||||
|
||||
// NewDefaultCredentials returns a credentials bundle that is configured to work
|
||||
// with google services.
|
||||
//
|
||||
@ -44,14 +46,14 @@ func NewDefaultCredentials() credentials.Bundle {
|
||||
defer cancel()
|
||||
perRPCCreds, err := oauth.NewApplicationDefault(ctx)
|
||||
if err != nil {
|
||||
grpclog.Warningf("google default creds: failed to create application oauth: %v", err)
|
||||
logger.Warningf("google default creds: failed to create application oauth: %v", err)
|
||||
}
|
||||
return perRPCCreds
|
||||
},
|
||||
}
|
||||
bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
|
||||
if err != nil {
|
||||
grpclog.Warningf("google default creds: failed to create new creds: %v", err)
|
||||
logger.Warningf("google default creds: failed to create new creds: %v", err)
|
||||
}
|
||||
return bundle
|
||||
}
|
||||
@ -69,7 +71,7 @@ func NewComputeEngineCredentials() credentials.Bundle {
|
||||
}
|
||||
bundle, err := c.NewWithMode(internal.CredsBundleModeFallback)
|
||||
if err != nil {
|
||||
grpclog.Warningf("compute engine creds: failed to create new creds: %v", err)
|
||||
logger.Warningf("compute engine creds: failed to create new creds: %v", err)
|
||||
}
|
||||
return bundle
|
||||
}
|
||||
|
@ -62,6 +62,7 @@ var (
|
||||
makeHTTPDoer = makeHTTPClient
|
||||
readSubjectTokenFrom = ioutil.ReadFile
|
||||
readActorTokenFrom = ioutil.ReadFile
|
||||
logger = grpclog.Component("credentials")
|
||||
)
|
||||
|
||||
// Options configures the parameters used for an STS based token exchange.
|
||||
@ -311,7 +312,7 @@ func sendRequest(client httpDoer, req *http.Request) ([]byte, error) {
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
return body, nil
|
||||
}
|
||||
grpclog.Warningf("http status %d, body: %s", resp.StatusCode, string(body))
|
||||
logger.Warningf("http status %d, body: %s", resp.StatusCode, string(body))
|
||||
return nil, fmt.Errorf("http status %d, body: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
|
@ -100,7 +100,7 @@ func (c *componentData) Fatalln(args ...interface{}) {
|
||||
}
|
||||
|
||||
func (c *componentData) V(l int) bool {
|
||||
return grpclog.Logger.V(l)
|
||||
return V(l)
|
||||
}
|
||||
|
||||
// Component creates a new component and returns it for logging. If a component
|
||||
|
23
health/logging.go
Normal file
23
health/logging.go
Normal file
@ -0,0 +1,23 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package health
|
||||
|
||||
import "google.golang.org/grpc/grpclog"
|
||||
|
||||
var logger = grpclog.Component("health_service")
|
@ -25,7 +25,6 @@ import (
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
|
||||
healthpb "google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/status"
|
||||
@ -114,7 +113,7 @@ func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthC
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.shutdown {
|
||||
grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
|
||||
logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -40,6 +40,8 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var grpclogLogger = grpclog.Component("binarylog")
|
||||
|
||||
type s struct {
|
||||
grpctest.Tester
|
||||
}
|
||||
@ -539,7 +541,7 @@ func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64
|
||||
}
|
||||
data, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
grpclog.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
|
||||
grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
|
||||
}
|
||||
return &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
@ -563,7 +565,7 @@ func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64
|
||||
}
|
||||
data, err := proto.Marshal(msg)
|
||||
if err != nil {
|
||||
grpclog.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
|
||||
grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err)
|
||||
}
|
||||
return &pb.GrpcLogEntry{
|
||||
Timestamp: nil,
|
||||
@ -612,7 +614,7 @@ func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64
|
||||
}
|
||||
st, ok := status.FromError(stErr)
|
||||
if !ok {
|
||||
grpclog.Info("binarylogging: error in trailer is not a status error")
|
||||
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
|
||||
}
|
||||
stProto := st.Proto()
|
||||
var (
|
||||
@ -622,7 +624,7 @@ func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64
|
||||
if stProto != nil && len(stProto.Details) != 0 {
|
||||
detailsBytes, err = proto.Marshal(stProto)
|
||||
if err != nil {
|
||||
grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||
}
|
||||
}
|
||||
return &pb.GrpcLogEntry{
|
||||
|
@ -28,6 +28,8 @@ import (
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("credentials")
|
||||
|
||||
// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
|
||||
// is invalid, return nil with warning.
|
||||
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
|
||||
@ -41,20 +43,20 @@ func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
|
||||
}
|
||||
// From this point, we assume the uri is intended for a SPIFFE ID.
|
||||
if len(uri.String()) > 2048 {
|
||||
grpclog.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
|
||||
logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) == 0 || len(uri.RawPath) == 0 || len(uri.Path) == 0 {
|
||||
grpclog.Warning("invalid SPIFFE ID: domain or workload ID is empty")
|
||||
logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
|
||||
return nil
|
||||
}
|
||||
if len(uri.Host) > 255 {
|
||||
grpclog.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
|
||||
logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
|
||||
return nil
|
||||
}
|
||||
// A valid SPIFFE certificate can only have exactly one URI SAN field.
|
||||
if len(state.PeerCertificates[0].URIs) > 1 {
|
||||
grpclog.Warning("invalid SPIFFE ID: multiple URI SANs")
|
||||
logger.Warning("invalid SPIFFE ID: multiple URI SANs")
|
||||
return nil
|
||||
}
|
||||
spiffeID = uri
|
||||
|
@ -28,6 +28,8 @@ import (
|
||||
externalserviceconfig "google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("core")
|
||||
|
||||
// BalancerConfig is the balancer config part that service config's
|
||||
// loadBalancingConfig fields can be unmarshalled to. It's a json unmarshaller.
|
||||
//
|
||||
@ -69,7 +71,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
parser, ok := builder.(balancer.ConfigParser)
|
||||
if !ok {
|
||||
if string(jsonCfg) != "{}" {
|
||||
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
|
||||
logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
|
||||
}
|
||||
// Stop at this, though the builder doesn't support parsing config.
|
||||
return nil
|
||||
|
@ -24,7 +24,7 @@ import (
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
grpc "google.golang.org/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/alts"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
testpb "google.golang.org/grpc/interop/grpc_testing"
|
||||
@ -33,6 +33,8 @@ import (
|
||||
var (
|
||||
hsAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address")
|
||||
serverAddr = flag.String("server_address", ":8080", "The port on which the server is listening")
|
||||
|
||||
logger = grpclog.Component("interop")
|
||||
)
|
||||
|
||||
func main() {
|
||||
@ -46,7 +48,7 @@ func main() {
|
||||
// Block until the server is ready.
|
||||
conn, err := grpc.Dial(*serverAddr, grpc.WithTransportCredentials(altsTC), grpc.WithBlock())
|
||||
if err != nil {
|
||||
grpclog.Fatalf("gRPC Client: failed to dial the server at %v: %v", *serverAddr, err)
|
||||
logger.Fatalf("gRPC Client: failed to dial the server at %v: %v", *serverAddr, err)
|
||||
}
|
||||
defer conn.Close()
|
||||
grpcClient := testpb.NewTestServiceClient(conn)
|
||||
@ -55,9 +57,9 @@ func main() {
|
||||
ctx := context.Background()
|
||||
request := &testpb.Empty{}
|
||||
if _, err := grpcClient.EmptyCall(ctx, request); err != nil {
|
||||
grpclog.Fatalf("grpc Client: EmptyCall(_, %v) failed: %v", request, err)
|
||||
logger.Fatalf("grpc Client: EmptyCall(_, %v) failed: %v", request, err)
|
||||
}
|
||||
grpclog.Info("grpc Client: empty call succeeded")
|
||||
logger.Info("grpc Client: empty call succeeded")
|
||||
|
||||
// This sleep prevents the connection from being abruptly disconnected
|
||||
// when running this binary (along with grpc_server) on GCP dev cluster.
|
||||
|
@ -25,7 +25,7 @@ import (
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
grpc "google.golang.org/grpc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/alts"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/interop"
|
||||
@ -40,6 +40,8 @@ const (
|
||||
var (
|
||||
hsAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address")
|
||||
serverAddr = flag.String("server_address", ":8080", "The address on which the server is listening. Only two types of addresses are supported, 'host:port' and 'unix:/path'.")
|
||||
|
||||
logger = grpclog.Component("interop")
|
||||
)
|
||||
|
||||
func main() {
|
||||
@ -54,7 +56,7 @@ func main() {
|
||||
}
|
||||
lis, err := net.Listen(network, address)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("gRPC Server: failed to start the server at %v: %v", address, err)
|
||||
logger.Fatalf("gRPC Server: failed to start the server at %v: %v", address, err)
|
||||
}
|
||||
opts := alts.DefaultServerOptions()
|
||||
if *hsAddr != "" {
|
||||
@ -74,12 +76,12 @@ func authz(ctx context.Context, info *tap.Info) (context.Context, error) {
|
||||
return nil, err
|
||||
}
|
||||
// Access all alts.AuthInfo data:
|
||||
grpclog.Infof("authInfo.ApplicationProtocol() = %v", authInfo.ApplicationProtocol())
|
||||
grpclog.Infof("authInfo.RecordProtocol() = %v", authInfo.RecordProtocol())
|
||||
grpclog.Infof("authInfo.SecurityLevel() = %v", authInfo.SecurityLevel())
|
||||
grpclog.Infof("authInfo.PeerServiceAccount() = %v", authInfo.PeerServiceAccount())
|
||||
grpclog.Infof("authInfo.LocalServiceAccount() = %v", authInfo.LocalServiceAccount())
|
||||
grpclog.Infof("authInfo.PeerRPCVersions() = %v", authInfo.PeerRPCVersions())
|
||||
grpclog.Infof("info.FullMethodName = %v", info.FullMethodName)
|
||||
logger.Infof("authInfo.ApplicationProtocol() = %v", authInfo.ApplicationProtocol())
|
||||
logger.Infof("authInfo.RecordProtocol() = %v", authInfo.RecordProtocol())
|
||||
logger.Infof("authInfo.SecurityLevel() = %v", authInfo.SecurityLevel())
|
||||
logger.Infof("authInfo.PeerServiceAccount() = %v", authInfo.PeerServiceAccount())
|
||||
logger.Infof("authInfo.LocalServiceAccount() = %v", authInfo.LocalServiceAccount())
|
||||
logger.Infof("authInfo.PeerRPCVersions() = %v", authInfo.PeerRPCVersions())
|
||||
logger.Infof("info.FullMethodName = %v", info.FullMethodName)
|
||||
return ctx, nil
|
||||
}
|
||||
|
@ -79,6 +79,8 @@ var (
|
||||
unimplemented_method: client attempts to call unimplemented method;
|
||||
unimplemented_service: client attempts to call unimplemented service;
|
||||
pick_first_unary: all requests are sent to one server despite multiple servers are resolved.`)
|
||||
|
||||
logger = grpclog.Component("interop")
|
||||
)
|
||||
|
||||
type credsMode uint8
|
||||
@ -102,12 +104,12 @@ func main() {
|
||||
case computeEngineCredsName:
|
||||
useCEC = true
|
||||
default:
|
||||
grpclog.Fatalf("If set, custom_credentials_type can only be set to one of %v or %v",
|
||||
logger.Fatalf("If set, custom_credentials_type can only be set to one of %v or %v",
|
||||
googleDefaultCredsName, computeEngineCredsName)
|
||||
}
|
||||
}
|
||||
if (*useTLS && *useALTS) || (*useTLS && useGDC) || (*useALTS && useGDC) || (*useTLS && useCEC) || (*useALTS && useCEC) {
|
||||
grpclog.Fatalf("only one of TLS, ALTS, google default creds, or compute engine creds can be used")
|
||||
logger.Fatalf("only one of TLS, ALTS, google default creds, or compute engine creds can be used")
|
||||
}
|
||||
|
||||
var credsChosen credsMode
|
||||
@ -139,7 +141,7 @@ func main() {
|
||||
}
|
||||
creds, err = credentials.NewClientTLSFromFile(*caFile, sn)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create TLS credentials %v", err)
|
||||
logger.Fatalf("Failed to create TLS credentials %v", err)
|
||||
}
|
||||
} else {
|
||||
creds = credentials.NewClientTLSFromCert(nil, sn)
|
||||
@ -159,7 +161,7 @@ func main() {
|
||||
case credsNone:
|
||||
opts = append(opts, grpc.WithInsecure())
|
||||
default:
|
||||
grpclog.Fatal("Invalid creds")
|
||||
logger.Fatal("Invalid creds")
|
||||
}
|
||||
if credsChosen == credsTLS {
|
||||
if *testCase == "compute_engine_creds" {
|
||||
@ -167,13 +169,13 @@ func main() {
|
||||
} else if *testCase == "service_account_creds" {
|
||||
jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create JWT credentials: %v", err)
|
||||
logger.Fatalf("Failed to create JWT credentials: %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds))
|
||||
} else if *testCase == "jwt_token_creds" {
|
||||
jwtCreds, err := oauth.NewJWTAccessFromFile(*serviceAccountKeyFile)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create JWT credentials: %v", err)
|
||||
logger.Fatalf("Failed to create JWT credentials: %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds))
|
||||
} else if *testCase == "oauth2_auth_token" {
|
||||
@ -183,99 +185,99 @@ func main() {
|
||||
opts = append(opts, grpc.WithBlock())
|
||||
conn, err := grpc.Dial(serverAddr, opts...)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Fail to dial: %v", err)
|
||||
logger.Fatalf("Fail to dial: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
tc := testpb.NewTestServiceClient(conn)
|
||||
switch *testCase {
|
||||
case "empty_unary":
|
||||
interop.DoEmptyUnaryCall(tc)
|
||||
grpclog.Infoln("EmptyUnaryCall done")
|
||||
logger.Infoln("EmptyUnaryCall done")
|
||||
case "large_unary":
|
||||
interop.DoLargeUnaryCall(tc)
|
||||
grpclog.Infoln("LargeUnaryCall done")
|
||||
logger.Infoln("LargeUnaryCall done")
|
||||
case "client_streaming":
|
||||
interop.DoClientStreaming(tc)
|
||||
grpclog.Infoln("ClientStreaming done")
|
||||
logger.Infoln("ClientStreaming done")
|
||||
case "server_streaming":
|
||||
interop.DoServerStreaming(tc)
|
||||
grpclog.Infoln("ServerStreaming done")
|
||||
logger.Infoln("ServerStreaming done")
|
||||
case "ping_pong":
|
||||
interop.DoPingPong(tc)
|
||||
grpclog.Infoln("Pingpong done")
|
||||
logger.Infoln("Pingpong done")
|
||||
case "empty_stream":
|
||||
interop.DoEmptyStream(tc)
|
||||
grpclog.Infoln("Emptystream done")
|
||||
logger.Infoln("Emptystream done")
|
||||
case "timeout_on_sleeping_server":
|
||||
interop.DoTimeoutOnSleepingServer(tc)
|
||||
grpclog.Infoln("TimeoutOnSleepingServer done")
|
||||
logger.Infoln("TimeoutOnSleepingServer done")
|
||||
case "compute_engine_creds":
|
||||
if credsChosen != credsTLS {
|
||||
grpclog.Fatalf("TLS credentials need to be set for compute_engine_creds test case.")
|
||||
logger.Fatalf("TLS credentials need to be set for compute_engine_creds test case.")
|
||||
}
|
||||
interop.DoComputeEngineCreds(tc, *defaultServiceAccount, *oauthScope)
|
||||
grpclog.Infoln("ComputeEngineCreds done")
|
||||
logger.Infoln("ComputeEngineCreds done")
|
||||
case "service_account_creds":
|
||||
if credsChosen != credsTLS {
|
||||
grpclog.Fatalf("TLS credentials need to be set for service_account_creds test case.")
|
||||
logger.Fatalf("TLS credentials need to be set for service_account_creds test case.")
|
||||
}
|
||||
interop.DoServiceAccountCreds(tc, *serviceAccountKeyFile, *oauthScope)
|
||||
grpclog.Infoln("ServiceAccountCreds done")
|
||||
logger.Infoln("ServiceAccountCreds done")
|
||||
case "jwt_token_creds":
|
||||
if credsChosen != credsTLS {
|
||||
grpclog.Fatalf("TLS credentials need to be set for jwt_token_creds test case.")
|
||||
logger.Fatalf("TLS credentials need to be set for jwt_token_creds test case.")
|
||||
}
|
||||
interop.DoJWTTokenCreds(tc, *serviceAccountKeyFile)
|
||||
grpclog.Infoln("JWTtokenCreds done")
|
||||
logger.Infoln("JWTtokenCreds done")
|
||||
case "per_rpc_creds":
|
||||
if credsChosen != credsTLS {
|
||||
grpclog.Fatalf("TLS credentials need to be set for per_rpc_creds test case.")
|
||||
logger.Fatalf("TLS credentials need to be set for per_rpc_creds test case.")
|
||||
}
|
||||
interop.DoPerRPCCreds(tc, *serviceAccountKeyFile, *oauthScope)
|
||||
grpclog.Infoln("PerRPCCreds done")
|
||||
logger.Infoln("PerRPCCreds done")
|
||||
case "oauth2_auth_token":
|
||||
if credsChosen != credsTLS {
|
||||
grpclog.Fatalf("TLS credentials need to be set for oauth2_auth_token test case.")
|
||||
logger.Fatalf("TLS credentials need to be set for oauth2_auth_token test case.")
|
||||
}
|
||||
interop.DoOauth2TokenCreds(tc, *serviceAccountKeyFile, *oauthScope)
|
||||
grpclog.Infoln("Oauth2TokenCreds done")
|
||||
logger.Infoln("Oauth2TokenCreds done")
|
||||
case "google_default_credentials":
|
||||
if credsChosen != credsGoogleDefaultCreds {
|
||||
grpclog.Fatalf("GoogleDefaultCredentials need to be set for google_default_credentials test case.")
|
||||
logger.Fatalf("GoogleDefaultCredentials need to be set for google_default_credentials test case.")
|
||||
}
|
||||
interop.DoGoogleDefaultCredentials(tc, *defaultServiceAccount)
|
||||
grpclog.Infoln("GoogleDefaultCredentials done")
|
||||
logger.Infoln("GoogleDefaultCredentials done")
|
||||
case "compute_engine_channel_credentials":
|
||||
if credsChosen != credsComputeEngineCreds {
|
||||
grpclog.Fatalf("ComputeEngineCreds need to be set for compute_engine_channel_credentials test case.")
|
||||
logger.Fatalf("ComputeEngineCreds need to be set for compute_engine_channel_credentials test case.")
|
||||
}
|
||||
interop.DoComputeEngineChannelCredentials(tc, *defaultServiceAccount)
|
||||
grpclog.Infoln("ComputeEngineChannelCredentials done")
|
||||
logger.Infoln("ComputeEngineChannelCredentials done")
|
||||
case "cancel_after_begin":
|
||||
interop.DoCancelAfterBegin(tc)
|
||||
grpclog.Infoln("CancelAfterBegin done")
|
||||
logger.Infoln("CancelAfterBegin done")
|
||||
case "cancel_after_first_response":
|
||||
interop.DoCancelAfterFirstResponse(tc)
|
||||
grpclog.Infoln("CancelAfterFirstResponse done")
|
||||
logger.Infoln("CancelAfterFirstResponse done")
|
||||
case "status_code_and_message":
|
||||
interop.DoStatusCodeAndMessage(tc)
|
||||
grpclog.Infoln("StatusCodeAndMessage done")
|
||||
logger.Infoln("StatusCodeAndMessage done")
|
||||
case "special_status_message":
|
||||
interop.DoSpecialStatusMessage(tc)
|
||||
grpclog.Infoln("SpecialStatusMessage done")
|
||||
logger.Infoln("SpecialStatusMessage done")
|
||||
case "custom_metadata":
|
||||
interop.DoCustomMetadata(tc)
|
||||
grpclog.Infoln("CustomMetadata done")
|
||||
logger.Infoln("CustomMetadata done")
|
||||
case "unimplemented_method":
|
||||
interop.DoUnimplementedMethod(conn)
|
||||
grpclog.Infoln("UnimplementedMethod done")
|
||||
logger.Infoln("UnimplementedMethod done")
|
||||
case "unimplemented_service":
|
||||
interop.DoUnimplementedService(testpb.NewUnimplementedServiceClient(conn))
|
||||
grpclog.Infoln("UnimplementedService done")
|
||||
logger.Infoln("UnimplementedService done")
|
||||
case "pick_first_unary":
|
||||
interop.DoPickFirstUnary(tc)
|
||||
grpclog.Infoln("PickFirstUnary done")
|
||||
logger.Infoln("PickFirstUnary done")
|
||||
default:
|
||||
grpclog.Fatal("Unsupported test case: ", *testCase)
|
||||
logger.Fatal("Unsupported test case: ", *testCase)
|
||||
}
|
||||
}
|
||||
|
@ -45,6 +45,8 @@ var (
|
||||
useTLS = flag.Bool("use_tls", false, "Listen on TLS credentials, using a test certificate.")
|
||||
shortStream = flag.Bool("short_stream", false, "End the balancer stream immediately after sending the first server list.")
|
||||
serviceName = flag.String("service_name", "UNSET", "Name of the service being load balanced for.")
|
||||
|
||||
logger = grpclog.Component("interop")
|
||||
)
|
||||
|
||||
type loadBalancerServer struct {
|
||||
@ -53,17 +55,17 @@ type loadBalancerServer struct {
|
||||
}
|
||||
|
||||
func (l *loadBalancerServer) BalanceLoad(stream lbpb.LoadBalancer_BalanceLoadServer) error {
|
||||
grpclog.Info("Begin handling new BalancerLoad request.")
|
||||
logger.Info("Begin handling new BalancerLoad request.")
|
||||
var lbReq *lbpb.LoadBalanceRequest
|
||||
var err error
|
||||
if lbReq, err = stream.Recv(); err != nil {
|
||||
grpclog.Errorf("Error receiving LoadBalanceRequest: %v", err)
|
||||
logger.Errorf("Error receiving LoadBalanceRequest: %v", err)
|
||||
return err
|
||||
}
|
||||
grpclog.Info("LoadBalancerRequest received.")
|
||||
logger.Info("LoadBalancerRequest received.")
|
||||
initialReq := lbReq.GetInitialRequest()
|
||||
if initialReq == nil {
|
||||
grpclog.Info("Expected first request to be an InitialRequest. Got: %v", lbReq)
|
||||
logger.Info("Expected first request to be an InitialRequest. Got: %v", lbReq)
|
||||
return status.Error(codes.Unknown, "First request not an InitialRequest")
|
||||
}
|
||||
// gRPC clients targeting foo.bar.com:443 can sometimes include the ":443" suffix in
|
||||
@ -74,12 +76,12 @@ func (l *loadBalancerServer) BalanceLoad(stream lbpb.LoadBalancer_BalanceLoadSer
|
||||
cleanedName = initialReq.Name
|
||||
} else {
|
||||
if requestedNamePortNumber != "443" {
|
||||
grpclog.Info("Bad requested service name port number: %v.", requestedNamePortNumber)
|
||||
logger.Info("Bad requested service name port number: %v.", requestedNamePortNumber)
|
||||
return status.Error(codes.Unknown, "Bad requested service name port number")
|
||||
}
|
||||
}
|
||||
if cleanedName != *serviceName {
|
||||
grpclog.Info("Expected requested service name: %v. Got: %v", *serviceName, initialReq.Name)
|
||||
logger.Info("Expected requested service name: %v. Got: %v", *serviceName, initialReq.Name)
|
||||
return status.Error(codes.NotFound, "Bad requested service name")
|
||||
}
|
||||
if err := stream.Send(&lbpb.LoadBalanceResponse{
|
||||
@ -87,21 +89,21 @@ func (l *loadBalancerServer) BalanceLoad(stream lbpb.LoadBalancer_BalanceLoadSer
|
||||
InitialResponse: &lbpb.InitialLoadBalanceResponse{},
|
||||
},
|
||||
}); err != nil {
|
||||
grpclog.Errorf("Error sending initial LB response: %v", err)
|
||||
logger.Errorf("Error sending initial LB response: %v", err)
|
||||
return status.Error(codes.Unknown, "Error sending initial response")
|
||||
}
|
||||
grpclog.Info("Send LoadBalanceResponse: %v", l.serverListResponse)
|
||||
logger.Info("Send LoadBalanceResponse: %v", l.serverListResponse)
|
||||
if err := stream.Send(l.serverListResponse); err != nil {
|
||||
grpclog.Errorf("Error sending LB response: %v", err)
|
||||
logger.Errorf("Error sending LB response: %v", err)
|
||||
return status.Error(codes.Unknown, "Error sending response")
|
||||
}
|
||||
if *shortStream {
|
||||
return nil
|
||||
}
|
||||
for {
|
||||
grpclog.Info("Send LoadBalanceResponse: %v", l.serverListResponse)
|
||||
logger.Info("Send LoadBalanceResponse: %v", l.serverListResponse)
|
||||
if err := stream.Send(l.serverListResponse); err != nil {
|
||||
grpclog.Errorf("Error sending LB response: %v", err)
|
||||
logger.Errorf("Error sending LB response: %v", err)
|
||||
return status.Error(codes.Unknown, "Error sending response")
|
||||
}
|
||||
time.Sleep(10 * time.Second)
|
||||
@ -116,7 +118,7 @@ func main() {
|
||||
keyFile := testdata.Path("x509/server1_key.pem")
|
||||
creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to generate credentials %v", err)
|
||||
logger.Fatalf("Failed to generate credentials %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.Creds(creds))
|
||||
} else if *useALTS {
|
||||
@ -133,17 +135,17 @@ func main() {
|
||||
for i := range rawBackendAddrs {
|
||||
rawIP, rawPort, err := net.SplitHostPort(rawBackendAddrs[i])
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to parse --backend_addrs[%d]=%v, error: %v", i, rawBackendAddrs[i], err)
|
||||
logger.Fatalf("Failed to parse --backend_addrs[%d]=%v, error: %v", i, rawBackendAddrs[i], err)
|
||||
}
|
||||
ip := net.ParseIP(rawIP)
|
||||
if ip == nil {
|
||||
grpclog.Fatalf("Failed to parse ip: %v", rawIP)
|
||||
logger.Fatalf("Failed to parse ip: %v", rawIP)
|
||||
}
|
||||
numericPort, err := strconv.Atoi(rawPort)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to convert port %v to int", rawPort)
|
||||
logger.Fatalf("Failed to convert port %v to int", rawPort)
|
||||
}
|
||||
grpclog.Infof("Adding backend ip: %v, port: %d", ip.String(), numericPort)
|
||||
logger.Infof("Adding backend ip: %v, port: %d", ip.String(), numericPort)
|
||||
serverList[i] = &lbpb.Server{
|
||||
IpAddress: ip,
|
||||
Port: int32(numericPort),
|
||||
@ -158,10 +160,10 @@ func main() {
|
||||
},
|
||||
}
|
||||
server := grpc.NewServer(opts...)
|
||||
grpclog.Infof("Begin listening on %d.", *port)
|
||||
logger.Infof("Begin listening on %d.", *port)
|
||||
lis, err := net.Listen("tcp", ":"+strconv.Itoa(*port))
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to listen on port %v: %v", *port, err)
|
||||
logger.Fatalf("Failed to listen on port %v: %v", *port, err)
|
||||
}
|
||||
lbpb.RegisterLoadBalancerServer(server, &loadBalancerServer{
|
||||
serverListResponse: serverListResponse,
|
||||
|
@ -52,6 +52,8 @@ var (
|
||||
max_streams : server will ensure that the max_concurrent_streams limit is upheld;`)
|
||||
largeReqSize = 271828
|
||||
largeRespSize = 314159
|
||||
|
||||
logger = grpclog.Component("interop")
|
||||
)
|
||||
|
||||
func largeSimpleRequest() *testpb.SimpleRequest {
|
||||
@ -76,10 +78,10 @@ func rstAfterHeader(tc testpb.TestServiceClient) {
|
||||
req := largeSimpleRequest()
|
||||
reply, err := tc.UnaryCall(context.Background(), req)
|
||||
if reply != nil {
|
||||
grpclog.Fatalf("Client received reply despite server sending rst stream after header")
|
||||
logger.Fatalf("Client received reply despite server sending rst stream after header")
|
||||
}
|
||||
if status.Code(err) != codes.Internal {
|
||||
grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal)
|
||||
logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal)
|
||||
}
|
||||
}
|
||||
|
||||
@ -87,10 +89,10 @@ func rstDuringData(tc testpb.TestServiceClient) {
|
||||
req := largeSimpleRequest()
|
||||
reply, err := tc.UnaryCall(context.Background(), req)
|
||||
if reply != nil {
|
||||
grpclog.Fatalf("Client received reply despite server sending rst stream during data")
|
||||
logger.Fatalf("Client received reply despite server sending rst stream during data")
|
||||
}
|
||||
if status.Code(err) != codes.Unknown {
|
||||
grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unknown)
|
||||
logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unknown)
|
||||
}
|
||||
}
|
||||
|
||||
@ -98,10 +100,10 @@ func rstAfterData(tc testpb.TestServiceClient) {
|
||||
req := largeSimpleRequest()
|
||||
reply, err := tc.UnaryCall(context.Background(), req)
|
||||
if reply != nil {
|
||||
grpclog.Fatalf("Client received reply despite server sending rst stream after data")
|
||||
logger.Fatalf("Client received reply despite server sending rst stream after data")
|
||||
}
|
||||
if status.Code(err) != codes.Internal {
|
||||
grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal)
|
||||
logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal)
|
||||
}
|
||||
}
|
||||
|
||||
@ -130,30 +132,30 @@ func main() {
|
||||
opts = append(opts, grpc.WithInsecure())
|
||||
conn, err := grpc.Dial(serverAddr, opts...)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Fail to dial: %v", err)
|
||||
logger.Fatalf("Fail to dial: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
tc := testpb.NewTestServiceClient(conn)
|
||||
switch *testCase {
|
||||
case "goaway":
|
||||
goaway(tc)
|
||||
grpclog.Infoln("goaway done")
|
||||
logger.Infoln("goaway done")
|
||||
case "rst_after_header":
|
||||
rstAfterHeader(tc)
|
||||
grpclog.Infoln("rst_after_header done")
|
||||
logger.Infoln("rst_after_header done")
|
||||
case "rst_during_data":
|
||||
rstDuringData(tc)
|
||||
grpclog.Infoln("rst_during_data done")
|
||||
logger.Infoln("rst_during_data done")
|
||||
case "rst_after_data":
|
||||
rstAfterData(tc)
|
||||
grpclog.Infoln("rst_after_data done")
|
||||
logger.Infoln("rst_after_data done")
|
||||
case "ping":
|
||||
ping(tc)
|
||||
grpclog.Infoln("ping done")
|
||||
logger.Infoln("ping done")
|
||||
case "max_streams":
|
||||
maxStreams(tc)
|
||||
grpclog.Infoln("max_streams done")
|
||||
logger.Infoln("max_streams done")
|
||||
default:
|
||||
grpclog.Fatal("Unsupported test case: ", *testCase)
|
||||
logger.Fatal("Unsupported test case: ", *testCase)
|
||||
}
|
||||
}
|
||||
|
@ -40,17 +40,19 @@ var (
|
||||
certFile = flag.String("tls_cert_file", "", "The TLS cert file")
|
||||
keyFile = flag.String("tls_key_file", "", "The TLS key file")
|
||||
port = flag.Int("port", 10000, "The server port")
|
||||
|
||||
logger = grpclog.Component("interop")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *useTLS && *useALTS {
|
||||
grpclog.Fatalf("use_tls and use_alts cannot be both set to true")
|
||||
logger.Fatalf("use_tls and use_alts cannot be both set to true")
|
||||
}
|
||||
p := strconv.Itoa(*port)
|
||||
lis, err := net.Listen("tcp", ":"+p)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("failed to listen: %v", err)
|
||||
logger.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
var opts []grpc.ServerOption
|
||||
if *useTLS {
|
||||
@ -62,7 +64,7 @@ func main() {
|
||||
}
|
||||
creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to generate credentials %v", err)
|
||||
logger.Fatalf("Failed to generate credentials %v", err)
|
||||
}
|
||||
opts = append(opts, grpc.Creds(creds))
|
||||
} else if *useALTS {
|
||||
|
@ -45,20 +45,22 @@ var (
|
||||
largeRespSize = 314159
|
||||
initialMetadataKey = "x-grpc-test-echo-initial"
|
||||
trailingMetadataKey = "x-grpc-test-echo-trailing-bin"
|
||||
|
||||
logger = grpclog.Component("interop")
|
||||
)
|
||||
|
||||
// ClientNewPayload returns a payload of the given type and size.
|
||||
func ClientNewPayload(t testpb.PayloadType, size int) *testpb.Payload {
|
||||
if size < 0 {
|
||||
grpclog.Fatalf("Requested a response with invalid length %d", size)
|
||||
logger.Fatalf("Requested a response with invalid length %d", size)
|
||||
}
|
||||
body := make([]byte, size)
|
||||
switch t {
|
||||
case testpb.PayloadType_COMPRESSABLE:
|
||||
case testpb.PayloadType_UNCOMPRESSABLE:
|
||||
grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
logger.Fatalf("PayloadType UNCOMPRESSABLE is not supported")
default:
grpclog.Fatalf("Unsupported payload type: %d", t)
logger.Fatalf("Unsupported payload type: %d", t)
}
return &testpb.Payload{
Type: t,
@ -70,10 +72,10 @@ func ClientNewPayload(t testpb.PayloadType, size int) *testpb.Payload {
func DoEmptyUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) {
reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, args...)
if err != nil {
grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err)
logger.Fatal("/TestService/EmptyCall RPC failed: ", err)
}
if !proto.Equal(&testpb.Empty{}, reply) {
grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{})
logger.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{})
}
}

@ -87,12 +89,12 @@ func DoLargeUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) {
}
reply, err := tc.UnaryCall(context.Background(), req, args...)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
t := reply.GetPayload().GetType()
s := len(reply.GetPayload().GetBody())
if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize {
grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize)
logger.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize)
}
}

@ -100,7 +102,7 @@ func DoLargeUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) {
func DoClientStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) {
stream, err := tc.StreamingInputCall(context.Background(), args...)
if err != nil {
grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
logger.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
}
var sum int
for _, s := range reqSizes {
@ -109,16 +111,16 @@ func DoClientStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) {
Payload: pl,
}
if err := stream.Send(req); err != nil {
grpclog.Fatalf("%v has error %v while sending %v", stream, err, req)
logger.Fatalf("%v has error %v while sending %v", stream, err, req)
}
sum += s
}
reply, err := stream.CloseAndRecv()
if err != nil {
grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
logger.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
}
if reply.GetAggregatedPayloadSize() != int32(sum) {
grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
logger.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
}
}

@ -136,7 +138,7 @@ func DoServerStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) {
}
stream, err := tc.StreamingOutputCall(context.Background(), req, args...)
if err != nil {
grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err)
logger.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err)
}
var rpcStatus error
var respCnt int
@ -149,20 +151,20 @@ func DoServerStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) {
}
t := reply.GetPayload().GetType()
if t != testpb.PayloadType_COMPRESSABLE {
grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
logger.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
}
size := len(reply.GetPayload().GetBody())
if size != respSizes[index] {
grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
logger.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
}
index++
respCnt++
}
if rpcStatus != io.EOF {
grpclog.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus)
logger.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus)
}
if respCnt != len(respSizes) {
grpclog.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
logger.Fatalf("Got %d reply, want %d", len(respSizes), respCnt)
}
}

@ -170,7 +172,7 @@ func DoServerStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) {
func DoPingPong(tc testpb.TestServiceClient, args ...grpc.CallOption) {
stream, err := tc.FullDuplexCall(context.Background(), args...)
if err != nil {
grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
logger.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
}
var index int
for index < len(reqSizes) {
@ -186,27 +188,27 @@ func DoPingPong(tc testpb.TestServiceClient, args ...grpc.CallOption) {
Payload: pl,
}
if err := stream.Send(req); err != nil {
grpclog.Fatalf("%v has error %v while sending %v", stream, err, req)
logger.Fatalf("%v has error %v while sending %v", stream, err, req)
}
reply, err := stream.Recv()
if err != nil {
grpclog.Fatalf("%v.Recv() = %v", stream, err)
logger.Fatalf("%v.Recv() = %v", stream, err)
}
t := reply.GetPayload().GetType()
if t != testpb.PayloadType_COMPRESSABLE {
grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
logger.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE)
}
size := len(reply.GetPayload().GetBody())
if size != respSizes[index] {
grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
logger.Fatalf("Got reply body of length %d, want %d", size, respSizes[index])
}
index++
}
if err := stream.CloseSend(); err != nil {
grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
logger.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
}
if _, err := stream.Recv(); err != io.EOF {
grpclog.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
logger.Fatalf("%v failed to complete the ping pong test: %v", stream, err)
}
}

@ -214,13 +216,13 @@ func DoPingPong(tc testpb.TestServiceClient, args ...grpc.CallOption) {
func DoEmptyStream(tc testpb.TestServiceClient, args ...grpc.CallOption) {
stream, err := tc.FullDuplexCall(context.Background(), args...)
if err != nil {
grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
logger.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
}
if err := stream.CloseSend(); err != nil {
grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
logger.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
}
if _, err := stream.Recv(); err != io.EOF {
grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err)
logger.Fatalf("%v failed to complete the empty stream test: %v", stream, err)
}
}

@ -233,7 +235,7 @@ func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient, args ...grpc.CallOpt
if status.Code(err) == codes.DeadlineExceeded {
return
}
grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
logger.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
}
pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182)
req := &testpb.StreamingOutputCallRequest{
@ -241,10 +243,10 @@ func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient, args ...grpc.CallOpt
Payload: pl,
}
if err := stream.Send(req); err != nil && err != io.EOF {
grpclog.Fatalf("%v.Send(_) = %v", stream, err)
logger.Fatalf("%v.Send(_) = %v", stream, err)
}
if _, err := stream.Recv(); status.Code(err) != codes.DeadlineExceeded {
grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
logger.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
}
}

@ -260,22 +262,22 @@ func DoComputeEngineCreds(tc testpb.TestServiceClient, serviceAccount, oauthScop
}
reply, err := tc.UnaryCall(context.Background(), req)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
user := reply.GetUsername()
scope := reply.GetOauthScope()
if user != serviceAccount {
grpclog.Fatalf("Got user name %q, want %q.", user, serviceAccount)
logger.Fatalf("Got user name %q, want %q.", user, serviceAccount)
}
if !strings.Contains(oauthScope, scope) {
grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
logger.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
}
}

func getServiceAccountJSONKey(keyFile string) []byte {
jsonKey, err := ioutil.ReadFile(keyFile)
if err != nil {
grpclog.Fatalf("Failed to read the service account key file: %v", err)
logger.Fatalf("Failed to read the service account key file: %v", err)
}
return jsonKey
}
@ -292,16 +294,16 @@ func DoServiceAccountCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, o
}
reply, err := tc.UnaryCall(context.Background(), req)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile)
user := reply.GetUsername()
scope := reply.GetOauthScope()
if !strings.Contains(string(jsonKey), user) {
grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
logger.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
}
if !strings.Contains(oauthScope, scope) {
grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
logger.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
}
}

@ -316,12 +318,12 @@ func DoJWTTokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile string)
}
reply, err := tc.UnaryCall(context.Background(), req)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile)
user := reply.GetUsername()
if !strings.Contains(string(jsonKey), user) {
grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
logger.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
}
}

@ -330,11 +332,11 @@ func GetToken(serviceAccountKeyFile string, oauthScope string) *oauth2.Token {
jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile)
config, err := google.JWTConfigFromJSON(jsonKey, oauthScope)
if err != nil {
grpclog.Fatalf("Failed to get the config: %v", err)
logger.Fatalf("Failed to get the config: %v", err)
}
token, err := config.TokenSource(context.Background()).Token()
if err != nil {
grpclog.Fatalf("Failed to get the token: %v", err)
logger.Fatalf("Failed to get the token: %v", err)
}
return token
}
@ -351,16 +353,16 @@ func DoOauth2TokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oaut
}
reply, err := tc.UnaryCall(context.Background(), req)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile)
user := reply.GetUsername()
scope := reply.GetOauthScope()
if !strings.Contains(string(jsonKey), user) {
grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
logger.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
}
if !strings.Contains(oauthScope, scope) {
grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
logger.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
}
}

@ -380,15 +382,15 @@ func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScop
ctx := metadata.NewOutgoingContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}})
reply, err := tc.UnaryCall(ctx, req)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
user := reply.GetUsername()
scope := reply.GetOauthScope()
if !strings.Contains(string(jsonKey), user) {
grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
logger.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
}
if !strings.Contains(oauthScope, scope) {
grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
logger.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope)
}
}

@ -404,10 +406,10 @@ func DoGoogleDefaultCredentials(tc testpb.TestServiceClient, defaultServiceAccou
}
reply, err := tc.UnaryCall(context.Background(), req)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
if reply.GetUsername() != defaultServiceAccount {
grpclog.Fatalf("Got user name %q; wanted %q. ", reply.GetUsername(), defaultServiceAccount)
logger.Fatalf("Got user name %q; wanted %q. ", reply.GetUsername(), defaultServiceAccount)
}
}

@ -423,10 +425,10 @@ func DoComputeEngineChannelCredentials(tc testpb.TestServiceClient, defaultServi
}
reply, err := tc.UnaryCall(context.Background(), req)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
if reply.GetUsername() != defaultServiceAccount {
grpclog.Fatalf("Got user name %q; wanted %q. ", reply.GetUsername(), defaultServiceAccount)
logger.Fatalf("Got user name %q; wanted %q. ", reply.GetUsername(), defaultServiceAccount)
}
}

@ -440,12 +442,12 @@ func DoCancelAfterBegin(tc testpb.TestServiceClient, args ...grpc.CallOption) {
ctx, cancel := context.WithCancel(metadata.NewOutgoingContext(context.Background(), testMetadata))
stream, err := tc.StreamingInputCall(ctx, args...)
if err != nil {
grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
logger.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
}
cancel()
_, err = stream.CloseAndRecv()
if status.Code(err) != codes.Canceled {
grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, status.Code(err), codes.Canceled)
logger.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, status.Code(err), codes.Canceled)
}
}

@ -454,7 +456,7 @@ func DoCancelAfterFirstResponse(tc testpb.TestServiceClient, args ...grpc.CallOp
ctx, cancel := context.WithCancel(context.Background())
stream, err := tc.FullDuplexCall(ctx, args...)
if err != nil {
grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
logger.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
}
respParam := []*testpb.ResponseParameters{
{
@ -468,14 +470,14 @@ func DoCancelAfterFirstResponse(tc testpb.TestServiceClient, args ...grpc.CallOp
Payload: pl,
}
if err := stream.Send(req); err != nil {
grpclog.Fatalf("%v has error %v while sending %v", stream, err, req)
logger.Fatalf("%v has error %v while sending %v", stream, err, req)
}
if _, err := stream.Recv(); err != nil {
grpclog.Fatalf("%v.Recv() = %v", stream, err)
logger.Fatalf("%v.Recv() = %v", stream, err)
}
cancel()
if _, err := stream.Recv(); status.Code(err) != codes.Canceled {
grpclog.Fatalf("%v completed with error code %d, want %d", stream, status.Code(err), codes.Canceled)
logger.Fatalf("%v completed with error code %d, want %d", stream, status.Code(err), codes.Canceled)
}
}

@ -490,16 +492,16 @@ var (

func validateMetadata(header, trailer metadata.MD) {
if len(header[initialMetadataKey]) != 1 {
grpclog.Fatalf("Expected exactly one header from server. Received %d", len(header[initialMetadataKey]))
logger.Fatalf("Expected exactly one header from server. Received %d", len(header[initialMetadataKey]))
}
if header[initialMetadataKey][0] != initialMetadataValue {
grpclog.Fatalf("Got header %s; want %s", header[initialMetadataKey][0], initialMetadataValue)
logger.Fatalf("Got header %s; want %s", header[initialMetadataKey][0], initialMetadataValue)
}
if len(trailer[trailingMetadataKey]) != 1 {
grpclog.Fatalf("Expected exactly one trailer from server. Received %d", len(trailer[trailingMetadataKey]))
logger.Fatalf("Expected exactly one trailer from server. Received %d", len(trailer[trailingMetadataKey]))
}
if trailer[trailingMetadataKey][0] != trailingMetadataValue {
grpclog.Fatalf("Got trailer %s; want %s", trailer[trailingMetadataKey][0], trailingMetadataValue)
logger.Fatalf("Got trailer %s; want %s", trailer[trailingMetadataKey][0], trailingMetadataValue)
}
}

@ -521,19 +523,19 @@ func DoCustomMetadata(tc testpb.TestServiceClient, args ...grpc.CallOption) {
args...,
)
if err != nil {
grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
logger.Fatal("/TestService/UnaryCall RPC failed: ", err)
}
t := reply.GetPayload().GetType()
s := len(reply.GetPayload().GetBody())
if t != testpb.PayloadType_COMPRESSABLE || s != 1 {
grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, 1)
logger.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, 1)
}
validateMetadata(header, trailer)

// Testing with FullDuplex.
stream, err := tc.FullDuplexCall(ctx, args...)
if err != nil {
grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
logger.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
}
respParam := []*testpb.ResponseParameters{
{
@ -546,20 +548,20 @@ func DoCustomMetadata(tc testpb.TestServiceClient, args ...grpc.CallOption) {
Payload: pl,
}
if err := stream.Send(streamReq); err != nil {
grpclog.Fatalf("%v has error %v while sending %v", stream, err, streamReq)
logger.Fatalf("%v has error %v while sending %v", stream, err, streamReq)
}
streamHeader, err := stream.Header()
if err != nil {
grpclog.Fatalf("%v.Header() = %v", stream, err)
logger.Fatalf("%v.Header() = %v", stream, err)
}
if _, err := stream.Recv(); err != nil {
grpclog.Fatalf("%v.Recv() = %v", stream, err)
logger.Fatalf("%v.Recv() = %v", stream, err)
}
if err := stream.CloseSend(); err != nil {
grpclog.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
logger.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
}
if _, err := stream.Recv(); err != io.EOF {
grpclog.Fatalf("%v failed to complete the custom metadata test: %v", stream, err)
logger.Fatalf("%v failed to complete the custom metadata test: %v", stream, err)
}
streamTrailer := stream.Trailer()
validateMetadata(streamHeader, streamTrailer)
@ -579,24 +581,24 @@ func DoStatusCodeAndMessage(tc testpb.TestServiceClient, args ...grpc.CallOption
ResponseStatus: respStatus,
}
if _, err := tc.UnaryCall(context.Background(), req, args...); err == nil || err.Error() != expectedErr.Error() {
grpclog.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr)
logger.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr)
}
// Test FullDuplexCall.
stream, err := tc.FullDuplexCall(context.Background(), args...)
if err != nil {
grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
logger.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
}
streamReq := &testpb.StreamingOutputCallRequest{
ResponseStatus: respStatus,
}
if err := stream.Send(streamReq); err != nil {
grpclog.Fatalf("%v has error %v while sending %v, want <nil>", stream, err, streamReq)
logger.Fatalf("%v has error %v while sending %v, want <nil>", stream, err, streamReq)
}
if err := stream.CloseSend(); err != nil {
grpclog.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
logger.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
}
if _, err = stream.Recv(); err.Error() != expectedErr.Error() {
grpclog.Fatalf("%v.Recv() returned error %v, want %v", stream, err, expectedErr)
logger.Fatalf("%v.Recv() returned error %v, want %v", stream, err, expectedErr)
}
}

@ -617,7 +619,7 @@ func DoSpecialStatusMessage(tc testpb.TestServiceClient, args ...grpc.CallOption
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if _, err := tc.UnaryCall(ctx, req, args...); err == nil || err.Error() != expectedErr.Error() {
grpclog.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr)
logger.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr)
}
}

@ -625,7 +627,7 @@ func DoSpecialStatusMessage(tc testpb.TestServiceClient, args ...grpc.CallOption
func DoUnimplementedService(tc testpb.UnimplementedServiceClient) {
_, err := tc.UnimplementedCall(context.Background(), &testpb.Empty{})
if status.Code(err) != codes.Unimplemented {
grpclog.Fatalf("%v.UnimplementedCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unimplemented)
logger.Fatalf("%v.UnimplementedCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unimplemented)
}
}

@ -633,7 +635,7 @@ func DoUnimplementedService(tc testpb.UnimplementedServiceClient) {
func DoUnimplementedMethod(cc *grpc.ClientConn) {
var req, reply proto.Message
if err := cc.Invoke(context.Background(), "/grpc.testing.TestService/UnimplementedCall", req, reply); err == nil || status.Code(err) != codes.Unimplemented {
grpclog.Fatalf("ClientConn.Invoke(_, _, _, _, _) = %v, want error code %s", err, codes.Unimplemented)
logger.Fatalf("ClientConn.Invoke(_, _, _, _, _) = %v, want error code %s", err, codes.Unimplemented)
}
}

@ -655,18 +657,18 @@ func DoPickFirstUnary(tc testpb.TestServiceClient) {
for i := 0; i < rpcCount; i++ {
resp, err := tc.UnaryCall(ctx, req)
if err != nil {
grpclog.Fatalf("iteration %d, failed to do UnaryCall: %v", i, err)
logger.Fatalf("iteration %d, failed to do UnaryCall: %v", i, err)
}
id := resp.ServerId
if id == "" {
grpclog.Fatalf("iteration %d, got empty server ID", i)
logger.Fatalf("iteration %d, got empty server ID", i)
}
if i == 0 {
serverID = id
continue
}
if serverID != id {
grpclog.Fatalf("iteration %d, got different server ids: %q vs %q", i, serverID, id)
logger.Fatalf("iteration %d, got different server ids: %q vs %q", i, serverID, id)
}
}
}
@ -91,6 +91,8 @@ var (
// 0 or 1 representing an RPC has succeeded. Use hasRPCSucceeded and
// setRPCSucceeded to access in a safe manner.
rpcSucceeded uint32

logger = grpclog.Component("interop")
)

type statsService struct {
@ -155,7 +157,7 @@ func (s *statsService) GetClientStats(ctx context.Context, in *testpb.LoadBalanc
return watcher.buildResp(), nil
}
case <-ctx.Done():
grpclog.Info("Timed out, returning partial stats")
logger.Info("Timed out, returning partial stats")
return watcher.buildResp(), nil
}
}
@ -220,7 +222,7 @@ func main() {

lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *statsPort))
if err != nil {
grpclog.Fatalf("failed to listen: %v", err)
logger.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
defer s.Stop()
@ -231,7 +233,7 @@ func main() {
for i := 0; i < *numChannels; i++ {
conn, err := grpc.DialContext(context.Background(), *server, grpc.WithInsecure())
if err != nil {
grpclog.Fatalf("Fail to dial: %v", err)
logger.Fatalf("Fail to dial: %v", err)
}
defer conn.Close()
clients[i] = testpb.NewTestServiceClient(conn)
@ -305,7 +307,7 @@ func sendRPCs(clients []testpb.TestServiceClient, cfgs []*rpcConfig, ticker *tim
watcher.chanHosts <- info
}
if err != nil && *failOnFailedRPC && hasRPCSucceeded() {
grpclog.Fatalf("RPC failed: %v", err)
logger.Fatalf("RPC failed: %v", err)
}
if err == nil {
setRPCSucceeded()
@ -37,6 +37,8 @@ var (
port = flag.Int("port", 8080, "The server port")
serverID = flag.String("server_id", "go_server", "Server ID included in response")
hostname = getHostname()

logger = grpclog.Component("interop")
)

func getHostname() string {
@ -66,7 +68,7 @@ func main() {
p := strconv.Itoa(*port)
lis, err := net.Listen("tcp", ":"+p)
if err != nil {
grpclog.Fatalf("failed to listen: %v", err)
logger.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
testpb.RegisterTestServiceServer(s, &server{})
@ -26,7 +26,6 @@ import (
"sort"
"strings"

"google.golang.org/grpc/grpclog"
ppb "google.golang.org/grpc/profiling/proto"
)

@ -177,7 +176,7 @@ func streamStatsCatapultJSONSingle(stat *ppb.Stat, baseSec int64, baseNsec int32
flowEndPID = opid
flowEndTID = fmt.Sprintf("%d", stat.Timers[flowEndID].GoId)
} else {
grpclog.Infof("cannot find %s/grpc/stream/recv/header for %s/http2/recv/header", opid, opid)
logger.Infof("cannot find %s/grpc/stream/recv/header for %s/http2/recv/header", opid, opid)
}
case "/http2/recv/dataFrame/loopyReader":
flowEndID = filterCounter(stat, "/recvAndDecompress", lrc.GetAndInc("/http2/recv/dataFrame/loopyReader"))
@ -185,7 +184,7 @@ func streamStatsCatapultJSONSingle(stat *ppb.Stat, baseSec int64, baseNsec int32
flowEndPID = opid
flowEndTID = fmt.Sprintf("%d", stat.Timers[flowEndID].GoId)
} else {
grpclog.Infof("cannot find %s/recvAndDecompress for %s/http2/recv/dataFrame/loopyReader", opid, opid)
logger.Infof("cannot find %s/recvAndDecompress for %s/http2/recv/dataFrame/loopyReader", opid, opid)
}
default:
flowEndID = -1
@ -232,7 +231,7 @@ func streamStatsCatapultJSONSingle(stat *ppb.Stat, baseSec int64, baseNsec int32
flowBeginPID = opid
flowBeginTID = fmt.Sprintf("%d", stat.Timers[flowBeginID].GoId)
} else {
grpclog.Infof("cannot find /%d/transport/enqueue for /%d/http2/send/dataFrame/loopyWriter/preprocess", connectionCounter, connectionCounter)
logger.Infof("cannot find /%d/transport/enqueue for /%d/http2/send/dataFrame/loopyWriter/preprocess", connectionCounter, connectionCounter)
}
default:
flowBeginID = -1
@ -305,14 +304,14 @@ func timerBeginIsBefore(ti *ppb.Timer, tj *ppb.Timer) bool {
// write to. The grpc-go profiling snapshot is processed and converted to a
// JSON format that can be understood by trace-viewer.
func streamStatsCatapultJSON(s *snapshot, streamStatsCatapultJSONFileName string) (err error) {
grpclog.Infof("calculating stream stats filters")
logger.Infof("calculating stream stats filters")
filterArray := strings.Split(*flagStreamStatsFilter, ",")
filter := make(map[string]bool)
for _, f := range filterArray {
filter[f] = true
}

grpclog.Infof("filter stream stats for %s", *flagStreamStatsFilter)
logger.Infof("filter stream stats for %s", *flagStreamStatsFilter)
var streamStats []*ppb.Stat
for _, stat := range s.StreamStats {
if _, ok := filter[stat.Tags]; ok {
@ -320,14 +319,14 @@ func streamStatsCatapultJSON(s *snapshot, streamStatsCatapultJSONFileName string
}
}

grpclog.Infof("sorting timers within all stats")
logger.Infof("sorting timers within all stats")
for id := range streamStats {
sort.Slice(streamStats[id].Timers, func(i, j int) bool {
return timerBeginIsBefore(streamStats[id].Timers[i], streamStats[id].Timers[j])
})
}

grpclog.Infof("sorting stream stats")
logger.Infof("sorting stream stats")
sort.Slice(streamStats, func(i, j int) bool {
if len(streamStats[j].Timers) == 0 {
return true
@ -352,7 +351,7 @@ func streamStatsCatapultJSON(s *snapshot, streamStatsCatapultJSONFileName string
}

// All timestamps use the earliest timestamp available as the reference.
grpclog.Infof("calculating the earliest timestamp across all timers")
logger.Infof("calculating the earliest timestamp across all timers")
var base *ppb.Timer
for _, stat := range streamStats {
for _, timer := range stat.Timers {
@ -362,34 +361,34 @@ func streamStatsCatapultJSON(s *snapshot, streamStatsCatapultJSONFileName string
}
}

grpclog.Infof("converting %d stats to catapult JSON format", len(streamStats))
logger.Infof("converting %d stats to catapult JSON format", len(streamStats))
var jsonNodes []jsonNode
for _, stat := range streamStats {
jsonNodes = append(jsonNodes, streamStatsCatapultJSONSingle(stat, base.BeginSec, base.BeginNsec)...)
}

grpclog.Infof("marshalling catapult JSON")
logger.Infof("marshalling catapult JSON")
b, err := json.Marshal(jsonNodes)
if err != nil {
grpclog.Errorf("cannot marshal JSON: %v", err)
logger.Errorf("cannot marshal JSON: %v", err)
return err
}

grpclog.Infof("creating catapult JSON file")
logger.Infof("creating catapult JSON file")
streamStatsCatapultJSONFile, err := os.Create(streamStatsCatapultJSONFileName)
if err != nil {
grpclog.Errorf("cannot create file %s: %v", streamStatsCatapultJSONFileName, err)
logger.Errorf("cannot create file %s: %v", streamStatsCatapultJSONFileName, err)
return err
}
defer streamStatsCatapultJSONFile.Close()

grpclog.Infof("writing catapult JSON to disk")
logger.Infof("writing catapult JSON to disk")
_, err = streamStatsCatapultJSONFile.Write(b)
if err != nil {
grpclog.Errorf("cannot write marshalled JSON: %v", err)
logger.Errorf("cannot write marshalled JSON: %v", err)
return err
}

grpclog.Infof("successfully wrote catapult JSON file %s", streamStatsCatapultJSONFileName)
logger.Infof("successfully wrote catapult JSON file %s", streamStatsCatapultJSONFileName)
return nil
}
@ -22,24 +22,22 @@ import (
"encoding/gob"
"fmt"
"os"

"google.golang.org/grpc/grpclog"
)

func loadSnapshot(snapshotFileName string) (*snapshot, error) {
grpclog.Infof("opening snapshot file %s", snapshotFileName)
logger.Infof("opening snapshot file %s", snapshotFileName)
snapshotFile, err := os.Open(snapshotFileName)
if err != nil {
grpclog.Errorf("cannot open %s: %v", snapshotFileName, err)
logger.Errorf("cannot open %s: %v", snapshotFileName, err)
return nil, err
}
defer snapshotFile.Close()

grpclog.Infof("decoding snapshot file %s", snapshotFileName)
logger.Infof("decoding snapshot file %s", snapshotFileName)
s := &snapshot{}
decoder := gob.NewDecoder(snapshotFile)
if err = decoder.Decode(s); err != nil {
grpclog.Errorf("cannot decode %s: %v", snapshotFileName, err)
logger.Errorf("cannot decode %s: %v", snapshotFileName, err)
return nil, err
}
@ -27,24 +27,26 @@ import (
ppb "google.golang.org/grpc/profiling/proto"
)

var logger = grpclog.Component("profiling")

type snapshot struct {
StreamStats []*ppb.Stat
}

func main() {
if err := parseArgs(); err != nil {
grpclog.Errorf("error parsing flags: %v", err)
logger.Errorf("error parsing flags: %v", err)
os.Exit(1)
}

if *flagAddress != "" {
if err := remoteCommand(); err != nil {
grpclog.Errorf("error: %v", err)
logger.Errorf("error: %v", err)
os.Exit(1)
}
} else {
if err := localCommand(); err != nil {
grpclog.Errorf("error: %v", err)
logger.Errorf("error: %v", err)
os.Exit(1)
}
}
@ -26,47 +26,46 @@ import (
"time"

"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
ppb "google.golang.org/grpc/profiling/proto"
)

func setEnabled(ctx context.Context, c ppb.ProfilingClient, enabled bool) error {
_, err := c.Enable(ctx, &ppb.EnableRequest{Enabled: enabled})
if err != nil {
grpclog.Infof("error calling Enable: %v\n", err)
logger.Infof("error calling Enable: %v\n", err)
return err
}

grpclog.Infof("successfully set enabled = %v", enabled)
logger.Infof("successfully set enabled = %v", enabled)
return nil
}

func retrieveSnapshot(ctx context.Context, c ppb.ProfilingClient, f string) error {
grpclog.Infof("getting stream stats")
logger.Infof("getting stream stats")
resp, err := c.GetStreamStats(ctx, &ppb.GetStreamStatsRequest{})
if err != nil {
grpclog.Errorf("error calling GetStreamStats: %v\n", err)
logger.Errorf("error calling GetStreamStats: %v\n", err)
return err
}
s := &snapshot{StreamStats: resp.StreamStats}

grpclog.Infof("creating snapshot file %s", f)
logger.Infof("creating snapshot file %s", f)
file, err := os.Create(f)
if err != nil {
grpclog.Errorf("cannot create %s: %v", f, err)
logger.Errorf("cannot create %s: %v", f, err)
return err
}
defer file.Close()

grpclog.Infof("encoding data and writing to snapshot file %s", f)
logger.Infof("encoding data and writing to snapshot file %s", f)
encoder := gob.NewEncoder(file)
err = encoder.Encode(s)
if err != nil {
grpclog.Infof("error encoding: %v", err)
logger.Infof("error encoding: %v", err)
return err
}

grpclog.Infof("successfully wrote profiling snapshot to %s", f)
logger.Infof("successfully wrote profiling snapshot to %s", f)
return nil
}

@ -78,10 +77,10 @@ func remoteCommand() error {
defer cancel()
}

grpclog.Infof("dialing %s", *flagAddress)
logger.Infof("dialing %s", *flagAddress)
cc, err := grpc.Dial(*flagAddress, grpc.WithInsecure())
if err != nil {
grpclog.Errorf("cannot dial %s: %v", *flagAddress, err)
logger.Errorf("cannot dial %s: %v", *flagAddress, err)
return err
}
defer cc.Close()
|
||||
ppb "google.golang.org/grpc/profiling/proto"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("profiling")
|
||||
|
||||
// ProfilingConfig defines configuration options for the Init method.
|
||||
type ProfilingConfig struct {
|
||||
// Setting this to true will enable profiling.
|
||||
@ -96,9 +98,9 @@ func getProfilingServerInstance() *profilingServer {
|
||||
|
||||
func (s *profilingServer) Enable(ctx context.Context, req *ppb.EnableRequest) (*ppb.EnableResponse, error) {
|
||||
if req.Enabled {
|
||||
grpclog.Infof("profilingServer: Enable: enabling profiling")
|
||||
logger.Infof("profilingServer: Enable: enabling profiling")
|
||||
} else {
|
||||
grpclog.Infof("profilingServer: Enable: disabling profiling")
|
||||
logger.Infof("profilingServer: Enable: disabling profiling")
|
||||
}
|
||||
profiling.Enable(req.Enabled)
|
||||
|
||||
@ -131,12 +133,12 @@ func statToProtoStat(stat *profiling.Stat) *ppb.Stat {
|
||||
func (s *profilingServer) GetStreamStats(ctx context.Context, req *ppb.GetStreamStatsRequest) (*ppb.GetStreamStatsResponse, error) {
|
||||
// Since the drain operation is destructive, only one client request should
|
||||
// be served at a time.
|
||||
grpclog.Infof("profilingServer: GetStreamStats: processing request")
|
||||
logger.Infof("profilingServer: GetStreamStats: processing request")
|
||||
s.drainMutex.Lock()
|
||||
results := profiling.StreamStats.Drain()
|
||||
s.drainMutex.Unlock()
|
||||
|
||||
grpclog.Infof("profilingServer: GetStreamStats: returning %v records", len(results))
|
||||
logger.Infof("profilingServer: GetStreamStats: returning %v records", len(results))
|
||||
streamStats := make([]*ppb.Stat, 0)
|
||||
for _, stat := range results {
|
||||
streamStats = append(streamStats, statToProtoStat(stat.(*profiling.Stat)))
|
||||
|
@ -52,6 +52,8 @@ var (
|
||||
testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root")
|
||||
tlsServerName = flag.String("server_host_override", "foo.test.google.fr", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.")
|
||||
caFile = flag.String("ca_file", "", "The file containing the CA root cert file")
|
||||
|
||||
logger = grpclog.Component("stress")
|
||||
)
|
||||
|
||||
// testCaseWithWeight contains the test case type and its weight.
|
||||
@ -196,7 +198,7 @@ func (s *server) createGauge(name string) *gauge {
|
||||
func startServer(server *server, port int) {
|
||||
lis, err := net.Listen("tcp", ":"+strconv.Itoa(port))
|
||||
if err != nil {
|
||||
grpclog.Fatalf("failed to listen: %v", err)
|
||||
logger.Fatalf("failed to listen: %v", err)
|
||||
}
|
||||
|
||||
s := grpc.NewServer()
|
||||
@ -248,23 +250,23 @@ func performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTe
|
||||
}
|
||||
|
||||
func logParameterInfo(addresses []string, tests []testCaseWithWeight) {
|
||||
grpclog.Infof("server_addresses: %s", *serverAddresses)
|
||||
grpclog.Infof("test_cases: %s", *testCases)
|
||||
grpclog.Infof("test_duration_secs: %d", *testDurationSecs)
|
||||
grpclog.Infof("num_channels_per_server: %d", *numChannelsPerServer)
|
||||
grpclog.Infof("num_stubs_per_channel: %d", *numStubsPerChannel)
|
||||
grpclog.Infof("metrics_port: %d", *metricsPort)
|
||||
grpclog.Infof("use_tls: %t", *useTLS)
|
||||
grpclog.Infof("use_test_ca: %t", *testCA)
|
||||
grpclog.Infof("server_host_override: %s", *tlsServerName)
|
||||
logger.Infof("server_addresses: %s", *serverAddresses)
|
||||
logger.Infof("test_cases: %s", *testCases)
|
||||
logger.Infof("test_duration_secs: %d", *testDurationSecs)
|
||||
logger.Infof("num_channels_per_server: %d", *numChannelsPerServer)
|
||||
logger.Infof("num_stubs_per_channel: %d", *numStubsPerChannel)
|
||||
logger.Infof("metrics_port: %d", *metricsPort)
|
||||
logger.Infof("use_tls: %t", *useTLS)
|
||||
logger.Infof("use_test_ca: %t", *testCA)
|
||||
logger.Infof("server_host_override: %s", *tlsServerName)
|
||||
|
||||
grpclog.Infoln("addresses:")
|
||||
logger.Infoln("addresses:")
|
||||
for i, addr := range addresses {
|
||||
grpclog.Infof("%d. %s\n", i+1, addr)
|
||||
logger.Infof("%d. %s\n", i+1, addr)
|
||||
}
|
||||
grpclog.Infoln("tests:")
|
||||
logger.Infoln("tests:")
|
||||
for i, test := range tests {
|
||||
grpclog.Infof("%d. %v\n", i+1, test)
|
||||
logger.Infof("%d. %v\n", i+1, test)
|
||||
}
|
||||
}
|
||||
|
||||
@ -283,7 +285,7 @@ func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.C
|
||||
}
|
||||
creds, err = credentials.NewClientTLSFromFile(*caFile, sn)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Failed to create TLS credentials %v", err)
|
||||
logger.Fatalf("Failed to create TLS credentials %v", err)
|
||||
}
|
||||
} else {
|
||||
creds = credentials.NewClientTLSFromCert(nil, sn)
|
||||
@ -311,7 +313,7 @@ func main() {
|
||||
for connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ {
|
||||
conn, err := newConn(address, *useTLS, *testCA, *tlsServerName)
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Fail to dial: %v", err)
|
||||
logger.Fatalf("Fail to dial: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
for clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ {
|
||||
@ -331,6 +333,6 @@ func main() {
|
||||
close(stop)
|
||||
}
|
||||
wg.Wait()
|
||||
grpclog.Infof(" ===== ALL DONE ===== ")
|
||||
logger.Infof(" ===== ALL DONE ===== ")
|
||||
|
||||
}
|
||||
|
@ -33,12 +33,14 @@ import (
var (
metricsServerAddress = flag.String("metrics_server_address", "", "The metrics server addresses in the format <hostname>:<port>")
totalOnly = flag.Bool("total_only", false, "If true, this prints only the total value of all gauges")

logger = grpclog.Component("stress")
)

func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) {
stream, err := client.GetAllGauges(context.Background(), &metricspb.EmptyMessage{})
if err != nil {
grpclog.Fatalf("failed to call GetAllGauges: %v", err)
logger.Fatalf("failed to call GetAllGauges: %v", err)
}

var (
@ -56,25 +58,25 @@ func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) {
}
v := gaugeResponse.GetLongValue()
if !totalOnly {
grpclog.Infof("%s: %d", gaugeResponse.Name, v)
logger.Infof("%s: %d", gaugeResponse.Name, v)
}
overallQPS += v
}
if rpcStatus != io.EOF {
grpclog.Fatalf("failed to finish server streaming: %v", rpcStatus)
logger.Fatalf("failed to finish server streaming: %v", rpcStatus)
}
grpclog.Infof("overall qps: %d", overallQPS)
logger.Infof("overall qps: %d", overallQPS)
}

func main() {
flag.Parse()
if *metricsServerAddress == "" {
grpclog.Fatalf("Metrics server address is empty.")
logger.Fatalf("Metrics server address is empty.")
}

conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure())
if err != nil {
grpclog.Fatalf("cannot connect to metrics server: %v", err)
logger.Fatalf("cannot connect to metrics server: %v", err)
}
defer conn.Close()
@ -35,7 +35,6 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/balancer/stub"
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/grpcsync"
@ -84,7 +83,7 @@ func (b *testBalancer) UpdateClientConnState(state balancer.ClientConnState) err
var err error
b.sc, err = b.cc.NewSubConn(state.ResolverState.Addresses, b.newSubConnOptions)
if err != nil {
grpclog.Errorf("testBalancer: failed to NewSubConn: %v", err)
logger.Errorf("testBalancer: failed to NewSubConn: %v", err)
return nil
}
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &picker{sc: b.sc, bal: b}})
@ -94,9 +93,9 @@ func (b *testBalancer) UpdateClientConnState(state balancer.ClientConnState) err
}

func (b *testBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
grpclog.Infof("testBalancer: UpdateSubConnState: %p, %v", sc, s)
logger.Infof("testBalancer: UpdateSubConnState: %p, %v", sc, s)
if b.sc != sc {
grpclog.Infof("testBalancer: ignored state change because sc is not recognized")
logger.Infof("testBalancer: ignored state change because sc is not recognized")
return
}
if s.ConnectivityState == connectivity.Shutdown {
@ -4919,15 +4919,14 @@ func logOutputHasContents(v []byte, wakeup chan<- bool) bool {
return false
}

var verboseLogs = flag.Bool("verbose_logs", false, "show all grpclog output, without filtering")
var verboseLogs = flag.Bool("verbose_logs", false, "show all log output, without filtering")

func noop() {}

// declareLogNoise declares that t is expected to emit the following noisy phrases,
// even on success. Those phrases will be filtered from grpclog output
// and only be shown if *verbose_logs or t ends up failing.
// The returned restore function should be called with defer to be run
// before the test ends.
// declareLogNoise declares that t is expected to emit the following noisy
// phrases, even on success. Those phrases will be filtered from log output and
// only be shown if *verbose_logs or t ends up failing. The returned restore
// function should be called with defer to be run before the test ends.
func declareLogNoise(t *testing.T, phrases ...string) (restore func()) {
if *verboseLogs {
return noop
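The rewritten comment above states the contract of declareLogNoise: the declared phrases are filtered from the log output unless -verbose_logs is set or the test fails, and the returned restore function must be deferred. A minimal usage sketch (the test name and the noise phrase are illustrative, not taken from this commit):

func TestSomethingNoisy(t *testing.T) {
	// Declare log lines this test is expected to emit even when it passes.
	restore := declareLogNoise(t,
		"transport: http2Client.notifyError got notified that the client transport was broken",
	)
	defer restore()

	// ... exercise code that produces the declared noise ...
}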
test/logging.go (new file, +23 lines)
@ -0,0 +1,23 @@
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package test

import "google.golang.org/grpc/grpclog"

var logger = grpclog.Component("testing")

vet.sh (+3 lines)
@ -83,6 +83,9 @@ not git grep -l 'x/net/context' -- "*.go"
# thread safety.
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'

# - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'

# - Ensure all ptypes proto packages are renamed when importing.
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
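The new vet.sh check above enforces the convention this commit rolls out: packages no longer call the grpclog package-level helpers directly, but define one component logger via grpclog.Component and log through it. A minimal sketch of the pattern (the package and component names are illustrative):

package mypkg

import "google.golang.org/grpc/grpclog"

// One component logger per package, created via grpclog.Component.
var logger = grpclog.Component("mypkg")

func doWork() {
	// grpclog.Infof("starting work") // direct call: flagged by the vet check above
	logger.Infof("starting work") // component logger: allowed
}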