New grpclb implementation (#1558)
The new grpclb supports falling back to the resolved backends if the remote balancer is unavailable.
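For context, the new implementation is driven by the resolver rather than by a naming.Resolver: addresses of type resolver.GRPCLB point at remote balancers, and resolver.Backend addresses double as the fallback list used when no balancer responds within the fallback timeout. The sketch below is assembled only from APIs exercised in this test; the package name, service names, addresses, and the use of WithInsecure are illustrative assumptions (the test itself passes serverNameCheckCreds and a custom dialer).

package lbexample

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

// dialWithGRPCLB sketches the client-side wiring: a manual resolver publishes
// one remote-balancer address and one ordinary backend address, and the grpclb
// builder falls back to that backend if no server list arrives from the
// balancer within the fallback timeout.
func dialWithGRPCLB(balancerAddr, backendAddr string) (*grpc.ClientConn, error) {
	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpc.DialContext(ctx, r.Scheme()+":///test.service",
		grpc.WithBalancerBuilder(grpc.NewLBBuilderWithFallbackTimeout(time.Second)),
		grpc.WithInsecure(), // placeholder; real deployments pass transport credentials
	)
	if err != nil {
		return nil, err
	}

	// GRPCLB addresses name remote balancers; Backend addresses are the fallback list.
	r.NewAddress([]resolver.Address{
		{Addr: balancerAddr, Type: resolver.GRPCLB, ServerName: "lb.test.service"},
		{Addr: backendAddr, Type: resolver.Backend, ServerName: "test.service"},
	})
	return cc, nil
}

TestFallback below drives the same flow end to end: it first publishes an unreachable GRPCLB address together with a Backend address and expects the standalone backend to serve, then publishes the real balancer address and expects traffic to move to the balancer-provided backend.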
@@ -27,12 +27,14 @@ import (
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"testing"
"time"

"github.com/golang/protobuf/proto"

"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -41,16 +43,20 @@ import (
lbspb "google.golang.org/grpc/grpclb/grpc_lb_v1/service"
_ "google.golang.org/grpc/grpclog/glogger"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/naming"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/resolver/manual"
"google.golang.org/grpc/status"
testpb "google.golang.org/grpc/test/grpc_testing"
"google.golang.org/grpc/test/leakcheck"

_ "google.golang.org/grpc/grpclog/glogger"
)

var (
lbsn = "bar.com"
besn = "foo.com"
lbToken = "iamatoken"
lbServerName = "bar.com"
beServerName = "foo.com"
lbToken = "iamatoken"

// Resolver replaces localhost with fakeName in Next().
// Dialer replaces fakeName with localhost when dialing.
@@ -58,83 +64,6 @@ var (
fakeName = "fake.Name"
)

type testWatcher struct {
// the channel to receives name resolution updates
update chan *naming.Update
// the side channel to get to know how many updates in a batch
side chan int
// the channel to notifiy update injector that the update reading is done
readDone chan int
}

func (w *testWatcher) Next() (updates []*naming.Update, err error) {
n, ok := <-w.side
if !ok {
return nil, fmt.Errorf("w.side is closed")
}
for i := 0; i < n; i++ {
u, ok := <-w.update
if !ok {
break
}
if u != nil {
// Resolver replaces localhost with fakeName in Next().
// Custom dialer will replace fakeName with localhost when dialing.
u.Addr = strings.Replace(u.Addr, "localhost", fakeName, 1)
updates = append(updates, u)
}
}
w.readDone <- 0
return
}

func (w *testWatcher) Close() {
close(w.side)
}

// Inject naming resolution updates to the testWatcher.
func (w *testWatcher) inject(updates []*naming.Update) {
w.side <- len(updates)
for _, u := range updates {
w.update <- u
}
<-w.readDone
}

type testNameResolver struct {
w *testWatcher
addrs []string
}

func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) {
r.w = &testWatcher{
update: make(chan *naming.Update, len(r.addrs)),
side: make(chan int, 1),
readDone: make(chan int),
}
r.w.side <- len(r.addrs)
for _, addr := range r.addrs {
r.w.update <- &naming.Update{
Op: naming.Add,
Addr: addr,
Metadata: &naming.AddrMetadataGRPCLB{
AddrType: naming.GRPCLB,
ServerName: lbsn,
},
}
}
go func() {
<-r.w.readDone
}()
return r.w, nil
}

func (r *testNameResolver) inject(updates []*naming.Update) {
if r.w != nil {
r.w.inject(updates)
}
}

type serverNameCheckCreds struct {
mu sync.Mutex
sn string
@@ -199,23 +128,22 @@ func fakeNameDialer(addr string, timeout time.Duration) (net.Conn, error) {
}

type remoteBalancer struct {
sls []*lbmpb.ServerList
intervals []time.Duration
sls chan *lbmpb.ServerList
statsDura time.Duration
done chan struct{}
mu sync.Mutex
stats lbmpb.ClientStats
}

func newRemoteBalancer(sls []*lbmpb.ServerList, intervals []time.Duration) *remoteBalancer {
func newRemoteBalancer(intervals []time.Duration) *remoteBalancer {
return &remoteBalancer{
sls: sls,
intervals: intervals,
done: make(chan struct{}),
sls: make(chan *lbmpb.ServerList, 1),
done: make(chan struct{}),
}
}

func (b *remoteBalancer) stop() {
close(b.sls)
close(b.done)
}

@@ -225,7 +153,7 @@ func (b *remoteBalancer) BalanceLoad(stream lbspb.LoadBalancer_BalanceLoadServer
return err
}
initReq := req.GetInitialRequest()
if initReq.Name != besn {
if initReq.Name != beServerName {
return status.Errorf(codes.InvalidArgument, "invalid service name: %v", initReq.Name)
}
resp := &lbmpb.LoadBalanceResponse{
@@ -260,8 +188,7 @@ func (b *remoteBalancer) BalanceLoad(stream lbspb.LoadBalancer_BalanceLoadServer
b.mu.Unlock()
}
}()
for k, v := range b.sls {
time.Sleep(b.intervals[k])
for v := range b.sls {
resp = &lbmpb.LoadBalanceResponse{
LoadBalanceResponseType: &lbmpb.LoadBalanceResponse_ServerList{
ServerList: v,
@@ -278,7 +205,8 @@ func (b *remoteBalancer) BalanceLoad(stream lbspb.LoadBalancer_BalanceLoadServer
type testServer struct {
testpb.TestServiceServer

addr string
addr string
fallback bool
}

const testmdkey = "testmd"
@@ -288,7 +216,7 @@ func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.E
if !ok {
return nil, status.Error(codes.Internal, "failed to receive metadata")
}
if md == nil || md["lb-token"][0] != lbToken {
if !s.fallback && (md == nil || md["lb-token"][0] != lbToken) {
return nil, status.Errorf(codes.Internal, "received unexpected metadata: %v", md)
}
grpc.SetTrailer(ctx, metadata.Pairs(testmdkey, s.addr))
@@ -299,13 +227,13 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ
return nil
}

func startBackends(sn string, lis ...net.Listener) (servers []*grpc.Server) {
func startBackends(sn string, fallback bool, lis ...net.Listener) (servers []*grpc.Server) {
for _, l := range lis {
creds := &serverNameCheckCreds{
sn: sn,
}
s := grpc.NewServer(grpc.Creds(creds))
testpb.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String()})
testpb.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String(), fallback: fallback})
servers = append(servers, s)
go func(s *grpc.Server, l net.Listener) {
s.Serve(l)
@@ -348,7 +276,7 @@ func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), er

beListeners = append(beListeners, beLis)
}
backends := startBackends(besn, beListeners...)
backends := startBackends(beServerName, false, beListeners...)

// Start a load balancer.
lbLis, err := net.Listen("tcp", "localhost:0")
@@ -357,21 +285,21 @@ func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), er
return
}
lbCreds := &serverNameCheckCreds{
sn: lbsn,
sn: lbServerName,
}
lb = grpc.NewServer(grpc.Creds(lbCreds))
if err != nil {
err = fmt.Errorf("Failed to generate the port number %v", err)
return
}
ls = newRemoteBalancer(nil, nil)
ls = newRemoteBalancer(nil)
lbspb.RegisterLoadBalancerServer(lb, ls)
go func() {
lb.Serve(lbLis)
}()

tss = &testServers{
lbAddr: lbLis.Addr().String(),
lbAddr: fakeName + ":" + strconv.Itoa(lbLis.Addr().(*net.TCPAddr).Port),
ls: ls,
lb: lb,
beIPs: beIPs,
@@ -389,6 +317,10 @@ func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), er

func TestGRPCLB(t *testing.T) {
defer leakcheck.Check(t)

r, cleanup := manual.GenerateAndRegisterManualResolver()
defer cleanup()

tss, cleanup, err := newLoadBalancer(1)
if err != nil {
t.Fatalf("failed to create new load balancer: %v", err)
@@ -405,136 +337,178 @@ func TestGRPCLB(t *testing.T) {
sl := &lbmpb.ServerList{
Servers: bes,
}
tss.ls.sls = []*lbmpb.ServerList{sl}
tss.ls.intervals = []time.Duration{0}
tss.ls.sls <- sl
creds := serverNameCheckCreds{
expected: besn,
expected: beServerName,
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cc, err := grpc.DialContext(ctx, besn,
grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})),
grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
grpc.WithBalancerBuilder(grpc.NewLBBuilder()),
grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
if err != nil {
t.Fatalf("Failed to dial to the backend %v", err)
}
defer cc.Close()
testC := testpb.NewTestServiceClient(cc)

r.NewAddress([]resolver.Address{{
Addr: tss.lbAddr,
Type: resolver.GRPCLB,
ServerName: lbServerName,
}})

if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
}
}

func TestDropRequest(t *testing.T) {
// The remote balancer sends response with duplicates to grpclb client.
func TestGRPCLBWeighted(t *testing.T) {
defer leakcheck.Check(t)

r, cleanup := manual.GenerateAndRegisterManualResolver()
defer cleanup()

tss, cleanup, err := newLoadBalancer(2)
if err != nil {
t.Fatalf("failed to create new load balancer: %v", err)
}
defer cleanup()
tss.ls.sls = []*lbmpb.ServerList{{
Servers: []*lbmpb.Server{{
IpAddress: tss.beIPs[0],
Port: int32(tss.bePorts[0]),
LoadBalanceToken: lbToken,
DropForLoadBalancing: true,
}, {
IpAddress: tss.beIPs[1],
Port: int32(tss.bePorts[1]),
LoadBalanceToken: lbToken,
DropForLoadBalancing: false,
}},

beServers := []*lbmpb.Server{{
IpAddress: tss.beIPs[0],
Port: int32(tss.bePorts[0]),
LoadBalanceToken: lbToken,
}, {
IpAddress: tss.beIPs[1],
Port: int32(tss.bePorts[1]),
LoadBalanceToken: lbToken,
}}
tss.ls.intervals = []time.Duration{0}
portsToIndex := make(map[int]int)
for i := range beServers {
portsToIndex[tss.bePorts[i]] = i
}

creds := serverNameCheckCreds{
expected: besn,
expected: beServerName,
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cc, err := grpc.DialContext(ctx, besn,
grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})),
grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
grpc.WithBalancerBuilder(grpc.NewLBBuilder()),
grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
if err != nil {
t.Fatalf("Failed to dial to the backend %v", err)
}
defer cc.Close()
testC := testpb.NewTestServiceClient(cc)
// Wait until the first connection is up.
// The first one has Drop set to true, error should contain "drop requests".
for {
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
if strings.Contains(err.Error(), "drops requests") {
break

r.NewAddress([]resolver.Address{{
Addr: tss.lbAddr,
Type: resolver.GRPCLB,
ServerName: lbServerName,
}})

sequences := []string{"00101", "00011"}
for _, seq := range sequences {
var (
bes []*lbmpb.Server
p peer.Peer
result string
)
for _, s := range seq {
bes = append(bes, beServers[s-'0'])
}
tss.ls.sls <- &lbmpb.ServerList{Servers: bes}

for i := 0; i < 1000; i++ {
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false), grpc.Peer(&p)); err != nil {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
}
result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port])
}
}
// The 1st, non-fail-fast RPC should succeed. This ensures both server
// connections are made, because the first one has DropForLoadBalancing set to true.
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil {
t.Fatalf("%v.SayHello(_, _) = _, %v, want _, <nil>", testC, err)
}
for i := 0; i < 3; i++ {
// Odd fail-fast RPCs should fail, because the 1st backend has DropForLoadBalancing
// set to true.
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); grpc.Code(err) != codes.Unavailable {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable)
}
// Even fail-fast RPCs should succeed since they choose the
// non-drop-request backend according to the round robin policy.
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
// The generated result will be in format of "0010100101".
if !strings.Contains(result, strings.Repeat(seq, 2)) {
t.Errorf("got result sequence %q, want patten %q", result, seq)
}
}
}

func TestDropRequestFailedNonFailFast(t *testing.T) {
func TestDropRequest(t *testing.T) {
defer leakcheck.Check(t)

r, cleanup := manual.GenerateAndRegisterManualResolver()
defer cleanup()

tss, cleanup, err := newLoadBalancer(1)
if err != nil {
t.Fatalf("failed to create new load balancer: %v", err)
}
defer cleanup()
be := &lbmpb.Server{
IpAddress: tss.beIPs[0],
Port: int32(tss.bePorts[0]),
LoadBalanceToken: lbToken,
DropForLoadBalancing: true,
tss.ls.sls <- &lbmpb.ServerList{
Servers: []*lbmpb.Server{{
IpAddress: tss.beIPs[0],
Port: int32(tss.bePorts[0]),
LoadBalanceToken: lbToken,
DropForLoadBalancing: false,
}, {
DropForLoadBalancing: true,
}},
}
var bes []*lbmpb.Server
bes = append(bes, be)
sl := &lbmpb.ServerList{
Servers: bes,
}
tss.ls.sls = []*lbmpb.ServerList{sl}
tss.ls.intervals = []time.Duration{0}
creds := serverNameCheckCreds{
expected: besn,
expected: beServerName,
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cc, err := grpc.DialContext(ctx, besn,
grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})),
grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
grpc.WithBalancerBuilder(grpc.NewLBBuilder()),
grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
if err != nil {
t.Fatalf("Failed to dial to the backend %v", err)
}
defer cc.Close()
testC := testpb.NewTestServiceClient(cc)
ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.DeadlineExceeded)

r.NewAddress([]resolver.Address{{
Addr: tss.lbAddr,
Type: resolver.GRPCLB,
ServerName: lbServerName,
}})

// The 1st, non-fail-fast RPC should succeed. This ensures both server
// connections are made, because the first one has DropForLoadBalancing set to true.
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil {
t.Fatalf("%v.SayHello(_, _) = _, %v, want _, <nil>", testC, err)
}
for _, failfast := range []bool{true, false} {
for i := 0; i < 3; i++ {
// Even RPCs should fail, because the 2st backend has
// DropForLoadBalancing set to true.
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(failfast)); grpc.Code(err) != codes.Unavailable {
t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable)
}
// Odd RPCs should succeed since they choose the non-drop-request
// backend according to the round robin policy.
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(failfast)); err != nil {
t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
}
}
}
}

// When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list.
func TestBalancerDisconnects(t *testing.T) {
defer leakcheck.Check(t)

r, cleanup := manual.GenerateAndRegisterManualResolver()
defer cleanup()

var (
lbAddrs []string
lbs []*grpc.Server
tests []*testServers
lbs []*grpc.Server
)
for i := 0; i < 3; i++ {
for i := 0; i < 2; i++ {
tss, cleanup, err := newLoadBalancer(1)
if err != nil {
t.Fatalf("failed to create new load balancer: %v", err)
@@ -551,78 +525,149 @@ func TestBalancerDisconnects(t *testing.T) {
sl := &lbmpb.ServerList{
Servers: bes,
}
tss.ls.sls = []*lbmpb.ServerList{sl}
tss.ls.intervals = []time.Duration{0}
tss.ls.sls <- sl

lbAddrs = append(lbAddrs, tss.lbAddr)
tests = append(tests, tss)
lbs = append(lbs, tss.lb)
}

creds := serverNameCheckCreds{
expected: besn,
expected: beServerName,
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
resolver := &testNameResolver{
addrs: lbAddrs[:2],
}
cc, err := grpc.DialContext(ctx, besn,
grpc.WithBalancer(grpc.NewGRPCLBBalancer(resolver)),
grpc.WithBlock(), grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
grpc.WithBalancerBuilder(grpc.NewLBBuilder()),
grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
if err != nil {
t.Fatalf("Failed to dial to the backend %v", err)
}
defer cc.Close()
testC := testpb.NewTestServiceClient(cc)
var previousTrailer string
trailer := metadata.MD{}
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil {

r.NewAddress([]resolver.Address{{
Addr: tests[0].lbAddr,
Type: resolver.GRPCLB,
ServerName: lbServerName,
}, {
Addr: tests[1].lbAddr,
Type: resolver.GRPCLB,
ServerName: lbServerName,
}})

var p peer.Peer
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false), grpc.Peer(&p)); err != nil {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
} else {
previousTrailer = trailer[testmdkey][0]
}
// The initial resolver update contains lbs[0] and lbs[1].
// When lbs[0] is stopped, lbs[1] should be used.
if p.Addr.(*net.TCPAddr).Port != tests[0].bePorts[0] {
t.Fatalf("got peer: %v, want peer port: %v", p.Addr, tests[0].bePorts[0])
}

lbs[0].Stop()
for {
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil {
// Stop balancer[0], balancer[1] should be used by grpclb.
// Check peer address to see if that happened.
for i := 0; i < 1000; i++ {
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false), grpc.Peer(&p)); err != nil {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
} else if trailer[testmdkey][0] != previousTrailer {
// A new backend server should receive the request.
// The trailer contains the backend address, so the trailer should be different from the previous one.
previousTrailer = trailer[testmdkey][0]
break
}
time.Sleep(100 * time.Millisecond)
if p.Addr.(*net.TCPAddr).Port == tests[1].bePorts[0] {
return
}
time.Sleep(time.Millisecond)
}
// Inject a update to add lbs[2] to resolved addresses.
resolver.inject([]*naming.Update{
{Op: naming.Add,
Addr: lbAddrs[2],
Metadata: &naming.AddrMetadataGRPCLB{
AddrType: naming.GRPCLB,
ServerName: lbsn,
},
},
})
// Stop lbs[1]. Now lbs[0] and lbs[1] are all stopped. lbs[2] should be used.
lbs[1].Stop()
for {
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Trailer(&trailer), grpc.FailFast(false)); err != nil {
t.Fatalf("No RPC sent to second backend after 1 second")
}

func TestFallback(t *testing.T) {
defer leakcheck.Check(t)

r, cleanup := manual.GenerateAndRegisterManualResolver()
defer cleanup()

tss, cleanup, err := newLoadBalancer(1)
if err != nil {
t.Fatalf("failed to create new load balancer: %v", err)
}
defer cleanup()

// Start a standalone backend.
beLis, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("Failed to listen %v", err)
}
defer beLis.Close()
standaloneBEs := startBackends(beServerName, true, beLis)
defer stopBackends(standaloneBEs)

be := &lbmpb.Server{
IpAddress: tss.beIPs[0],
Port: int32(tss.bePorts[0]),
LoadBalanceToken: lbToken,
}
var bes []*lbmpb.Server
bes = append(bes, be)
sl := &lbmpb.ServerList{
Servers: bes,
}
tss.ls.sls <- sl
creds := serverNameCheckCreds{
expected: beServerName,
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
grpc.WithBalancerBuilder(grpc.NewLBBuilderWithFallbackTimeout(100*time.Millisecond)),
grpc.WithTransportCredentials(&creds), grpc.WithDialer(fakeNameDialer))
if err != nil {
t.Fatalf("Failed to dial to the backend %v", err)
}
defer cc.Close()
testC := testpb.NewTestServiceClient(cc)

r.NewAddress([]resolver.Address{{
Addr: "",
Type: resolver.GRPCLB,
ServerName: lbServerName,
}, {
Addr: beLis.Addr().String(),
Type: resolver.Backend,
ServerName: beServerName,
}})

var p peer.Peer
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false), grpc.Peer(&p)); err != nil {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
}
if p.Addr.String() != beLis.Addr().String() {
t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr())
}

r.NewAddress([]resolver.Address{{
Addr: tss.lbAddr,
Type: resolver.GRPCLB,
ServerName: lbServerName,
}, {
Addr: beLis.Addr().String(),
Type: resolver.Backend,
ServerName: beServerName,
}})

for i := 0; i < 1000; i++ {
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false), grpc.Peer(&p)); err != nil {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
} else if trailer[testmdkey][0] != previousTrailer {
// A new backend server should receive the request.
// The trailer contains the backend address, so the trailer should be different from the previous one.
break
}
time.Sleep(100 * time.Millisecond)
if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] {
return
}
time.Sleep(time.Millisecond)
}
t.Fatalf("No RPC sent to backend behind remote balancer after 1 second")
}

type failPreRPCCred struct{}

func (failPreRPCCred) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
if strings.Contains(uri[0], "failtosend") {
if strings.Contains(uri[0], failtosendURI) {
return nil, fmt.Errorf("rpc should fail to send")
}
return nil, nil
@@ -640,35 +685,46 @@ func checkStats(stats *lbmpb.ClientStats, expected *lbmpb.ClientStats) error {
}

func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool, runRPCs func(*grpc.ClientConn)) lbmpb.ClientStats {
tss, cleanup, err := newLoadBalancer(3)
defer leakcheck.Check(t)

r, cleanup := manual.GenerateAndRegisterManualResolver()
defer cleanup()

tss, cleanup, err := newLoadBalancer(1)
if err != nil {
t.Fatalf("failed to create new load balancer: %v", err)
}
defer cleanup()
tss.ls.sls = []*lbmpb.ServerList{{
tss.ls.sls <- &lbmpb.ServerList{
Servers: []*lbmpb.Server{{
IpAddress: tss.beIPs[2],
Port: int32(tss.bePorts[2]),
IpAddress: tss.beIPs[0],
Port: int32(tss.bePorts[0]),
LoadBalanceToken: lbToken,
DropForLoadBalancing: dropForLoadBalancing,
DropForRateLimiting: dropForRateLimiting,
}},
}}
tss.ls.intervals = []time.Duration{0}
}
tss.ls.statsDura = 100 * time.Millisecond
creds := serverNameCheckCreds{expected: besn}
creds := serverNameCheckCreds{expected: beServerName}

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cc, err := grpc.DialContext(ctx, besn,
grpc.WithBalancer(grpc.NewGRPCLBBalancer(&testNameResolver{addrs: []string{tss.lbAddr}})),
grpc.WithTransportCredentials(&creds), grpc.WithPerRPCCredentials(failPreRPCCred{}),
grpc.WithBlock(), grpc.WithDialer(fakeNameDialer))
cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName,
grpc.WithBalancerBuilder(grpc.NewLBBuilder()),
grpc.WithTransportCredentials(&creds),
grpc.WithPerRPCCredentials(failPreRPCCred{}),
grpc.WithDialer(fakeNameDialer))
if err != nil {
t.Fatalf("Failed to dial to the backend %v", err)
}
defer cc.Close()

r.NewAddress([]resolver.Address{{
Addr: tss.lbAddr,
Type: resolver.GRPCLB,
ServerName: lbServerName,
}})

runRPCs(cc)
time.Sleep(1 * time.Second)
tss.ls.mu.Lock()
@@ -677,7 +733,11 @@ func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool
return stats
}

const countRPC = 40
const (
countRPC = 40
failtosendURI = "failtosend"
dropErrDesc = "request dropped by grpclb"
)

func TestGRPCLBStatsUnarySuccess(t *testing.T) {
defer leakcheck.Check(t)
@@ -709,7 +769,7 @@ func TestGRPCLBStatsUnaryDropLoadBalancing(t *testing.T) {
for {
c++
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
if strings.Contains(err.Error(), "drops requests") {
if strings.Contains(err.Error(), dropErrDesc) {
break
}
}
@@ -737,7 +797,7 @@ func TestGRPCLBStatsUnaryDropRateLimiting(t *testing.T) {
for {
c++
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
if strings.Contains(err.Error(), "drops requests") {
if strings.Contains(err.Error(), dropErrDesc) {
break
}
}
@@ -766,7 +826,7 @@ func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) {
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
}
for i := 0; i < countRPC-1; i++ {
grpc.Invoke(context.Background(), "failtosend", &testpb.Empty{}, nil, cc)
grpc.Invoke(context.Background(), failtosendURI, &testpb.Empty{}, nil, cc)
}
})

@@ -824,7 +884,7 @@ func TestGRPCLBStatsStreamingDropLoadBalancing(t *testing.T) {
for {
c++
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
if strings.Contains(err.Error(), "drops requests") {
if strings.Contains(err.Error(), dropErrDesc) {
break
}
}
@@ -852,7 +912,7 @@ func TestGRPCLBStatsStreamingDropRateLimiting(t *testing.T) {
for {
c++
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
if strings.Contains(err.Error(), "drops requests") {
if strings.Contains(err.Error(), dropErrDesc) {
break
}
}
@@ -887,7 +947,7 @@ func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) {
}
}
for i := 0; i < countRPC-1; i++ {
grpc.NewClientStream(context.Background(), &grpc.StreamDesc{}, cc, "failtosend")
grpc.NewClientStream(context.Background(), &grpc.StreamDesc{}, cc, failtosendURI)
}
})