[xds_ignore_weight_0_localities] xds: ignore localities with weight 0 (#2875)

Locality-weighted load balancing can be enabled by setting an option in
CDS, along with the weight of each locality. Currently, without the
guarantee that CDS is always sent, we assume locality-weighted load
balancing is always enabled, and ignore all localities with weight 0.

In the future, we should look at the config in the CDS response and decide
whether locality weight matters.
Author: Menghan Li
Date: 2019-06-24 09:40:15 -07:00
Committed by: GitHub
Parent: 43c0599b22
Commit: e8b58ff637
3 changed files with 49 additions and 6 deletions
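
To make the new behavior concrete, here is a minimal, self-contained Go
sketch of the rule this commit introduces. The types and names (locality,
dropZeroWeight) are illustrative stand-ins, not the actual envoy proto types
or grpc-go helpers: any locality whose load-balancing weight is 0 is dropped
before per-locality balancers are created or updated.

package main

import "fmt"

// locality is a simplified stand-in for envoy's LocalityLbEndpoints; the
// real proto carries the weight in a wrapped LoadBalancingWeight field.
type locality struct {
    name   string
    weight uint32
    addrs  []string
}

// dropZeroWeight filters out every locality whose weight is 0, mirroring the
// loop added to HandleEDSResponse below: such localities never get a child
// balancer and are never picked.
func dropZeroWeight(in []locality) []locality {
    out := make([]locality, 0, len(in))
    for _, l := range in {
        if l.weight == 0 {
            continue
        }
        out = append(out, l)
    }
    return out
}

func main() {
    eds := []locality{
        {name: "sub-zone-0", weight: 0, addrs: []string{"10.0.0.1:8080"}},
        {name: "sub-zone-1", weight: 2, addrs: []string{"10.0.0.2:8080"}},
    }
    // Only sub-zone-1 survives the filter.
    fmt.Println(dropZeroWeight(eds))
}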


@@ -81,7 +81,13 @@ func newBalancerGroup(cc balancer.ClientConn, loadStore lrs.Store) *balancerGroup
}
// add adds a balancer built by builder to the group, with given id and weight.
//
// weight should never be zero.
func (bg *balancerGroup) add(id internal.Locality, weight uint32, builder balancer.Builder) {
if weight == 0 {
grpclog.Errorf("balancerGroup.add called with weight 0, locality: %v. Locality is not added to balancer group", id)
return
}
bg.mu.Lock()
if _, ok := bg.idToBalancer[id]; ok {
bg.mu.Unlock()
@@ -140,10 +146,16 @@ func (bg *balancerGroup) remove(id internal.Locality) {
// changeWeight changes the weight of the balancer.
//
// newWeight should never be zero.
//
// NOTE: this always results in a picker update now. This probably isn't
// necessary, but it seems better to do the update because it's a change in
// the picker (which is the balancer's snapshot).
func (bg *balancerGroup) changeWeight(id internal.Locality, newWeight uint32) {
if newWeight == 0 {
grpclog.Errorf("balancerGroup.changeWeight called with newWeight 0. Weight is not changed")
return
}
bg.pickerMu.Lock()
defer bg.pickerMu.Unlock()
pState, ok := bg.idToPickerState[id]
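
For context on why a zero weight is rejected in add and changeWeight: the
balancer group aggregates its members into a weighted picker, and an entry
with weight 0 would never be chosen. The sketch below is a generic smooth
weighted-round-robin loop (a hypothetical illustration, not the grpc-go
picker implementation) that makes this visible.

package main

import "fmt"

// member pairs an id with a load-balancing weight, standing in for a
// locality's child balancer inside the group.
type member struct {
    id     string
    weight uint32
}

// pickSequence runs a simple smooth weighted round robin for n picks. A
// member with weight 0 never accumulates credit and is never returned, which
// is why zero-weight entries are filtered or rejected before reaching the group.
func pickSequence(members []member, n int) []string {
    var total int64
    for _, m := range members {
        total += int64(m.weight)
    }
    credit := make([]int64, len(members))
    out := make([]string, 0, n)
    for i := 0; i < n; i++ {
        best := 0
        for j, m := range members {
            credit[j] += int64(m.weight)
            if credit[j] > credit[best] {
                best = j
            }
        }
        credit[best] -= total
        out = append(out, members[best].id)
    }
    return out
}

func main() {
    // locality-0 has weight 0 and never shows up; the other two are picked 1:2.
    fmt.Println(pickSequence([]member{{"locality-0", 0}, {"locality-1", 1}, {"locality-2", 2}}, 6))
}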


@@ -29,6 +29,7 @@ import (
"google.golang.org/grpc/balancer/roundrobin"
"google.golang.org/grpc/balancer/xds/internal"
edspb "google.golang.org/grpc/balancer/xds/internal/proto/envoy/api/v2/eds"
endpointpb "google.golang.org/grpc/balancer/xds/internal/proto/envoy/api/v2/endpoint/endpoint"
percentpb "google.golang.org/grpc/balancer/xds/internal/proto/envoy/type/percent"
"google.golang.org/grpc/balancer/xds/lrs"
"google.golang.org/grpc/codes"
@@ -187,10 +188,27 @@ func (xdsB *EDSBalancer) HandleEDSResponse(edsResp *edspb.ClusterLoadAssignment)
xdsB.updateDrops(edsResp.GetPolicy().GetDropOverloads())
// Filter out all localities with weight 0.
//
// Locality-weighted load balancing can be enabled by setting an option in
// CDS, along with the weight of each locality. Currently, without the
// guarantee that CDS is always sent, we assume locality-weighted load
// balancing is always enabled, and ignore all localities with weight 0.
//
// In the future, we should look at the config in CDS response and decide
// whether locality weight matters.
newEndpoints := make([]*endpointpb.LocalityLbEndpoints, 0, len(edsResp.Endpoints))
for _, locality := range edsResp.Endpoints {
if locality.GetLoadBalancingWeight().GetValue() == 0 {
continue
}
newEndpoints = append(newEndpoints, locality)
}
// newLocalitiesSet contains all names of localities in the new EDS response.
// It's used to delete localities that are removed in the new EDS response.
newLocalitiesSet := make(map[internal.Locality]struct{})
for _, locality := range edsResp.Endpoints {
for _, locality := range newEndpoints {
// One balancer for each locality.
l := locality.GetLocality()
@@ -206,11 +224,6 @@ func (xdsB *EDSBalancer) HandleEDSResponse(edsResp *edspb.ClusterLoadAssignment)
newLocalitiesSet[lid] = struct{}{}
newWeight := locality.GetLoadBalancingWeight().GetValue()
if newWeight == 0 {
// Weight can never be 0.
newWeight = 1
}
var newAddrs []resolver.Address
for _, lbEndpoint := range locality.GetLbEndpoints() {
socketAddress := lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()
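
A side note on the fallback removed just above (the block that bumped a zero
newWeight to 1): generated proto getters are nil-safe, so
GetLoadBalancingWeight().GetValue() reads as 0 both when the weight is
explicitly 0 and when it is unset. Both cases are now filtered out before
this point is reached, which is presumably why the fallback is no longer
needed. The stand-in type below (not the generated code) sketches that
getter behavior.

package main

import "fmt"

// uint32Value is a stand-in for the generated wrapper type behind
// LoadBalancingWeight (a wrapperspb-style message).
type uint32Value struct{ value uint32 }

// getValue mimics a generated proto getter: it is safe to call on a nil
// receiver and returns the zero value in that case.
func (v *uint32Value) getValue() uint32 {
    if v == nil {
        return 0
    }
    return v.value
}

func main() {
    var unset *uint32Value                 // locality with no weight set
    explicitZero := &uint32Value{value: 0} // locality with weight 0
    // Both read as 0, so both are dropped by the new filter in HandleEDSResponse.
    fmt.Println(unset.getValue(), explicitZero.getValue())
}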


@@ -334,6 +334,24 @@ func TestEDS_TwoLocalities(t *testing.T) {
}); err != nil {
t.Fatalf("want %v, got %v", want, err)
}
// Change the weight of locality[1] to 0; it should never be picked.
clab6 := newClusterLoadAssignmentBuilder(testClusterNames[0], nil)
clab6.addLocality(testSubZones[1], 0, testEndpointAddrs[1:2])
clab6.addLocality(testSubZones[2], 1, testEndpointAddrs[2:4])
edsb.HandleEDSResponse(clab6.build())
// Test pick with the two subconns of the locality with non-zero weight.
p6 := <-cc.newPickerCh
// Locality-1 will not be picked, and locality-2 will be picked.
// Locality-2 contains sc3 and sc4, so expect sc3 and sc4.
want = []balancer.SubConn{sc3, sc4}
if err := isRoundRobin(want, func() balancer.SubConn {
sc, _, _ := p6.Pick(context.Background(), balancer.PickOptions{})
return sc
}); err != nil {
t.Fatalf("want %v, got %v", want, err)
}
}
func TestClose(t *testing.T) {
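
The assertions in TestEDS_TwoLocalities rely on an isRoundRobin helper that
repeatedly pulls from the picker and checks the distribution of picks. A
simplified version could look like the sketch below; it is an assumed
reimplementation for illustration (working on strings rather than
balancer.SubConn values), not the actual test helper.

package main

import "fmt"

// isRoundRobinSketch checks that the first len(want) picks cover every wanted
// element and that the next len(want) picks repeat the same order.
func isRoundRobinSketch(want []string, pick func() string) error {
    first := make([]string, len(want))
    seen := make(map[string]bool)
    for i := range first {
        first[i] = pick()
        seen[first[i]] = true
    }
    for _, w := range want {
        if !seen[w] {
            return fmt.Errorf("%q was never picked in the first round", w)
        }
    }
    for i := range first {
        if got := pick(); got != first[i] {
            return fmt.Errorf("picks are not round robin: got %q, want %q", got, first[i])
        }
    }
    return nil
}

func main() {
    conns := []string{"sc3", "sc4"}
    i := -1
    // A picker that alternates sc3 and sc4, as expected after the weight change.
    err := isRoundRobinSketch(conns, func() string { i++; return conns[i%len(conns)] })
    fmt.Println(err) // <nil>
}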