Fix flaky tests in transport. (#2120)

* Fix flaky tests in transport.

* Post-review update
mmukhi
2018-06-12 17:52:44 -07:00
committed by GitHub
parent fb845db15c
commit 107f19369e
2 changed files with 160 additions and 115 deletions


@@ -58,14 +58,20 @@ type writeQuota struct {
 	ch chan struct{}
 	// done is triggered in error case.
 	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back.
+	// It is implemented as a field so that it can be updated
+	// by tests.
+	replenish func(n int)
 }
 
 func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
-	return &writeQuota{
+	w := &writeQuota{
 		quota: sz,
 		ch: make(chan struct{}, 1),
 		done: done,
 	}
+	w.replenish = w.realReplenish
+	return w
 }
 
 func (w *writeQuota) get(sz int32) error {
@@ -83,7 +89,7 @@ func (w *writeQuota) get(sz int32) error {
 	}
 }
 
-func (w *writeQuota) replenish(n int) {
+func (w *writeQuota) realReplenish(n int) {
 	sz := int32(n)
 	a := atomic.AddInt32(&w.quota, sz)
 	b := a - sz


@@ -51,6 +51,7 @@ type server struct {
 	mu sync.Mutex
 	conns map[ServerTransport]bool
 	h *testStreamHandler
+	ready chan struct{}
 }
 
 var (
@@ -62,8 +63,9 @@
 )
 
 type testStreamHandler struct {
 	t *http2Server
 	notify chan struct{}
+	getNotified chan struct{}
 }
 
 type hType int
@@ -76,7 +78,6 @@
 	encodingRequiredStatus
 	invalidHeaderField
 	delayRead
-	delayWrite
 	pingpong
 )
@@ -182,6 +183,10 @@ func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) {
 	})
 }
 
+// handleStreamDelayRead delays its reads so that the other side has to halt
+// on stream-level flow control.
+// This handler assumes dynamic flow control is turned off and that window
+// sizes are set to defaultWindowSize.
 func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {
 	req := expectedRequest
 	resp := expectedResponse
@@ -189,11 +194,52 @@ func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {
 		req = expectedRequestLarge
 		resp = expectedResponseLarge
 	}
+	var (
+		mu sync.Mutex
+		total int
+	)
+	s.wq.replenish = func(n int) {
+		mu.Lock()
+		total += n
+		mu.Unlock()
+		s.wq.realReplenish(n)
+	}
+	getTotal := func() int {
+		mu.Lock()
+		defer mu.Unlock()
+		return total
+	}
+	done := make(chan struct{})
+	defer close(done)
+	go func() {
+		for {
+			select {
+			// Prevent goroutine from leaking.
+			case <-done:
+				return
+			default:
+			}
+			if getTotal() == defaultWindowSize {
+				// Signal the client to start reading and
+				// thereby send a window update.
+				close(h.notify)
+				return
+			}
+			runtime.Gosched()
+		}
+	}()
 	p := make([]byte, len(req))
-	// Wait before reading. Give time to client to start sending
-	// before server starts reading.
-	time.Sleep(2 * time.Second)
+	// Let the other side run out of stream-level window before
+	// starting to read and thereby sending a window update.
+	timer := time.NewTimer(time.Second * 10)
+	select {
+	case <-h.getNotified:
+		timer.Stop()
+	case <-timer.C:
+		t.Errorf("Server timed out.")
+		return
+	}
 	_, err := s.Read(p)
 	if err != nil {
 		t.Errorf("s.Read(_) = _, %v, want _, <nil>", err)
@@ -204,41 +250,19 @@ func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {
 		t.Errorf("handleStream got %v, want %v", p, req)
 		return
 	}
-	// send a response back to the client.
+	// This write will cause the server to run out of stream-level
+	// flow control; the other side won't send a window update until
+	// that happens.
 	if err := h.t.Write(s, nil, resp, &Options{}); err != nil {
 		t.Errorf("server Write got %v, want <nil>", err)
 		return
 	}
-	// send the trailer to end the stream.
-	if err := h.t.WriteStatus(s, status.New(codes.OK, "")); err != nil {
-		t.Errorf("server WriteStatus got %v, want <nil>", err)
-		return
-	}
-}
-
-func (h *testStreamHandler) handleStreamDelayWrite(t *testing.T, s *Stream) {
-	req := expectedRequest
-	resp := expectedResponse
-	if s.Method() == "foo.Large" {
-		req = expectedRequestLarge
-		resp = expectedResponseLarge
-	}
-	p := make([]byte, len(req))
-	_, err := s.Read(p)
-	if err != nil {
-		t.Errorf("s.Read(_) = _, %v, want _, <nil>", err)
-		return
-	}
-	if !bytes.Equal(p, req) {
-		t.Errorf("handleStream got %v, want %v", p, req)
-		return
-	}
-	// Wait before sending. Give time to client to start reading
-	// before server starts sending.
-	time.Sleep(2 * time.Second)
-	if err := h.t.Write(s, nil, resp, &Options{}); err != nil {
-		t.Errorf("server Write got %v, want <nil>", err)
-		return
-	}
+	// Read one more time to make sure everything remains fine and that
+	// the goroutine we launched earlier to signal the client to read
+	// gets enough time to run.
+	_, err = s.Read(p)
+	if err != nil {
+		t.Errorf("s.Read(_) = _, %v, want _, nil", err)
+		return
+	}
 	// send the trailer to end the stream.
@@ -317,17 +341,16 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) {
 			return ctx
 		})
 	case delayRead:
+		h.notify = make(chan struct{})
+		h.getNotified = make(chan struct{})
+		s.mu.Lock()
+		close(s.ready)
+		s.mu.Unlock()
 		go transport.HandleStreams(func(s *Stream) {
 			go h.handleStreamDelayRead(t, s)
 		}, func(ctx context.Context, method string) context.Context {
 			return ctx
 		})
-	case delayWrite:
-		go transport.HandleStreams(func(s *Stream) {
-			go h.handleStreamDelayWrite(t, s)
-		}, func(ctx context.Context, method string) context.Context {
-			return ctx
-		})
 	case pingpong:
 		go transport.HandleStreams(func(s *Stream) {
 			go h.handleStreamPingPong(t, s)
@@ -366,7 +389,7 @@ func (s *server) stop() {
 }
 
 func setUpServerOnly(t *testing.T, port int, serverConfig *ServerConfig, ht hType) *server {
-	server := &server{startedErr: make(chan error, 1)}
+	server := &server{startedErr: make(chan error, 1), ready: make(chan struct{})}
 	go server.start(t, port, serverConfig, ht)
 	server.wait(t, 2*time.Second)
 	return server
@@ -957,83 +980,99 @@ func TestLargeMessage(t *testing.T) {
 }
 
 func TestLargeMessageWithDelayRead(t *testing.T) {
-	server, ct := setUp(t, 0, math.MaxUint32, delayRead)
+	// Disable dynamic flow control.
+	sc := &ServerConfig{
+		InitialWindowSize: defaultWindowSize,
+		InitialConnWindowSize: defaultWindowSize,
+	}
+	co := ConnectOptions{
+		InitialWindowSize: defaultWindowSize,
+		InitialConnWindowSize: defaultWindowSize,
+	}
+	server, ct := setUpWithOptions(t, 0, sc, delayRead, co, func() {})
+	defer server.stop()
+	defer ct.Close()
+	server.mu.Lock()
+	ready := server.ready
+	server.mu.Unlock()
 	callHdr := &CallHdr{
 		Host: "localhost",
 		Method: "foo.Large",
 	}
-	var wg sync.WaitGroup
-	for i := 0; i < 2; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
-			defer cancel()
-			s, err := ct.NewStream(ctx, callHdr)
-			if err != nil {
-				t.Errorf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
-				return
-			}
-			if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil {
-				t.Errorf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
-				return
-			}
-			p := make([]byte, len(expectedResponseLarge))
-			// Give time to server to begin sending before client starts reading.
-			time.Sleep(2 * time.Second)
-			if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
-				t.Errorf("s.Read(_) = _, %v, want _, <nil>", err)
-				return
-			}
-			if _, err = s.Read(p); err != io.EOF {
-				t.Errorf("Failed to complete the stream %v; want <EOF>", err)
-			}
-		}()
-	}
-	wg.Wait()
-	ct.Close()
-	server.stop()
-}
-
-func TestLargeMessageDelayWrite(t *testing.T) {
-	server, ct := setUp(t, 0, math.MaxUint32, delayWrite)
-	callHdr := &CallHdr{
-		Host: "localhost",
-		Method: "foo.Large",
-	}
-	var wg sync.WaitGroup
-	for i := 0; i < 2; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
-			defer cancel()
-			s, err := ct.NewStream(ctx, callHdr)
-			if err != nil {
-				t.Errorf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
-				return
-			}
-
-			// Give time to server to start reading before client starts sending.
-			time.Sleep(2 * time.Second)
-			if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil {
-				t.Errorf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
-				return
-			}
-			p := make([]byte, len(expectedResponseLarge))
-			if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
-				t.Errorf("io.ReadFull(%v) = _, %v, want %v, <nil>", err, p, expectedResponse)
-				return
-			}
-			if _, err = s.Read(p); err != io.EOF {
-				t.Errorf("Failed to complete the stream %v; want <EOF>", err)
-			}
-		}()
-	}
-	wg.Wait()
-	ct.Close()
-	server.stop()
+	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))
+	defer cancel()
+	s, err := ct.NewStream(ctx, callHdr)
+	if err != nil {
+		t.Fatalf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
+		return
+	}
+	// Wait for the server's handler to be initialized.
+	select {
+	case <-ready:
+	case <-ctx.Done():
+		t.Fatalf("Client timed out waiting for server handler to be initialized.")
+	}
+	server.mu.Lock()
+	serviceHandler := server.h
+	server.mu.Unlock()
+	var (
+		mu sync.Mutex
+		total int
+	)
+	s.wq.replenish = func(n int) {
+		mu.Lock()
+		total += n
+		mu.Unlock()
+		s.wq.realReplenish(n)
+	}
+	getTotal := func() int {
+		mu.Lock()
+		defer mu.Unlock()
+		return total
+	}
+	done := make(chan struct{})
+	defer close(done)
+	go func() {
+		for {
+			select {
+			// Prevent goroutine from leaking in case of error.
+			case <-done:
+				return
+			default:
+			}
+			if getTotal() == defaultWindowSize {
+				// Unblock the server so it can read and thereby
+				// send a stream-level window update.
+				close(serviceHandler.getNotified)
+				return
+			}
+			runtime.Gosched()
+		}
+	}()
+	// This write will cause the client to run out of stream-level
+	// flow control; the other side won't send a window update until
+	// that happens.
+	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{}); err != nil {
+		t.Fatalf("write(_, _, _) = %v, want <nil>", err)
+	}
+	p := make([]byte, len(expectedResponseLarge))
+	// Wait for the other side to run out of stream-level flow control
+	// before reading and thereby sending a window update.
+	select {
+	case <-serviceHandler.notify:
+	case <-ctx.Done():
+		t.Fatalf("Client timed out")
+	}
+	if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) {
+		t.Fatalf("s.Read(_) = _, %v, want _, <nil>", err)
+	}
+	if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil {
+		t.Fatalf("Write(_, _, _) = %v, want <nil>", err)
+	}
+	if _, err = s.Read(p); err != io.EOF {
+		t.Fatalf("Failed to complete the stream %v; want <EOF>", err)
+	}
 }
 
 func TestGracefulClose(t *testing.T) {
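Stepping back, both rewritten paths replace fixed sleeps with the same deterministic idea: a watcher goroutine polls an observable quantity (here, the bytes returned through the replenish hook) and closes a channel once it reaches the stream window, while the waiting side selects on that channel against a deadline. A condensed standalone sketch of that pattern, with illustrative names and an assumed target of 65535 bytes standing in for defaultWindowSize:

// Condensed, standalone sketch of the synchronization pattern used above:
// poll a condition in a watcher goroutine, close a channel when it holds,
// and have the waiter select on the channel against a deadline.
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

func main() {
	var progress int64
	const target = 65535 // assumed stand-in for defaultWindowSize

	reached := make(chan struct{})
	done := make(chan struct{})
	defer close(done)

	go func() {
		for {
			select {
			case <-done: // prevent a leak if the waiter gives up
				return
			default:
			}
			if atomic.LoadInt64(&progress) >= target {
				close(reached)
				return
			}
			runtime.Gosched()
		}
	}()

	// Something else drives progress; this ticker stands in for replenish calls.
	go func() {
		for i := 0; i < 100; i++ {
			atomic.AddInt64(&progress, target/100+1)
			time.Sleep(time.Millisecond)
		}
	}()

	select {
	case <-reached:
		fmt.Println("condition reached; proceed deterministically")
	case <-time.After(10 * time.Second):
		fmt.Println("timed out waiting for condition")
	}
}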