Mirror of https://github.com/ipfs/kubo.git

Fix 'ctx, _' to have explicit cancel

License: MIT
Signed-off-by: rht <rhtbot@gmail.com>
Author: rht
Date: 2015-08-23 19:33:53 +07:00
Parent: 34e06f6c95
Commit: a7202fa94c

9 changed files with 37 additions and 23 deletions
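Background on the pattern this commit fixes: context.WithTimeout and context.WithDeadline return a CancelFunc alongside the derived context, and assigning it to _ means the derived context and its timer are not released until the parent context is cancelled or the deadline fires. Capturing the function and calling defer cancel() releases those resources as soon as the surrounding function returns (later versions of go vet flag the discarded form via the lostcancel check). The sketch below is a hypothetical, minimal illustration, not code from this repository; it uses the standard library context package, whereas the repository at the time vendored golang.org/x/net/context, which exposes the same WithTimeout/WithDeadline API.

package main

import (
	"context"
	"fmt"
	"time"
)

// fetch is a hypothetical helper illustrating the pattern; it is not code
// from this repository.
func fetch(parent context.Context) error {
	// The pre-commit form was:
	//     ctx, _ := context.WithTimeout(parent, 5*time.Second)
	// which discards the CancelFunc and keeps the derived context alive
	// until the deadline fires. Capturing cancel and deferring it releases
	// the context as soon as fetch returns.
	ctx, cancel := context.WithTimeout(parent, 5*time.Second)
	defer cancel()

	select {
	case <-time.After(100 * time.Millisecond): // stand-in for real work, e.g. fetching a block
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	fmt.Println(fetch(context.Background()))
}

Note that in the changes below the same two-line fix (capture cancel, defer it) is applied at every call site; the diffs are reproduced per file.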

View File

@@ -42,7 +42,8 @@ func TestBlocks(t *testing.T) {
 		t.Error("returned key is not equal to block key", err)
 	}
-	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
+	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
+	defer cancel()
 	b2, err := bs.GetBlock(ctx, b.Key())
 	if err != nil {
 		t.Error("failed to retrieve block from BlockService", err)
@@ -75,7 +76,8 @@ func TestGetBlocksSequential(t *testing.T) {
 	t.Log("one instance at a time, get blocks concurrently")
 	for i := 1; i < len(servs); i++ {
-		ctx, _ := context.WithTimeout(context.TODO(), time.Second*50)
+		ctx, cancel := context.WithTimeout(context.TODO(), time.Second*50)
+		defer cancel()
 		out := servs[i].GetBlocks(ctx, keys)
 		gotten := make(map[key.Key]*blocks.Block)
 		for blk := range out {

View File

@@ -110,7 +110,8 @@ func Bootstrap(n *IpfsNode, cfg BootstrapConfig) (io.Closer, error) {
 func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
-	ctx, _ = context.WithTimeout(ctx, cfg.ConnectionTimeout)
+	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
+	defer cancel()
 	id := host.ID()
 	// get bootstrap peers from config. retrieving them here makes

View File

@@ -320,7 +320,8 @@ func setupDiscoveryOption(d config.Discovery) DiscoveryOption {
 func (n *IpfsNode) HandlePeerFound(p peer.PeerInfo) {
 	log.Warning("trying peer info: ", p)
-	ctx, _ := context.WithTimeout(n.Context(), time.Second*10)
+	ctx, cancel := context.WithTimeout(n.Context(), time.Second*10)
+	defer cancel()
 	if err := n.PeerHost.Connect(ctx, p); err != nil {
 		log.Warning("Failed to connect to peer found by discovery: ", err)
 	}

View File

@@ -298,7 +298,8 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, s inet.Stream) error {
 	if timeout < HopTimeoutDecrement {
 		return fmt.Errorf("timeout too short: %s", timeout)
 	}
-	ctx, _ = context.WithTimeout(ctx, timeout)
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
 	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement)
 	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)

View File

@@ -50,7 +50,8 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this
 	solo := g.Next()
 	defer solo.Exchange.Close()
-	ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+	defer cancel()
 	_, err := solo.Exchange.GetBlock(ctx, block.Key())
 	if err != context.DeadlineExceeded {
@@ -76,7 +77,8 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
 	wantsBlock := peers[1]
 	defer wantsBlock.Exchange.Close()
-	ctx, _ := context.WithTimeout(context.Background(), time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
 	received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
 	if err != nil {
 		t.Log(err)
@@ -226,14 +228,16 @@ func TestSendToWantingPeer(t *testing.T) {
 	alpha := bg.Next()
 	// peerA requests and waits for block alpha
-	ctx, _ := context.WithTimeout(context.TODO(), waitTime)
+	ctx, cancel := context.WithTimeout(context.TODO(), waitTime)
+	defer cancel()
 	alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()})
 	if err != nil {
 		t.Fatal(err)
 	}
 	// peerB announces to the network that he has block alpha
-	ctx, _ = context.WithTimeout(context.TODO(), timeout)
+	ctx, cancel = context.WithTimeout(context.TODO(), timeout)
+	defer cancel()
 	err = peerB.Exchange.HasBlock(ctx, alpha)
 	if err != nil {
 		t.Fatal(err)
@@ -266,7 +270,8 @@ func TestBasicBitswap(t *testing.T) {
 		t.Fatal(err)
 	}
-	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
+	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
+	defer cancel()
 	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
 	if err != nil {
 		t.Fatal(err)

View File

@@ -112,7 +112,8 @@ func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) {
 func TestCarryOnWhenDeadlineExpires(t *testing.T) {
 	impossibleDeadline := time.Nanosecond
-	fastExpiringCtx, _ := context.WithTimeout(context.Background(), impossibleDeadline)
+	fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline)
+	defer cancel()
 	n := New()
 	defer n.Shutdown()

View File

@@ -60,7 +60,8 @@ func (p *ipnsPublisher) Publish(ctx context.Context, k ci.PrivKey, value path.Pa
 	log.Debugf("Storing pubkey at: %s", namekey)
 	// Store associated public key
-	timectx, _ := context.WithDeadline(ctx, time.Now().Add(time.Second*10))
+	timectx, cancel := context.WithDeadline(ctx, time.Now().Add(time.Second*10))
+	defer cancel()
 	err = p.routing.PutValue(timectx, namekey, pkbytes)
 	if err != nil {
 		return err
@@ -70,9 +71,9 @@ func (p *ipnsPublisher) Publish(ctx context.Context, k ci.PrivKey, value path.Pa
 	log.Debugf("Storing ipns entry at: %s", ipnskey)
 	// Store ipns entry at "/ipns/"+b58(h(pubkey))
-	timectx, _ = context.WithDeadline(ctx, time.Now().Add(time.Second*10))
-	err = p.routing.PutValue(timectx, ipnskey, data)
-	if err != nil {
+	timectx, cancel = context.WithDeadline(ctx, time.Now().Add(time.Second*10))
+	defer cancel()
+	if err := p.routing.PutValue(timectx, ipnskey, data); err != nil {
 		return err
 	}

View File

@@ -210,21 +210,21 @@ func TestPinRecursiveFail(t *testing.T) {
 	}
 	// Note: this isnt a time based test, we expect the pin to fail
-	mctx, _ := context.WithTimeout(ctx, time.Millisecond)
+	mctx, cancel := context.WithTimeout(ctx, time.Millisecond)
+	defer cancel()
 	err = p.Pin(mctx, a, true)
 	if err == nil {
 		t.Fatal("should have failed to pin here")
 	}
-	_, err = dserv.Add(b)
-	if err != nil {
+	if _, err := dserv.Add(b); err != nil {
 		t.Fatal(err)
 	}
 	// this one is time based... but shouldnt cause any issues
-	mctx, _ = context.WithTimeout(ctx, time.Second)
-	err = p.Pin(mctx, a, true)
-	if err != nil {
+	mctx, cancel = context.WithTimeout(ctx, time.Second)
+	defer cancel()
+	if err := p.Pin(mctx, a, true); err != nil {
 		t.Fatal(err)
 	}
 }

View File

@@ -202,7 +202,8 @@ func TestNotFound(t *testing.T) {
 	}
 	// long timeout to ensure timing is not at play.
-	ctx, _ = context.WithTimeout(ctx, time.Second*20)
+	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
+	defer cancel()
 	v, err := d.GetValue(ctx, key.Key("hello"))
 	log.Debugf("get value got %v", v)
 	if err != nil {
@@ -274,7 +275,8 @@ func TestLessThanKResponses(t *testing.T) {
 		})
 	}
-	ctx, _ = context.WithTimeout(ctx, time.Second*30)
+	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
+	defer cancel()
 	if _, err := d.GetValue(ctx, key.Key("hello")); err != nil {
 		switch err {
 		case routing.ErrNotFound: