Replace context.TODO in test files with context.Background
License: MIT
Signed-off-by: rht <rhtbot@gmail.com>
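Both context.TODO() and context.Background() return an empty, non-nil root Context; the difference is intent. TODO() marks a call site that has not yet decided which Context should flow in, while Background() is the conventional root when the caller genuinely owns the context, as tests do. The hunks below therefore derive timeouts and cancellation from context.Background(), or reuse a single test-scoped ctx. A minimal sketch of that pattern, assuming a hypothetical doWork helper that is not part of this repository:

package example

import (
    "context"
    "testing"
    "time"
)

// doWork stands in for the block-service / bitswap calls exercised below;
// it waits for a short task or for the context to be cancelled.
func doWork(ctx context.Context) error {
    select {
    case <-time.After(10 * time.Millisecond):
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func TestDoWork(t *testing.T) {
    // Root the test's context in Background() and bound it with a timeout,
    // mirroring the context.WithTimeout(context.Background(), ...) calls below.
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    if err := doWork(ctx); err != nil {
        t.Fatal(err)
    }
}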
@@ -42,7 +42,7 @@ func TestBlocks(t *testing.T) {
         t.Error("returned key is not equal to block key", err)
     }
 
-    ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
     defer cancel()
     b2, err := bs.GetBlock(ctx, b.Key())
     if err != nil {
@@ -76,7 +76,7 @@ func TestGetBlocksSequential(t *testing.T) {
     t.Log("one instance at a time, get blocks concurrently")
 
     for i := 1; i < len(servs); i++ {
-        ctx, cancel := context.WithTimeout(context.TODO(), time.Second*50)
+        ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
         defer cancel()
         out := servs[i].GetBlocks(ctx, keys)
         gotten := make(map[key.Key]*blocks.Block)
@@ -10,7 +10,7 @@ import (
 )
 
 func TestInitialization(t *testing.T) {
-    ctx := context.TODO()
+    ctx := context.Background()
     id := testIdentity
 
     good := []*config.Config{
@@ -30,6 +30,7 @@ func getDagserv(t *testing.T) merkledag.DAGService {
 }
 
 func TestMetadata(t *testing.T) {
+    ctx := context.Background()
     // Make some random node
     ds := getDagserv(t)
     data := make([]byte, 1000)
@@ -64,12 +65,12 @@ func TestMetadata(t *testing.T) {
         t.Fatalf("something went wrong in conversion: '%s' != '%s'", rec.MimeType, m.MimeType)
     }
 
-    retnode, err := ds.Get(context.Background(), key.B58KeyDecode(mdk))
+    retnode, err := ds.Get(ctx, key.B58KeyDecode(mdk))
     if err != nil {
         t.Fatal(err)
     }
 
-    ndr, err := uio.NewDagReader(context.TODO(), retnode, ds)
+    ndr, err := uio.NewDagReader(ctx, retnode, ds)
     if err != nil {
         t.Fatal(err)
     }
@@ -144,6 +144,7 @@ func TestLargeFileTwoPeers(t *testing.T) {
 }
 
 func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
+    ctx := context.Background()
     if testing.Short() {
         t.SkipNow()
     }
@@ -161,7 +162,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
     first := instances[0]
     for _, b := range blocks {
         blkeys = append(blkeys, b.Key())
-        first.Exchange.HasBlock(context.Background(), b)
+        first.Exchange.HasBlock(ctx, b)
     }
 
     t.Log("Distribute!")
@@ -171,7 +172,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
         wg.Add(1)
         go func(inst Instance) {
             defer wg.Done()
-            outch, err := inst.Exchange.GetBlocks(context.TODO(), blkeys)
+            outch, err := inst.Exchange.GetBlocks(ctx, blkeys)
             if err != nil {
                 t.Fatal(err)
             }
@@ -228,7 +229,7 @@ func TestSendToWantingPeer(t *testing.T) {
 
     alpha := bg.Next()
     // peerA requests and waits for block alpha
-    ctx, cancel := context.WithTimeout(context.TODO(), waitTime)
+    ctx, cancel := context.WithTimeout(context.Background(), waitTime)
     defer cancel()
     alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()})
     if err != nil {
@@ -236,7 +237,7 @@ func TestSendToWantingPeer(t *testing.T) {
     }
 
     // peerB announces to the network that he has block alpha
-    ctx, cancel = context.WithTimeout(context.TODO(), timeout)
+    ctx, cancel = context.WithTimeout(context.Background(), timeout)
     defer cancel()
     err = peerB.Exchange.HasBlock(ctx, alpha)
     if err != nil {
@@ -265,12 +266,12 @@ func TestBasicBitswap(t *testing.T) {
 
     instances := sg.Instances(2)
     blocks := bg.Blocks(1)
-    err := instances[0].Exchange.HasBlock(context.TODO(), blocks[0])
+    err := instances[0].Exchange.HasBlock(context.Background(), blocks[0])
     if err != nil {
        t.Fatal(err)
     }
 
-    ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
     defer cancel()
     blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
     if err != nil {
@@ -103,7 +103,7 @@ func TestDuplicateSubscribe(t *testing.T) {
 func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) {
     n := New()
     defer n.Shutdown()
-    ch := n.Subscribe(context.TODO()) // no keys provided
+    ch := n.Subscribe(context.Background()) // no keys provided
     if _, ok := <-ch; ok {
         t.Fatal("should be closed if no keys provided")
     }
@@ -18,7 +18,7 @@ import (
 // WARNING: this uses RandTestBogusIdentity DO NOT USE for NON TESTS!
 func NewTestSessionGenerator(
     net tn.Network) SessionGenerator {
-    ctx, cancel := context.WithCancel(context.TODO())
+    ctx, cancel := context.WithCancel(context.Background())
     return SessionGenerator{
         net: net,
         seq: 0,
@@ -106,7 +106,7 @@ func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.M
         t.Fatal(err)
     }
 
-    ipnsfs, err := nsfs.NewFilesystem(context.TODO(), node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
+    ipnsfs, err := nsfs.NewFilesystem(context.Background(), node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
     if err != nil {
         t.Fatal(err)
     }
@@ -45,7 +45,7 @@ func TestBalancedDag(t *testing.T) {
         t.Fatal(err)
     }
 
-    dr, err := uio.NewDagReader(context.TODO(), nd, ds)
+    dr, err := uio.NewDagReader(context.Background(), nd, ds)
     if err != nil {
         t.Fatal(err)
     }
@@ -102,7 +102,7 @@ func BenchmarkTrickleReadFull(b *testing.B) {
 
 func runReadBench(b *testing.B, nd *dag.Node, ds dag.DAGService) {
     for i := 0; i < b.N; i++ {
-        ctx, cancel := context.WithCancel(context.TODO())
+        ctx, cancel := context.WithCancel(context.Background())
         read, err := uio.NewDagReader(ctx, nd, ds)
         if err != nil {
             b.Fatal(err)
@@ -443,7 +443,7 @@ func TestAppend(t *testing.T) {
     r := bytes.NewReader(should[nbytes/2:])
     blks, errs := chunk.Chan(chunk.NewSizeSplitter(r, 500))
 
-    ctx := context.TODO()
+    ctx := context.Background()
     nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
     if err != nil {
         t.Fatal(err)
@@ -492,7 +492,7 @@ func TestMultipleAppends(t *testing.T) {
 
     spl := chunk.SizeSplitterGen(500)
 
-    ctx := context.TODO()
+    ctx := context.Background()
     for i := 0; i < len(should); i++ {
         blks, errs := chunk.Chan(spl(bytes.NewReader(should[i : i+1])))
 
@@ -540,7 +540,7 @@ func TestAppendSingleBytesToEmpty(t *testing.T) {
 
     blks, errs := chunk.Chan(spl(bytes.NewReader(data[:1])))
 
-    ctx := context.TODO()
+    ctx := context.Background()
     nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
     if err != nil {
         t.Fatal(err)
@@ -155,6 +155,7 @@ func TestBatchFetchDupBlock(t *testing.T) {
 }
 
 func runBatchFetchTest(t *testing.T, read io.Reader) {
+    ctx := context.Background()
     var dagservs []DAGService
     for _, bsi := range bstest.Mocks(5) {
         dagservs = append(dagservs, NewDAGService(bsi))
@@ -169,7 +170,7 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
 
     t.Log("finished setup.")
 
-    dagr, err := uio.NewDagReader(context.TODO(), root, dagservs[0])
+    dagr, err := uio.NewDagReader(ctx, root, dagservs[0])
     if err != nil {
         t.Fatal(err)
     }
@@ -196,13 +197,13 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
         wg.Add(1)
         go func(i int) {
             defer wg.Done()
-            first, err := dagservs[i].Get(context.Background(), k)
+            first, err := dagservs[i].Get(ctx, k)
             if err != nil {
                 t.Fatal(err)
             }
             fmt.Println("Got first node back.")
 
-            read, err := uio.NewDagReader(context.TODO(), first, dagservs[i])
+            read, err := uio.NewDagReader(ctx, first, dagservs[i])
             if err != nil {
                 t.Fatal(err)
             }
@@ -266,8 +267,7 @@ func assertCanGet(t *testing.T, ds DAGService, n *Node) {
         t.Fatal(err)
     }
 
-    _, err = ds.Get(context.TODO(), k)
-    if err != nil {
+    if _, err := ds.Get(context.Background(), k); err != nil {
         t.Fatal(err)
     }
 }
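Besides swapping the context, the assertCanGet hunk above also folds the Get call and its error check into Go's if-with-initializer form, which limits err to the scope of that check. A small hedged sketch of the idiom, using a hypothetical lookup helper rather than the real DAGService:

package example

import (
    "context"
    "testing"
)

// lookup is a stand-in for a DAGService-style Get.
func lookup(ctx context.Context, k string) (string, error) { return k, nil }

func TestLookup(t *testing.T) {
    ctx := context.Background()

    // Two-statement form: err stays visible for the rest of the function.
    _, err := lookup(ctx, "key")
    if err != nil {
        t.Fatal(err)
    }

    // if-with-initializer form, as adopted in assertCanGet: err is scoped
    // to the check and cannot be reused accidentally later.
    if _, err := lookup(ctx, "key"); err != nil {
        t.Fatal(err)
    }
}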
@@ -281,7 +281,7 @@ func TestCantGet(t *testing.T) {
         t.Fatal(err)
     }
 
-    _, err = dsp.ds.Get(context.TODO(), k)
+    _, err = dsp.ds.Get(context.Background(), k)
     if !strings.Contains(err.Error(), "not found") {
         t.Fatal("expected err not found, got: ", err)
     }
@@ -104,7 +104,7 @@ func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr
         }
     }
 
-    err = e.InsertNodeAtPath(context.TODO(), path, ck, c)
+    err = e.InsertNodeAtPath(context.Background(), path, ck, c)
     if experr != "" {
         var got string
         if err != nil {
@@ -288,13 +288,13 @@ func TestAddrBlocking(t *testing.T) {
     swarms[1].Filters.AddDialFilter(block)
 
     swarms[1].peers.AddAddr(swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0], peer.PermanentAddrTTL)
-    _, err = swarms[1].Dial(context.TODO(), swarms[0].LocalPeer())
+    _, err = swarms[1].Dial(ctx, swarms[0].LocalPeer())
     if err == nil {
         t.Fatal("dial should have failed")
     }
 
     swarms[0].peers.AddAddr(swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0], peer.PermanentAddrTTL)
-    _, err = swarms[0].Dial(context.TODO(), swarms[1].LocalPeer())
+    _, err = swarms[0].Dial(ctx, swarms[1].LocalPeer())
     if err == nil {
         t.Fatal("dial should have failed")
     }