package test

import (
    "context"
    "strconv"
    "testing"
    "time"

    files "github.com/ipfs/go-ipfs-files"
    "github.com/ipfs/go-merkledag"
    uio "github.com/ipfs/go-unixfs/io"
    "github.com/ipfs/interface-go-ipfs-core/options"
    "github.com/ipfs/interface-go-ipfs-core/path"
    "github.com/ipld/go-ipld-prime"
)

func TestPathUnixFSHAMTPartial(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Create a node
    apis, err := NodeProvider{}.MakeAPISwarm(ctx, true, 1)
    if err != nil {
        t.Fatal(err)
    }
    a := apis[0]

    // Setting this after instantiating the swarm so that it's not clobbered by loading the go-ipfs config
    prevVal := uio.HAMTShardingSize
    uio.HAMTShardingSize = 1
    defer func() {
        uio.HAMTShardingSize = prevVal
    }()
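    // Note: with the threshold set to a single byte, effectively any non-empty
    // directory we add is stored as a HAMT shard rather than a basic directory.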

    // Create and add a sharded directory
    dir := make(map[string]files.Node)
    // Make sure we have at least two levels of sharding
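    // (With DefaultShardWidth+1 entries, at least two names must hash into the same
    // bucket of the root shard, pushing that bucket out into a child shard.)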
    for i := 0; i < uio.DefaultShardWidth+1; i++ {
        dir[strconv.Itoa(i)] = files.NewBytesFile([]byte(strconv.Itoa(i)))
    }

    r, err := a.Unixfs().Add(ctx, files.NewMapDirectory(dir), options.Unixfs.Pin(false))
    if err != nil {
        t.Fatal(err)
    }

    // Get the root of the directory
    nd, err := a.Dag().Get(ctx, r.Cid())
    if err != nil {
        t.Fatal(err)
    }

    // Make sure the root is a DagPB node (this API might change in the future to account for ADLs)
    _ = nd.(ipld.Node)
    pbNode := nd.(*merkledag.ProtoNode)
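    // The blank assertion above simply panics if the returned node does not also
    // satisfy the go-ipld-prime Node interface; only the ProtoNode view is used below.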

    // Remove one of the sharded directory blocks
    if err := a.Block().Rm(ctx, path.IpfsPath(pbNode.Links()[0].Cid)); err != nil {
        t.Fatal(err)
    }
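    // The directory DAG is now only partially present in the local blockstore, so any
    // path that traverses the removed block cannot be resolved without the network.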

    // Try and resolve each of the entries in the sharded directory which will result in pathing over the missing block
    //
    // Note: we could just check a particular path here, but it would require either greater use of the HAMT internals
    // or some hard coded values in the test both of which would be a pain to follow.
    for k := range dir {
        // The node will go out to the (non-existent) network looking for the missing block. Make sure we're erroring
        // because we exceeded the timeout on our query
        timeoutCtx, timeoutCancel := context.WithTimeout(ctx, time.Second*1)
        _, err := a.ResolveNode(timeoutCtx, path.Join(r, k))
        if err != nil {
            if timeoutCtx.Err() == nil {
                t.Fatal(err)
            }
        }
        timeoutCancel()
    }
}