
feat(config): ipfs add and Import options for controlling UnixFS DAG Width (#10774)

Co-authored-by: Marcin Rataj <lidel@lidel.org>
Author: Hector Sanjuan
Date: 2025-04-15 22:56:38 +02:00
Committed by: GitHub
Parent: fe3106f9a6
Commit: 6b55e64918
25 changed files with 914 additions and 140 deletions


@ -1,11 +1,18 @@
package config
import (
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
"github.com/ipfs/boxo/ipld/unixfs/io"
)
const (
DefaultCidVersion = 0
DefaultUnixFSRawLeaves = false
DefaultUnixFSChunker = "size-262144"
DefaultHashFunction = "sha2-256"
DefaultUnixFSHAMTDirectorySizeThreshold = "256KiB" // https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26
// DefaultBatchMaxNodes controls the maximum number of nodes in a
// write-batch. The total size of the batch is limited by
// BatchMaxNodes and BatchMaxSize.
@ -14,15 +21,26 @@ const (
// write-batch. The total size of the batch is limited by
// BatchMaxNodes and BatchMaxSize.
DefaultBatchMaxSize = 100 << 20 // 100MiB
)
var (
DefaultUnixFSFileMaxLinks = int64(helpers.DefaultLinksPerBlock)
DefaultUnixFSDirectoryMaxLinks = int64(0)
DefaultUnixFSHAMTDirectoryMaxFanout = int64(io.DefaultShardWidth)
)
// Import configures the default options for ingesting data. This affects commands
// that ingest data, such as 'ipfs add', 'ipfs dag put', 'ipfs block put', 'ipfs files write'.
type Import struct {
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
CidVersion OptionalInteger
UnixFSRawLeaves Flag
UnixFSChunker OptionalString
HashFunction OptionalString
UnixFSFileMaxLinks OptionalInteger
UnixFSDirectoryMaxLinks OptionalInteger
UnixFSHAMTDirectoryMaxFanout OptionalInteger
UnixFSHAMTDirectorySizeThreshold OptionalString
BatchMaxNodes OptionalInteger
BatchMaxSize OptionalInteger
}


@ -3,7 +3,7 @@ package config
type Internal struct {
// All marked as omitempty since we are expecting to make changes to all subcomponents of Internal
Bitswap *InternalBitswap `json:",omitempty"`
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"`
UnixFSShardingSizeThreshold *OptionalString `json:",omitempty"` // moved to Import.UnixFSHAMTDirectorySizeThreshold
Libp2pForceReachability *OptionalString `json:",omitempty"`
BackupBootstrapInterval *OptionalDuration `json:",omitempty"`
}


@ -266,24 +266,44 @@ fetching may be degraded.
},
},
"legacy-cid-v0": {
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks.`,
Description: `Makes UnixFS import produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks. This is likely the least optimal preset, use only if legacy behavior is required.`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(0)
c.Import.UnixFSRawLeaves = False
c.Import.UnixFSChunker = *NewOptionalString("size-262144")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("256KiB")
return nil
},
},
"test-cid-v1": {
Description: `Makes UnixFS import produce modern CIDv1 with raw leaves, sha2-256 and 1 MiB chunks.`,
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT above 256KiB).`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576")
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(174)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0)
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(256)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("256KiB")
return nil
},
},
"test-cid-v1-wide": {
Description: `Makes UnixFS import produce CIDv1 with raw leaves, sha2-256 and 1MiB chunks and wider file DAGs (max 1024 links per every node type, switch dir to HAMT above 1MiB).`,
Transform: func(c *Config) error {
c.Import.CidVersion = *NewOptionalInteger(1)
c.Import.UnixFSRawLeaves = True
c.Import.UnixFSChunker = *NewOptionalString("size-1048576") // 1MiB
c.Import.HashFunction = *NewOptionalString("sha2-256")
c.Import.UnixFSFileMaxLinks = *NewOptionalInteger(1024)
c.Import.UnixFSDirectoryMaxLinks = *NewOptionalInteger(0) // no limit here, use size-based Import.UnixFSHAMTDirectorySizeThreshold instead
c.Import.UnixFSHAMTDirectoryMaxFanout = *NewOptionalInteger(1024)
c.Import.UnixFSHAMTDirectorySizeThreshold = *NewOptionalString("1MiB") // 1MiB
return nil
},
},


@ -37,23 +37,26 @@ type AddEvent struct {
}
const (
quietOptionName = "quiet"
quieterOptionName = "quieter"
silentOptionName = "silent"
progressOptionName = "progress"
trickleOptionName = "trickle"
wrapOptionName = "wrap-with-directory"
onlyHashOptionName = "only-hash"
chunkerOptionName = "chunker"
pinOptionName = "pin"
rawLeavesOptionName = "raw-leaves"
noCopyOptionName = "nocopy"
fstoreCacheOptionName = "fscache"
cidVersionOptionName = "cid-version"
hashOptionName = "hash"
inlineOptionName = "inline"
inlineLimitOptionName = "inline-limit"
toFilesOptionName = "to-files"
quietOptionName = "quiet"
quieterOptionName = "quieter"
silentOptionName = "silent"
progressOptionName = "progress"
trickleOptionName = "trickle"
wrapOptionName = "wrap-with-directory"
onlyHashOptionName = "only-hash"
chunkerOptionName = "chunker"
pinOptionName = "pin"
rawLeavesOptionName = "raw-leaves"
maxFileLinksOptionName = "max-file-links"
maxDirectoryLinksOptionName = "max-directory-links"
maxHAMTFanoutOptionName = "max-hamt-fanout"
noCopyOptionName = "nocopy"
fstoreCacheOptionName = "fscache"
cidVersionOptionName = "cid-version"
hashOptionName = "hash"
inlineOptionName = "inline"
inlineLimitOptionName = "inline-limit"
toFilesOptionName = "to-files"
preserveModeOptionName = "preserve-mode"
preserveMtimeOptionName = "preserve-mtime"
@ -143,6 +146,9 @@ new flags may be added in the future. It is not guaranteed for the implicit
defaults of 'ipfs add' to remain the same in future Kubo releases, or for other
IPFS software to use the same import parameters as Kubo.
Use Import.* configuration options to override global implicit defaults:
https://github.com/ipfs/kubo/blob/master/docs/config.md#import
If you need to back up or transport content-addressed data using a non-IPFS
medium, CID can be preserved with CAR files.
See 'dag export' and 'dag import' for more information.
@ -166,12 +172,15 @@ See 'dag export' and 'dag import' for more information.
cmds.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
cmds.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."),
cmds.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."),
cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash"),
cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes."),
cmds.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Default: Import.UnixFSChunker"),
cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. Default: Import.UnixFSRawLeaves"),
cmds.IntOption(maxFileLinksOptionName, "Limit the maximum number of links in UnixFS file nodes to this value. (experimental) Default: Import.UnixFSFileMaxLinks"),
cmds.IntOption(maxDirectoryLinksOptionName, "Limit the maximum number of links in UnixFS basic directory nodes to this value. Default: Import.UnixFSDirectoryMaxLinks. WARNING: experimental, Import.UnixFSHAMTDirectorySizeThreshold is a safer alternative."),
cmds.IntOption(maxHAMTFanoutOptionName, "Limit the maximum number of links of a UnixFS HAMT directory node to this (power of 2, multiple of 8). Default: Import.UnixFSHAMTDirectoryMaxFanout. WARNING: experimental, see Import.UnixFSHAMTDirectorySizeThreshold as well."),
cmds.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. (experimental)"),
cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
cmds.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true."),
cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. (experimental)"),
cmds.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true. Default: Import.CidVersion"),
cmds.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. Default: Import.HashFunction"),
cmds.BoolOption(inlineOptionName, "Inline small blocks into CIDs. (experimental)"),
cmds.IntOption(inlineLimitOptionName, "Maximum block size to inline. (experimental)").WithDefault(32),
cmds.BoolOption(pinOptionName, "Pin locally to protect added files from garbage collection.").WithDefault(true),
@ -222,6 +231,9 @@ See 'dag export' and 'dag import' for more information.
chunker, _ := req.Options[chunkerOptionName].(string)
dopin, _ := req.Options[pinOptionName].(bool)
rawblks, rbset := req.Options[rawLeavesOptionName].(bool)
maxFileLinks, maxFileLinksSet := req.Options[maxFileLinksOptionName].(int)
maxDirectoryLinks, maxDirectoryLinksSet := req.Options[maxDirectoryLinksOptionName].(int)
maxHAMTFanout, maxHAMTFanoutSet := req.Options[maxHAMTFanoutOptionName].(int)
nocopy, _ := req.Options[noCopyOptionName].(bool)
fscache, _ := req.Options[fstoreCacheOptionName].(bool)
cidVer, cidVerSet := req.Options[cidVersionOptionName].(int)
@ -253,6 +265,21 @@ See 'dag export' and 'dag import' for more information.
rawblks = cfg.Import.UnixFSRawLeaves.WithDefault(config.DefaultUnixFSRawLeaves)
}
if !maxFileLinksSet && !cfg.Import.UnixFSFileMaxLinks.IsDefault() {
maxFileLinksSet = true
maxFileLinks = int(cfg.Import.UnixFSFileMaxLinks.WithDefault(config.DefaultUnixFSFileMaxLinks))
}
if !maxDirectoryLinksSet && !cfg.Import.UnixFSDirectoryMaxLinks.IsDefault() {
maxDirectoryLinksSet = true
maxDirectoryLinks = int(cfg.Import.UnixFSDirectoryMaxLinks.WithDefault(config.DefaultUnixFSDirectoryMaxLinks))
}
if !maxHAMTFanoutSet && !cfg.Import.UnixFSHAMTDirectoryMaxFanout.IsDefault() {
maxHAMTFanoutSet = true
maxHAMTFanout = int(cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout))
}
// Storing optional mode or mtime (UnixFS 1.5) requires root block
// to always be 'dag-pb' and not 'raw'. Below adjusts raw-leaves setting, if possible.
if preserveMode || preserveMtime || mode != 0 || mtime != 0 {
@ -329,6 +356,18 @@ See 'dag export' and 'dag import' for more information.
opts = append(opts, options.Unixfs.RawLeaves(rawblks))
}
if maxFileLinksSet {
opts = append(opts, options.Unixfs.MaxFileLinks(maxFileLinks))
}
if maxDirectoryLinksSet {
opts = append(opts, options.Unixfs.MaxDirectoryLinks(maxDirectoryLinks))
}
if maxHAMTFanoutSet {
opts = append(opts, options.Unixfs.MaxHAMTFanout(maxHAMTFanout))
}
if trickle {
opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
}


@ -50,6 +50,12 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
attribute.Int("inlinelimit", settings.InlineLimit),
attribute.Bool("rawleaves", settings.RawLeaves),
attribute.Bool("rawleavesset", settings.RawLeavesSet),
attribute.Int("maxfilelinks", settings.MaxFileLinks),
attribute.Bool("maxfilelinksset", settings.MaxFileLinksSet),
attribute.Int("maxdirectorylinks", settings.MaxDirectoryLinks),
attribute.Bool("maxdirectorylinksset", settings.MaxDirectoryLinksSet),
attribute.Int("maxhamtfanout", settings.MaxHAMTFanout),
attribute.Bool("maxhamtfanoutset", settings.MaxHAMTFanoutSet),
attribute.Int("layout", int(settings.Layout)),
attribute.Bool("pin", settings.Pin),
attribute.Bool("onlyhash", settings.OnlyHash),
@ -132,6 +138,16 @@ func (api *UnixfsAPI) Add(ctx context.Context, files files.Node, opts ...options
fileAdder.Pin = settings.Pin && !settings.OnlyHash
fileAdder.Silent = settings.Silent
fileAdder.RawLeaves = settings.RawLeaves
if settings.MaxFileLinksSet {
fileAdder.MaxLinks = settings.MaxFileLinks
}
if settings.MaxDirectoryLinksSet {
fileAdder.MaxDirectoryLinks = settings.MaxDirectoryLinks
}
if settings.MaxHAMTFanoutSet {
fileAdder.MaxHAMTFanout = settings.MaxHAMTFanout
}
fileAdder.NoCopy = settings.NoCopy
fileAdder.CidBuilder = prefix
fileAdder.PreserveMode = settings.PreserveMode


@ -7,6 +7,8 @@ import (
"time"
dag "github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
"github.com/ipfs/boxo/ipld/unixfs/io"
cid "github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
)
@ -22,10 +24,16 @@ type UnixfsAddSettings struct {
CidVersion int
MhType uint64
Inline bool
InlineLimit int
RawLeaves bool
RawLeavesSet bool
Inline bool
InlineLimit int
RawLeaves bool
RawLeavesSet bool
MaxFileLinks int
MaxFileLinksSet bool
MaxDirectoryLinks int
MaxDirectoryLinksSet bool
MaxHAMTFanout int
MaxHAMTFanoutSet bool
Chunker string
Layout Layout
@ -60,10 +68,16 @@ func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix,
CidVersion: -1,
MhType: mh.SHA2_256,
Inline: false,
InlineLimit: 32,
RawLeaves: false,
RawLeavesSet: false,
Inline: false,
InlineLimit: 32,
RawLeaves: false,
RawLeavesSet: false,
MaxFileLinks: helpers.DefaultLinksPerBlock,
MaxFileLinksSet: false,
MaxDirectoryLinks: 0,
MaxDirectoryLinksSet: false,
MaxHAMTFanout: io.DefaultShardWidth,
MaxHAMTFanoutSet: false,
Chunker: "size-262144",
Layout: BalancedLayout,
@ -190,6 +204,35 @@ func (unixfsOpts) RawLeaves(enable bool) UnixfsAddOption {
}
}
// MaxFileLinks specifies the maximum number of children for UnixFS file
// nodes.
func (unixfsOpts) MaxFileLinks(n int) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.MaxFileLinks = n
settings.MaxFileLinksSet = true
return nil
}
}
// MaxDirectoryLinks specifies the maximum number of children for UnixFS basic
// directory nodes.
func (unixfsOpts) MaxDirectoryLinks(n int) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.MaxDirectoryLinks = n
settings.MaxDirectoryLinksSet = true
return nil
}
}
// MaxHAMTFanout specifies the maximum width of the HAMT directory shards.
func (unixfsOpts) MaxHAMTFanout(n int) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {
settings.MaxHAMTFanout = n
settings.MaxHAMTFanoutSet = true
return nil
}
}
// Inline tells the adder to inline small blocks into CIDs
func (unixfsOpts) Inline(enable bool) UnixfsAddOption {
return func(settings *UnixfsAddSettings) error {


@ -19,6 +19,7 @@ import (
"github.com/ipfs/boxo/ipld/unixfs/importer/balanced"
ihelper "github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
"github.com/ipfs/boxo/ipld/unixfs/importer/trickle"
uio "github.com/ipfs/boxo/ipld/unixfs/io"
"github.com/ipfs/boxo/mfs"
"github.com/ipfs/boxo/path"
pin "github.com/ipfs/boxo/pinning/pinner"
@ -51,38 +52,43 @@ func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCLocker, ds ipld.DAG
bufferedDS := ipld.NewBufferedDAG(ctx, ds)
return &Adder{
ctx: ctx,
pinning: p,
gcLocker: bs,
dagService: ds,
bufferedDS: bufferedDS,
Progress: false,
Pin: true,
Trickle: false,
Chunker: "",
ctx: ctx,
pinning: p,
gcLocker: bs,
dagService: ds,
bufferedDS: bufferedDS,
Progress: false,
Pin: true,
Trickle: false,
MaxLinks: ihelper.DefaultLinksPerBlock,
MaxHAMTFanout: uio.DefaultShardWidth,
Chunker: "",
}, nil
}
// Adder holds the switches passed to the `add` command.
type Adder struct {
ctx context.Context
pinning pin.Pinner
gcLocker bstore.GCLocker
dagService ipld.DAGService
bufferedDS *ipld.BufferedDAG
Out chan<- interface{}
Progress bool
Pin bool
Trickle bool
RawLeaves bool
Silent bool
NoCopy bool
Chunker string
mroot *mfs.Root
unlocker bstore.Unlocker
tempRoot cid.Cid
CidBuilder cid.Builder
liveNodes uint64
ctx context.Context
pinning pin.Pinner
gcLocker bstore.GCLocker
dagService ipld.DAGService
bufferedDS *ipld.BufferedDAG
Out chan<- interface{}
Progress bool
Pin bool
Trickle bool
RawLeaves bool
MaxLinks int
MaxDirectoryLinks int
MaxHAMTFanout int
Silent bool
NoCopy bool
Chunker string
mroot *mfs.Root
unlocker bstore.Unlocker
tempRoot cid.Cid
CidBuilder cid.Builder
liveNodes uint64
PreserveMode bool
PreserveMtime bool
@ -94,12 +100,13 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) {
if adder.mroot != nil {
return adder.mroot, nil
}
rnode := unixfs.EmptyDirNode()
err := rnode.SetCidBuilder(adder.CidBuilder)
if err != nil {
return nil, err
}
mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil)
// Note, this adds it to DAGService already.
mr, err := mfs.NewEmptyRoot(adder.ctx, adder.dagService, nil, mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
})
if err != nil {
return nil, err
}
@ -119,10 +126,15 @@ func (adder *Adder) add(reader io.Reader) (ipld.Node, error) {
return nil, err
}
maxLinks := ihelper.DefaultLinksPerBlock
if adder.MaxLinks > 0 {
maxLinks = adder.MaxLinks
}
params := ihelper.DagBuilderParams{
Dagserv: adder.bufferedDS,
RawLeaves: adder.RawLeaves,
Maxlinks: ihelper.DefaultLinksPerBlock,
Maxlinks: maxLinks,
NoCopy: adder.NoCopy,
CidBuilder: adder.CidBuilder,
FileMode: adder.FileMode,
@ -252,12 +264,15 @@ func (adder *Adder) addNode(node ipld.Node, path string) error {
if err != nil {
return err
}
dir := gopath.Dir(path)
if dir != "." {
opts := mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
}
if err := mfs.Mkdir(mr, dir, opts); err != nil {
return err
@ -460,12 +475,14 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory
// if we need to store mode or modification time then create a new root which includes that data
if toplevel && (adder.FileMode != 0 || !adder.FileMtime.IsZero()) {
nd := unixfs.EmptyDirNodeWithStat(adder.FileMode, adder.FileMtime)
err := nd.SetCidBuilder(adder.CidBuilder)
if err != nil {
return err
}
mr, err := mfs.NewRoot(ctx, adder.dagService, nd, nil)
mr, err := mfs.NewEmptyRoot(ctx, adder.dagService, nil,
mfs.MkdirOpts{
CidBuilder: adder.CidBuilder,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
ModTime: adder.FileMtime,
Mode: adder.FileMode,
})
if err != nil {
return err
}
@ -478,11 +495,13 @@ func (adder *Adder) addDir(ctx context.Context, path string, dir files.Directory
return err
}
err = mfs.Mkdir(mr, path, mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
Mode: adder.FileMode,
ModTime: adder.FileMtime,
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
Mode: adder.FileMode,
ModTime: adder.FileMtime,
MaxLinks: adder.MaxDirectoryLinks,
MaxHAMTFanout: adder.MaxHAMTFanout,
})
if err != nil {
return err


@ -408,20 +408,29 @@ func IPFS(ctx context.Context, bcfg *BuildCfg) fx.Option {
return fx.Error(err)
}
// Migrate users of deprecated Experimental.ShardingEnabled flag
if cfg.Experimental.ShardingEnabled {
logger.Fatal("The `Experimental.ShardingEnabled` field is no longer used, please remove it from the config. Use Import.UnixFSHAMTDirectorySizeThreshold instead.")
}
if !cfg.Internal.UnixFSShardingSizeThreshold.IsDefault() {
msg := "The `Internal.UnixFSShardingSizeThreshold` field was renamed to `Import.UnixFSHAMTDirectorySizeThreshold`. Please update your config.\n"
if !cfg.Import.UnixFSHAMTDirectorySizeThreshold.IsDefault() {
logger.Fatal(msg) // conflicting values, hard fail
}
logger.Error(msg)
cfg.Import.UnixFSHAMTDirectorySizeThreshold = *cfg.Internal.UnixFSShardingSizeThreshold
}
// Auto-sharding settings
shardSizeString := cfg.Internal.UnixFSShardingSizeThreshold.WithDefault("256kiB")
shardSizeInt, err := humanize.ParseBytes(shardSizeString)
shardingThresholdString := cfg.Import.UnixFSHAMTDirectorySizeThreshold.WithDefault(config.DefaultUnixFSHAMTDirectorySizeThreshold)
shardingThresholdInt, err := humanize.ParseBytes(shardingThresholdString)
if err != nil {
return fx.Error(err)
}
uio.HAMTShardingSize = int(shardSizeInt)
// Migrate users of deprecated Experimental.ShardingEnabled flag
if cfg.Experimental.ShardingEnabled {
logger.Fatal("The `Experimental.ShardingEnabled` field is no longer used, please remove it from the config.\n" +
"go-ipfs now automatically shards when directory block is bigger than `" + shardSizeString + "`.\n" +
"If you need to restore the old behavior (sharding everything) set `Internal.UnixFSShardingSizeThreshold` to `1B`.\n")
}
shardMaxFanout := cfg.Import.UnixFSHAMTDirectoryMaxFanout.WithDefault(config.DefaultUnixFSHAMTDirectoryMaxFanout)
// TODO: avoid overriding this globally, see if we can extend Directory interface like Get/SetMaxLinks from https://github.com/ipfs/boxo/pull/906
uio.HAMTShardingSize = int(shardingThresholdInt)
uio.DefaultShardWidth = int(shardMaxFanout)
return fx.Options(
bcfgOpts,


@ -13,6 +13,10 @@ This release was brought to you by the [Shipyard](http://ipshipyard.com/) team.
- [Dedicated `Reprovider.Strategy` for MFS](#dedicated-reproviderstrategy-for-mfs)
- [Additional new configuration options](#additional-new-configuration-options)
- [Grid view in WebUI](#grid-view-in-webui)
- [Enhanced DAG-Shaping Controls for `ipfs add`](#enhanced-dag-shaping-controls-for-ipfs-add)
- [New `ipfs add` Options](#new-ipfs-add-options)
- [Persistent `Import.*` Configuration](#persistent-import-configuration)
- [Updated Configuration Profiles](#updated-configuration-profiles)
- [📦️ Important dependency updates](#-important-dependency-updates)
- [📝 Changelog](#-changelog)
- [👨‍👩‍👧‍👦 Contributors](#-contributors)
@ -42,6 +46,41 @@ The WebUI, accessible at http://127.0.0.1:5001/webui/, now includes support for
> ![image](https://github.com/user-attachments/assets/80dcf0d0-8103-426f-ae91-416fb25d32b6)
#### Enhanced DAG-Shaping Controls for `ipfs add`
This release advances CIDv1 support by introducing fine-grained control over UnixFS DAG shaping during data ingestion with the `ipfs add` command.
Wider DAG trees (more links per node, higher fanout, larger thresholds) are beneficial for large files and directories with many files, reducing tree depth and lookup latency in high-latency networks, but they increase node size, straining memory and CPU on resource-constrained devices. Narrower trees (lower link count, lower fanout, smaller thresholds) are preferable for smaller directories, frequent updates, or low-power clients, minimizing overhead and ensuring compatibility, though they may increase traversal steps for very large datasets.
Kubo now allows users to act on these tradeoffs and customize the width of the DAG created by the `ipfs add` command.
##### New `ipfs add` Options
Three new options allow you to override default settings for specific import operations:
- `--max-file-links`: Sets the maximum number of child links for a single file chunk.
- `--max-directory-links`: Defines the maximum number of child entries in a "basic" (single-chunk) directory.
- Note: Directories exceeding this limit or the `Import.UnixFSHAMTDirectorySizeThreshold` are converted to HAMT-based (sharded across multiple blocks) structures.
- `--max-hamt-fanout`: Specifies the maximum number of child nodes for HAMT internal structures.
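For example, the new flags can be combined in a single `ipfs add` invocation. This is only an illustrative sketch (file and directory names, as well as the chosen values, are placeholders), not a recommended configuration:

```sh
# wider file DAG and wider HAMT shards for a one-off import
ipfs add --cid-version 1 --chunker size-1048576 \
  --max-file-links 1024 --max-hamt-fanout 1024 large-file.bin

# cap the width of basic directory nodes while adding a directory tree
ipfs add -r --max-directory-links 1024 my-directory/
```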
##### Persistent `Import.*` Configuration
You can set default values for these options using the following configuration settings:
- [`Import.UnixFSFileMaxLinks`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfsfilemaxlinks)
- [`Import.UnixFSDirectoryMaxLinks`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfsdirectorymaxlinks)
- [`Import.UnixFSHAMTDirectoryMaxFanout`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfshamtdirectorymaxfanout)
- [`Import.UnixFSHAMTDirectorySizeThreshold`](https://github.com/ipfs/kubo/blob/master/docs/config.md#importunixfshamtdirectorysizethreshold)
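As a sketch, the same defaults can be persisted with `ipfs config`; the values below mirror the `test-cid-v1-wide` profile introduced in this release:

```sh
ipfs config --json Import.UnixFSFileMaxLinks 1024
ipfs config --json Import.UnixFSDirectoryMaxLinks 0
ipfs config --json Import.UnixFSHAMTDirectoryMaxFanout 1024
ipfs config Import.UnixFSHAMTDirectorySizeThreshold 1MiB
```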
##### Updated Configuration Profiles
The release updated configuration [profiles](https://github.com/ipfs/kubo/blob/master/docs/config.md#profiles) to incorporate these new `Import.*` settings:
- Updated Profile: `test-cid-v1` now includes current defaults as explicit `Import.UnixFSFileMaxLinks=174`, `Import.UnixFSDirectoryMaxLinks=0`, `Import.UnixFSHAMTDirectoryMaxFanout=256` and `Import.UnixFSHAMTDirectorySizeThreshold=256KiB`
- New Profile: `test-cid-v1-wide` adopts experimental directory DAG-shaping defaults, increasing the maximum file DAG width from 174 to 1024, HAMT fanout from 256 to 1024, and raising the HAMT directory sharding threshold from 256KiB to 1MiB, aligning with 1MiB file chunks.
- Feedback: Try it out and share your thoughts at [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
> [!TIP]
> Apply one of CIDv1 test [profiles](https://github.com/ipfs/kubo/blob/master/docs/config.md#profiles) with `ipfs config profile apply test-cid-v1[-wide]`.
#### 📦️ Important dependency updates
- update `ipfs-webui` to [v4.7.0](https://github.com/ipfs/ipfs-webui/releases/tag/v4.7.0)


@ -185,6 +185,10 @@ config file at runtime.
- [`Import.HashFunction`](#importhashfunction)
- [`Import.BatchMaxNodes`](#importbatchmaxnodes)
- [`Import.BatchMaxSize`](#importbatchmaxsize)
- [`Import.UnixFSFileMaxLinks`](#importunixfsfilemaxlinks)
- [`Import.UnixFSDirectoryMaxLinks`](#importunixfsdirectorymaxlinks)
- [`Import.UnixFSHAMTDirectoryMaxFanout`](#importunixfshamtdirectorymaxfanout)
- [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold)
- [`Version`](#version)
- [`Version.AgentSuffix`](#versionagentsuffix)
- [`Version.SwarmCheckEnabled`](#versionswarmcheckenabled)
@ -1199,15 +1203,7 @@ Type: `optionalInteger` (`null` means default which is 10)
### `Internal.UnixFSShardingSizeThreshold`
The sharding threshold used internally to decide whether a UnixFS directory should be sharded or not.
This value is not strictly related to the size of the UnixFS directory block and any increases in
the threshold should come with being careful that block sizes stay under 2MiB in order for them to be
reliably transferable through the networking stack (IPFS peers on the public swarm tend to ignore requests for blocks bigger than 2MiB).
Decreasing this value to 1B is functionally equivalent to the previous experimental sharding option to
shard all directories.
Type: `optionalBytes` (`null` means default which is 256KiB)
**MOVED:** see [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold)
## `Ipns`
@ -2560,6 +2556,80 @@ Default: `20971520` (20MiB)
Type: `optionalInteger`
### `Import.UnixFSFileMaxLinks`
The maximum number of links that a node that is part of a UnixFS file can have
when building the DAG during import.
This setting controls the fanout of files that are chunked into several
blocks and grouped into a UnixFS (dag-pb) DAG.
Default: `174`
Type: `optionalInteger`
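For illustration, with the default `size-262144` chunker a file of up to 174 × 256KiB (44544KiB) fits in a single DAG layer. A minimal sketch of raising the limit (the value and filename are only examples):

```sh
# persist a wider per-node link limit for files
ipfs config --json Import.UnixFSFileMaxLinks 1024

# or override it for a single import
ipfs add --max-file-links 1024 large-file.bin
```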
### `Import.UnixFSDirectoryMaxLinks`
The maximum number of links that a node that is part of a UnixFS basic directory can
have when building the DAG during import.
This setting controls the fanout of basic, non-HAMT directory nodes and
sets a limit after which directories are converted to a HAMT-based structure.
When unset (0), there is no limit on the number of children; directories are converted to
HAMTs based on their estimated size only.
When set, basic directories are converted to HAMTs as soon as they
exceed the maximum number of children. This happens transparently during the
add process. The fanout of HAMT nodes is controlled by `Import.UnixFSHAMTDirectoryMaxFanout`.
Commands affected: `ipfs add`
Default: `0` (no limit, because [`Import.UnixFSHAMTDirectorySizeThreshold`](#importunixfshamtdirectorysizethreshold) controls when to switch to HAMT sharding once a directory grows too big)
Type: `optionalInteger`
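A hedged example of enabling a link-count limit for basic directories (the value is illustrative; most setups should rely on the size-based threshold instead):

```sh
# convert basic directories to HAMT once they exceed 1024 links
ipfs config --json Import.UnixFSDirectoryMaxLinks 1024

# equivalent one-off override
ipfs add -r --max-directory-links 1024 my-directory/
```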
### `Import.UnixFSHAMTDirectoryMaxFanout`
The maximum number of children that a node that is part of a UnixFS HAMT directory
(a.k.a. sharded directory) can have.
HAMT directories have no overall limit on their number of entries and are used when basic
directories become too big or reach `Import.UnixFSDirectoryMaxLinks`. A HAMT is a structure made of UnixFS
nodes that store the list of elements in the folder. This option controls the
maximum number of children that each HAMT node can have.
Must be a power of two (shard entry size) and a multiple of 8 (bitfield size).
Commands affected: `ipfs add`, `ipfs daemon` (globally overrides [`boxo/ipld/unixfs/io.DefaultShardWidth`](https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L30C5-L30C22))
Default: `256`
Type: `optionalInteger`
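For example, valid fanout values include 256 (the default), 512, and 1024. A sketch of widening the fanout (the value and directory name are only examples):

```sh
# must be a power of two and a multiple of 8
ipfs config --json Import.UnixFSHAMTDirectoryMaxFanout 1024

# one-off override on the CLI
ipfs add -r --max-hamt-fanout 1024 my-directory/
```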
### `Import.UnixFSHAMTDirectorySizeThreshold`
The sharding threshold used to decide whether a basic UnixFS directory
should be sharded (converted into a HAMT directory) or not.
This value is not strictly related to the size of the UnixFS directory block:
when increasing the threshold, take care that block sizes stay under 2MiB so that
they remain reliably transferable through the
networking stack. At the time of writing, IPFS peers on the public swarm
tend to ignore requests for blocks bigger than 2MiB.
Uses the implementation from `boxo/ipld/unixfs/io/directory`, where the size is not
the *exact* block size of the encoded directory but an estimate
based on the byte length of the DAG-PB link names and CIDs.
Setting to `1B` is functionally equivalent to always using HAMT (useful in testing).
Commands affected: `ipfs add`, `ipfs daemon` (globally overrides [`boxo/ipld/unixfs/io.HAMTShardingSize`](https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L26))
Default: `256KiB` (may change, inspect `DefaultUnixFSHAMTDirectorySizeThreshold` to confirm)
Type: `optionalBytes`
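A sketch of adjusting the threshold (values are illustrative):

```sh
# shard directories once their estimated block size exceeds 1MiB
ipfs config Import.UnixFSHAMTDirectorySizeThreshold 1MiB

# setting it to 1B effectively shards every directory (handy in tests)
ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold '"1B"'
```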
## `Version`
Options to configure agent version announced to the swarm, and leveraging
@ -2742,16 +2812,38 @@ Disables [Reprovider](#reprovider) system (and announcing to Amino DHT).
Makes UnixFS import (`ipfs add`) produce legacy CIDv0 with no raw leaves, sha2-256 and 256 KiB chunks.
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> This profile is provided for legacy users and should not be used for new projects.
### `test-cid-v1` profile
Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256 and 1 MiB chunks.
Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
and 1 MiB chunks (max 174 links per file, 256 per HAMT node, switch dir to HAMT
above 256KiB).
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> This profile will become the new implicit default, provided for testing purposes.
> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details.
> [`Import.*`](#import) settings applied by this profile MAY change in a future release. Provided for testing purposes.
>
> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
### `test-cid-v1-wide` profile
Makes UnixFS import (`ipfs add`) produce modern CIDv1 with raw leaves, sha2-256
and 1 MiB chunks and wider file DAGs (max 1024 links per every node type,
switch dir to HAMT above 1MiB).
See <https://github.com/ipfs/kubo/blob/master/config/profile.go> for exact [`Import.*`](#import) settings.
> [!NOTE]
> [`Import.*`](#import) settings applied by this profile MAY change in a future release. Provided for testing purposes.
>
> Follow [kubo#4143](https://github.com/ipfs/kubo/issues/4143) for more details,
> and provide feedback in [discuss.ipfs.tech/t/should-we-profile-cids](https://discuss.ipfs.tech/t/should-we-profile-cids/18507) or [ipfs/specs#499](https://github.com/ipfs/specs/pull/499).
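A quick sketch of applying this profile to an existing repository (the new settings only affect future `ipfs add` operations; CIDs of previously added data do not change):

```sh
ipfs config profile apply test-cid-v1-wide
ipfs config show   # inspect the resulting Import.* values
```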
## Types


@ -7,7 +7,7 @@ go 1.24
replace github.com/ipfs/kubo => ./../../..
require (
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37
github.com/ipfs/kubo v0.0.0-00010101000000-000000000000
github.com/libp2p/go-libp2p v0.41.1
github.com/multiformats/go-multiaddr v0.15.0


@ -298,8 +298,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb h1:kA7c3CF6/d8tUwGJR/SwIfaRz7Xk7Fbyoh2ePZAFMlw=
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb/go.mod h1:omQZmLS7LegSpBy3m4CrAB9/SO7Fq3pfv+5y1FOd+gI=
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37 h1:q3a+2FIbWzZbx/yUqpuG4jLVSa6GvxtRfx9TU5GLiN0=
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37/go.mod h1:omQZmLS7LegSpBy3m4CrAB9/SO7Fq3pfv+5y1FOd+gI=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ=


@ -150,7 +150,10 @@ func TestIpfsStressRead(t *testing.T) {
// Now make a bunch of dirs
for i := 0; i < ndiriter; i++ {
db := uio.NewDirectory(nd.DAG)
db, err := uio.NewDirectory(nd.DAG)
if err != nil {
t.Fatal(err)
}
for j := 0; j < 1+rand.Intn(10); j++ {
name := fmt.Sprintf("child%d", j)
@ -245,8 +248,11 @@ func TestIpfsBasicDirRead(t *testing.T) {
fi, data := randObj(t, nd, 10000)
// Make a directory and put that file in it
db := uio.NewDirectory(nd.DAG)
err := db.AddChild(nd.Context(), "actual", fi)
db, err := uio.NewDirectory(nd.DAG)
if err != nil {
t.Fatal(err)
}
err = db.AddChild(nd.Context(), "actual", fi)
if err != nil {
t.Fatal(err)
}

go.mod (2 changed lines)

@ -21,7 +21,7 @@ require (
github.com/hashicorp/go-version v1.7.0
github.com/ipfs-shipyard/nopfs v0.0.14
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37
github.com/ipfs/go-block-format v0.2.0
github.com/ipfs/go-cid v0.5.0
github.com/ipfs/go-cidutil v0.1.0

go.sum (4 changed lines)

@ -362,8 +362,8 @@ github.com/ipfs-shipyard/nopfs/ipfs v0.25.0 h1:OqNqsGZPX8zh3eFMO8Lf8EHRRnSGBMqcd
github.com/ipfs-shipyard/nopfs/ipfs v0.25.0/go.mod h1:BxhUdtBgOXg1B+gAPEplkg/GpyTZY+kCMSfsJvvydqU=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb h1:kA7c3CF6/d8tUwGJR/SwIfaRz7Xk7Fbyoh2ePZAFMlw=
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb/go.mod h1:omQZmLS7LegSpBy3m4CrAB9/SO7Fq3pfv+5y1FOd+gI=
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37 h1:q3a+2FIbWzZbx/yUqpuG4jLVSa6GvxtRfx9TU5GLiN0=
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37/go.mod h1:omQZmLS7LegSpBy3m4CrAB9/SO7Fq3pfv+5y1FOd+gI=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ=


@ -1,10 +1,17 @@
package cli
import (
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/dustin/go-humanize"
"github.com/ipfs/kubo/config"
"github.com/ipfs/kubo/test/cli/harness"
"github.com/ipfs/kubo/test/cli/testutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -19,6 +26,11 @@ func TestAdd(t *testing.T) {
shortStringCidV1Sha512 = "bafkrgqbqt3gerhas23vuzrapkdeqf4vu2dwxp3srdj6hvg6nhsug2tgyn6mj3u23yx7utftq3i2ckw2fwdh5qmhid5qf3t35yvkc5e5ottlw6"
)
const (
cidV0Length = 34 // cidv0 sha2-256
cidV1Length = 36 // cidv1 sha2-256
)
t.Run("produced cid version: implicit default (CIDv0)", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init().StartDaemon()
@ -96,6 +108,33 @@ func TestAdd(t *testing.T) {
require.Equal(t, shortStringCidV1NoRawLeaves, cidStr)
})
t.Run("produced unixfs max file links: command flag --max-file-links overrides configuration in Import.UnixFSFileMaxLinks", func(t *testing.T) {
t.Parallel()
//
// UnixFSChunker=size-262144 (256KiB)
// Import.UnixFSFileMaxLinks=174
node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0") // legacy-cid-v0 for determinism across all params
node.UpdateConfig(func(cfg *config.Config) {
cfg.Import.UnixFSChunker = *config.NewOptionalString("size-262144") // 256 KiB chunks
cfg.Import.UnixFSFileMaxLinks = *config.NewOptionalInteger(174) // max 174 per level
})
node.StartDaemon()
defer node.StopDaemon()
// Add 262144KiB (256MiB) file:
// 1024 * 256KiB should fit in single layer
seed := shortString
cidStr := node.IPFSAddDeterministic("262144KiB", seed, "--max-file-links", "1024")
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
// Expect 1024 links due to cli parameter raising link limit from 174 to 1024
require.Equal(t, 1024, len(root.Links))
// expect same CID every time
require.Equal(t, "QmbBftNHWmjSWKLC49dMVrfnY8pjrJYntiAXirFJ7oJrNk", cidStr)
})
t.Run("ipfs init --profile=legacy-cid-v0 sets config that produces legacy CIDv0", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init("--profile=legacy-cid-v0")
@ -106,13 +145,307 @@ func TestAdd(t *testing.T) {
require.Equal(t, shortStringCidV0, cidStr)
})
t.Run("ipfs init --profile=test-cid-v1 produces modern CIDv1", func(t *testing.T) {
t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSChunker=size-262144 and UnixFSFileMaxLinks", func(t *testing.T) {
t.Parallel()
seed := "v0-seed"
profile := "--profile=legacy-cid-v0"
t.Run("under UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// Add 44544KiB file:
// 174 * 256KiB should fit in single DAG layer
cidStr := node.IPFSAddDeterministic("44544KiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 174, len(root.Links))
// expect same CID every time
require.Equal(t, "QmUbBALi174SnogsUzLpYbD4xPiBSFANF4iztWCsHbMKh2", cidStr)
})
t.Run("above UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// add 256KiB (one more block), it should force rebalancing DAG and moving most to second layer
cidStr := node.IPFSAddDeterministic("44800KiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "QmepeWtdmS1hHXx1oZXsPUv6bMrfRRKfZcoPPU4eEfjnbf", cidStr)
})
})
t.Run("ipfs init --profile=legacy-cid-v0 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-legacy-cid-v0"
profile := "--profile=legacy-cid-v0"
t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV0Length, "255KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating a regular "basic" directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 903, len(root.Links))
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV0Length, "257KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm that this time the number of links is less than UnixFSHAMTDirectoryMaxFanout (indicating a HAMT-sharded directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 252, len(root.Links))
})
})
t.Run("ipfs init --profile=test-cid-v1 produces CIDv1 with raw leaves", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init("--profile=test-cid-v1")
node.StartDaemon()
defer node.StopDaemon()
cidStr := node.IPFSAddStr(shortString)
require.Equal(t, shortStringCidV1, cidStr)
require.Equal(t, shortStringCidV1, cidStr) // raw leaf
})
t.Run("ipfs init --profile=test-cid-v1 applies UnixFSChunker=size-1048576", func(t *testing.T) {
t.Parallel()
seed := "v1-seed"
profile := "--profile=test-cid-v1"
t.Run("under UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// Add 174MiB file:
// 174 * 1MiB should fit in single layer
cidStr := node.IPFSAddDeterministic("174MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 174, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeigwduxcf2aawppv3isnfeshnimkyplvw3hthxjhr2bdeje4tdaicu", cidStr)
})
t.Run("above UnixFSFileMaxLinks=174", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// add +1MiB (one more block), it should force rebalancing DAG and moving most to second layer
cidStr := node.IPFSAddDeterministic("175MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeidhd7lo2n2v7lta5yamob3xwhbxcczmmtmhquwhjesi35jntf7mpu", cidStr)
})
})
t.Run("ipfs init --profile=test-cid-v1 applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-cid-v1"
profile := "--profile=test-cid-v1"
t.Run("under UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "255KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating a regular "basic" directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 897, len(root.Links))
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=256KiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "257KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm that this time the number of links is less than UnixFSHAMTDirectoryMaxFanout (indicating a HAMT-sharded directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 252, len(root.Links))
})
})
t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSChunker=size-1048576 and UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
seed := "v1-seed-1024"
profile := "--profile=test-cid-v1-wide"
t.Run("under UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// Add 1024MiB file:
// 1024 * 1MiB should fit in single layer
cidStr := node.IPFSAddDeterministic("1024MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 1024, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeiej5w63ir64oxgkr5htqmlerh5k2rqflurn2howimexrlkae64xru", cidStr)
})
t.Run("above UnixFSFileMaxLinks=1024", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
// add +1MiB (one more block), it should force rebalancing DAG and moving most to second layer
cidStr := node.IPFSAddDeterministic("1025MiB", seed)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 2, len(root.Links))
// expect same CID every time
require.Equal(t, "bafybeieilp2qx24pe76hxrxe6bpef5meuxto3kj5dd6mhb5kplfeglskdm", cidStr)
})
})
t.Run("ipfs init --profile=test-cid-v1-wide applies UnixFSHAMTDirectoryMaxFanout=256 and UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Parallel()
seed := "hamt-cid-v1"
profile := "--profile=test-cid-v1-wide"
t.Run("under UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "1023KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm the number of links is more than UnixFSHAMTDirectoryMaxFanout (indicating a regular "basic" directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 3599, len(root.Links))
})
t.Run("above UnixFSHAMTDirectorySizeThreshold=1MiB", func(t *testing.T) {
t.Parallel()
node := harness.NewT(t).NewNode().Init(profile)
node.StartDaemon()
defer node.StopDaemon()
randDir, err := os.MkdirTemp(node.Dir, seed)
require.NoError(t, err)
// Create directory with a lot of files that have filenames which together take close to UnixFSHAMTDirectorySizeThreshold in total
err = createDirectoryForHAMT(randDir, cidV1Length, "1025KiB", seed)
require.NoError(t, err)
cidStr := node.IPFS("add", "-r", "-Q", randDir).Stdout.Trimmed()
// Confirm that this time the number of links is less than UnixFSHAMTDirectoryMaxFanout (indicating a HAMT-sharded directory)
root, err := node.InspectPBNode(cidStr)
assert.NoError(t, err)
require.Equal(t, 992, len(root.Links))
})
})
}
// createDirectoryForHAMT aims to create enough files with long names for the directory block to be close to the UnixFSHAMTDirectorySizeThreshold.
// The calculation is based on boxo's HAMTShardingSize and sizeBelowThreshold, which estimate the ballpark
// size of the block by adding up the length of each link name and the binary CID length.
// See https://github.com/ipfs/boxo/blob/6c5a07602aed248acc86598f30ab61923a54a83e/ipld/unixfs/io/directory.go#L491
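// For example, with the 255-byte filenames used below and 34-byte CIDv0 links, a target of 255KiB
// works out to 261120/(255+34) ≈ 903 files, which matches the link counts asserted in the
// legacy-cid-v0 HAMT tests above.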
func createDirectoryForHAMT(dirPath string, cidLength int, unixfsNodeSizeTarget, seed string) error {
hamtThreshold, err := humanize.ParseBytes(unixfsNodeSizeTarget)
if err != nil {
return err
}
// Calculate how many files with long filenames are needed to hit UnixFSHAMTDirectorySizeThreshold
nameLen := 255 // max that works across windows/macos/linux
alphabetLen := len(testutils.AlphabetEasy)
numFiles := int(hamtThreshold) / (nameLen + cidLength)
// Deterministic pseudo-random bytes for static CID
drand, err := testutils.DeterministicRandomReader(unixfsNodeSizeTarget, seed)
if err != nil {
return err
}
// Create necessary files in a single, flat directory
for i := 0; i < numFiles; i++ {
buf := make([]byte, nameLen)
_, err := io.ReadFull(drand, buf)
if err != nil {
return err
}
// Convert deterministic pseudo-random bytes to ASCII
var sb strings.Builder
for _, b := range buf {
// Map byte to a printable character from the filename-safe alphabet
char := testutils.AlphabetEasy[int(b)%alphabetLen]
sb.WriteRune(char)
}
filename := sb.String()[:nameLen]
filePath := filepath.Join(dirPath, filename)
// Create empty file
f, err := os.Create(filePath)
if err != nil {
return err
}
f.Close()
}
return nil
}


@ -76,6 +76,17 @@ func (n *Node) IPFSAddStr(content string, args ...string) string {
return n.IPFSAdd(strings.NewReader(content), args...)
}
// IPFSAddDeterministic produces a CID of a file of a certain size, filled with deterministically generated bytes based on some seed.
// This ensures deterministic CID on the other end, that can be used in tests.
func (n *Node) IPFSAddDeterministic(size string, seed string, args ...string) string {
log.Debugf("node %d adding %s of deterministic pseudo-random data with seed %q and args: %v", n.ID, size, seed, args)
reader, err := DeterministicRandomReader(size, seed)
if err != nil {
panic(err)
}
return n.IPFSAdd(reader, args...)
}
func (n *Node) IPFSAdd(content io.Reader, args ...string) string {
log.Debugf("node %d adding with args: %v", n.ID, args)
fullArgs := []string{"add", "-q"}
@ -108,3 +119,15 @@ func (n *Node) IPFSDagImport(content io.Reader, cid string, args ...string) erro
})
return res.Err
}
/*
func (n *Node) IPFSDagExport(cid string, car *os.File) error {
log.Debugf("node %d dag export of %s to %q with args: %v", n.ID, cid, car.Name())
res := n.Runner.MustRun(RunRequest{
Path: n.IPFSBin,
Args: []string{"dag", "export", cid},
CmdOpts: []CmdOpt{RunWithStdout(car)},
})
return res.Err
}
*/


@ -0,0 +1,54 @@
package harness
import (
"bytes"
"encoding/json"
)
// InspectPBNode uses dag-json output of 'ipfs dag get' to inspect
// "Logical Format" of DAG-PB as defined in
// https://web.archive.org/web/20250403194752/https://ipld.io/specs/codecs/dag-pb/spec/#logical-format
// (mainly used for inspecting Links without depending on any libraries)
func (n *Node) InspectPBNode(cid string) (PBNode, error) {
log.Debugf("node %d dag get %s as dag-json", n.ID, cid)
var root PBNode
var dagJsonOutput bytes.Buffer
res := n.Runner.MustRun(RunRequest{
Path: n.IPFSBin,
Args: []string{"dag", "get", "--output-codec=dag-json", cid},
CmdOpts: []CmdOpt{RunWithStdout(&dagJsonOutput)},
})
if res.Err != nil {
return root, res.Err
}
err := json.Unmarshal(dagJsonOutput.Bytes(), &root)
if err != nil {
return root, err
}
return root, nil
}
// Define structs to match the JSON for
type PBHash struct {
Slash string `json:"/"`
}
type PBLink struct {
Hash PBHash `json:"Hash"`
Name string `json:"Name"`
Tsize int `json:"Tsize"`
}
type PBData struct {
Slash struct {
Bytes string `json:"bytes"`
} `json:"/"`
}
type PBNode struct {
Data PBData `json:"Data"`
Links []PBLink `json:"Links"`
}


@ -0,0 +1,46 @@
package testutils
import (
"crypto/sha256"
"io"
"github.com/dustin/go-humanize"
"golang.org/x/crypto/chacha20"
)
type randomReader struct {
cipher *chacha20.Cipher
remaining int64
}
func (r *randomReader) Read(p []byte) (int, error) {
if r.remaining <= 0 {
return 0, io.EOF
}
n := int64(len(p))
if n > r.remaining {
n = r.remaining
}
// Generate random bytes directly into the provided buffer
r.cipher.XORKeyStream(p[:n], make([]byte, n))
r.remaining -= n
return int(n), nil
}
// DeterministicRandomReader produces the specified number of pseudo-random bytes
// derived from a seed.
func DeterministicRandomReader(sizeStr string, seed string) (io.Reader, error) {
size, err := humanize.ParseBytes(sizeStr)
if err != nil {
return nil, err
}
// Hash the seed string to a 32-byte key for ChaCha20
key := sha256.Sum256([]byte(seed))
// Use ChaCha20 for deterministic random bytes
var nonce [chacha20.NonceSize]byte // Zero nonce for simplicity
cipher, err := chacha20.NewUnauthenticatedCipher(key[:chacha20.KeySize], nonce[:])
if err != nil {
return nil, err
}
return &randomReader{cipher: cipher, remaining: int64(size)}, nil
}


@ -24,20 +24,22 @@ type RandFiles struct {
FanoutFiles int // how many files per dir
FanoutDirs int // how many dirs per dir
RandomSize bool // randomize file sizes
RandomFanout bool // randomize fanout numbers
RandomSize bool // randomize file sizes
RandomNameSize bool // randomize filename lengths
RandomFanout bool // randomize fanout numbers
}
func NewRandFiles() *RandFiles {
return &RandFiles{
Rand: rand.New(rand.NewSource(time.Now().UnixNano())),
FileSize: 4096,
FilenameSize: 16,
Alphabet: AlphabetEasy,
FanoutDepth: 2,
FanoutDirs: 5,
FanoutFiles: 10,
RandomSize: true,
Rand: rand.New(rand.NewSource(time.Now().UnixNano())),
FileSize: 4096,
FilenameSize: 16,
Alphabet: AlphabetEasy,
FanoutDepth: 2,
FanoutDirs: 5,
FanoutFiles: 10,
RandomSize: true,
RandomNameSize: true,
}
}
@ -83,7 +85,10 @@ func (r *RandFiles) WriteRandomFile(root string) error {
filesize = r.Rand.Int63n(filesize) + 1
}
n := rand.Intn(r.FilenameSize-4) + 4
n := r.FilenameSize
if r.RandomNameSize {
n = rand.Intn(r.FilenameSize-4) + 4
}
name := r.RandomFilename(n)
filepath := path.Join(root, name)
f, err := os.Create(filepath)


@ -33,6 +33,7 @@ require (
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
github.com/alecthomas/go-check-sumtype v0.1.4 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/alexkohler/nakedret/v2 v2.0.4 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
@ -57,6 +58,7 @@ require (
github.com/chavacava/garif v0.1.0 // indirect
github.com/ckaznocha/intrange v0.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect
github.com/curioswitch/go-reassign v0.2.0 // indirect
github.com/daixiang0/gci v0.13.4 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@ -116,7 +118,8 @@ require (
github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb // indirect
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-block-format v0.2.0 // indirect
github.com/ipfs/go-cid v0.5.0 // indirect
github.com/ipfs/go-datastore v0.8.2 // indirect
@ -273,6 +276,7 @@ require (
github.com/urfave/cli v1.22.16 // indirect
github.com/uudashr/gocognit v1.1.3 // indirect
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
github.com/xen0n/gosmopolitan v1.2.2 // indirect


@ -43,6 +43,8 @@ github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSww
github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg=
github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU=
github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=
@ -105,6 +107,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4=
github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
@ -294,8 +298,10 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb h1:kA7c3CF6/d8tUwGJR/SwIfaRz7Xk7Fbyoh2ePZAFMlw=
github.com/ipfs/boxo v0.29.2-0.20250409154342-bbaf2e146dfb/go.mod h1:omQZmLS7LegSpBy3m4CrAB9/SO7Fq3pfv+5y1FOd+gI=
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37 h1:q3a+2FIbWzZbx/yUqpuG4jLVSa6GvxtRfx9TU5GLiN0=
github.com/ipfs/boxo v0.29.2-0.20250415191135-dc60fe747c37/go.mod h1:omQZmLS7LegSpBy3m4CrAB9/SO7Fq3pfv+5y1FOd+gI=
github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA=
github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU=
github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
@ -767,6 +773,8 @@ github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSD
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4=
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=


@ -16,7 +16,7 @@ fi
test_init_ipfs
test_expect_success 'force sharding' '
ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\""
ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1B\""
'
test_launch_ipfs_daemon


@ -849,7 +849,7 @@ tests_for_files_api "with-daemon"
test_kill_ipfs_daemon
test_expect_success "enable sharding in config" '
ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\""
ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1B\""
'
test_launch_ipfs_daemon_without_network
@ -880,7 +880,7 @@ test_expect_success "set up automatic sharding/unsharding data" '
'
test_expect_success "reset automatic sharding" '
ipfs config --json Internal.UnixFSShardingSizeThreshold null
ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold null
'
test_launch_ipfs_daemon_without_network


@ -34,7 +34,7 @@ test_init_ipfs
UNSHARDED="QmavrTrQG4VhoJmantURAYuw3bowq3E2WcvP36NRQDAC1N"
test_expect_success "force sharding off" '
ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1G\""
ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1G\""
'
test_add_dir "$UNSHARDED"
@ -46,7 +46,7 @@ test_add_dir "$UNSHARDED"
test_kill_ipfs_daemon
test_expect_success "force sharding on" '
ipfs config --json Internal.UnixFSShardingSizeThreshold "\"1B\""
ipfs config --json Import.UnixFSHAMTDirectorySizeThreshold "\"1B\""
'
SHARDED="QmSCJD1KYLhVVHqBK3YyXuoEqHt7vggyJhzoFYbT8v1XYL"