Merge pull request #4026 from ipfs/kevina/files-raw-leaves
Add full support for CidV1 in Files API and Dag Modifier
@@ -18,8 +18,10 @@ import (
     ft "github.com/ipfs/go-ipfs/unixfs"
     uio "github.com/ipfs/go-ipfs/unixfs/io"
 
+    cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid"
     node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format"
     logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
+    mh "gx/ipfs/QmU9a9NV9RdPNwZQDYd5uKsm6N6LJLSvLbywDDYFbaaC6P/go-multihash"
 )
 
 var log = logging.Logger("cmds/files")
@@ -54,9 +56,13 @@ operations.
         "stat":  FilesStatCmd,
         "rm":    FilesRmCmd,
         "flush": FilesFlushCmd,
+        "chcid": FilesChcidCmd,
     },
 }
 
+var cidVersionOption = cmds.IntOption("cid-version", "cid-ver", "Cid version to use. (experimental)")
+var hashOption = cmds.StringOption("hash", "Hash function to use. Will set Cid version to 1 if used. (experimental)")
+
 var formatError = errors.New("Format was set by multiple options. Only one format option is allowed")
 
 var FilesStatCmd = &cmds.Command{
@@ -162,38 +168,46 @@ func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) {
     c := nd.Cid()
 
-    pbnd, ok := nd.(*dag.ProtoNode)
-    if !ok {
-        return nil, dag.ErrNotProtobuf
-    }
-
-    d, err := ft.FromBytes(pbnd.Data())
-    if err != nil {
-        return nil, err
-    }
-
     cumulsize, err := nd.Size()
     if err != nil {
         return nil, err
     }
 
-    var ndtype string
-    switch fsn.Type() {
-    case mfs.TDir:
-        ndtype = "directory"
-    case mfs.TFile:
-        ndtype = "file"
-    default:
-        return nil, fmt.Errorf("Unrecognized node type: %s", fsn.Type())
-    }
-
-    return &Object{
-        Hash:           c.String(),
-        Blocks:         len(nd.Links()),
-        Size:           d.GetFilesize(),
-        CumulativeSize: cumulsize,
-        Type:           ndtype,
-    }, nil
+    switch n := nd.(type) {
+    case *dag.ProtoNode:
+        d, err := ft.FromBytes(n.Data())
+        if err != nil {
+            return nil, err
+        }
+
+        var ndtype string
+        switch fsn.Type() {
+        case mfs.TDir:
+            ndtype = "directory"
+        case mfs.TFile:
+            ndtype = "file"
+        default:
+            return nil, fmt.Errorf("unrecognized node type: %s", fsn.Type())
+        }
+
+        return &Object{
+            Hash:           c.String(),
+            Blocks:         len(nd.Links()),
+            Size:           d.GetFilesize(),
+            CumulativeSize: cumulsize,
+            Type:           ndtype,
+        }, nil
+    case *dag.RawNode:
+        return &Object{
+            Hash:           c.String(),
+            Blocks:         0,
+            Size:           cumulsize,
+            CumulativeSize: cumulsize,
+            Type:           "file",
+        }, nil
+    default:
+        return nil, fmt.Errorf("not unixfs node (proto or raw)")
+    }
 }
 
 var FilesCpCmd = &cmds.Command{
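The statNode rewrite above is the stat-side half of raw-leaf support: a *dag.RawNode carries the file bytes directly, so it has no links and no unixfs envelope to decode, and its Size equals its CumulativeSize. A minimal, dependency-free sketch of that rule (the Stat type here is an illustrative stand-in for the command's Object, not the go-ipfs type):

package main

import "fmt"

// Stat mirrors the fields statNode fills in for the Object result.
type Stat struct {
    Blocks         int
    Size           uint64
    CumulativeSize uint64
    Type           string
}

// statRaw models the *dag.RawNode branch above: the block payload is the
// file content itself, so there are no child blocks and both sizes agree.
func statRaw(payloadLen uint64) Stat {
    return Stat{Blocks: 0, Size: payloadLen, CumulativeSize: payloadLen, Type: "file"}
}

func main() {
    fmt.Printf("%+v\n", statRaw(4)) // {Blocks:0 Size:4 CumulativeSize:4 Type:file}
}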
@@ -562,6 +576,13 @@ a beginning offset to write to. The entire length of the input will be written.
 If the '--create' option is specified, the file will be created if it does not
 exist. Nonexistant intermediate directories will not be created.
 
+Newly created files will have the same CID version and hash function of the
+parent directory unless the --cid-version and --hash options are used.
+
+Newly created leaves will be in the legacy format (Protobuf) if the
+CID version is 0, or raw if the CID version is non-zero. Use of the
+--raw-leaves option will override this behavior.
+
 If the '--flush' option is set to false, changes will not be propogated to the
 merkledag root. This can make operations much faster when doing a large number
 of writes to a deeper directory structure.
@@ -587,6 +608,9 @@ stat' on the file or any of its ancestors.
         cmds.BoolOption("create", "e", "Create the file if it does not exist."),
         cmds.BoolOption("truncate", "t", "Truncate the file to size zero before writing."),
         cmds.IntOption("count", "n", "Maximum number of bytes to read."),
+        cmds.BoolOption("raw-leaves", "Use raw blocks for newly created leaf nodes. (experimental)"),
+        cidVersionOption,
+        hashOption,
     },
     Run: func(req cmds.Request, res cmds.Response) {
         path, err := checkPath(req.Arguments()[0])
@@ -598,6 +622,13 @@ stat' on the file or any of its ancestors.
         create, _, _ := req.Option("create").Bool()
         trunc, _, _ := req.Option("truncate").Bool()
         flush, _, _ := req.Option("flush").Bool()
+        rawLeaves, rawLeavesDef, _ := req.Option("raw-leaves").Bool()
+
+        prefix, err := getPrefix(req)
+        if err != nil {
+            res.SetError(err, cmds.ErrNormal)
+            return
+        }
 
         nd, err := req.InvocContext().GetNode()
         if err != nil {
@@ -615,11 +646,14 @@ stat' on the file or any of its ancestors.
             return
         }
 
-        fi, err := getFileHandle(nd.FilesRoot, path, create)
+        fi, err := getFileHandle(nd.FilesRoot, path, create, prefix)
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
         }
+        if rawLeavesDef {
+            fi.RawLeaves = rawLeaves
+        }
 
         wfd, err := fi.Open(mfs.OpenWriteOnly, flush)
         if err != nil {
@@ -685,6 +719,9 @@ var FilesMkdirCmd = &cmds.Command{
         ShortDescription: `
 Create the directory if it does not already exist.
 
+The directory will have the same CID version and hash function of the
+parent directory unless the --cid-version and --hash options are used.
+
 NOTE: All paths must be absolute.
 
 Examples:
@@ -699,6 +736,8 @@ Examples:
     },
     Options: []cmds.Option{
         cmds.BoolOption("parents", "p", "No error if existing, make parent directories as needed."),
+        cidVersionOption,
+        hashOption,
     },
     Run: func(req cmds.Request, res cmds.Response) {
         n, err := req.InvocContext().GetNode()
@@ -716,7 +755,18 @@ Examples:
 
         flush, _, _ := req.Option("flush").Bool()
 
-        err = mfs.Mkdir(n.FilesRoot, dirtomake, dashp, flush)
+        prefix, err := getPrefix(req)
+        if err != nil {
+            res.SetError(err, cmds.ErrNormal)
+            return
+        }
+        root := n.FilesRoot
+
+        err = mfs.Mkdir(root, dirtomake, mfs.MkdirOpts{
+            Mkparents: dashp,
+            Flush:     flush,
+            Prefix:    prefix,
+        })
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
@@ -756,6 +806,72 @@ are run with the '--flush=false'.
     },
 }
 
+var FilesChcidCmd = &cmds.Command{
+    Helptext: cmds.HelpText{
+        Tagline: "Change the cid version or hash function of the root node of a given path.",
+        ShortDescription: `
+Change the cid version or hash function of the root node of a given path.
+`,
+    },
+    Arguments: []cmds.Argument{
+        cmds.StringArg("path", false, false, "Path to change. Default: '/'."),
+    },
+    Options: []cmds.Option{
+        cidVersionOption,
+        hashOption,
+    },
+    Run: func(req cmds.Request, res cmds.Response) {
+        nd, err := req.InvocContext().GetNode()
+        if err != nil {
+            res.SetError(err, cmds.ErrNormal)
+            return
+        }
+
+        path := "/"
+        if len(req.Arguments()) > 0 {
+            path = req.Arguments()[0]
+        }
+
+        flush, _, _ := req.Option("flush").Bool()
+
+        prefix, err := getPrefix(req)
+        if err != nil {
+            res.SetError(err, cmds.ErrNormal)
+            return
+        }
+
+        err = updatePath(nd.FilesRoot, path, prefix, flush)
+        if err != nil {
+            res.SetError(err, cmds.ErrNormal)
+            return
+        }
+    },
+}
+
+func updatePath(rt *mfs.Root, pth string, prefix *cid.Prefix, flush bool) error {
+    if prefix == nil {
+        return nil
+    }
+
+    nd, err := mfs.Lookup(rt, pth)
+    if err != nil {
+        return err
+    }
+
+    switch n := nd.(type) {
+    case *mfs.Directory:
+        n.SetPrefix(prefix)
+    default:
+        return fmt.Errorf("can only update directories")
+    }
+
+    if flush {
+        nd.Flush()
+    }
+
+    return nil
+}
+
 var FilesRmCmd = &cmds.Command{
     Helptext: cmds.HelpText{
         Tagline: "Remove a file.",
@@ -860,8 +976,36 @@ Remove files or directories.
     },
 }
 
-func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) {
+func getPrefix(req cmds.Request) (*cid.Prefix, error) {
+    cidVer, cidVerSet, _ := req.Option("cid-version").Int()
+    hashFunStr, hashFunSet, _ := req.Option("hash").String()
+
+    if !cidVerSet && !hashFunSet {
+        return nil, nil
+    }
+
+    if hashFunSet && cidVer == 0 {
+        cidVer = 1
+    }
+
+    prefix, err := dag.PrefixForCidVersion(cidVer)
+    if err != nil {
+        return nil, err
+    }
+
+    if hashFunSet {
+        hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)]
+        if !ok {
+            return nil, fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr))
+        }
+        prefix.MhType = hashFunCode
+        prefix.MhLength = -1
+    }
+
+    return &prefix, nil
+}
+
+func getFileHandle(r *mfs.Root, path string, create bool, prefix *cid.Prefix) (*mfs.File, error) {
     target, err := mfs.Lookup(r, path)
     switch err {
     case nil:
@@ -887,8 +1031,12 @@ func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) {
         if !ok {
             return nil, fmt.Errorf("%s was not a directory", dirname)
         }
+        if prefix == nil {
+            prefix = pdir.GetPrefix()
+        }
 
         nd := dag.NodeWithData(ft.FilePBData(nil, 0))
+        nd.SetPrefix(prefix)
         err = pdir.AddChild(fname, nd)
         if err != nil {
             return nil, err
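getPrefix and getFileHandle together implement a small precedence rule: explicit --cid-version/--hash options produce a prefix, naming a hash function silently upgrades the CID version to 1, and a nil prefix means "inherit from the parent directory". A dependency-free sketch of the option-to-prefix step (Prefix and hashNames are simplified stand-ins for cid.Prefix and mh.Names; the multihash codes are the real spec values):

package main

import (
    "fmt"
    "strings"
)

// Prefix is a simplified stand-in for cid.Prefix.
type Prefix struct {
    Version  int
    MhType   uint64
    MhLength int
}

// A tiny slice of the multihash name table (codes from the multihash spec).
var hashNames = map[string]uint64{"sha2-256": 0x12, "sha3-512": 0x14}

// prefixFromOptions mirrors getPrefix above: no options yields nil (meaning
// "inherit"), and an explicit hash function forces CIDv1.
func prefixFromOptions(cidVer int, cidVerSet bool, hashFun string) (*Prefix, error) {
    if !cidVerSet && hashFun == "" {
        return nil, nil
    }
    if hashFun != "" && cidVer == 0 {
        cidVer = 1
    }
    p := &Prefix{Version: cidVer, MhType: hashNames["sha2-256"], MhLength: -1}
    if hashFun != "" {
        code, ok := hashNames[strings.ToLower(hashFun)]
        if !ok {
            return nil, fmt.Errorf("unrecognized hash function: %s", hashFun)
        }
        p.MhType = code
    }
    return p, nil
}

func main() {
    p, _ := prefixFromOptions(0, false, "sha3-512")
    fmt.Printf("%+v\n", *p) // Version:1 -- the hash option forced CIDv1
}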
@@ -119,7 +119,6 @@ func (adder *Adder) mfsRoot() (*mfs.Root, error) {
     rnode := unixfs.EmptyDirNode()
     rnode.SetPrefix(adder.Prefix)
     mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil)
-    mr.Prefix = adder.Prefix
     if err != nil {
         return nil, err
     }
@@ -398,7 +397,12 @@ func (adder *Adder) addNode(node node.Node, path string) error {
     }
     dir := gopath.Dir(path)
     if dir != "." {
-        if err := mfs.Mkdir(mr, dir, true, false); err != nil {
+        opts := mfs.MkdirOpts{
+            Mkparents: true,
+            Flush:     false,
+            Prefix:    adder.Prefix,
+        }
+        if err := mfs.Mkdir(mr, dir, opts); err != nil {
             return err
         }
     }
@@ -496,7 +500,11 @@ func (adder *Adder) addDir(dir files.File) error {
     if err != nil {
         return err
     }
-    err = mfs.Mkdir(mr, dir.FileName(), true, false)
+    err = mfs.Mkdir(mr, dir.FileName(), mfs.MkdirOpts{
+        Mkparents: true,
+        Flush:     false,
+        Prefix:    adder.Prefix,
+    })
     if err != nil {
         return err
     }
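These call sites show why the diff replaces Mkdir's positional booleans with an options struct: adding the Prefix knob would otherwise have meant a fourth anonymous argument at every caller. A dependency-free sketch of the pattern (MkdirOpts's shape is taken from the mfs/ops.go hunk further below; the Prefix field is elided so the example needs no go-cid import):

package main

import "fmt"

// MkdirOpts mirrors the struct introduced in mfs/ops.go (Prefix elided here
// to keep the sketch self-contained).
type MkdirOpts struct {
    Mkparents bool // create intermediate directories
    Flush     bool // publish the new root immediately
}

func Mkdir(path string, opts MkdirOpts) error {
    fmt.Printf("mkdir %q parents=%v flush=%v\n", path, opts.Mkparents, opts.Flush)
    return nil
}

func main() {
    // Field names document each knob at the call site, unlike the old
    // Mkdir(path, true, false) form.
    _ = Mkdir("/a/b/c", MkdirOpts{Mkparents: true})
}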
@@ -120,8 +120,8 @@ func (db *DagBuilderHelper) NewUnixfsNode() *UnixfsNode {
     return n
 }
 
-// NewUnixfsBlock creates a new Unixfs node to represent a raw data block
-func (db *DagBuilderHelper) NewUnixfsBlock() *UnixfsNode {
+// newUnixfsBlock creates a new Unixfs node to represent a raw data block
+func (db *DagBuilderHelper) newUnixfsBlock() *UnixfsNode {
     n := &UnixfsNode{
         node: new(dag.ProtoNode),
         ufmt: &ft.FSNode{Type: ft.TRaw},
@@ -181,7 +181,7 @@ func (db *DagBuilderHelper) GetNextDataNode() (*UnixfsNode, error) {
             }, nil
         }
     } else {
-        blk := db.NewUnixfsBlock()
+        blk := db.newUnixfsBlock()
         blk.SetData(data)
         return blk, nil
     }
@@ -19,10 +19,23 @@ import (
     u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util"
 )
 
-func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.ProtoNode, error) {
+type UseRawLeaves bool
+
+const (
+    ProtoBufLeaves UseRawLeaves = false
+    RawLeaves      UseRawLeaves = true
+)
+
+func runBothSubtests(t *testing.T, tfunc func(*testing.T, UseRawLeaves)) {
+    t.Run("leaves=ProtoBuf", func(t *testing.T) { tfunc(t, ProtoBufLeaves) })
+    t.Run("leaves=Raw", func(t *testing.T) { tfunc(t, RawLeaves) })
+}
+
+func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter, rawLeaves UseRawLeaves) (*merkledag.ProtoNode, error) {
     dbp := h.DagBuilderParams{
         Dagserv:  ds,
         Maxlinks: h.DefaultLinksPerBlock,
+        RawLeaves: bool(rawLeaves),
     }
 
     nd, err := TrickleLayout(dbp.New(spl))
@@ -35,22 +48,31 @@ func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.ProtoNode, error) {
         return nil, merkledag.ErrNotProtobuf
     }
 
-    return pbnd, VerifyTrickleDagStructure(pbnd, ds, dbp.Maxlinks, layerRepeat)
+    return pbnd, VerifyTrickleDagStructure(pbnd, VerifyParams{
+        Getter:      ds,
+        Direct:      dbp.Maxlinks,
+        LayerRepeat: layerRepeat,
+        RawLeaves:   bool(rawLeaves),
+    })
 }
 
 //Test where calls to read are smaller than the chunk size
 func TestSizeBasedSplit(t *testing.T) {
+    runBothSubtests(t, testSizeBasedSplit)
+}
+
+func testSizeBasedSplit(t *testing.T, rawLeaves UseRawLeaves) {
     if testing.Short() {
         t.SkipNow()
     }
     bs := chunk.SizeSplitterGen(512)
-    testFileConsistency(t, bs, 32*512)
+    testFileConsistency(t, bs, 32*512, rawLeaves)
 
     bs = chunk.SizeSplitterGen(4096)
-    testFileConsistency(t, bs, 32*4096)
+    testFileConsistency(t, bs, 32*4096, rawLeaves)
 
     // Uneven offset
-    testFileConsistency(t, bs, 31*4095)
+    testFileConsistency(t, bs, 31*4095, rawLeaves)
 }
 
 func dup(b []byte) []byte {
@@ -59,13 +81,13 @@ func dup(b []byte) []byte {
     return o
 }
 
-func testFileConsistency(t *testing.T, bs chunk.SplitterGen, nbytes int) {
+func testFileConsistency(t *testing.T, bs chunk.SplitterGen, nbytes int, rawLeaves UseRawLeaves) {
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, bs(read))
+    nd, err := buildTestDag(ds, bs(read), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -87,12 +109,16 @@ func testFileConsistency(t *testing.T, bs chunk.SplitterGen, nbytes int) {
 }
 
 func TestBuilderConsistency(t *testing.T) {
+    runBothSubtests(t, testBuilderConsistency)
+}
+
+func testBuilderConsistency(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := 100000
     buf := new(bytes.Buffer)
     io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes))
     should := dup(buf.Bytes())
     dagserv := mdtest.Mock()
-    nd, err := buildTestDag(dagserv, chunk.DefaultSplitter(buf))
+    nd, err := buildTestDag(dagserv, chunk.DefaultSplitter(buf), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -125,6 +151,10 @@ func arrComp(a, b []byte) error {
 }
 
 func TestIndirectBlocks(t *testing.T) {
+    runBothSubtests(t, testIndirectBlocks)
+}
+
+func testIndirectBlocks(t *testing.T, rawLeaves UseRawLeaves) {
     splitter := chunk.SizeSplitterGen(512)
     nbytes := 1024 * 1024
     buf := make([]byte, nbytes)
@@ -133,7 +163,7 @@ func TestIndirectBlocks(t *testing.T) {
     read := bytes.NewReader(buf)
 
     ds := mdtest.Mock()
-    dag, err := buildTestDag(ds, splitter(read))
+    dag, err := buildTestDag(ds, splitter(read), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -154,13 +184,17 @@ func TestIndirectBlocks(t *testing.T) {
 }
 
 func TestSeekingBasic(t *testing.T) {
+    runBothSubtests(t, testSeekingBasic)
+}
+
+func testSeekingBasic(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(10 * 1024)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 512))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 512), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -191,13 +225,17 @@ func TestSeekingBasic(t *testing.T) {
 }
 
 func TestSeekToBegin(t *testing.T) {
+    runBothSubtests(t, testSeekToBegin)
+}
+
+func testSeekToBegin(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(10 * 1024)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -235,13 +273,17 @@ func TestSeekToBegin(t *testing.T) {
 }
 
 func TestSeekToAlmostBegin(t *testing.T) {
+    runBothSubtests(t, testSeekToAlmostBegin)
+}
+
+func testSeekToAlmostBegin(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(10 * 1024)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -279,13 +321,17 @@ func TestSeekToAlmostBegin(t *testing.T) {
 }
 
 func TestSeekEnd(t *testing.T) {
+    runBothSubtests(t, testSeekEnd)
+}
+
+func testSeekEnd(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(50 * 1024)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -305,13 +351,17 @@ func TestSeekEnd(t *testing.T) {
 }
 
 func TestSeekEndSingleBlockFile(t *testing.T) {
+    runBothSubtests(t, testSeekEndSingleBlockFile)
+}
+
+func testSeekEndSingleBlockFile(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(100)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 5000))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 5000), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -331,13 +381,17 @@ func TestSeekEndSingleBlockFile(t *testing.T) {
 }
 
 func TestSeekingStress(t *testing.T) {
+    runBothSubtests(t, testSeekingStress)
+}
+
+func testSeekingStress(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(1024 * 1024)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 1000))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 1000), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -376,13 +430,17 @@ func TestSeekingStress(t *testing.T) {
 }
 
 func TestSeekingConsistency(t *testing.T) {
+    runBothSubtests(t, testSeekingConsistency)
+}
+
+func testSeekingConsistency(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(128 * 1024)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(should)
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
@@ -419,6 +477,10 @@ func TestSeekingConsistency(t *testing.T) {
 }
 
 func TestAppend(t *testing.T) {
+    runBothSubtests(t, testAppend)
+}
+
+func testAppend(t *testing.T, rawLeaves UseRawLeaves) {
     nbytes := int64(128 * 1024)
     should := make([]byte, nbytes)
     u.NewTimeSeededRand().Read(should)
@@ -426,14 +488,15 @@ func TestAppend(t *testing.T) {
     // Reader for half the bytes
     read := bytes.NewReader(should[:nbytes/2])
     ds := mdtest.Mock()
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
 
     dbp := &h.DagBuilderParams{
         Dagserv:  ds,
         Maxlinks: h.DefaultLinksPerBlock,
+        RawLeaves: bool(rawLeaves),
     }
 
     r := bytes.NewReader(should[nbytes/2:])
@@ -444,7 +507,12 @@ func TestAppend(t *testing.T) {
         t.Fatal(err)
     }
 
-    err = VerifyTrickleDagStructure(nnode, ds, dbp.Maxlinks, layerRepeat)
+    err = VerifyTrickleDagStructure(nnode, VerifyParams{
+        Getter:      ds,
+        Direct:      dbp.Maxlinks,
+        LayerRepeat: layerRepeat,
+        RawLeaves:   bool(rawLeaves),
+    })
     if err != nil {
         t.Fatal(err)
     }
@@ -467,6 +535,10 @@ func TestAppend(t *testing.T) {
 
 // This test appends one byte at a time to an empty file
 func TestMultipleAppends(t *testing.T) {
+    runBothSubtests(t, testMultipleAppends)
+}
+
+func testMultipleAppends(t *testing.T, rawLeaves UseRawLeaves) {
     ds := mdtest.Mock()
 
     // TODO: fix small size appends and make this number bigger
@@ -475,14 +547,15 @@ func TestMultipleAppends(t *testing.T) {
     u.NewTimeSeededRand().Read(should)
 
     read := bytes.NewReader(nil)
-    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500))
+    nd, err := buildTestDag(ds, chunk.NewSizeSplitter(read, 500), rawLeaves)
     if err != nil {
         t.Fatal(err)
     }
 
     dbp := &h.DagBuilderParams{
         Dagserv:  ds,
         Maxlinks: 4,
+        RawLeaves: bool(rawLeaves),
     }
 
     spl := chunk.SizeSplitterGen(500)
@@ -495,7 +568,12 @@ func TestMultipleAppends(t *testing.T) {
         t.Fatal(err)
     }
 
-    err = VerifyTrickleDagStructure(nnode, ds, dbp.Maxlinks, layerRepeat)
+    err = VerifyTrickleDagStructure(nnode, VerifyParams{
+        Getter:      ds,
+        Direct:      dbp.Maxlinks,
+        LayerRepeat: layerRepeat,
+        RawLeaves:   bool(rawLeaves),
+    })
     if err != nil {
         t.Fatal(err)
     }
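The test diff applies one refactoring pattern throughout: each TestX body moves into a testX(t, rawLeaves) helper and the exported test becomes a two-line driver, so every case runs once per leaf format. The driver below is lifted directly from the diff and is runnable on its own as a _test.go file; TestExample is an illustrative stand-in for the real test bodies:

package trickle

import "testing"

// UseRawLeaves and runBothSubtests are taken verbatim from the diff above.
type UseRawLeaves bool

const (
    ProtoBufLeaves UseRawLeaves = false
    RawLeaves      UseRawLeaves = true
)

func runBothSubtests(t *testing.T, tfunc func(*testing.T, UseRawLeaves)) {
    t.Run("leaves=ProtoBuf", func(t *testing.T) { tfunc(t, ProtoBufLeaves) })
    t.Run("leaves=Raw", func(t *testing.T) { tfunc(t, RawLeaves) })
}

// TestExample shows the subtest names that `go test -run` can target,
// e.g. -run 'TestExample/leaves=Raw'.
func TestExample(t *testing.T) {
    runBothSubtests(t, func(t *testing.T, raw UseRawLeaves) {
        t.Logf("running with raw leaves = %v", bool(raw))
    })
}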
@@ -9,6 +9,7 @@ import (
     dag "github.com/ipfs/go-ipfs/merkledag"
     ft "github.com/ipfs/go-ipfs/unixfs"
 
+    cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid"
     node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format"
 )
 
@@ -234,36 +235,78 @@ func trickleDepthInfo(node *h.UnixfsNode, maxlinks int) (int, int) {
     return ((n - maxlinks) / layerRepeat) + 1, (n - maxlinks) % layerRepeat
 }
 
+// VerifyParams is used by VerifyTrickleDagStructure
+type VerifyParams struct {
+    Getter      node.NodeGetter
+    Direct      int
+    LayerRepeat int
+    Prefix      *cid.Prefix
+    RawLeaves   bool
+}
+
 // VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure
 // layout
-func VerifyTrickleDagStructure(nd node.Node, ds dag.DAGService, direct int, layerRepeat int) error {
-    pbnd, ok := nd.(*dag.ProtoNode)
-    if !ok {
-        return dag.ErrNotProtobuf
-    }
-
-    return verifyTDagRec(pbnd, -1, direct, layerRepeat, ds)
+func VerifyTrickleDagStructure(nd node.Node, p VerifyParams) error {
+    return verifyTDagRec(nd, -1, p)
 }
 
 // Recursive call for verifying the structure of a trickledag
-func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAGService) error {
+func verifyTDagRec(n node.Node, depth int, p VerifyParams) error {
+    codec := cid.DagProtobuf
     if depth == 0 {
-        // zero depth dag is raw data block
-        if len(nd.Links()) > 0 {
+        if len(n.Links()) > 0 {
             return errors.New("expected direct block")
         }
+        // zero depth dag is raw data block
+        switch nd := n.(type) {
+        case *dag.ProtoNode:
+            pbn, err := ft.FromBytes(nd.Data())
+            if err != nil {
+                return err
+            }
 
-        pbn, err := ft.FromBytes(nd.Data())
-        if err != nil {
-            return err
-        }
-
-        if pbn.GetType() != ft.TRaw {
-            return errors.New("Expected raw block")
+            if pbn.GetType() != ft.TRaw {
+                return errors.New("Expected raw block")
+            }
+
+            if p.RawLeaves {
+                return errors.New("expected raw leaf, got a protobuf node")
+            }
+        case *dag.RawNode:
+            if !p.RawLeaves {
+                return errors.New("expected protobuf node as leaf")
+            }
+            codec = cid.Raw
+        default:
+            return errors.New("expected ProtoNode or RawNode")
         }
+    }
+
+    // verify prefix
+    if p.Prefix != nil {
+        prefix := n.Cid().Prefix()
+        expect := *p.Prefix // make a copy
+        expect.Codec = uint64(codec)
+        if codec == cid.Raw && expect.Version == 0 {
+            expect.Version = 1
+        }
+        if expect.MhLength == -1 {
+            expect.MhLength = prefix.MhLength
+        }
+        if prefix != expect {
+            return fmt.Errorf("unexpected cid prefix: expected: %v; got %v", expect, prefix)
+        }
+    }
+
+    if depth == 0 {
         return nil
     }
 
+    nd, ok := n.(*dag.ProtoNode)
+    if !ok {
+        return errors.New("expected ProtoNode")
+    }
+
     // Verify this is a branch node
     pbn, err := ft.FromBytes(nd.Data())
     if err != nil {
@@ -279,29 +322,24 @@ func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAGService) error {
     }
 
     for i := 0; i < len(nd.Links()); i++ {
-        childi, err := nd.Links()[i].GetNode(context.TODO(), ds)
+        child, err := nd.Links()[i].GetNode(context.TODO(), p.Getter)
         if err != nil {
             return err
         }
 
-        childpb, ok := childi.(*dag.ProtoNode)
-        if !ok {
-            return fmt.Errorf("cannot operate on non-protobuf nodes")
-        }
-
-        if i < direct {
+        if i < p.Direct {
             // Direct blocks
-            err := verifyTDagRec(childpb, 0, direct, layerRepeat, ds)
+            err := verifyTDagRec(child, 0, p)
             if err != nil {
                 return err
             }
         } else {
             // Recursive trickle dags
-            rdepth := ((i - direct) / layerRepeat) + 1
+            rdepth := ((i - p.Direct) / p.LayerRepeat) + 1
             if rdepth >= depth && depth > 0 {
                 return errors.New("Child dag was too deep!")
             }
-            err := verifyTDagRec(childpb, rdepth, direct, layerRepeat, ds)
+            err := verifyTDagRec(child, rdepth, p)
             if err != nil {
                 return err
             }
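The prefix check added to verifyTDagRec encodes two CID invariants: a raw-codec block can only be expressed as CIDv1, and an MhLength of -1 means "default length for the hash". A dependency-free model of that expectation (Prefix is a stand-in for cid.Prefix; 0x70 and 0x55 are the real multicodec values for dag-pb and raw):

package main

import "fmt"

// Prefix models the comparable fields of cid.Prefix.
type Prefix struct {
    Version  uint64
    Codec    uint64
    MhType   uint64
    MhLength int
}

const (
    DagProtobuf = 0x70 // multicodec dag-pb
    Raw         = 0x55 // multicodec raw
)

// expectedPrefix mirrors the check above: the expected prefix is the
// requested one, except that the codec follows the node kind, a raw codec
// forces CIDv1, and -1 defers to the observed hash length.
func expectedPrefix(requested Prefix, codec uint64, got Prefix) Prefix {
    expect := requested // make a copy
    expect.Codec = codec
    if codec == Raw && expect.Version == 0 {
        expect.Version = 1 // CIDv0 cannot carry a raw-codec block
    }
    if expect.MhLength == -1 {
        expect.MhLength = got.MhLength // -1 means "default length"
    }
    return expect
}

func main() {
    req := Prefix{Version: 0, Codec: DagProtobuf, MhType: 0x12, MhLength: -1}
    got := Prefix{Version: 1, Codec: Raw, MhType: 0x12, MhLength: 32}
    fmt.Println(expectedPrefix(req, Raw, got) == got) // true
}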
@@ -42,6 +42,12 @@ var v1CidPrefix = cid.Prefix{
     Version: 1,
 }
 
+// V0CidPrefix returns a prefix for CIDv0
+func V0CidPrefix() cid.Prefix { return v0CidPrefix }
+
+// V1CidPrefix returns a prefix for CIDv1 with the default settings
+func V1CidPrefix() cid.Prefix { return v1CidPrefix }
+
 // PrefixForCidVersion returns the Protobuf prefix for a given CID version
 func PrefixForCidVersion(version int) (cid.Prefix, error) {
     switch version {
@@ -58,6 +58,11 @@ func NewDirectory(ctx context.Context, name string, node node.Node, parent child
     }, nil
 }
 
+// GetPrefix gets the CID prefix of the root node
+func (d *Directory) GetPrefix() *cid.Prefix {
+    return d.dirbuilder.GetPrefix()
+}
+
 // SetPrefix sets the CID prefix
 func (d *Directory) SetPrefix(prefix *cid.Prefix) {
     d.dirbuilder.SetPrefix(prefix)
@@ -299,6 +304,7 @@ func (d *Directory) Mkdir(name string) (*Directory, error) {
     }
 
     ndir := ft.EmptyDirNode()
+    ndir.SetPrefix(d.GetPrefix())
 
     _, err = d.dserv.Add(ndir)
     if err != nil {

mfs/file.go
@@ -23,16 +23,23 @@ type File struct {
     dserv  dag.DAGService
     node   node.Node
     nodelk sync.Mutex
+
+    RawLeaves bool
 }
 
-// NewFile returns a NewFile object with the given parameters
+// NewFile returns a NewFile object with the given parameters. If the
+// Cid version is non-zero RawLeaves will be enabled.
 func NewFile(name string, node node.Node, parent childCloser, dserv dag.DAGService) (*File, error) {
-    return &File{
+    fi := &File{
         dserv:  dserv,
         parent: parent,
         name:   name,
         node:   node,
-    }, nil
+    }
+    if node.Cid().Prefix().Version > 0 {
+        fi.RawLeaves = true
+    }
+    return fi, nil
 }
 
 const (
@@ -79,6 +86,7 @@ func (fi *File) Open(flags int, sync bool) (FileDescriptor, error) {
     if err != nil {
         return nil, err
     }
+    dmod.RawLeaves = fi.RawLeaves
 
     return &fileDescriptor{
         inode: fi,
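mfs/file.go threads the leaf format through two hops: NewFile derives RawLeaves from the node's CID version, and Open copies it onto the dag modifier so subsequent writes keep the same format. A dependency-free sketch of that flow (DagModifier and File are simplified stand-ins for the go-ipfs types):

package main

import "fmt"

// DagModifier stands in for the unixfs dag modifier configured in Open.
type DagModifier struct{ RawLeaves bool }

// File stands in for mfs.File.
type File struct{ RawLeaves bool }

// NewFile applies the rule from the diff: a non-zero CID version on the
// file's root node enables raw leaves.
func NewFile(cidVersion uint64) *File {
    fi := &File{}
    if cidVersion > 0 {
        fi.RawLeaves = true
    }
    return fi
}

// Open propagates the format so every future write uses the same leaf type.
func (fi *File) Open() *DagModifier {
    return &DagModifier{RawLeaves: fi.RawLeaves}
}

func main() {
    fmt.Println(NewFile(1).Open().RawLeaves) // true
}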
@@ -735,7 +735,7 @@ func TestMfsHugeDir(t *testing.T) {
     _, rt := setupRoot(ctx, t)
 
     for i := 0; i < 10000; i++ {
-        err := Mkdir(rt, fmt.Sprintf("/dir%d", i), false, false)
+        err := Mkdir(rt, fmt.Sprintf("/dir%d", i), MkdirOpts{Mkparents: false, Flush: false})
         if err != nil {
             t.Fatal(err)
         }
@@ -747,7 +747,7 @@ func TestMkdirP(t *testing.T) {
     defer cancel()
     _, rt := setupRoot(ctx, t)
 
-    err := Mkdir(rt, "/a/b/c/d/e/f", true, true)
+    err := Mkdir(rt, "/a/b/c/d/e/f", MkdirOpts{Mkparents: true, Flush: true})
     if err != nil {
         t.Fatal(err)
     }

mfs/ops.go
@@ -9,6 +9,7 @@ import (
 
     path "github.com/ipfs/go-ipfs/path"
 
+    cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid"
     node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format"
 )
 
@@ -97,9 +98,16 @@ func PutNode(r *Root, path string, nd node.Node) error {
     return pdir.AddChild(filename, nd)
 }
 
+// MkdirOpts is used by Mkdir
+type MkdirOpts struct {
+    Mkparents bool
+    Flush     bool
+    Prefix    *cid.Prefix
+}
+
 // Mkdir creates a directory at 'path' under the directory 'd', creating
 // intermediary directories as needed if 'mkparents' is set to true
-func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
+func Mkdir(r *Root, pth string, opts MkdirOpts) error {
     if pth == "" {
         return fmt.Errorf("no path given to Mkdir")
     }
@@ -115,7 +123,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
 
     if len(parts) == 0 {
         // this will only happen on 'mkdir /'
-        if mkparents {
+        if opts.Mkparents {
             return nil
         }
         return fmt.Errorf("cannot create directory '/': Already exists")
@@ -124,12 +132,14 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
     cur := r.GetValue().(*Directory)
     for i, d := range parts[:len(parts)-1] {
         fsn, err := cur.Child(d)
-        if err == os.ErrNotExist && mkparents {
+        if err == os.ErrNotExist && opts.Mkparents {
             mkd, err := cur.Mkdir(d)
             if err != nil {
                 return err
             }
-            mkd.SetPrefix(r.Prefix)
+            if opts.Prefix != nil {
+                mkd.SetPrefix(opts.Prefix)
+            }
             fsn = mkd
         } else if err != nil {
             return err
@@ -144,13 +154,15 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
 
     final, err := cur.Mkdir(parts[len(parts)-1])
     if err != nil {
-        if !mkparents || err != os.ErrExist || final == nil {
+        if !opts.Mkparents || err != os.ErrExist || final == nil {
             return err
         }
     }
-    final.SetPrefix(r.Prefix)
+    if opts.Prefix != nil {
+        final.SetPrefix(opts.Prefix)
+    }
 
-    if flush {
+    if opts.Flush {
         err := final.Flush()
         if err != nil {
             return err
@@ -61,9 +61,6 @@ type Root struct {
     dserv dag.DAGService
 
     Type string
-
-    // Prefix to use for any children created
-    Prefix *cid.Prefix
 }
 
 type PubFunc func(context.Context, *cid.Cid) error
@@ -46,11 +46,15 @@ verify_dir_contents() {
 }
 
 test_sharding() {
-    test_expect_success "make a directory" '
-        ipfs files mkdir /foo
+    local EXTRA ARGS
+    EXTRA=$1
+    ARGS=$2 # only applied to the initial directory
+
+    test_expect_success "make a directory $EXTRA" '
+        ipfs files mkdir $ARGS /foo
     '
 
-    test_expect_success "can make 100 files in a directory" '
+    test_expect_success "can make 100 files in a directory $EXTRA" '
         printf "" > list_exp_raw
         for i in `seq 100`
         do
@@ -59,144 +63,164 @@ test_sharding() {
         done
     '
 
-    test_expect_success "listing works" '
+    test_expect_success "listing works $EXTRA" '
         ipfs files ls /foo |sort > list_out &&
         sort list_exp_raw > list_exp &&
         test_cmp list_exp list_out
     '
 
-    test_expect_success "can read a file from sharded directory" '
+    test_expect_success "can read a file from sharded directory $EXTRA" '
        ipfs files read /foo/file65 > file_out &&
        echo "65" > file_exp &&
        test_cmp file_out file_exp
     '
 
-    test_expect_success "can pin a file from sharded directory" '
+    test_expect_success "can pin a file from sharded directory $EXTRA" '
        ipfs files stat --hash /foo/file42 > pin_file_hash &&
        ipfs pin add < pin_file_hash > pin_hash
     '
 
-    test_expect_success "can unpin a file from sharded directory" '
+    test_expect_success "can unpin a file from sharded directory $EXTRA" '
        read -r _ HASH _ < pin_hash &&
        ipfs pin rm $HASH
     '
 
-    test_expect_success "output object was really sharded" '
+    test_expect_success "output object was really sharded and has correct hash $EXTRA" '
         ipfs files stat --hash /foo > expected_foo_hash &&
-        echo QmPkwLJTYZRGPJ8Lazr9qPdrLmswPtUjaDbEpmR9jEh1se > actual_foo_hash &&
+        echo $SHARD_HASH > actual_foo_hash &&
         test_cmp expected_foo_hash actual_foo_hash
     '
+
+    test_expect_success "clean up $EXTRA" '
+        ipfs files rm -r /foo
+    '
 }
 
 test_files_api() {
-    ROOT_HASH=$1
+    local EXTRA ARGS RAW_LEAVES
+    EXTRA=$1
+    ARGS=$2
+    RAW_LEAVES=$3
 
-    test_expect_success "can mkdir in root" '
-        ipfs files mkdir /cats
+    test_expect_success "can mkdir in root $EXTRA" '
+        ipfs files mkdir $ARGS /cats
     '
 
-    test_expect_success "'files ls' lists root by default" '
+    test_expect_success "'files ls' lists root by default $EXTRA" '
        ipfs files ls >actual &&
        echo "cats" >expected &&
        test_cmp expected actual
     '
 
-    test_expect_success "directory was created" '
+    test_expect_success "directory was created $EXTRA" '
        verify_path_exists /cats
     '
 
-    test_expect_success "directory is empty" '
+    test_expect_success "directory is empty $EXTRA" '
        verify_dir_contents /cats
     '
     # we do verification of stat formatting now as we depend on it
 
-    test_expect_success "stat works" '
+    test_expect_success "stat works $EXTRA" '
        ipfs files stat / >stat
     '
 
-    test_expect_success "hash is first line of stat" '
+    test_expect_success "hash is first line of stat $EXTRA" '
        ipfs ls $(head -1 stat) | grep "cats"
     '
 
-    test_expect_success "stat --hash gives only hash" '
+    test_expect_success "stat --hash gives only hash $EXTRA" '
        ipfs files stat --hash / >actual &&
        head -n1 stat >expected &&
        test_cmp expected actual
     '
 
-    test_expect_success "stat with multiple format options should fail" '
+    test_expect_success "stat with multiple format options should fail $EXTRA" '
        test_must_fail ipfs files stat --hash --size /
     '
 
-    test_expect_success "compare hash option with format" '
+    test_expect_success "compare hash option with format $EXTRA" '
        ipfs files stat --hash / >expected &&
        ipfs files stat --format='"'"'<hash>'"'"' / >actual &&
        test_cmp expected actual
     '
-    test_expect_success "compare size option with format" '
+    test_expect_success "compare size option with format $EXTRA" '
        ipfs files stat --size / >expected &&
        ipfs files stat --format='"'"'<cumulsize>'"'"' / >actual &&
        test_cmp expected actual
     '
 
-    test_expect_success "check root hash" '
+    test_expect_success "check root hash $EXTRA" '
        ipfs files stat --hash / > roothash
     '
 
-    test_expect_success "cannot mkdir /" '
-        test_expect_code 1 ipfs files mkdir /
+    test_expect_success "cannot mkdir / $EXTRA" '
+        test_expect_code 1 ipfs files mkdir $ARGS /
     '
 
-    test_expect_success "check root hash was not changed" '
+    test_expect_success "check root hash was not changed $EXTRA" '
        ipfs files stat --hash / > roothashafter &&
        test_cmp roothash roothashafter
     '
 
-    test_expect_success "can put files into directory" '
+    test_expect_success "can put files into directory $EXTRA" '
        ipfs files cp /ipfs/$FILE1 /cats/file1
     '
 
-    test_expect_success "file shows up in directory" '
+    test_expect_success "file shows up in directory $EXTRA" '
        verify_dir_contents /cats file1
     '
 
-    test_expect_success "file has correct hash and size in directory" '
+    test_expect_success "file has correct hash and size in directory $EXTRA" '
        echo "file1 $FILE1 4" > ls_l_expected &&
        ipfs files ls -l /cats > ls_l_actual &&
        test_cmp ls_l_expected ls_l_actual
     '
 
-    test_expect_success "can read file" '
+    test_expect_success "can stat file $EXTRA" '
+       ipfs files stat /cats/file1 > file1stat_orig
+    '
+
+    test_expect_success "stat output looks good" '
+       grep -v CumulativeSize: file1stat_orig > file1stat_actual &&
+       echo "$FILE1" > file1stat_expect &&
+       echo "Size: 4" >> file1stat_expect &&
+       echo "ChildBlocks: 0" >> file1stat_expect &&
+       echo "Type: file" >> file1stat_expect &&
+       test_cmp file1stat_expect file1stat_actual
+    '
+
+    test_expect_success "can read file $EXTRA" '
        ipfs files read /cats/file1 > file1out
     '
 
-    test_expect_success "output looks good" '
+    test_expect_success "output looks good $EXTRA" '
        echo foo > expected &&
        test_cmp expected file1out
     '
 
-    test_expect_success "can put another file into root" '
+    test_expect_success "can put another file into root $EXTRA" '
        ipfs files cp /ipfs/$FILE2 /file2
     '
 
-    test_expect_success "file shows up in root" '
+    test_expect_success "file shows up in root $EXTRA" '
        verify_dir_contents / file2 cats
     '
 
-    test_expect_success "can read file" '
+    test_expect_success "can read file $EXTRA" '
        ipfs files read /file2 > file2out
     '
 
-    test_expect_success "output looks good" '
+    test_expect_success "output looks good $EXTRA" '
        echo bar > expected &&
        test_cmp expected file2out
     '
 
-    test_expect_success "can make deep directory" '
-        ipfs files mkdir -p /cats/this/is/a/dir
+    test_expect_success "can make deep directory $EXTRA" '
+        ipfs files mkdir $ARGS -p /cats/this/is/a/dir
     '
 
-    test_expect_success "directory was created correctly" '
+    test_expect_success "directory was created correctly $EXTRA" '
        verify_path_exists /cats/this/is/a/dir &&
        verify_dir_contents /cats this file1 &&
        verify_dir_contents /cats/this is &&
@@ -205,362 +229,435 @@ test_files_api() {
        verify_dir_contents /cats/this/is/a/dir
     '
 
-    test_expect_success "can copy file into new dir" '
+    test_expect_success "can copy file into new dir $EXTRA" '
        ipfs files cp /ipfs/$FILE3 /cats/this/is/a/dir/file3
     '
 
-    test_expect_success "can read file" '
+    test_expect_success "can read file $EXTRA" '
        ipfs files read /cats/this/is/a/dir/file3 > output
     '
 
-    test_expect_success "output looks good" '
+    test_expect_success "output looks good $EXTRA" '
        echo baz > expected &&
        test_cmp expected output
     '
 
-    test_expect_success "file shows up in dir" '
+    test_expect_success "file shows up in dir $EXTRA" '
        verify_dir_contents /cats/this/is/a/dir file3
     '
 
-    test_expect_success "can remove file" '
+    test_expect_success "can remove file $EXTRA" '
        ipfs files rm /cats/this/is/a/dir/file3
     '
 
-    test_expect_success "file no longer appears" '
+    test_expect_success "file no longer appears $EXTRA" '
        verify_dir_contents /cats/this/is/a/dir
     '
 
-    test_expect_success "can remove dir" '
+    test_expect_success "can remove dir $EXTRA" '
        ipfs files rm -r /cats/this/is/a/dir
     '
 
-    test_expect_success "dir no longer appears" '
+    test_expect_success "dir no longer appears $EXTRA" '
        verify_dir_contents /cats/this/is/a
     '
 
-    test_expect_success "can remove file from root" '
+    test_expect_success "can remove file from root $EXTRA" '
        ipfs files rm /file2
     '
 
-    test_expect_success "file no longer appears" '
+    test_expect_success "file no longer appears $EXTRA" '
        verify_dir_contents / cats
     '
 
-    test_expect_success "check root hash" '
+    test_expect_success "check root hash $EXTRA" '
        ipfs files stat --hash / > roothash
     '
 
-    test_expect_success "cannot remove root" '
+    test_expect_success "cannot remove root $EXTRA" '
        test_expect_code 1 ipfs files rm -r /
     '
 
-    test_expect_success "check root hash was not changed" '
+    test_expect_success "check root hash was not changed $EXTRA" '
        ipfs files stat --hash / > roothashafter &&
        test_cmp roothash roothashafter
     '
 
    # test read options
 
-    test_expect_success "read from offset works" '
+    test_expect_success "read from offset works $EXTRA" '
        ipfs files read -o 1 /cats/file1 > output
     '
 
-    test_expect_success "output looks good" '
+    test_expect_success "output looks good $EXTRA" '
        echo oo > expected &&
        test_cmp expected output
     '
 
-    test_expect_success "read with size works" '
+    test_expect_success "read with size works $EXTRA" '
        ipfs files read -n 2 /cats/file1 > output
     '
 
-    test_expect_success "output looks good" '
+    test_expect_success "output looks good $EXTRA" '
        printf fo > expected &&
        test_cmp expected output
     '
 
-    test_expect_success "cannot read from negative offset" '
+    test_expect_success "cannot read from negative offset $EXTRA" '
        test_expect_code 1 ipfs files read --offset -3 /cats/file1
     '
 
-    test_expect_success "read from offset 0 works" '
+    test_expect_success "read from offset 0 works $EXTRA" '
        ipfs files read --offset 0 /cats/file1 > output
     '
 
-    test_expect_success "output looks good" '
+    test_expect_success "output looks good $EXTRA" '
        echo foo > expected &&
        test_cmp expected file1out
|
test_cmp expected output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "read last byte works" '
|
test_expect_success "read last byte works $EXTRA" '
|
||||||
ipfs files read --offset 2 /cats/file1 > output
|
ipfs files read --offset 2 /cats/file1 > output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "output looks good" '
|
test_expect_success "output looks good $EXTRA" '
|
||||||
echo o > expected &&
|
echo o > expected &&
|
||||||
test_cmp expected output
|
test_cmp expected output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "offset past end of file fails" '
|
test_expect_success "offset past end of file fails $EXTRA" '
|
||||||
test_expect_code 1 ipfs files read --offset 5 /cats/file1
|
test_expect_code 1 ipfs files read --offset 5 /cats/file1
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "cannot read negative count bytes" '
|
test_expect_success "cannot read negative count bytes $EXTRA" '
|
||||||
test_expect_code 1 ipfs read --count -1 /cats/file1
|
test_expect_code 1 ipfs read --count -1 /cats/file1
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "reading zero bytes prints nothing" '
|
test_expect_success "reading zero bytes prints nothing $EXTRA" '
|
||||||
ipfs files read --count 0 /cats/file1 > output
|
ipfs files read --count 0 /cats/file1 > output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "output looks good" '
|
test_expect_success "output looks good $EXTRA" '
|
||||||
printf "" > expected &&
|
printf "" > expected &&
|
||||||
test_cmp expected output
|
test_cmp expected output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "count > len(file) prints entire file" '
|
test_expect_success "count > len(file) prints entire file $EXTRA" '
|
||||||
ipfs files read --count 200 /cats/file1 > output
|
ipfs files read --count 200 /cats/file1 > output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "output looks good" '
|
test_expect_success "output looks good $EXTRA" '
|
||||||
echo foo > expected &&
|
echo foo > expected &&
|
||||||
test_cmp expected output
|
test_cmp expected output
|
||||||
'
|
'
|
||||||
|
|
||||||
# test write
|
# test write
|
||||||
|
|
||||||
test_expect_success "can write file" '
|
test_expect_success "can write file $EXTRA" '
|
||||||
echo "ipfs rocks" > tmpfile &&
|
echo "ipfs rocks" > tmpfile &&
|
||||||
cat tmpfile | ipfs files write --create /cats/ipfs
|
cat tmpfile | ipfs files write $ARGS $RAW_LEAVES --create /cats/ipfs
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "file was created" '
|
test_expect_success "file was created $EXTRA" '
|
||||||
verify_dir_contents /cats ipfs file1 this
|
verify_dir_contents /cats ipfs file1 this
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "can read file we just wrote" '
|
test_expect_success "can read file we just wrote $EXTRA" '
|
||||||
ipfs files read /cats/ipfs > output
|
ipfs files read /cats/ipfs > output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "can write to offset" '
|
test_expect_success "can write to offset $EXTRA" '
|
||||||
echo "is super cool" | ipfs files write -o 5 /cats/ipfs
|
echo "is super cool" | ipfs files write $ARGS $RAW_LEAVES -o 5 /cats/ipfs
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "file looks correct" '
|
test_expect_success "file looks correct $EXTRA" '
|
||||||
echo "ipfs is super cool" > expected &&
|
echo "ipfs is super cool" > expected &&
|
||||||
ipfs files read /cats/ipfs > output &&
|
ipfs files read /cats/ipfs > output &&
|
||||||
test_cmp expected output
|
test_cmp expected output
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "cant write to negative offset" '
|
test_expect_success "file hash correct $EXTRA" '
|
||||||
|
echo $FILE_HASH > filehash_expected &&
|
||||||
ipfs files stat --hash /cats/ipfs > filehash &&
|
ipfs files stat --hash /cats/ipfs > filehash &&
|
||||||
test_expect_code 1 ipfs files write --offset -1 /cats/ipfs < output
|
test_cmp filehash_expected filehash
|
||||||
'
|
'
|
||||||
|
|
||||||
test_expect_success "verify file was not changed" '
|
test_expect_success "cant write to negative offset $EXTRA" '
|
||||||
|
test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES --offset -1 /cats/ipfs < output
|
||||||
|
'
|
||||||
|
|
||||||
|
test_expect_success "verify file was not changed $EXTRA" '
|
||||||
ipfs files stat --hash /cats/ipfs > afterhash &&
|
ipfs files stat --hash /cats/ipfs > afterhash &&
|
||||||
test_cmp filehash afterhash
|
test_cmp filehash afterhash
|
||||||
'
|
'
|
||||||

-test_expect_success "write new file for testing" '
-  echo foobar | ipfs files write --create /fun
+test_expect_success "write new file for testing $EXTRA" '
+  echo foobar | ipfs files write $ARGS $RAW_LEAVES --create /fun
 '

-test_expect_success "write to offset past end works" '
-  echo blah | ipfs files write --offset 50 /fun
+test_expect_success "write to offset past end works $EXTRA" '
+  echo blah | ipfs files write $ARGS $RAW_LEAVES --offset 50 /fun
 '

-test_expect_success "can read file" '
+test_expect_success "can read file $EXTRA" '
   ipfs files read /fun > sparse_output
 '

-test_expect_success "output looks good" '
+test_expect_success "output looks good $EXTRA" '
   echo foobar > sparse_expected &&
   echo blah | dd of=sparse_expected bs=50 seek=1 &&
   test_cmp sparse_expected sparse_output
 '
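The sparse case above relies on `ipfs files write --offset 50` zero-filling the gap between the old end of /fun and the write offset, which is exactly what the `dd ... bs=50 seek=1` line reproduces when building the expected bytes. A minimal Go sketch of that layout (standard library only; the helper name is illustrative, not part of the repo):

    package main

    import (
        "bytes"
        "fmt"
    )

    // sparseExpected mirrors `echo blah | dd of=sparse_expected bs=50 seek=1`:
    // keep the original bytes at the front, zero-fill up to the seek offset,
    // then lay the new data past the hole.
    func sparseExpected(orig, patch []byte, seekBlocks, blockSize int) []byte {
        out := make([]byte, seekBlocks*blockSize) // zero-filled up to the offset
        copy(out, orig)                           // original file at the front
        return append(out, patch...)              // patch written past the hole
    }

    func main() {
        exp := sparseExpected([]byte("foobar\n"), []byte("blah\n"), 1, 50)
        fmt.Println(len(exp), bytes.Count(exp, []byte{0})) // 55 bytes, 43 zeros
    }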

-test_expect_success "cleanup" '
+test_expect_success "cleanup $EXTRA" '
   ipfs files rm /fun
 '

-test_expect_success "cannot write to directory" '
+test_expect_success "cannot write to directory $EXTRA" '
   ipfs files stat --hash /cats > dirhash &&
-  test_expect_code 1 ipfs files write /cats < output
+  test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES /cats < output
 '

-test_expect_success "verify dir was not changed" '
+test_expect_success "verify dir was not changed $EXTRA" '
   ipfs files stat --hash /cats > afterdirhash &&
   test_cmp dirhash afterdirhash
 '

-test_expect_success "cannot write to nonexistant path" '
-  test_expect_code 1 ipfs files write /cats/bar/ < output
+test_expect_success "cannot write to nonexistant path $EXTRA" '
+  test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES /cats/bar/ < output
 '

-test_expect_success "no new paths were created" '
+test_expect_success "no new paths were created $EXTRA" '
   verify_dir_contents /cats file1 ipfs this
 '

-test_expect_success "write 'no-flush' succeeds" '
-  echo "testing" | ipfs files write -f=false -e /cats/walrus
+test_expect_success "write 'no-flush' succeeds $EXTRA" '
+  echo "testing" | ipfs files write $ARGS $RAW_LEAVES -f=false -e /cats/walrus
 '

-test_expect_success "root hash not bubbled up yet" '
+test_expect_success "root hash not bubbled up yet $EXTRA" '
   test -z "$ONLINE" ||
   (ipfs refs local > refsout &&
   test_expect_code 1 grep $ROOT_HASH refsout)
 '

-test_expect_success "changes bubbled up to root on inspection" '
+test_expect_success "changes bubbled up to root on inspection $EXTRA" '
   ipfs files stat --hash / > root_hash
 '

-test_expect_success "root hash looks good" '
+test_expect_success "root hash looks good $EXTRA" '
   export EXP_ROOT_HASH="$ROOT_HASH" &&
   echo $EXP_ROOT_HASH > root_hash_exp &&
   test_cmp root_hash_exp root_hash
 '

-test_expect_success "flush root succeeds" '
+test_expect_success "/cats hash looks good $EXTRA" '
+  export EXP_CATS_HASH="$CATS_HASH" &&
+  echo $EXP_CATS_HASH > cats_hash_exp &&
+  ipfs files stat --hash /cats > cats_hash
+  test_cmp cats_hash_exp cats_hash
+'
+
+test_expect_success "flush root succeeds $EXTRA" '
   ipfs files flush /
 '

 # test mv
-test_expect_success "can mv dir" '
+test_expect_success "can mv dir $EXTRA" '
   ipfs files mv /cats/this/is /cats/
 '

-test_expect_success "mv worked" '
+test_expect_success "mv worked $EXTRA" '
   verify_dir_contents /cats file1 ipfs this is walrus &&
   verify_dir_contents /cats/this
 '

-test_expect_success "cleanup, remove 'cats'" '
+test_expect_success "cleanup, remove 'cats' $EXTRA" '
   ipfs files rm -r /cats
 '

-test_expect_success "cleanup looks good" '
+test_expect_success "cleanup looks good $EXTRA" '
   verify_dir_contents /
 '

 # test truncating
-test_expect_success "create a new file" '
-  echo "some content" | ipfs files write --create /cats
+test_expect_success "create a new file $EXTRA" '
+  echo "some content" | ipfs files write $ARGS $RAW_LEAVES --create /cats
 '

-test_expect_success "truncate and write over that file" '
-  echo "fish" | ipfs files write --truncate /cats
+test_expect_success "truncate and write over that file $EXTRA" '
+  echo "fish" | ipfs files write $ARGS $RAW_LEAVES --truncate /cats
 '

-test_expect_success "output looks good" '
+test_expect_success "output looks good $EXTRA" '
   ipfs files read /cats > file_out &&
   echo "fish" > file_exp &&
   test_cmp file_out file_exp
 '

-test_expect_success "cleanup" '
+test_expect_success "file hash correct $EXTRA" '
+  echo $TRUNC_HASH > filehash_expected &&
+  ipfs files stat --hash /cats > filehash &&
+  test_cmp filehash_expected filehash
+'
+
+test_expect_success "cleanup $EXTRA" '
   ipfs files rm /cats
 '

 # test flush flags
-test_expect_success "mkdir --flush works" '
-  ipfs files mkdir --flush --parents /flushed/deep
+test_expect_success "mkdir --flush works $EXTRA" '
+  ipfs files mkdir $ARGS --flush --parents /flushed/deep
 '

-test_expect_success "mkdir --flush works a second time" '
-  ipfs files mkdir --flush --parents /flushed/deep
+test_expect_success "mkdir --flush works a second time $EXTRA" '
+  ipfs files mkdir $ARGS --flush --parents /flushed/deep
 '

-test_expect_success "dir looks right" '
+test_expect_success "dir looks right $EXTRA" '
   verify_dir_contents / flushed
 '

-test_expect_success "child dir looks right" '
+test_expect_success "child dir looks right $EXTRA" '
   verify_dir_contents /flushed deep
 '

-test_expect_success "cleanup" '
+test_expect_success "cleanup $EXTRA" '
   ipfs files rm -r /flushed
 '

-test_expect_success "child dir looks right" '
+test_expect_success "child dir looks right $EXTRA" '
   verify_dir_contents /
 '

 # test for https://github.com/ipfs/go-ipfs/issues/2654
-test_expect_success "create and remove dir" '
-  ipfs files mkdir /test_dir &&
+test_expect_success "create and remove dir $EXTRA" '
+  ipfs files mkdir $ARGS /test_dir &&
   ipfs files rm -r "/test_dir"
 '

-test_expect_success "create test file" '
-  echo "content" | ipfs files write -e "/test_file"
+test_expect_success "create test file $EXTRA" '
+  echo "content" | ipfs files write $ARGS $RAW_LEAVES -e "/test_file"
 '

-test_expect_success "copy test file onto test dir" '
+test_expect_success "copy test file onto test dir $EXTRA" '
   ipfs files cp "/test_file" "/test_dir"
 '

-test_expect_success "test /test_dir" '
+test_expect_success "test /test_dir $EXTRA" '
   ipfs files stat "/test_dir" | grep -q "^Type: file"
 '

-test_expect_success "clean up /test_dir and /test_file" '
+test_expect_success "clean up /test_dir and /test_file $EXTRA" '
   ipfs files rm -r /test_dir &&
   ipfs files rm -r /test_file
 '

-test_expect_success "make a directory and a file" '
-  ipfs files mkdir /adir &&
-  echo "blah" | ipfs files write --create /foobar
+test_expect_success "make a directory and a file $EXTRA" '
+  ipfs files mkdir $ARGS /adir &&
+  echo "blah" | ipfs files write $ARGS $RAW_LEAVES --create /foobar
 '

-test_expect_success "copy a file into a directory" '
+test_expect_success "copy a file into a directory $EXTRA" '
   ipfs files cp /foobar /adir/
 '

-test_expect_success "file made it into directory" '
+test_expect_success "file made it into directory $EXTRA" '
   ipfs files ls /adir | grep foobar
 '

-test_expect_success "clean up" '
+test_expect_success "clean up $EXTRA" '
   ipfs files rm -r /foobar &&
   ipfs files rm -r /adir
 '

-test_expect_success "root mfs entry is empty" '
+test_expect_success "root mfs entry is empty $EXTRA" '
   verify_dir_contents /
 '

-test_expect_success "repo gc" '
+test_expect_success "repo gc $EXTRA" '
   ipfs repo gc
 '
 }

 # test offline and online
test_expect_success "can create some files for testing" '
|
|
||||||
create_files
|
|
||||||
'
|
|
||||||
test_files_api QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt
|
|
||||||
|
|
||||||
test_expect_success "can create some files for testing with raw-leaves" '
|
tests_for_files_api() {
|
||||||
create_files --raw-leaves
|
local EXTRA
|
||||||
'
|
EXTRA=$1
|
||||||
test_files_api QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ
|
|
||||||
|
test_expect_success "can create some files for testing ($EXTRA)" '
|
||||||
|
create_files
|
||||||
|
'
|
||||||
|
ROOT_HASH=QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt
|
||||||
|
CATS_HASH=Qma88m8ErTGkZHbBWGqy1C7VmEmX8wwNDWNpGyCaNmEgwC
|
||||||
|
FILE_HASH=QmQdQt9qooenjeaNhiKHF3hBvmNteB4MQBtgu3jxgf9c7i
|
||||||
|
TRUNC_HASH=QmdaQZbLwK5ykweGdCVovNnvBom7QhikovDUVqTPHQG4L8
|
||||||
|
test_files_api "($EXTRA)"
|
||||||
|
|
||||||
|
test_expect_success "can create some files for testing with raw-leaves ($EXTRA)" '
|
||||||
|
create_files --raw-leaves
|
||||||
|
'
|
||||||
|
|
||||||
|
if [ "$EXTRA" = "offline" ]; then
|
||||||
|
ROOT_HASH=QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ
|
||||||
|
CATS_HASH=QmPhPkmtUGGi8ySPHoPu1qbfryLJKKq1GYxpgLyyCruvGe
|
||||||
|
test_files_api "($EXTRA, partial raw-leaves)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
ROOT_HASH=QmW3dMSU6VNd1mEdpk9S3ZYRuR1YwwoXjGaZhkyK6ru9YU
|
||||||
|
CATS_HASH=QmPqWDEg7NoWRX8Y4vvYjZtmdg5umbfsTQ9zwNr12JoLmt
|
||||||
|
FILE_HASH=QmRCgHeoKxCqK2Es6M6nPUDVWz19yNQPnsXGsXeuTkSKpN
|
||||||
|
TRUNC_HASH=QmRFJEKWF5A5FyFYZgNhusLw2UziW9zBKYr4huyHjzcB6o
|
||||||
|
test_files_api "($EXTRA, raw-leaves)" '' --raw-leaves
|
||||||
|
|
||||||
|
ROOT_HASH=QmageRWxC7wWjPv5p36NeAgBAiFdBHaNfxAehBSwzNech2
|
||||||
|
CATS_HASH=zdj7WkEzPLNAr5TYJSQC8CFcBjLvWFfGdx6kaBrJXnBguwWeX
|
||||||
|
FILE_HASH=zdj7WYHvf5sBRgSBjYnq64QFr449CCbgupXfBvoYL3aHC1DzJ
|
||||||
|
TRUNC_HASH=zdj7WYLYbka6Ydg8gZUJRLKnFBVehCADhQKBsFbNiMxZSB5Gj
|
||||||
|
if [ "$EXTRA" = "offline" ]; then
|
||||||
|
test_files_api "($EXTRA, cidv1)" --cid-version=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
test_expect_success "can update root hash to cidv1" '
|
||||||
|
ipfs files chcid --cid-version=1 / &&
|
||||||
|
echo zdj7WbTaiJT1fgatdet9Ei9iDB5hdCxkbVyhyh8YTUnXMiwYi > hash_expect &&
|
||||||
|
ipfs files stat --hash / > hash_actual &&
|
||||||
|
test_cmp hash_expect hash_actual
|
||||||
|
'
|
||||||
|
|
||||||
|
ROOT_HASH=zdj7Whmtnx23bR7c7E1Yn3zWYWjnvT4tpzWYGaBMyqcopDWrx
|
||||||
|
test_files_api "($EXTRA, cidv1 root)"
|
||||||
|
|
||||||
|
if [ "$EXTRA" = "offline" ]; then
|
||||||
|
test_expect_success "can update root hash to blake2b-256" '
|
||||||
|
ipfs files chcid --hash=blake2b-256 / &&
|
||||||
|
echo zDMZof1kvswQMT8txrmnb3JGBuna6qXCTry6hSifrkZEd6VmHbBm > hash_expect &&
|
||||||
|
ipfs files stat --hash / > hash_actual &&
|
||||||
|
test_cmp hash_expect hash_actual
|
||||||
|
'
|
||||||
|
ROOT_HASH=zDMZof1kxEsAwSgCZsGQRVcHCMtHLjkUQoiZUbZ87erpPQJGUeW8
|
||||||
|
CATS_HASH=zDMZof1kuAhr3zBkxq48V7o9HJZCTVyu1Wd9wnZtVcPJLW8xnGft
|
||||||
|
FILE_HASH=zDMZof1kxbB9CvxgRioBzESbGnZUxtSCsZ18H1EUkxDdWt1DYEkK
|
||||||
|
TRUNC_HASH=zDMZof1kxXqKdVsVo231qVdN3hCTF5a34UuQZpzmm5K7CbRJ4u2S
|
||||||
|
test_files_api "($EXTRA, blake2b-256 root)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
test_expect_success "can update root hash back to cidv0" '
|
||||||
|
ipfs files chcid / --cid-version=0 &&
|
||||||
|
echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn > hash_expect &&
|
||||||
|
ipfs files stat --hash / > hash_actual &&
|
||||||
|
test_cmp hash_expect hash_actual
|
||||||
|
'
|
||||||
|
}
|
||||||
|
|
||||||
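The chcid round-trips above only re-encode the root's identifier: `--cid-version=1` keeps the same dag-pb block bytes and the same sha2-256 multihash, which is why converting back with `--cid-version=0` lands on the familiar QmUNLL... empty-directory hash. A sketch of that equivalence using go-cid (modern import path assumed; this tree vendors it under gx):

    package main

    import (
        "fmt"

        cid "github.com/ipfs/go-cid" // assumption: vendored under gx here
    )

    func main() {
        // A CIDv0 is implicitly dag-pb + sha2-256, printed in base58.
        v0, err := cid.Decode("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn")
        if err != nil {
            panic(err)
        }

        // chcid keeps the block bytes (and so the multihash) and only
        // re-encodes the identifier at the requested CID version.
        v1 := cid.NewCidV1(cid.DagProtobuf, v0.Hash())
        fmt.Println(v0) // Qm... (v0)
        fmt.Println(v1) // same node under a v1, multibase-prefixed name
    }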
+tests_for_files_api "online"

 test_launch_ipfs_daemon --offline

 ONLINE=1 # set online flag so tests can easily tell

-test_expect_success "can create some files for testing" '
-  create_files
-'
-
-test_files_api QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt
-
-test_expect_success "can create some files for testing with raw-leaves" '
-  create_files --raw-leaves
-'
-
-test_files_api QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ
+tests_for_files_api "offline"

 test_kill_ipfs_daemon --offline
@@ -569,7 +666,13 @@ test_expect_success "enable sharding in config" '
 '

 test_launch_ipfs_daemon --offline
-test_sharding
+
+SHARD_HASH=QmPkwLJTYZRGPJ8Lazr9qPdrLmswPtUjaDbEpmR9jEh1se
+test_sharding "(cidv0)"
+
+SHARD_HASH=zdj7WZXr6vG2Ne7ZLHGEKrGyF3pHBfAViEnmH9CoyvjrFQM8E
+test_sharding "(cidv1 root)" "--cid-version=1"

 test_kill_ipfs_daemon
 test_done

@@ -75,8 +75,8 @@ test_add_large_dir_v1() {
 '
 }

-# this hash implies both the directory and the leaf entries are CIDv1
-SHARDEDV1="zdj7WX91spg4DsnNpvoBLjyjXUGgcTTWavygBbSifpmJdgPUA"
+# this hash implies the directory is CIDv1 and leaf entries are CIDv1 and raw
+SHARDEDV1="zdj7WY8aNcxF49q1ZpFXfchNmbswnUxiVDVjmrHb53xRM8W4C"
 test_add_large_dir_v1 "$SHARDEDV1"
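The reworded comment is the point of the hash change: with raw leaves, a leaf block is the file bytes themselves, addressed with the raw codec instead of being wrapped in a unixfs dag-pb envelope, so every leaf CID (and therefore the sharded directory root) changes. A sketch of how such a leaf CID is formed (go-cid and go-multihash import paths assumed; vendored under gx in this tree):

    package main

    import (
        "fmt"

        cid "github.com/ipfs/go-cid"
        mh "github.com/multiformats/go-multihash"
    )

    func main() {
        data := []byte("file contents")

        // With --raw-leaves the leaf block *is* the file data; there is no
        // unixfs protobuf envelope to hash.
        h, err := mh.Sum(data, mh.SHA2_256, -1)
        if err != nil {
            panic(err)
        }

        // The leaf is addressed as CIDv1 with the "raw" codec, which is why
        // every leaf (and the sharded root above) hashes differently.
        fmt.Println(cid.NewCidV1(cid.Raw, h))
    }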

 test_launch_ipfs_daemon
@@ -121,6 +121,7 @@ func NewHamtFromDag(dserv dag.DAGService, nd node.Node) (*HamtShard, error) {
 	ds.children = make([]child, len(pbnd.Links()))
 	ds.bitfield = new(big.Int).SetBytes(pbd.GetData())
 	ds.hashFunc = pbd.GetHashType()
+	ds.prefix = &ds.nd.Prefix

 	return ds, nil
 }
@@ -130,6 +131,11 @@ func (ds *HamtShard) SetPrefix(prefix *cid.Prefix) {
 	ds.prefix = prefix
 }

+// Prefix gets the CID Prefix, may be nil if unset
+func (ds *HamtShard) Prefix() *cid.Prefix {
+	return ds.prefix
+}
+
 // Node serializes the HAMT structure into a merkledag node with unixfs formatting
 func (ds *HamtShard) Node() (node.Node, error) {
 	out := new(dag.ProtoNode)
@@ -500,6 +506,7 @@ func (ds *HamtShard) modifyValue(ctx context.Context, hv *hashBits, key string,
 		if err != nil {
 			return err
 		}
+		ns.prefix = ds.prefix
 		chhv := &hashBits{
 			b:        hash([]byte(child.key)),
 			consumed: hv.consumed,
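Both HAMT hunks plumb the prefix through the shard tree: a shard remembers the prefix it was loaded with, and modifyValue copies it onto every child shard it creates, so a CIDv1 (or blake2b) directory stays uniform as it grows. A reduced sketch of that inheritance pattern — the types here are illustrative, not the repo's actual internals:

    // Illustrative types only, not the repo's HamtShard.
    package hamtsketch

    import cid "github.com/ipfs/go-cid"

    type shard struct {
        prefix   *cid.Prefix
        children []*shard
    }

    // newChild mirrors what modifyValue does above: a shard created while
    // inserting a key copies its parent's prefix, so the whole HAMT keeps
    // one CID version and hash function as it grows.
    func (s *shard) newChild() *shard {
        ns := &shard{prefix: s.prefix}
        s.children = append(s.children, ns)
        return ns
    }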
@@ -17,7 +17,7 @@ import (

 func TestBasicRead(t *testing.T) {
 	dserv := testu.GetDAGServ()
-	inbuf, node := testu.GetRandomNode(t, dserv, 1024)
+	inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.UseProtoBufLeaves)
 	ctx, closer := context.WithCancel(context.Background())
 	defer closer()

@@ -44,7 +44,7 @@ func TestSeekAndRead(t *testing.T) {
 		inbuf[i] = byte(i)
 	}

-	node := testu.GetNode(t, dserv, inbuf)
+	node := testu.GetNode(t, dserv, inbuf, testu.UseProtoBufLeaves)
 	ctx, closer := context.WithCancel(context.Background())
 	defer closer()

@@ -84,7 +84,7 @@ func TestRelativeSeek(t *testing.T) {
 	}

 	inbuf[1023] = 1 // force the reader to be 1024 bytes
-	node := testu.GetNode(t, dserv, inbuf)
+	node := testu.GetNode(t, dserv, inbuf, testu.UseProtoBufLeaves)

 	reader, err := NewDagReader(ctx, node, dserv)
 	if err != nil {
@@ -160,7 +160,7 @@ func TestBadPBData(t *testing.T) {

 func TestMetadataNode(t *testing.T) {
 	dserv := testu.GetDAGServ()
-	rdata, rnode := testu.GetRandomNode(t, dserv, 512)
+	rdata, rnode := testu.GetRandomNode(t, dserv, 512, testu.UseProtoBufLeaves)
 	_, err := dserv.Add(rnode)
 	if err != nil {
 		t.Fatal(err)
@@ -203,7 +203,7 @@ func TestMetadataNode(t *testing.T) {

 func TestWriteTo(t *testing.T) {
 	dserv := testu.GetDAGServ()
-	inbuf, node := testu.GetRandomNode(t, dserv, 1024)
+	inbuf, node := testu.GetRandomNode(t, dserv, 1024, testu.UseProtoBufLeaves)
 	ctx, closer := context.WithCancel(context.Background())
 	defer closer()

@@ -225,7 +225,7 @@ func TestWriteTo(t *testing.T) {
 func TestReaderSzie(t *testing.T) {
 	dserv := testu.GetDAGServ()
 	size := int64(1024)
-	_, node := testu.GetRandomNode(t, dserv, size)
+	_, node := testu.GetRandomNode(t, dserv, size, testu.UseProtoBufLeaves)
 	ctx, closer := context.WithCancel(context.Background())
 	defer closer()
@@ -115,6 +115,7 @@ func (d *Directory) switchToSharding(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
+	s.SetPrefix(&d.dirnode.Prefix)

 	d.shard = s
 	for _, lnk := range d.dirnode.Links() {
@@ -192,3 +193,12 @@ func (d *Directory) GetNode() (node.Node, error) {

 	return d.shard.Node()
 }
+
+// GetPrefix returns the CID Prefix used
+func (d *Directory) GetPrefix() *cid.Prefix {
+	if d.shard == nil {
+		return &d.dirnode.Prefix
+	}
+
+	return d.shard.Prefix()
+}
@@ -40,11 +40,18 @@ type DagModifier struct {
 	curWrOff uint64
 	wrBuf    *bytes.Buffer

+	Prefix    cid.Prefix
+	RawLeaves bool
+
 	read uio.DagReader
 }

 var ErrNotUnixfs = fmt.Errorf("dagmodifier only supports unixfs nodes (proto or raw)")

+// NewDagModifier returns a new DagModifier, the Cid prefix for newly
+// created nodes will be inherited from the passed in node. If the Cid
+// version is not 0 raw leaves will also be enabled. The Prefix and
+// RawLeaves options can be overridden by changing them after the call.
 func NewDagModifier(ctx context.Context, from node.Node, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) {
 	switch from.(type) {
 	case *mdag.ProtoNode, *mdag.RawNode:
@@ -53,11 +60,20 @@ func NewDagModifier(ctx context.Context, from node.Node, serv mdag.DAGService, s
 		return nil, ErrNotUnixfs
 	}

+	prefix := from.Cid().Prefix()
+	prefix.Codec = cid.DagProtobuf
+	rawLeaves := false
+	if prefix.Version > 0 {
+		rawLeaves = true
+	}
+
 	return &DagModifier{
 		curNode:  from.Copy(),
 		dagserv:  serv,
 		splitter: spl,
 		ctx:      ctx,
+		Prefix:    prefix,
+		RawLeaves: rawLeaves,
 	}, nil
 }
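Because Prefix and RawLeaves are exported fields, callers can take the inherited defaults or override them between construction and the first write. A sketch of a call site as it would look inside this package — the helper is hypothetical, while chunk.DefaultSplitter is the stock SplitterGen in this tree:

    // Hypothetical call site, not repo code.
    func newV1Modifier(ctx context.Context, from node.Node, serv mdag.DAGService) (*DagModifier, error) {
        dm, err := NewDagModifier(ctx, from, serv, chunk.DefaultSplitter)
        if err != nil {
            return nil, err
        }
        // The constructor inherited Prefix from `from` and enabled RawLeaves
        // for v1 CIDs; both are plain exported fields, so they can be
        // adjusted before the first Write.
        dm.Prefix.Version = 1
        dm.RawLeaves = true
        return dm, nil
    }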
@@ -113,17 +129,7 @@ func (dm *DagModifier) expandSparse(size int64) error {
 		return err
 	}
 	_, err = dm.dagserv.Add(nnode)
-	if err != nil {
-		return err
-	}
-
-	pbnnode, ok := nnode.(*mdag.ProtoNode)
-	if !ok {
-		return mdag.ErrNotProtobuf
-	}
-
-	dm.curNode = pbnnode
-	return nil
+	return err
 }
 // Write continues writing to the dag at the current offset
@@ -149,26 +155,28 @@ func (dm *DagModifier) Write(b []byte) (int, error) {
 	return n, nil
 }

-var ErrNoRawYet = fmt.Errorf("currently only fully support protonodes in the dagmodifier")
-
 // Size returns the Filesize of the node
 func (dm *DagModifier) Size() (int64, error) {
-	switch nd := dm.curNode.(type) {
+	fileSize, err := fileSize(dm.curNode)
+	if err != nil {
+		return 0, err
+	}
+	if dm.wrBuf != nil && int64(dm.wrBuf.Len())+int64(dm.writeStart) > int64(fileSize) {
+		return int64(dm.wrBuf.Len()) + int64(dm.writeStart), nil
+	}
+	return int64(fileSize), nil
+}
+
+func fileSize(n node.Node) (uint64, error) {
+	switch nd := n.(type) {
 	case *mdag.ProtoNode:
-		pbn, err := ft.FromBytes(nd.Data())
+		f, err := ft.FromBytes(nd.Data())
 		if err != nil {
 			return 0, err
 		}
-		if dm.wrBuf != nil && uint64(dm.wrBuf.Len())+dm.writeStart > pbn.GetFilesize() {
-			return int64(dm.wrBuf.Len()) + int64(dm.writeStart), nil
-		}
-		return int64(pbn.GetFilesize()), nil
+		return f.GetFilesize(), nil
 	case *mdag.RawNode:
-		if dm.wrBuf != nil {
-			return 0, ErrNoRawYet
-		}
-		sz, err := nd.Size()
-		return int64(sz), err
+		return uint64(len(nd.RawData())), nil
 	default:
 		return 0, ErrNotUnixfs
 	}
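With fileSize factored out, Size reduces to one rule: report the unixfs (or raw) size, unless a buffered write runs past it. A self-contained sketch of that rule with two worked cases (helper name illustrative):

    package main

    import "fmt"

    // reportedSize is the rule Size() implements once writes are buffered:
    // the file size, extended by any pending write that runs past the end.
    func reportedSize(fileSize, writeStart, buffered int64) int64 {
        if end := writeStart + buffered; end > fileSize {
            return end
        }
        return fileSize
    }

    func main() {
        fmt.Println(reportedSize(100, 90, 20)) // 110: the write extends the file
        fmt.Println(reportedSize(100, 10, 20)) // 100: an interior overwrite does not
    }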
@@ -196,36 +204,22 @@ func (dm *DagModifier) Sync() error {
 		return err
 	}

-	nd, err := dm.dagserv.Get(dm.ctx, thisc)
+	dm.curNode, err = dm.dagserv.Get(dm.ctx, thisc)
 	if err != nil {
 		return err
 	}

-	pbnd, ok := nd.(*mdag.ProtoNode)
-	if !ok {
-		return mdag.ErrNotProtobuf
-	}
-
-	dm.curNode = pbnd
-
 	// need to write past end of current dag
 	if !done {
-		nd, err := dm.appendData(dm.curNode, dm.splitter(dm.wrBuf))
+		dm.curNode, err = dm.appendData(dm.curNode, dm.splitter(dm.wrBuf))
 		if err != nil {
 			return err
 		}

-		_, err = dm.dagserv.Add(nd)
+		_, err = dm.dagserv.Add(dm.curNode)
 		if err != nil {
 			return err
 		}
-
-		pbnode, ok := nd.(*mdag.ProtoNode)
-		if !ok {
-			return mdag.ErrNotProtobuf
-		}
-
-		dm.curNode = pbnode
 	}

 	dm.writeStart += uint64(buflen)
@@ -238,9 +232,82 @@ func (dm *DagModifier) Sync() error {
 // returns the new key of the passed in node and whether or not all the data in the reader
 // has been consumed.
 func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
+	// If we've reached a leaf node.
+	if len(n.Links()) == 0 {
+		switch nd0 := n.(type) {
+		case *mdag.ProtoNode:
+			f, err := ft.FromBytes(nd0.Data())
+			if err != nil {
+				return nil, false, err
+			}
+
+			n, err := data.Read(f.Data[offset:])
+			if err != nil && err != io.EOF {
+				return nil, false, err
+			}
+
+			// Update newly written node..
+			b, err := proto.Marshal(f)
+			if err != nil {
+				return nil, false, err
+			}
+
+			nd := new(mdag.ProtoNode)
+			nd.SetData(b)
+			nd.SetPrefix(&nd0.Prefix)
+			k, err := dm.dagserv.Add(nd)
+			if err != nil {
+				return nil, false, err
+			}
+
+			// Hey look! we're done!
+			var done bool
+			if n < len(f.Data[offset:]) {
+				done = true
+			}
+
+			return k, done, nil
+		case *mdag.RawNode:
+			origData := nd0.RawData()
+			bytes := make([]byte, len(origData))
+
+			// copy orig data up to offset
+			copy(bytes, origData[:offset])
+
+			// copy in new data
+			n, err := data.Read(bytes[offset:])
+			if err != nil && err != io.EOF {
+				return nil, false, err
+			}
+
+			// copy remaining data
+			offsetPlusN := int(offset) + n
+			if offsetPlusN < len(origData) {
+				copy(bytes[offsetPlusN:], origData[offsetPlusN:])
+			}
+
+			nd, err := mdag.NewRawNodeWPrefix(bytes, nd0.Cid().Prefix())
+			if err != nil {
+				return nil, false, err
+			}
+			k, err := dm.dagserv.Add(nd)
+			if err != nil {
+				return nil, false, err
+			}
+
+			// Hey look! we're done!
+			var done bool
+			if n < len(bytes[offset:]) {
+				done = true
+			}
+
+			return k, done, nil
+		}
+	}
+
 	node, ok := n.(*mdag.ProtoNode)
 	if !ok {
-		return nil, false, ErrNoRawYet
+		return nil, false, ErrNotUnixfs
 	}

 	f, err := ft.FromBytes(node.Data())
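The new *mdag.RawNode branch is a plain three-part copy: bytes before the offset are kept, the reader's data is laid over the middle, and any original tail beyond what was read survives. The same discipline in isolation (standard library only; the helper name is illustrative):

    package main

    import "fmt"

    // overwrite applies the copy discipline of the RawNode branch above:
    // keep bytes before the offset, lay the patch over the middle, keep any
    // original tail the patch did not reach. The block length never changes.
    func overwrite(orig []byte, offset int, patch []byte) []byte {
        out := make([]byte, len(orig))
        copy(out, orig[:offset])
        n := copy(out[offset:], patch)
        if offset+n < len(orig) {
            copy(out[offset+n:], orig[offset+n:])
        }
        return out
    }

    func main() {
        fmt.Printf("%s\n", overwrite([]byte("ipfs rocks"), 5, []byte("is co"))) // ipfs is co
    }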
@@ -248,35 +315,6 @@ func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*c
 		return nil, false, err
 	}

-	// If we've reached a leaf node.
-	if len(node.Links()) == 0 {
-		n, err := data.Read(f.Data[offset:])
-		if err != nil && err != io.EOF {
-			return nil, false, err
-		}
-
-		// Update newly written node..
-		b, err := proto.Marshal(f)
-		if err != nil {
-			return nil, false, err
-		}
-
-		nd := new(mdag.ProtoNode)
-		nd.SetData(b)
-		k, err := dm.dagserv.Add(nd)
-		if err != nil {
-			return nil, false, err
-		}
-
-		// Hey look! we're done!
-		var done bool
-		if n < len(f.Data[offset:]) {
-			done = true
-		}
-
-		return k, done, nil
-	}
-
 	var cur uint64
 	var done bool
 	for i, bs := range f.GetBlocksizes() {
@@ -287,12 +325,7 @@ func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*c
 			return nil, false, err
 		}

-		childpb, ok := child.(*mdag.ProtoNode)
-		if !ok {
-			return nil, false, mdag.ErrNotProtobuf
-		}
-
-		k, sdone, err := dm.modifyDag(childpb, offset-cur, data)
+		k, sdone, err := dm.modifyDag(child, offset-cur, data)
 		if err != nil {
 			return nil, false, err
 		}
@@ -323,14 +356,14 @@ func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*c
 // appendData appends the blocks from the given chan to the end of this dag
 func (dm *DagModifier) appendData(nd node.Node, spl chunk.Splitter) (node.Node, error) {
 	switch nd := nd.(type) {
-	case *mdag.ProtoNode:
+	case *mdag.ProtoNode, *mdag.RawNode:
 		dbp := &help.DagBuilderParams{
 			Dagserv:   dm.dagserv,
 			Maxlinks:  help.DefaultLinksPerBlock,
+			Prefix:    &dm.Prefix,
+			RawLeaves: dm.RawLeaves,
 		}
 		return trickle.TrickleAppend(dm.ctx, nd, dbp.New(spl))
-	case *mdag.RawNode:
-		return nil, fmt.Errorf("appending to raw node types not yet supported")
 	default:
 		return nil, ErrNotUnixfs
 	}
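Forwarding Prefix and RawLeaves into DagBuilderParams is what keeps appended blocks consistent with the rest of the file. A sketch of building an equivalent configuration by hand for `--cid-version=1 --raw-leaves` — field names come from this diff, while cid.NewPrefixV1 is assumed from go-cid and the helper itself is hypothetical:

    // Hypothetical helper, not repo code.
    func v1RawBuilderParams(dserv mdag.DAGService) *help.DagBuilderParams {
        prefix := cid.NewPrefixV1(cid.DagProtobuf, mh.SHA2_256)
        return &help.DagBuilderParams{
            Dagserv:   dserv,
            Maxlinks:  help.DefaultLinksPerBlock,
            Prefix:    &prefix, // CID version + hash function for new nodes
            RawLeaves: true,    // leaf blocks as raw bytes, not dag-pb
        }
    }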
@@ -478,26 +511,30 @@ func (dm *DagModifier) Truncate(size int64) error {
 }

 // dagTruncate truncates the given node to 'size' and returns the modified Node
-func dagTruncate(ctx context.Context, n node.Node, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) {
-	nd, ok := n.(*mdag.ProtoNode)
-	if !ok {
-		return nil, ErrNoRawYet
+func dagTruncate(ctx context.Context, n node.Node, size uint64, ds mdag.DAGService) (node.Node, error) {
+	if len(n.Links()) == 0 {
+		switch nd := n.(type) {
+		case *mdag.ProtoNode:
+			// TODO: this can likely be done without marshaling and remarshaling
+			pbn, err := ft.FromBytes(nd.Data())
+			if err != nil {
+				return nil, err
+			}
+			nd.SetData(ft.WrapData(pbn.Data[:size]))
+			return nd, nil
+		case *mdag.RawNode:
+			return mdag.NewRawNodeWPrefix(nd.RawData()[:size], nd.Cid().Prefix())
+		}
 	}

-	if len(nd.Links()) == 0 {
-		// TODO: this can likely be done without marshaling and remarshaling
-		pbn, err := ft.FromBytes(nd.Data())
-		if err != nil {
-			return nil, err
-		}
-
-		nd.SetData(ft.WrapData(pbn.Data[:size]))
-		return nd, nil
+	nd, ok := n.(*mdag.ProtoNode)
+	if !ok {
+		return nil, ErrNotUnixfs
 	}

 	var cur uint64
 	end := 0
-	var modified *mdag.ProtoNode
+	var modified node.Node
 	ndata := new(ft.FSNode)
 	for i, lnk := range nd.Links() {
 		child, err := lnk.GetNode(ctx, ds)
@@ -505,19 +542,14 @@ func dagTruncate(ctx context.Context, n node.Node, size uint64, ds mdag.DAGServi
 			return nil, err
 		}

-		childpb, ok := child.(*mdag.ProtoNode)
-		if !ok {
-			return nil, err
-		}
-
-		childsize, err := ft.DataSize(childpb.Data())
+		childsize, err := fileSize(child)
 		if err != nil {
 			return nil, err
 		}

 		// found the child we want to cut
 		if size < cur+childsize {
-			nchild, err := dagTruncate(ctx, childpb, size-cur, ds)
+			nchild, err := dagTruncate(ctx, child, size-cur, ds)
 			if err != nil {
 				return nil, err
 			}
@@ -9,15 +9,14 @@ import (

 	h "github.com/ipfs/go-ipfs/importer/helpers"
 	trickle "github.com/ipfs/go-ipfs/importer/trickle"
-	mdag "github.com/ipfs/go-ipfs/merkledag"
-	ft "github.com/ipfs/go-ipfs/unixfs"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
 	testu "github.com/ipfs/go-ipfs/unixfs/test"

 	u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util"
 )

-func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) []byte {
+func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier, opts testu.NodeOpts) []byte {
 	newdata := make([]byte, size)
 	r := u.NewTimeSeededRand()
 	r.Read(newdata)
@@ -36,12 +35,24 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier)
 		t.Fatalf("Mod length not correct! %d != %d", nmod, size)
 	}

+	verifyNode(t, orig, dm, opts)
+
+	return orig
+}
+
+func verifyNode(t *testing.T, orig []byte, dm *DagModifier, opts testu.NodeOpts) {
 	nd, err := dm.GetNode()
 	if err != nil {
 		t.Fatal(err)
 	}

-	err = trickle.VerifyTrickleDagStructure(nd, dm.dagserv, h.DefaultLinksPerBlock, 4)
+	err = trickle.VerifyTrickleDagStructure(nd, trickle.VerifyParams{
+		Getter:      dm.dagserv,
+		Direct:      h.DefaultLinksPerBlock,
+		LayerRepeat: 4,
+		Prefix:      &opts.Prefix,
+		RawLeaves:   opts.RawLeavesUsed,
+	})
 	if err != nil {
 		t.Fatal(err)
 	}
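VerifyTrickleDagStructure switching from positional arguments to a VerifyParams struct is what lets Prefix and RawLeaves ride along without touching every caller: adding a field to an options struct is backward compatible where adding a positional parameter is not. Roughly the shape of the struct implied by the call above — the field types here are inferred, not confirmed from the source:

    // Inferred sketch of the options struct, not the repo's declaration.
    type VerifyParams struct {
        Getter      node.NodeGetter // service used to resolve child links
        Direct      int             // direct links per trickle-dag block
        LayerRepeat int             // trickle-dag layer repeat factor
        Prefix      *cid.Prefix     // expected CID prefix on every node, if set
        RawLeaves   bool            // expect leaves to be raw blocks
    }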
@@ -60,12 +71,21 @@ func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier)
 	if err != nil {
 		t.Fatal(err)
 	}
-	return orig
+}
+
+func runAllSubtests(t *testing.T, tfunc func(*testing.T, testu.NodeOpts)) {
+	t.Run("opts=ProtoBufLeaves", func(t *testing.T) { tfunc(t, testu.UseProtoBufLeaves) })
+	t.Run("opts=RawLeaves", func(t *testing.T) { tfunc(t, testu.UseRawLeaves) })
+	t.Run("opts=CidV1", func(t *testing.T) { tfunc(t, testu.UseCidV1) })
+	t.Run("opts=Blake2b256", func(t *testing.T) { tfunc(t, testu.UseBlake2b256) })
 }
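runAllSubtests is the standard t.Run fan-out: each testu.NodeOpts preset reruns the same test body under a named subtest, so a single variant can be selected with `go test -run 'TestMultiWrite/opts=RawLeaves'`. The same pattern generalized (a hypothetical variant; the preset names are taken from the diff):

    // Hypothetical generalization of runAllSubtests, not repo code.
    func runMatrix(t *testing.T, body func(*testing.T, testu.NodeOpts)) {
        presets := map[string]testu.NodeOpts{
            "ProtoBufLeaves": testu.UseProtoBufLeaves,
            "RawLeaves":      testu.UseRawLeaves,
            "CidV1":          testu.UseCidV1,
            "Blake2b256":     testu.UseBlake2b256,
        }
        for name, opts := range presets {
            opts := opts // capture per iteration
            t.Run("opts="+name, func(t *testing.T) { body(t, opts) })
        }
    }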
 func TestDagModifierBasic(t *testing.T) {
+	runAllSubtests(t, testDagModifierBasic)
+}
+func testDagModifierBasic(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	b, n := testu.GetRandomNode(t, dserv, 50000)
+	b, n := testu.GetRandomNode(t, dserv, 50000, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

@@ -73,32 +93,35 @@ func TestDagModifierBasic(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}

 	// Within zero block
 	beg := uint64(15)
 	length := uint64(60)

 	t.Log("Testing mod within zero block")
-	b = testModWrite(t, beg, length, b, dagmod)
+	b = testModWrite(t, beg, length, b, dagmod, opts)

 	// Within bounds of existing file
 	beg = 1000
 	length = 4000
 	t.Log("Testing mod within bounds of existing multiblock file.")
-	b = testModWrite(t, beg, length, b, dagmod)
+	b = testModWrite(t, beg, length, b, dagmod, opts)

 	// Extend bounds
 	beg = 49500
 	length = 4000

 	t.Log("Testing mod that extends file.")
-	b = testModWrite(t, beg, length, b, dagmod)
+	b = testModWrite(t, beg, length, b, dagmod, opts)

 	// "Append"
 	beg = uint64(len(b))
 	length = 3000
 	t.Log("Testing pure append")
-	_ = testModWrite(t, beg, length, b, dagmod)
+	_ = testModWrite(t, beg, length, b, dagmod, opts)

 	// Verify reported length
 	node, err := dagmod.GetNode()
@@ -106,7 +129,7 @@ func TestDagModifierBasic(t *testing.T) {
 		t.Fatal(err)
 	}

-	size, err := ft.DataSize(node.(*mdag.ProtoNode).Data())
+	size, err := fileSize(node)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -118,8 +141,11 @@ func TestDagModifierBasic(t *testing.T) {
 }

 func TestMultiWrite(t *testing.T) {
+	runAllSubtests(t, testMultiWrite)
+}
+func testMultiWrite(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)

 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -128,6 +154,9 @@ func TestMultiWrite(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}

 	data := make([]byte, 4000)
 	u.NewTimeSeededRand().Read(data)
@@ -150,29 +179,16 @@ func TestMultiWrite(t *testing.T) {
 			t.Fatal("Size was reported incorrectly")
 		}
 	}
-	nd, err := dagmod.GetNode()
-	if err != nil {
-		t.Fatal(err)
-	}

-	read, err := uio.NewDagReader(context.Background(), nd, dserv)
-	if err != nil {
-		t.Fatal(err)
-	}
-	rbuf, err := ioutil.ReadAll(read)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = testu.ArrComp(rbuf, data)
-	if err != nil {
-		t.Fatal(err)
-	}
+	verifyNode(t, data, dagmod, opts)
 }
 func TestMultiWriteAndFlush(t *testing.T) {
+	runAllSubtests(t, testMultiWriteAndFlush)
+}
+func testMultiWriteAndFlush(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)

 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -181,6 +197,9 @@ func TestMultiWriteAndFlush(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}

 	data := make([]byte, 20)
 	u.NewTimeSeededRand().Read(data)
@@ -198,29 +217,16 @@ func TestMultiWriteAndFlush(t *testing.T) {
 		t.Fatal(err)
 	}
 	}
-	nd, err := dagmod.GetNode()
-	if err != nil {
-		t.Fatal(err)
-	}

-	read, err := uio.NewDagReader(context.Background(), nd, dserv)
-	if err != nil {
-		t.Fatal(err)
-	}
-	rbuf, err := ioutil.ReadAll(read)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = testu.ArrComp(rbuf, data)
-	if err != nil {
-		t.Fatal(err)
-	}
+	verifyNode(t, data, dagmod, opts)
 }

 func TestWriteNewFile(t *testing.T) {
+	runAllSubtests(t, testWriteNewFile)
+}
+func testWriteNewFile(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)

 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -229,6 +235,9 @@ func TestWriteNewFile(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}

 	towrite := make([]byte, 2000)
 	u.NewTimeSeededRand().Read(towrite)
@@ -241,29 +250,15 @@ func TestWriteNewFile(t *testing.T) {
 		t.Fatal("Wrote wrong amount")
 	}

-	nd, err := dagmod.GetNode()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	read, err := uio.NewDagReader(ctx, nd, dserv)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	data, err := ioutil.ReadAll(read)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := testu.ArrComp(data, towrite); err != nil {
-		t.Fatal(err)
-	}
+	verifyNode(t, towrite, dagmod, opts)
 }

 func TestMultiWriteCoal(t *testing.T) {
+	runAllSubtests(t, testMultiWriteCoal)
+}
+func testMultiWriteCoal(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)

 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -272,6 +267,9 @@ func TestMultiWriteCoal(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}

 	data := make([]byte, 1000)
 	u.NewTimeSeededRand().Read(data)
@@ -287,29 +285,16 @@ func TestMultiWriteCoal(t *testing.T) {
 		}

 	}
-	nd, err := dagmod.GetNode()
-	if err != nil {
-		t.Fatal(err)
-	}

-	read, err := uio.NewDagReader(context.Background(), nd, dserv)
-	if err != nil {
-		t.Fatal(err)
-	}
-	rbuf, err := ioutil.ReadAll(read)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = testu.ArrComp(rbuf, data)
-	if err != nil {
-		t.Fatal(err)
-	}
+	verifyNode(t, data, dagmod, opts)
 }
func TestLargeWriteChunks(t *testing.T) {
|
func TestLargeWriteChunks(t *testing.T) {
|
||||||
|
runAllSubtests(t, testLargeWriteChunks)
|
||||||
|
}
|
||||||
|
func testLargeWriteChunks(t *testing.T, opts testu.NodeOpts) {
|
||||||
dserv := testu.GetDAGServ()
|
dserv := testu.GetDAGServ()
|
||||||
n := testu.GetEmptyNode(t, dserv)
|
n := testu.GetEmptyNode(t, dserv, opts)
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
@ -318,6 +303,9 @@ func TestLargeWriteChunks(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	wrsize := 1000
 	datasize := 10000000
@ -343,12 +331,14 @@ func TestLargeWriteChunks(t *testing.T) {
 	if err = testu.ArrComp(out, data); err != nil {
 		t.Fatal(err)
 	}
-
 }
 
 func TestDagTruncate(t *testing.T) {
+	runAllSubtests(t, testDagTruncate)
+}
+func testDagTruncate(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	b, n := testu.GetRandomNode(t, dserv, 50000)
+	b, n := testu.GetRandomNode(t, dserv, 50000, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -356,6 +346,9 @@ func TestDagTruncate(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	err = dagmod.Truncate(12345)
 	if err != nil {
@ -414,8 +407,11 @@ func TestDagTruncate(t *testing.T) {
 }
 
 func TestSparseWrite(t *testing.T) {
+	runAllSubtests(t, testSparseWrite)
+}
+func testSparseWrite(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -423,6 +419,9 @@ func TestSparseWrite(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	buf := make([]byte, 5000)
 	u.NewTimeSeededRand().Read(buf[2500:])
@ -452,8 +451,11 @@ func TestSparseWrite(t *testing.T) {
 }
 
 func TestSeekPastEndWrite(t *testing.T) {
+	runAllSubtests(t, testSeekPastEndWrite)
+}
+func testSeekPastEndWrite(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -461,6 +463,9 @@ func TestSeekPastEndWrite(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	buf := make([]byte, 5000)
 	u.NewTimeSeededRand().Read(buf[2500:])
@ -499,8 +504,11 @@ func TestSeekPastEndWrite(t *testing.T) {
 }
 
 func TestRelativeSeek(t *testing.T) {
+	runAllSubtests(t, testRelativeSeek)
+}
+func testRelativeSeek(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -508,6 +516,9 @@ func TestRelativeSeek(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	for i := 0; i < 64; i++ {
 		dagmod.Write([]byte{byte(i)})
@ -529,8 +540,11 @@ func TestRelativeSeek(t *testing.T) {
 }
 
 func TestInvalidSeek(t *testing.T) {
+	runAllSubtests(t, testInvalidSeek)
+}
+func testInvalidSeek(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -538,6 +552,10 @@ func TestInvalidSeek(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
+
 	_, err = dagmod.Seek(10, -10)
 
 	if err != ErrUnrecognizedWhence {
@ -546,9 +564,12 @@ func TestInvalidSeek(t *testing.T) {
 }
 
 func TestEndSeek(t *testing.T) {
+	runAllSubtests(t, testEndSeek)
+}
+func testEndSeek(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
 
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -556,6 +577,9 @@ func TestEndSeek(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	_, err = dagmod.Write(make([]byte, 100))
 	if err != nil {
@ -588,9 +612,12 @@ func TestEndSeek(t *testing.T) {
 }
 
 func TestReadAndSeek(t *testing.T) {
+	runAllSubtests(t, testReadAndSeek)
+}
+func testReadAndSeek(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
 
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -598,6 +625,9 @@ func TestReadAndSeek(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	writeBuf := []byte{0, 1, 2, 3, 4, 5, 6, 7}
 	dagmod.Write(writeBuf)
@ -656,9 +686,12 @@ func TestReadAndSeek(t *testing.T) {
 }
 
 func TestCtxRead(t *testing.T) {
+	runAllSubtests(t, testCtxRead)
+}
+func testCtxRead(t *testing.T, opts testu.NodeOpts) {
 	dserv := testu.GetDAGServ()
 
-	n := testu.GetEmptyNode(t, dserv)
+	n := testu.GetEmptyNode(t, dserv, opts)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -666,6 +699,9 @@ func TestCtxRead(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	if opts.ForceRawLeaves {
+		dagmod.RawLeaves = true
+	}
 
 	_, err = dagmod.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7})
 	if err != nil {
@ -689,7 +725,7 @@ func TestCtxRead(t *testing.T) {
 func BenchmarkDagmodWrite(b *testing.B) {
 	b.StopTimer()
 	dserv := testu.GetDAGServ()
-	n := testu.GetEmptyNode(b, dserv)
+	n := testu.GetEmptyNode(b, dserv, testu.UseProtoBufLeaves)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@ -8,14 +8,17 @@ import (
 	"io/ioutil"
 	"testing"
 
-	imp "github.com/ipfs/go-ipfs/importer"
 	"github.com/ipfs/go-ipfs/importer/chunk"
+	h "github.com/ipfs/go-ipfs/importer/helpers"
+	trickle "github.com/ipfs/go-ipfs/importer/trickle"
 	mdag "github.com/ipfs/go-ipfs/merkledag"
 	mdagmock "github.com/ipfs/go-ipfs/merkledag/test"
 	ft "github.com/ipfs/go-ipfs/unixfs"
 
+	cid "gx/ipfs/QmNp85zy9RLrQ5oQD4hPyS39ezrrXpcaa7R4Y9kxdWQLLQ/go-cid"
 	node "gx/ipfs/QmPN7cwmpcc4DWXb4KTB9dNAJgjuPY69h3npsMfhRrQL9c/go-ipld-format"
 	u "gx/ipfs/QmSU6eubNdhXjFBJBSksTp8kv8YRub8mGAPv8tVJHmL2EU/go-ipfs-util"
+	mh "gx/ipfs/QmU9a9NV9RdPNwZQDYd5uKsm6N6LJLSvLbywDDYFbaaC6P/go-multihash"
 )
 
 func SizeSplitterGen(size int64) chunk.SplitterGen {
@ -28,9 +31,37 @@ func GetDAGServ() mdag.DAGService {
 	return mdagmock.Mock()
 }
 
-func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) node.Node {
+// NodeOpts is used by GetNode, GetEmptyNode and GetRandomNode
+type NodeOpts struct {
+	Prefix cid.Prefix
+	// ForceRawLeaves if true will force the use of raw leaves
+	ForceRawLeaves bool
+	// RawLeavesUsed is true if raw leaves are either implicitly or explicitly enabled
+	RawLeavesUsed bool
+}
+
+var UseProtoBufLeaves = NodeOpts{Prefix: mdag.V0CidPrefix()}
+var UseRawLeaves = NodeOpts{Prefix: mdag.V0CidPrefix(), ForceRawLeaves: true, RawLeavesUsed: true}
+var UseCidV1 = NodeOpts{Prefix: mdag.V1CidPrefix(), RawLeavesUsed: true}
+var UseBlake2b256 NodeOpts
+
+func init() {
+	UseBlake2b256 = UseCidV1
+	UseBlake2b256.Prefix.MhType = mh.Names["blake2b-256"]
+	UseBlake2b256.Prefix.MhLength = -1
+}
+
+func GetNode(t testing.TB, dserv mdag.DAGService, data []byte, opts NodeOpts) node.Node {
 	in := bytes.NewReader(data)
-	node, err := imp.BuildTrickleDagFromReader(dserv, SizeSplitterGen(500)(in))
+
+	dbp := h.DagBuilderParams{
+		Dagserv:   dserv,
+		Maxlinks:  h.DefaultLinksPerBlock,
+		Prefix:    &opts.Prefix,
+		RawLeaves: opts.RawLeavesUsed,
+	}
+
+	node, err := trickle.TrickleLayout(dbp.New(SizeSplitterGen(500)(in)))
 	if err != nil {
 		t.Fatal(err)
 	}
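Taken together, the four presets let one test body run against every supported leaf encoding. A hedged usage sketch from inside this test-utility package; the function and the log call are illustrative, not part of the commit:

// Illustrative: build one small node per preset and log the resulting CID.
// UseProtoBufLeaves yields a CIDv0 protobuf DAG; UseRawLeaves keeps CIDv0 at
// the root but stores leaves as raw blocks; UseCidV1 and UseBlake2b256 switch
// the CID version (and, for the latter, the hash function) via the Prefix.
func buildAcrossPresets(t *testing.T) {
	dserv := GetDAGServ()
	for _, opts := range []NodeOpts{UseProtoBufLeaves, UseRawLeaves, UseCidV1, UseBlake2b256} {
		nd := GetNode(t, dserv, []byte("hello world"), opts)
		t.Logf("prefix=%v cid=%s", opts.Prefix, nd.Cid())
	}
}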
@ -38,18 +69,18 @@ func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) node.Node {
 	return node
 }
 
-func GetEmptyNode(t testing.TB, dserv mdag.DAGService) node.Node {
-	return GetNode(t, dserv, []byte{})
+func GetEmptyNode(t testing.TB, dserv mdag.DAGService, opts NodeOpts) node.Node {
+	return GetNode(t, dserv, []byte{}, opts)
 }
 
-func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, node.Node) {
+func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64, opts NodeOpts) ([]byte, node.Node) {
 	in := io.LimitReader(u.NewTimeSeededRand(), size)
 	buf, err := ioutil.ReadAll(in)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	node := GetNode(t, dserv, buf)
+	node := GetNode(t, dserv, buf, opts)
 	return buf, node
 }
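One way to observe what RawLeavesUsed actually changes, sketched against GetRandomNode above. cid.Raw is the raw-block codec constant from the go-cid package imported earlier in this diff; the chosen size and the assertion that every root link is a leaf are assumptions about the trickle layout, not guarantees from this commit:

// Illustrative: with raw leaves enabled, the blocks a file node links to
// should be raw blocks (codec cid.Raw) rather than dag-pb nodes. 50000 bytes
// at the 500-byte chunk size used by GetNode yields 100 blocks, which stays
// under h.DefaultLinksPerBlock, so every link of the root is expected to be
// a leaf here.
func checkRawLeafCodec(t *testing.T) {
	dserv := GetDAGServ()
	_, nd := GetRandomNode(t, dserv, 50000, UseRawLeaves)
	for _, lnk := range nd.Links() {
		if lnk.Cid.Type() != cid.Raw {
			t.Fatalf("expected raw leaf, got codec %d", lnk.Cid.Type())
		}
	}
}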