1
0
mirror of https://github.com/ipfs/kubo.git synced 2025-05-17 15:06:47 +08:00

merkledag: change 'Node' to be an interface

Also change existing 'Node' type to 'ProtoNode' and use that most
everywhere for now. As we move forward with the integration we will try
and use the Node interface in more places where we're currently using
ProtoNode.

License: MIT
Signed-off-by: Jeromy <why@ipfs.io>
This commit is contained in:
Jeromy
2016-10-09 12:59:36 -07:00
parent 015d476c4f
commit 01aee44679
61 changed files with 852 additions and 523 deletions

View File

@ -14,7 +14,6 @@ import (
var ErrWrongHash = errors.New("data did not match given hash!")
type Block interface {
Multihash() mh.Multihash
RawData() []byte
Cid() *cid.Cid
String() string

View File

@ -18,18 +18,8 @@ import (
dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)
func newObject(data []byte) *testObject {
return &testObject{
Block: blocks.NewBlock(data),
}
}
type testObject struct {
blocks.Block
}
func (o *testObject) Cid() *cid.Cid {
return cid.NewCidV0(o.Block.Multihash())
func newObject(data []byte) blocks.Block {
return blocks.NewBlock(data)
}
func TestBlocks(t *testing.T) {
@ -38,12 +28,8 @@ func TestBlocks(t *testing.T) {
defer bs.Close()
o := newObject([]byte("beep boop"))
h := u.Hash([]byte("beep boop"))
if !bytes.Equal(o.Multihash(), h) {
t.Error("Block Multihash and data multihash not equal")
}
if !o.Cid().Equals(cid.NewCidV0(h)) {
h := cid.NewCidV0(u.Hash([]byte("beep boop")))
if !o.Cid().Equals(h) {
t.Error("Block key and data multihash key not equal")
}
@ -74,8 +60,8 @@ func TestBlocks(t *testing.T) {
}
}
func makeObjects(n int) []*testObject {
var out []*testObject
func makeObjects(n int) []blocks.Block {
var out []blocks.Block
for i := 0; i < n; i++ {
out = append(out, newObject([]byte(fmt.Sprintf("object %d", i))))
}

View File

@ -182,7 +182,7 @@ func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) {
return &Object{
Hash: c.String(),
Blocks: len(nd.Links),
Blocks: len(nd.Links()),
Size: d.GetFilesize(),
CumulativeSize: cumulsize,
Type: ndtype,
@ -245,7 +245,7 @@ var FilesCpCmd = &cmds.Command{
},
}
func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.Node, error) {
func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.ProtoNode, error) {
switch {
case strings.HasPrefix(p, "/ipfs/"):
np, err := path.ParsePath(p)
@ -253,7 +253,17 @@ func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.N
return nil, err
}
return core.Resolve(ctx, node, np)
nd, err := core.Resolve(ctx, node, np)
if err != nil {
return nil, err
}
pbnd, ok := nd.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
return pbnd, nil
default:
fsn, err := mfs.Lookup(node.FilesRoot, p)
if err != nil {

View File

@ -13,6 +13,7 @@ import (
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
tar "github.com/ipfs/go-ipfs/thirdparty/tar"
uarchive "github.com/ipfs/go-ipfs/unixfs/archive"
@ -69,6 +70,12 @@ may also specify the level of compression by specifying '-l=<1-9>'.
return
}
pbnd, ok := dn.(*dag.ProtoNode)
if !ok {
res.SetError(err, cmds.ErrNormal)
return
}
size, err := dn.Size()
if err != nil {
res.SetError(err, cmds.ErrNormal)
@ -78,7 +85,7 @@ may also specify the level of compression by specifying '-l=<1-9>'.
res.SetLength(size)
archive, _, _ := req.Option("archive").Bool()
reader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)
reader, err := uarchive.DagArchive(ctx, pbnd, p.String(), node.DAG, archive, cmplvl)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return

View File

@ -12,8 +12,6 @@ import (
path "github.com/ipfs/go-ipfs/path"
unixfs "github.com/ipfs/go-ipfs/unixfs"
unixfspb "github.com/ipfs/go-ipfs/unixfs/pb"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
)
type LsLink struct {
@ -72,7 +70,7 @@ The JSON output contains type information.
paths := req.Arguments()
var dagnodes []*merkledag.Node
var dagnodes []merkledag.Node
for _, fpath := range paths {
dagnode, err := core.Resolve(req.Context(), node, path.Path(fpath))
if err != nil {
@ -86,12 +84,12 @@ The JSON output contains type information.
for i, dagnode := range dagnodes {
output[i] = LsObject{
Hash: paths[i],
Links: make([]LsLink, len(dagnode.Links)),
Links: make([]LsLink, len(dagnode.Links())),
}
for j, link := range dagnode.Links {
var linkNode *merkledag.Node
for j, link := range dagnode.Links() {
var linkNode *merkledag.ProtoNode
t := unixfspb.Data_DataType(-1)
linkKey := cid.NewCidV0(link.Hash)
linkKey := link.Cid
if ok, err := node.Blockstore.Has(linkKey); ok && err == nil {
b, err := node.Blockstore.Get(linkKey)
if err != nil {
@ -106,11 +104,19 @@ The JSON output contains type information.
}
if linkNode == nil && resolve {
linkNode, err = link.GetNode(req.Context(), node.DAG)
nd, err := link.GetNode(req.Context(), node.DAG)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
pbnd, ok := nd.(*merkledag.ProtoNode)
if !ok {
res.SetError(merkledag.ErrNotProtobuf, cmds.ErrNormal)
return
}
linkNode = pbnd
}
if linkNode != nil {
d, err := unixfs.FromBytes(linkNode.Data())
@ -123,7 +129,7 @@ The JSON output contains type information.
}
output[i].Links[j] = LsLink{
Name: link.Name,
Hash: link.Hash.B58String(),
Hash: link.Cid.String(),
Size: link.Size,
Type: t,
}

View File

@ -7,6 +7,7 @@ import (
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
dag "github.com/ipfs/go-ipfs/merkledag"
dagutils "github.com/ipfs/go-ipfs/merkledag/utils"
path "github.com/ipfs/go-ipfs/path"
)
@ -85,7 +86,19 @@ Example:
return
}
changes, err := dagutils.Diff(ctx, node.DAG, obj_a, obj_b)
pbobj_a, ok := obj_a.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
pbobj_b, ok := obj_b.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
changes, err := dagutils.Diff(ctx, node.DAG, pbobj_a, pbobj_b)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return

View File

@ -12,13 +12,13 @@ import (
"strings"
"text/tabwriter"
mh "gx/ipfs/QmYDds3421prZgqKbLpEK7T9Aa2eVdQ7o3YarX1LVLdP2J/go-multihash"
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
ft "github.com/ipfs/go-ipfs/unixfs"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
)
// ErrObjectTooLarge is returned when too much data was read from stdin. current limit 2m
@ -98,7 +98,14 @@ is the raw data of the object.
res.SetError(err, cmds.ErrNormal)
return
}
res.SetOutput(bytes.NewReader(node.Data()))
pbnode, ok := node.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
res.SetOutput(bytes.NewReader(pbnode.Data()))
},
}
@ -137,6 +144,7 @@ multihash.
res.SetError(err, cmds.ErrNormal)
return
}
output, err := getOutput(node)
if err != nil {
res.SetError(err, cmds.ErrNormal)
@ -201,14 +209,20 @@ This command outputs data in the following encodings:
return
}
node := &Node{
Links: make([]Link, len(object.Links)),
Data: string(object.Data()),
pbo, ok := object.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
for i, link := range object.Links {
node := &Node{
Links: make([]Link, len(object.Links())),
Data: string(pbo.Data()),
}
for i, link := range object.Links() {
node.Links[i] = Link{
Hash: link.Hash.B58String(),
Hash: link.Cid.String(),
Name: link.Name,
Size: link.Size,
}
@ -413,7 +427,7 @@ Available templates:
return
}
node := new(dag.Node)
node := new(dag.ProtoNode)
if len(req.Arguments()) == 1 {
template := req.Arguments()[0]
var err error
@ -440,7 +454,7 @@ Available templates:
Type: Object{},
}
func nodeFromTemplate(template string) (*dag.Node, error) {
func nodeFromTemplate(template string) (*dag.ProtoNode, error) {
switch template {
case "unixfs-dir":
return ft.EmptyDirNode(), nil
@ -464,7 +478,7 @@ func objectPut(n *core.IpfsNode, input io.Reader, encoding string, dataFieldEnco
return nil, ErrObjectTooLarge
}
var dagnode *dag.Node
var dagnode *dag.ProtoNode
switch getObjectEnc(encoding) {
case objectEncodingJSON:
node := new(Node)
@ -542,17 +556,17 @@ func getObjectEnc(o interface{}) objectEncoding {
return objectEncoding(v)
}
func getOutput(dagnode *dag.Node) (*Object, error) {
func getOutput(dagnode dag.Node) (*Object, error) {
c := dagnode.Cid()
output := &Object{
Hash: c.String(),
Links: make([]Link, len(dagnode.Links)),
Links: make([]Link, len(dagnode.Links())),
}
for i, link := range dagnode.Links {
for i, link := range dagnode.Links() {
output.Links[i] = Link{
Name: link.Name,
Hash: link.Hash.B58String(),
Hash: link.Cid.String(),
Size: link.Size,
}
}
@ -560,9 +574,9 @@ func getOutput(dagnode *dag.Node) (*Object, error) {
return output, nil
}
// converts the Node object into a real dag.Node
func deserializeNode(node *Node, dataFieldEncoding string) (*dag.Node, error) {
dagnode := new(dag.Node)
// converts the Node object into a real dag.ProtoNode
func deserializeNode(node *Node, dataFieldEncoding string) (*dag.ProtoNode, error) {
dagnode := new(dag.ProtoNode)
switch dataFieldEncoding {
case "text":
dagnode.SetData([]byte(node.Data))
@ -573,16 +587,16 @@ func deserializeNode(node *Node, dataFieldEncoding string) (*dag.Node, error) {
return nil, fmt.Errorf("Unkown data field encoding")
}
dagnode.Links = make([]*dag.Link, len(node.Links))
dagnode.SetLinks(make([]*dag.Link, len(node.Links)))
for i, link := range node.Links {
hash, err := mh.FromB58String(link.Hash)
c, err := cid.Decode(link.Hash)
if err != nil {
return nil, err
}
dagnode.Links[i] = &dag.Link{
dagnode.Links()[i] = &dag.Link{
Name: link.Name,
Size: link.Size,
Hash: hash,
Cid: c,
}
}

View File

@ -79,6 +79,12 @@ the limit will not be respected by the network.
return
}
rtpb, ok := rootnd.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
fi, err := req.Files().NextFile()
if err != nil {
res.SetError(err, cmds.ErrNormal)
@ -91,9 +97,9 @@ the limit will not be respected by the network.
return
}
rootnd.SetData(append(rootnd.Data(), data...))
rtpb.SetData(append(rtpb.Data(), data...))
newkey, err := nd.DAG.Add(rootnd)
newkey, err := nd.DAG.Add(rtpb)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@ -141,6 +147,12 @@ Example:
return
}
rtpb, ok := root.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
fi, err := req.Files().NextFile()
if err != nil {
res.SetError(err, cmds.ErrNormal)
@ -153,9 +165,9 @@ Example:
return
}
root.SetData(data)
rtpb.SetData(data)
newkey, err := nd.DAG.Add(root)
newkey, err := nd.DAG.Add(rtpb)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@ -199,9 +211,15 @@ Removes a link by the given name from root.
return
}
rtpb, ok := root.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
path := req.Arguments()[1]
e := dagutils.NewDagEditor(root, nd.DAG)
e := dagutils.NewDagEditor(rtpb, nd.DAG)
err = e.RmLink(req.Context(), path)
if err != nil {
@ -268,6 +286,12 @@ to a file containing 'bar', and returns the hash of the new object.
return
}
rtpb, ok := root.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
npath := req.Arguments()[1]
childp, err := path.ParsePath(req.Arguments()[2])
if err != nil {
@ -281,12 +305,12 @@ to a file containing 'bar', and returns the hash of the new object.
return
}
var createfunc func() *dag.Node
var createfunc func() *dag.ProtoNode
if create {
createfunc = ft.EmptyDirNode
}
e := dagutils.NewDagEditor(root, nd.DAG)
e := dagutils.NewDagEditor(rtpb, nd.DAG)
childnd, err := core.Resolve(req.Context(), nd, childp)
if err != nil {
@ -294,7 +318,13 @@ to a file containing 'bar', and returns the hash of the new object.
return
}
err = e.InsertNodeAtPath(req.Context(), npath, childnd, createfunc)
chpb, ok := childnd.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
err = e.InsertNodeAtPath(req.Context(), npath, chpb, createfunc)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return

View File

@ -195,8 +195,8 @@ var refsMarshallerMap = cmds.MarshalerMap{
},
}
func objectsForPaths(ctx context.Context, n *core.IpfsNode, paths []string) ([]*dag.Node, error) {
objects := make([]*dag.Node, len(paths))
func objectsForPaths(ctx context.Context, n *core.IpfsNode, paths []string) ([]dag.Node, error) {
objects := make([]dag.Node, len(paths))
for i, p := range paths {
o, err := core.Resolve(ctx, n, path.Path(p))
if err != nil {
@ -225,24 +225,24 @@ type RefWriter struct {
}
// WriteRefs writes refs of the given object to the underlying writer.
func (rw *RefWriter) WriteRefs(n *dag.Node) (int, error) {
func (rw *RefWriter) WriteRefs(n dag.Node) (int, error) {
if rw.Recursive {
return rw.writeRefsRecursive(n)
}
return rw.writeRefsSingle(n)
}
func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
func (rw *RefWriter) writeRefsRecursive(n dag.Node) (int, error) {
nc := n.Cid()
var count int
for i, ng := range dag.GetDAG(rw.Ctx, rw.DAG, n) {
lc := cid.NewCidV0(n.Links[i].Hash)
lc := n.Links()[i].Cid
if rw.skip(lc) {
continue
}
if err := rw.WriteEdge(nc, lc, n.Links[i].Name); err != nil {
if err := rw.WriteEdge(nc, lc, n.Links()[i].Name); err != nil {
return count, err
}
@ -260,7 +260,7 @@ func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
return count, nil
}
func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
func (rw *RefWriter) writeRefsSingle(n dag.Node) (int, error) {
c := n.Cid()
if rw.skip(c) {
@ -268,9 +268,8 @@ func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
}
count := 0
for _, l := range n.Links {
lc := cid.NewCidV0(l.Hash)
for _, l := range n.Links() {
lc := l.Cid
if rw.skip(lc) {
continue
}

View File

@ -7,6 +7,7 @@ import (
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/core/coreunix"
dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
tar "github.com/ipfs/go-ipfs/tar"
)
@ -100,7 +101,13 @@ var tarCatCmd = &cmds.Command{
return
}
r, err := tar.ExportTar(req.Context(), root, nd.DAG)
rootpb, ok := root.(*dag.ProtoNode)
if !ok {
res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
return
}
r, err := tar.ExportTar(req.Context(), rootpb, nd.DAG)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return

View File

@ -103,7 +103,13 @@ possible, please use 'ipfs ls' instead.
continue
}
unixFSNode, err := unixfs.FromBytes(merkleNode.Data())
ndpb, ok := merkleNode.(*merkledag.ProtoNode)
if !ok {
res.SetError(merkledag.ErrNotProtobuf, cmds.ErrNormal)
return
}
unixFSNode, err := unixfs.FromBytes(ndpb.Data())
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@ -121,16 +127,21 @@ possible, please use 'ipfs ls' instead.
case unixfspb.Data_File:
break
case unixfspb.Data_Directory:
links := make([]LsLink, len(merkleNode.Links))
links := make([]LsLink, len(merkleNode.Links()))
output.Objects[hash].Links = links
for i, link := range merkleNode.Links {
var linkNode *merkledag.Node
linkNode, err = link.GetNode(ctx, node.DAG)
for i, link := range merkleNode.Links() {
linkNode, err := link.GetNode(ctx, node.DAG)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
d, err := unixfs.FromBytes(linkNode.Data())
lnpb, ok := linkNode.(*merkledag.ProtoNode)
if !ok {
res.SetError(merkledag.ErrNotProtobuf, cmds.ErrNormal)
return
}
d, err := unixfs.FromBytes(lnpb.Data())
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@ -138,7 +149,7 @@ possible, please use 'ipfs ls' instead.
t := d.GetType()
lsLink := LsLink{
Name: link.Name,
Hash: link.Hash.B58String(),
Hash: link.Cid.String(),
Type: t.String(),
}
if t == unixfspb.Data_File {

View File

@ -499,7 +499,7 @@ func (n *IpfsNode) loadFilesRoot() error {
return n.Repo.Datastore().Put(dsk, c.Bytes())
}
var nd *merkledag.Node
var nd *merkledag.ProtoNode
val, err := n.Repo.Datastore().Get(dsk)
switch {
@ -515,10 +515,17 @@ func (n *IpfsNode) loadFilesRoot() error {
return err
}
nd, err = n.DAG.Get(n.Context(), c)
rnd, err := n.DAG.Get(n.Context(), c)
if err != nil {
return fmt.Errorf("error loading filesroot from DAG: %s", err)
}
pbnd, ok := rnd.(*merkledag.ProtoNode)
if !ok {
return merkledag.ErrNotProtobuf
}
nd = pbnd
default:
return err
}

View File

@ -45,7 +45,7 @@ func newGatewayHandler(node *core.IpfsNode, conf GatewayConfig) *gatewayHandler
}
// TODO(cryptix): find these helpers somewhere else
func (i *gatewayHandler) newDagFromReader(r io.Reader) (*dag.Node, error) {
func (i *gatewayHandler) newDagFromReader(r io.Reader) (*dag.ProtoNode, error) {
// TODO(cryptix): change and remove this helper once PR1136 is merged
// return ufs.AddFromReader(i.node, r.Body)
return importer.BuildDagFromReader(
@ -163,6 +163,12 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
return
}
pbnd, ok := nd.(*dag.ProtoNode)
if !ok {
webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
return
}
etag := gopath.Base(urlPath)
if r.Header.Get("If-None-Match") == etag {
w.WriteHeader(http.StatusNotModified)
@ -190,7 +196,7 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
w.Header().Set("Suborigin", pathRoot)
}
dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
dr, err := uio.NewDagReader(ctx, pbnd, i.node.DAG)
if err != nil && err != uio.ErrIsDir {
// not a directory and still an error
internalWebError(w, err)
@ -221,7 +227,7 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
var dirListing []directoryItem
// loop through files
foundIndex := false
for _, link := range nd.Links {
for _, link := range nd.Links() {
if link.Name == "index.html" {
log.Debugf("found index.html link for %s", urlPath)
foundIndex = true
@ -239,7 +245,14 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
internalWebError(w, err)
return
}
dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
pbnd, ok := nd.(*dag.ProtoNode)
if !ok {
internalWebError(w, dag.ErrNotProtobuf)
return
}
dr, err := uio.NewDagReader(ctx, pbnd, i.node.DAG)
if err != nil {
internalWebError(w, err)
return
@ -340,7 +353,7 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
return
}
var newnode *dag.Node
var newnode *dag.ProtoNode
if rsegs[len(rsegs)-1] == "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" {
newnode = uio.NewEmptyDirectory()
} else {
@ -376,7 +389,13 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
return
}
e := dagutils.NewDagEditor(rnode, i.node.DAG)
pbnd, ok := rnode.(*dag.ProtoNode)
if !ok {
webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
return
}
e := dagutils.NewDagEditor(pbnd, i.node.DAG)
err = e.InsertNodeAtPath(ctx, newPath, newnode, uio.NewEmptyDirectory)
if err != nil {
webError(w, "putHandler: InsertNodeAtPath failed", err, http.StatusInternalServerError)
@ -392,13 +411,19 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
newcid = nnode.Cid()
case nil:
// object set-data case
rnode.SetData(newnode.Data())
pbnd, ok := rnode.(*dag.ProtoNode)
if !ok {
webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
return
}
newcid, err = i.node.DAG.Add(rnode)
// object set-data case
pbnd.SetData(newnode.Data())
newcid, err = i.node.DAG.Add(pbnd)
if err != nil {
nnk := newnode.Cid()
rk := rnode.Cid()
rk := pbnd.Cid()
webError(w, fmt.Sprintf("putHandler: Could not add newnode(%q) to root(%q)", nnk.String(), rk.String()), err, http.StatusInternalServerError)
return
}
@ -444,20 +469,33 @@ func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
return
}
pbnd, ok := pathNodes[len(pathNodes)-1].(*dag.ProtoNode)
if !ok {
webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
return
}
// TODO(cryptix): assumes len(pathNodes) > 1 - not found is an error above?
err = pathNodes[len(pathNodes)-1].RemoveNodeLink(components[len(components)-1])
err = pbnd.RemoveNodeLink(components[len(components)-1])
if err != nil {
webError(w, "Could not delete link", err, http.StatusBadRequest)
return
}
newnode := pathNodes[len(pathNodes)-1]
var newnode *dag.ProtoNode = pbnd
for j := len(pathNodes) - 2; j >= 0; j-- {
if _, err := i.node.DAG.Add(newnode); err != nil {
webError(w, "Could not add node", err, http.StatusInternalServerError)
return
}
newnode, err = pathNodes[j].UpdateNodeLink(components[j], newnode)
pathpb, ok := pathNodes[j].(*dag.ProtoNode)
if !ok {
webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
return
}
newnode, err = pathpb.UpdateNodeLink(components[j], newnode)
if err != nil {
webError(w, "Could not update node links", err, http.StatusInternalServerError)
return

View File

@ -25,7 +25,7 @@ import (
)
func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]*cid.Cid, error) {
dagnodes := make([]*merkledag.Node, 0)
dagnodes := make([]merkledag.Node, 0)
for _, fpath := range paths {
dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
if err != nil {

View File

@ -100,7 +100,7 @@ type Adder struct {
Silent bool
Wrap bool
Chunker string
root *dag.Node
root *dag.ProtoNode
mr *mfs.Root
unlocker bs.Unlocker
tempRoot *cid.Cid
@ -111,7 +111,7 @@ func (adder *Adder) SetMfsRoot(r *mfs.Root) {
}
// Perform the actual add & pin locally, outputting results to reader
func (adder Adder) add(reader io.Reader) (*dag.Node, error) {
func (adder Adder) add(reader io.Reader) (*dag.ProtoNode, error) {
chnk, err := chunk.FromString(reader, adder.Chunker)
if err != nil {
return nil, err
@ -129,7 +129,7 @@ func (adder Adder) add(reader io.Reader) (*dag.Node, error) {
)
}
func (adder *Adder) RootNode() (*dag.Node, error) {
func (adder *Adder) RootNode() (*dag.ProtoNode, error) {
// for memoizing
if adder.root != nil {
return adder.root, nil
@ -141,11 +141,18 @@ func (adder *Adder) RootNode() (*dag.Node, error) {
}
// if not wrapping, AND one root file, use that hash as root.
if !adder.Wrap && len(root.Links) == 1 {
root, err = root.Links[0].GetNode(adder.ctx, adder.dagService)
if !adder.Wrap && len(root.Links()) == 1 {
nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService)
if err != nil {
return nil, err
}
pbnd, ok := nd.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
root = pbnd
}
adder.root = root
@ -178,7 +185,7 @@ func (adder *Adder) PinRoot() error {
return adder.pinning.Flush()
}
func (adder *Adder) Finalize() (*dag.Node, error) {
func (adder *Adder) Finalize() (*dag.ProtoNode, error) {
root := adder.mr.GetValue()
// cant just call adder.RootNode() here as we need the name for printing
@ -189,7 +196,7 @@ func (adder *Adder) Finalize() (*dag.Node, error) {
var name string
if !adder.Wrap {
name = rootNode.Links[0].Name
name = rootNode.Links()[0].Name
dir, ok := adder.mr.GetValue().(*mfs.Directory)
if !ok {
@ -300,7 +307,7 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) {
// to preserve the filename.
// Returns the path of the added file ("<dir hash>/filename"), the DAG node of
// the directory, and an error if any.
func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.Node, error) {
func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.ProtoNode, error) {
file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil)
fileAdder, err := NewAdder(n.Context(), n.Pinning, n.Blockstore, n.DAG)
if err != nil {
@ -324,7 +331,7 @@ func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.No
return gopath.Join(c.String(), filename), dagnode, nil
}
func (adder *Adder) addNode(node *dag.Node, path string) error {
func (adder *Adder) addNode(node *dag.ProtoNode, path string) error {
// patch it into the root
if path == "" {
path = node.Cid().String()
@ -449,7 +456,7 @@ func (adder *Adder) maybePauseForGC() error {
}
// outputDagnode sends dagnode info over the output channel
func outputDagnode(out chan interface{}, name string, dn *dag.Node) error {
func outputDagnode(out chan interface{}, name string, dn *dag.ProtoNode) error {
if out == nil {
return nil
}
@ -475,18 +482,17 @@ func NewMemoryDagService() dag.DAGService {
}
// from core/commands/object.go
func getOutput(dagnode *dag.Node) (*Object, error) {
func getOutput(dagnode *dag.ProtoNode) (*Object, error) {
c := dagnode.Cid()
output := &Object{
Hash: c.String(),
Links: make([]Link, len(dagnode.Links)),
Links: make([]Link, len(dagnode.Links())),
}
for i, link := range dagnode.Links {
for i, link := range dagnode.Links() {
output.Links[i] = Link{
Name: link.Name,
//Hash: link.Hash.B58String(),
Size: link.Size,
}
}

View File

@ -1,8 +1,10 @@
package coreunix
import (
context "context"
"context"
core "github.com/ipfs/go-ipfs/core"
dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
uio "github.com/ipfs/go-ipfs/unixfs/io"
)
@ -12,5 +14,11 @@ func Cat(ctx context.Context, n *core.IpfsNode, pstr string) (*uio.DagReader, er
if err != nil {
return nil, err
}
return uio.NewDagReader(ctx, dagNode, n.DAG)
dnpb, ok := dagNode.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
return uio.NewDagReader(ctx, dnpb, n.DAG)
}

View File

@ -18,7 +18,7 @@ func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error
return "", err
}
mdnode := new(dag.Node)
mdnode := new(dag.ProtoNode)
mdata, err := ft.BytesForMetadata(m)
if err != nil {
return "", err
@ -48,5 +48,10 @@ func Metadata(n *core.IpfsNode, skey string) (*ft.Metadata, error) {
return nil, err
}
return ft.MetadataFromBytes(nd.Data())
pbnd, ok := nd.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
return ft.MetadataFromBytes(pbnd.Data())
}

View File

@ -72,7 +72,12 @@ func TestMetadata(t *testing.T) {
t.Fatal(err)
}
ndr, err := uio.NewDagReader(ctx, retnode, ds)
rtnpb, ok := retnode.(*merkledag.ProtoNode)
if !ok {
t.Fatal("expected protobuf node")
}
ndr, err := uio.NewDagReader(ctx, rtnpb, ds)
if err != nil {
t.Fatal(err)
}

View File

@ -19,7 +19,7 @@ var ErrNoNamesys = errors.New(
// Resolve resolves the given path by parsing out protocol-specific
// entries (e.g. /ipns/<node-key>) and then going through the /ipfs/
// entries and returning the final merkledag node.
func Resolve(ctx context.Context, n *IpfsNode, p path.Path) (*merkledag.Node, error) {
func Resolve(ctx context.Context, n *IpfsNode, p path.Path) (merkledag.Node, error) {
if strings.HasPrefix(p.String(), "/ipns/") {
// resolve ipns paths
@ -82,10 +82,10 @@ func ResolveToCid(ctx context.Context, n *IpfsNode, p path.Path) (*cid.Cid, erro
}
// Extract and return the key of the link to the target dag node.
link, err := dagnode.GetNodeLink(tail)
link, _, err := dagnode.Resolve([]string{tail})
if err != nil {
return nil, err
}
return cid.NewCidV0(link.Hash), nil
return link.Cid, nil
}

View File

@ -60,7 +60,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{
"ID": id,
"Target": envelope.Peer.Pretty(),
"Block": envelope.Block.Multihash().B58String(),
"Block": envelope.Block.Cid().String(),
})
bs.wm.SendBlock(ctx, envelope)

View File

@ -100,7 +100,12 @@ func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string
return nil, err
}
root, err := mfs.NewRoot(ctx, ipfs.DAG, node, ipnsPubFunc(ipfs, rt.k))
pbnode, ok := node.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
root, err := mfs.NewRoot(ctx, ipfs.DAG, pbnode, ipnsPubFunc(ipfs, rt.k))
if err != nil {
return nil, err
}

View File

@ -33,7 +33,7 @@ func maybeSkipFuseTests(t *testing.T) {
}
}
func randObj(t *testing.T, nd *core.IpfsNode, size int64) (*dag.Node, []byte) {
func randObj(t *testing.T, nd *core.IpfsNode, size int64) (*dag.ProtoNode, []byte) {
buf := make([]byte, size)
u.NewTimeSeededRand().Read(buf)
read := bytes.NewReader(buf)
@ -86,17 +86,23 @@ func TestIpfsBasicRead(t *testing.T) {
}
}
func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.Node) []string {
if len(n.Links) == 0 {
func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.ProtoNode) []string {
if len(n.Links()) == 0 {
return []string{name}
}
var out []string
for _, lnk := range n.Links {
for _, lnk := range n.Links() {
child, err := lnk.GetNode(ipfs.Context(), ipfs.DAG)
if err != nil {
t.Fatal(err)
}
sub := getPaths(t, ipfs, path.Join(name, lnk.Name), child)
childpb, ok := child.(*dag.ProtoNode)
if !ok {
t.Fatal(dag.ErrNotProtobuf)
}
sub := getPaths(t, ipfs, path.Join(name, lnk.Name), childpb)
out = append(out, sub...)
}
return out

View File

@ -66,7 +66,13 @@ func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
return nil, fuse.ENOENT
}
return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
pbnd, ok := nd.(*mdag.ProtoNode)
if !ok {
log.Error("fuse node was not a protobuf node")
return nil, fuse.ENOTSUP
}
return &Node{Ipfs: s.Ipfs, Nd: pbnd}, nil
}
// ReadDirAll reads a particular directory. Disallowed for root.
@ -78,7 +84,7 @@ func (*Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
// Node is the core object representing a filesystem tree node.
type Node struct {
Ipfs *core.IpfsNode
Nd *mdag.Node
Nd *mdag.ProtoNode
fd *uio.DagReader
cached *ftpb.Data
}
@ -105,13 +111,13 @@ func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error {
size := s.cached.GetFilesize()
a.Mode = 0444
a.Size = uint64(size)
a.Blocks = uint64(len(s.Nd.Links))
a.Blocks = uint64(len(s.Nd.Links()))
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
case ftpb.Data_Raw:
a.Mode = 0444
a.Size = uint64(len(s.cached.GetData()))
a.Blocks = uint64(len(s.Nd.Links))
a.Blocks = uint64(len(s.Nd.Links()))
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
case ftpb.Data_Symlink:
@ -134,17 +140,23 @@ func (s *Node) Lookup(ctx context.Context, name string) (fs.Node, error) {
return nil, fuse.ENOENT
}
return &Node{Ipfs: s.Ipfs, Nd: nodes[len(nodes)-1]}, nil
pbnd, ok := nodes[len(nodes)-1].(*mdag.ProtoNode)
if !ok {
log.Error("fuse lookup got non-protobuf node")
return nil, fuse.ENOTSUP
}
return &Node{Ipfs: s.Ipfs, Nd: pbnd}, nil
}
// ReadDirAll reads the link structure as directory entries
func (s *Node) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
log.Debug("Node ReadDir")
entries := make([]fuse.Dirent, len(s.Nd.Links))
for i, link := range s.Nd.Links {
entries := make([]fuse.Dirent, len(s.Nd.Links()))
for i, link := range s.Nd.Links() {
n := link.Name
if len(n) == 0 {
n = link.Hash.B58String()
n = link.Cid.String()
}
entries[i] = fuse.Dirent{Name: n, Type: fuse.DT_File}
}

View File

@ -22,7 +22,7 @@ import (
// TODO: extract these tests and more as a generic layout test suite
func buildTestDag(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
func buildTestDag(ds dag.DAGService, spl chunk.Splitter) (*dag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
@ -31,7 +31,7 @@ func buildTestDag(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
return BalancedLayout(dbp.New(spl))
}
func getTestDag(t *testing.T, ds dag.DAGService, size int64, blksize int64) (*dag.Node, []byte) {
func getTestDag(t *testing.T, ds dag.DAGService, size int64, blksize int64) (*dag.ProtoNode, []byte) {
data := make([]byte, size)
u.NewTimeSeededRand().Read(data)
r := bytes.NewReader(data)

View File

@ -7,7 +7,7 @@ import (
dag "github.com/ipfs/go-ipfs/merkledag"
)
func BalancedLayout(db *h.DagBuilderHelper) (*dag.Node, error) {
func BalancedLayout(db *h.DagBuilderHelper) (*dag.ProtoNode, error) {
var root *h.UnixfsNode
for level := 0; !db.Done(); level++ {

View File

@ -106,7 +106,7 @@ func (db *DagBuilderHelper) FillNodeWithData(node *UnixfsNode) error {
return nil
}
func (db *DagBuilderHelper) Add(node *UnixfsNode) (*dag.Node, error) {
func (db *DagBuilderHelper) Add(node *UnixfsNode) (*dag.ProtoNode, error) {
dn, err := node.GetDagNode()
if err != nil {
return nil, err

View File

@ -37,14 +37,14 @@ var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded")
// UnixfsNode is a struct created to aid in the generation
// of unixfs DAG trees
type UnixfsNode struct {
node *dag.Node
node *dag.ProtoNode
ufmt *ft.FSNode
}
// NewUnixfsNode creates a new Unixfs node to represent a file
func NewUnixfsNode() *UnixfsNode {
return &UnixfsNode{
node: new(dag.Node),
node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TFile},
}
}
@ -52,13 +52,13 @@ func NewUnixfsNode() *UnixfsNode {
// NewUnixfsBlock creates a new Unixfs node to represent a raw data block
func NewUnixfsBlock() *UnixfsNode {
return &UnixfsNode{
node: new(dag.Node),
node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TRaw},
}
}
// NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node
func NewUnixfsNodeFromDag(nd *dag.Node) (*UnixfsNode, error) {
func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) {
mb, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return nil, err
@ -75,12 +75,17 @@ func (n *UnixfsNode) NumChildren() int {
}
func (n *UnixfsNode) GetChild(ctx context.Context, i int, ds dag.DAGService) (*UnixfsNode, error) {
nd, err := n.node.Links[i].GetNode(ctx, ds)
nd, err := n.node.Links()[i].GetNode(ctx, ds)
if err != nil {
return nil, err
}
return NewUnixfsNodeFromDag(nd)
pbn, ok := nd.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
return NewUnixfsNodeFromDag(pbn)
}
// addChild will add the given UnixfsNode as a child of the receiver.
@ -112,7 +117,7 @@ func (n *UnixfsNode) AddChild(child *UnixfsNode, db *DagBuilderHelper) error {
// Removes the child node at the given index
func (n *UnixfsNode) RemoveChild(index int, dbh *DagBuilderHelper) {
n.ufmt.RemoveBlockSize(index)
n.node.Links = append(n.node.Links[:index], n.node.Links[index+1:]...)
n.node.SetLinks(append(n.node.Links()[:index], n.node.Links()[index+1:]...))
}
func (n *UnixfsNode) SetData(data []byte) {
@ -121,7 +126,7 @@ func (n *UnixfsNode) SetData(data []byte) {
// getDagNode fills out the proper formatting for the unixfs node
// inside of a DAG node and returns the dag node
func (n *UnixfsNode) GetDagNode() (*dag.Node, error) {
func (n *UnixfsNode) GetDagNode() (*dag.ProtoNode, error) {
data, err := n.ufmt.GetBytes()
if err != nil {
return nil, err

View File

@ -19,7 +19,7 @@ var log = logging.Logger("importer")
// Builds a DAG from the given file, writing created blocks to disk as they are
// created
func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.Node, error) {
func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.ProtoNode, error) {
stat, err := os.Lstat(fpath)
if err != nil {
return nil, err
@ -38,7 +38,7 @@ func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.Node, error) {
return BuildDagFromReader(ds, chunk.NewSizeSplitter(f, chunk.DefaultBlockSize))
}
func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
@ -47,7 +47,7 @@ func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error
return bal.BalancedLayout(dbp.New(spl))
}
func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,

View File

@ -14,7 +14,7 @@ import (
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) {
func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.ProtoNode, dag.DAGService) {
ds := mdtest.Mock()
r := io.LimitReader(u.NewTimeSeededRand(), size)
nd, err := BuildDagFromReader(ds, chunk.NewSizeSplitter(r, blksize))
@ -24,7 +24,7 @@ func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAG
return nd, ds
}
func getTrickleDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) {
func getTrickleDag(t testing.TB, size int64, blksize int64) (*dag.ProtoNode, dag.DAGService) {
ds := mdtest.Mock()
r := io.LimitReader(u.NewTimeSeededRand(), size)
nd, err := BuildTrickleDagFromReader(ds, chunk.NewSizeSplitter(r, blksize))
@ -100,7 +100,7 @@ func BenchmarkTrickleReadFull(b *testing.B) {
runReadBench(b, nd, ds)
}
func runReadBench(b *testing.B, nd *dag.Node, ds dag.DAGService) {
func runReadBench(b *testing.B, nd *dag.ProtoNode, ds dag.DAGService) {
for i := 0; i < b.N; i++ {
ctx, cancel := context.WithCancel(context.Background())
read, err := uio.NewDagReader(ctx, nd, ds)

View File

@ -20,7 +20,7 @@ import (
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.Node, error) {
func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
@ -523,7 +523,7 @@ func TestAppendSingleBytesToEmpty(t *testing.T) {
data := []byte("AB")
nd := new(merkledag.Node)
nd := new(merkledag.ProtoNode)
nd.SetData(ft.FilePBData(nil, 0))
dbp := &h.DagBuilderParams{
@ -561,7 +561,7 @@ func TestAppendSingleBytesToEmpty(t *testing.T) {
}
}
func printDag(nd *merkledag.Node, ds merkledag.DAGService, indent int) {
func printDag(nd *merkledag.ProtoNode, ds merkledag.DAGService, indent int) {
pbd, err := ft.FromBytes(nd.Data())
if err != nil {
panic(err)
@ -571,17 +571,17 @@ func printDag(nd *merkledag.Node, ds merkledag.DAGService, indent int) {
fmt.Print(" ")
}
fmt.Printf("{size = %d, type = %s, nc = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
if len(nd.Links) > 0 {
if len(nd.Links()) > 0 {
fmt.Println()
}
for _, lnk := range nd.Links {
for _, lnk := range nd.Links() {
child, err := lnk.GetNode(context.Background(), ds)
if err != nil {
panic(err)
}
printDag(child, ds, indent+1)
printDag(child.(*merkledag.ProtoNode), ds, indent+1)
}
if len(nd.Links) > 0 {
if len(nd.Links()) > 0 {
for i := 0; i < indent; i++ {
fmt.Print(" ")
}

View File

@ -1,9 +1,9 @@
package trickle
import (
"context"
"errors"
context "context"
"fmt"
h "github.com/ipfs/go-ipfs/importer/helpers"
dag "github.com/ipfs/go-ipfs/merkledag"
@ -15,7 +15,7 @@ import (
// improves seek speeds.
const layerRepeat = 4
func TrickleLayout(db *h.DagBuilderHelper) (*dag.Node, error) {
func TrickleLayout(db *h.DagBuilderHelper) (*dag.ProtoNode, error) {
root := h.NewUnixfsNode()
if err := db.FillNodeLayer(root); err != nil {
return nil, err
@ -66,7 +66,7 @@ func fillTrickleRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int) error
}
// TrickleAppend appends the data in `db` to the dag, using the Trickledag format
func TrickleAppend(ctx context.Context, base *dag.Node, db *h.DagBuilderHelper) (out *dag.Node, err_out error) {
func TrickleAppend(ctx context.Context, base *dag.ProtoNode, db *h.DagBuilderHelper) (out *dag.ProtoNode, err_out error) {
defer func() {
if err_out == nil {
if err := db.Close(); err != nil {
@ -229,15 +229,15 @@ func trickleDepthInfo(node *h.UnixfsNode, maxlinks int) (int, int) {
// VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure
// layout
func VerifyTrickleDagStructure(nd *dag.Node, ds dag.DAGService, direct int, layerRepeat int) error {
func VerifyTrickleDagStructure(nd *dag.ProtoNode, ds dag.DAGService, direct int, layerRepeat int) error {
return verifyTDagRec(nd, -1, direct, layerRepeat, ds)
}
// Recursive call for verifying the structure of a trickledag
func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGService) error {
func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAGService) error {
if depth == 0 {
// zero depth dag is raw data block
if len(nd.Links) > 0 {
if len(nd.Links()) > 0 {
return errors.New("expected direct block")
}
@ -259,22 +259,27 @@ func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGServi
}
if pbn.GetType() != ft.TFile {
return errors.New("expected file as branch node")
return fmt.Errorf("expected file as branch node, got: %s", pbn.GetType())
}
if len(pbn.Data) > 0 {
return errors.New("branch node should not have data")
}
for i := 0; i < len(nd.Links); i++ {
child, err := nd.Links[i].GetNode(context.TODO(), ds)
for i := 0; i < len(nd.Links()); i++ {
childi, err := nd.Links()[i].GetNode(context.TODO(), ds)
if err != nil {
return err
}
childpb, ok := childi.(*dag.ProtoNode)
if !ok {
return fmt.Errorf("cannot operate on non-protobuf nodes")
}
if i < direct {
// Direct blocks
err := verifyTDagRec(child, 0, direct, layerRepeat, ds)
err := verifyTDagRec(childpb, 0, direct, layerRepeat, ds)
if err != nil {
return err
}
@ -284,7 +289,7 @@ func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGServi
if rdepth >= depth && depth > 0 {
return errors.New("Child dag was too deep!")
}
err := verifyTDagRec(child, rdepth, direct, layerRepeat, ds)
err := verifyTDagRec(childpb, rdepth, direct, layerRepeat, ds)
if err != nil {
return err
}

View File

@ -7,7 +7,6 @@ import (
pb "github.com/ipfs/go-ipfs/merkledag/pb"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
mh "gx/ipfs/QmYDds3421prZgqKbLpEK7T9Aa2eVdQ7o3YarX1LVLdP2J/go-multihash"
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
@ -16,23 +15,23 @@ import (
// unmarshal decodes raw data into a *Node instance.
// The conversion uses an intermediate PBNode.
func (n *Node) unmarshal(encoded []byte) error {
func (n *ProtoNode) unmarshal(encoded []byte) error {
var pbn pb.PBNode
if err := pbn.Unmarshal(encoded); err != nil {
return fmt.Errorf("Unmarshal failed. %v", err)
}
pbnl := pbn.GetLinks()
n.Links = make([]*Link, len(pbnl))
n.links = make([]*Link, len(pbnl))
for i, l := range pbnl {
n.Links[i] = &Link{Name: l.GetName(), Size: l.GetTsize()}
h, err := mh.Cast(l.GetHash())
n.links[i] = &Link{Name: l.GetName(), Size: l.GetTsize()}
c, err := cid.Cast(l.GetHash())
if err != nil {
return fmt.Errorf("Link hash #%d is not valid multihash. %v", i, err)
}
n.Links[i].Hash = h
n.links[i].Cid = c
}
sort.Stable(LinkSlice(n.Links)) // keep links sorted
sort.Stable(LinkSlice(n.links)) // keep links sorted
n.data = pbn.GetData()
n.encoded = encoded
@ -41,7 +40,7 @@ func (n *Node) unmarshal(encoded []byte) error {
// Marshal encodes a *Node instance into a new byte slice.
// The conversion uses an intermediate PBNode.
func (n *Node) Marshal() ([]byte, error) {
func (n *ProtoNode) Marshal() ([]byte, error) {
pbn := n.getPBNode()
data, err := pbn.Marshal()
if err != nil {
@ -50,18 +49,18 @@ func (n *Node) Marshal() ([]byte, error) {
return data, nil
}
func (n *Node) getPBNode() *pb.PBNode {
func (n *ProtoNode) getPBNode() *pb.PBNode {
pbn := &pb.PBNode{}
if len(n.Links) > 0 {
pbn.Links = make([]*pb.PBLink, len(n.Links))
if len(n.links) > 0 {
pbn.Links = make([]*pb.PBLink, len(n.links))
}
sort.Stable(LinkSlice(n.Links)) // keep links sorted
for i, l := range n.Links {
sort.Stable(LinkSlice(n.links)) // keep links sorted
for i, l := range n.links {
pbn.Links[i] = &pb.PBLink{}
pbn.Links[i].Name = &l.Name
pbn.Links[i].Tsize = &l.Size
pbn.Links[i].Hash = []byte(l.Hash)
pbn.Links[i].Hash = l.Cid.Bytes()
}
if len(n.data) > 0 {
@ -72,8 +71,8 @@ func (n *Node) getPBNode() *pb.PBNode {
// EncodeProtobuf returns the encoded raw data version of a Node instance.
// It may use a cached encoded version, unless the force flag is given.
func (n *Node) EncodeProtobuf(force bool) ([]byte, error) {
sort.Stable(LinkSlice(n.Links)) // keep links sorted
func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) {
sort.Stable(LinkSlice(n.links)) // keep links sorted
if n.encoded == nil || force {
n.cached = nil
var err error
@ -91,8 +90,8 @@ func (n *Node) EncodeProtobuf(force bool) ([]byte, error) {
}
// Decoded decodes raw data and returns a new Node instance.
func DecodeProtobuf(encoded []byte) (*Node, error) {
n := new(Node)
func DecodeProtobuf(encoded []byte) (*ProtoNode, error) {
n := new(ProtoNode)
err := n.unmarshal(encoded)
if err != nil {
return nil, fmt.Errorf("incorrectly formatted merkledag node: %s", err)

View File

@ -20,9 +20,9 @@ var ErrNotFound = fmt.Errorf("merkledag: not found")
// DAGService is an IPFS Merkle DAG service.
type DAGService interface {
Add(*Node) (*cid.Cid, error)
Get(context.Context, *cid.Cid) (*Node, error)
Remove(*Node) error
Add(Node) (*cid.Cid, error)
Get(context.Context, *cid.Cid) (Node, error)
Remove(Node) error
// GetDAG returns, in order, all the single leve child
// nodes of the passed in node.
@ -45,6 +45,19 @@ func NewDAGService(bs bserv.BlockService) *dagService {
return &dagService{Blocks: bs}
}
type Node interface {
Resolve(path []string) (*Link, []string, error)
Links() []*Link
Tree() []string
Stat() (*NodeStat, error)
Size() (uint64, error)
Cid() *cid.Cid
Loggable() map[string]interface{}
RawData() []byte
String() string
}
// dagService is an IPFS Merkle DAG service.
// - the root is virtual (like a forest)
// - stores nodes' data in a BlockService
@ -55,7 +68,7 @@ type dagService struct {
}
// Add adds a node to the dagService, storing the block in the BlockService
func (n *dagService) Add(nd *Node) (*cid.Cid, error) {
func (n *dagService) Add(nd Node) (*cid.Cid, error) {
if n == nil { // FIXME remove this assertion. protect with constructor invariant
return nil, fmt.Errorf("dagService is nil")
}
@ -68,7 +81,7 @@ func (n *dagService) Batch() *Batch {
}
// Get retrieves a node from the dagService, fetching the block in the BlockService
func (n *dagService) Get(ctx context.Context, c *cid.Cid) (*Node, error) {
func (n *dagService) Get(ctx context.Context, c *cid.Cid) (Node, error) {
if n == nil {
return nil, fmt.Errorf("dagService is nil")
}
@ -84,7 +97,7 @@ func (n *dagService) Get(ctx context.Context, c *cid.Cid) (*Node, error) {
return nil, fmt.Errorf("Failed to get block for %s: %v", c, err)
}
var res *Node
var res Node
switch c.Type() {
case cid.Protobuf:
out, err := DecodeProtobuf(b.RawData())
@ -94,13 +107,12 @@ func (n *dagService) Get(ctx context.Context, c *cid.Cid) (*Node, error) {
}
return nil, fmt.Errorf("Failed to decode Protocol Buffers: %v", err)
}
out.cached = c
res = out
default:
return nil, fmt.Errorf("unrecognized formatting type")
}
res.cached = c
return res, nil
}
@ -109,7 +121,7 @@ func (n *dagService) GetLinks(ctx context.Context, c *cid.Cid) ([]*Link, error)
if err != nil {
return nil, err
}
return node.Links, nil
return node.Links(), nil
}
func (n *dagService) GetOfflineLinkService() LinkService {
@ -121,7 +133,7 @@ func (n *dagService) GetOfflineLinkService() LinkService {
}
}
func (n *dagService) Remove(nd *Node) error {
func (n *dagService) Remove(nd Node) error {
return n.Blocks.DeleteBlock(nd)
}
@ -143,7 +155,7 @@ func FindLinks(links []*cid.Cid, c *cid.Cid, start int) []int {
}
type NodeOption struct {
Node *Node
Node Node
Err error
}
@ -166,7 +178,7 @@ func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *Node
c := b.Cid()
var nd *Node
var nd Node
switch c.Type() {
case cid.Protobuf:
decnd, err := DecodeProtobuf(b.RawData())
@ -174,7 +186,7 @@ func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *Node
out <- &NodeOption{Err: err}
return
}
decnd.cached = cid.NewCidV0(b.Multihash())
decnd.cached = b.Cid()
nd = decnd
default:
out <- &NodeOption{Err: fmt.Errorf("unrecognized object type: %s", c.Type())}
@ -197,10 +209,10 @@ func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *Node
// GetDAG will fill out all of the links of the given Node.
// It returns a channel of nodes, which the caller can receive
// all the child nodes of 'root' on, in proper order.
func GetDAG(ctx context.Context, ds DAGService, root *Node) []NodeGetter {
func GetDAG(ctx context.Context, ds DAGService, root Node) []NodeGetter {
var cids []*cid.Cid
for _, lnk := range root.Links {
cids = append(cids, cid.NewCidV0(lnk.Hash))
for _, lnk := range root.Links() {
cids = append(cids, lnk.Cid)
}
return GetNodes(ctx, ds, cids)
@ -269,16 +281,16 @@ func dedupeKeys(cids []*cid.Cid) []*cid.Cid {
func newNodePromise(ctx context.Context) NodeGetter {
return &nodePromise{
recv: make(chan *Node, 1),
recv: make(chan Node, 1),
ctx: ctx,
err: make(chan error, 1),
}
}
type nodePromise struct {
cache *Node
cache Node
clk sync.Mutex
recv chan *Node
recv chan Node
ctx context.Context
err chan error
}
@ -288,9 +300,9 @@ type nodePromise struct {
// from its internal channels, subsequent calls will return the
// cached node.
type NodeGetter interface {
Get(context.Context) (*Node, error)
Get(context.Context) (Node, error)
Fail(err error)
Send(*Node)
Send(Node)
}
func (np *nodePromise) Fail(err error) {
@ -306,7 +318,7 @@ func (np *nodePromise) Fail(err error) {
np.err <- err
}
func (np *nodePromise) Send(nd *Node) {
func (np *nodePromise) Send(nd Node) {
var already bool
np.clk.Lock()
if np.cache != nil {
@ -322,7 +334,7 @@ func (np *nodePromise) Send(nd *Node) {
np.recv <- nd
}
func (np *nodePromise) Get(ctx context.Context) (*Node, error) {
func (np *nodePromise) Get(ctx context.Context) (Node, error) {
np.clk.Lock()
c := np.cache
np.clk.Unlock()
@ -350,14 +362,9 @@ type Batch struct {
MaxSize int
}
func (t *Batch) Add(nd *Node) (*cid.Cid, error) {
d, err := nd.EncodeProtobuf(false)
if err != nil {
return nil, err
}
func (t *Batch) Add(nd Node) (*cid.Cid, error) {
t.blocks = append(t.blocks, nd)
t.size += len(d)
t.size += len(nd.RawData())
if t.size > t.MaxSize {
return nd.Cid(), t.Commit()
}
@ -371,10 +378,6 @@ func (t *Batch) Commit() error {
return err
}
func legacyCidFromLink(lnk *Link) *cid.Cid {
return cid.NewCidV0(lnk.Hash)
}
// EnumerateChildren will walk the dag below the given root node and add all
// unseen children to the passed in set.
// TODO: parallelize to avoid disk latency perf hits?
@ -386,7 +389,7 @@ func EnumerateChildren(ctx context.Context, ds LinkService, root *cid.Cid, visit
return err
}
for _, lnk := range links {
c := legacyCidFromLink(lnk)
c := lnk.Cid
if visit(c) {
err = EnumerateChildren(ctx, ds, c, visit, bestEffort)
if err != nil {
@ -432,8 +435,8 @@ func EnumerateChildrenAsync(ctx context.Context, ds DAGService, c *cid.Cid, visi
live--
var cids []*cid.Cid
for _, lnk := range nd.Links {
c := legacyCidFromLink(lnk)
for _, lnk := range nd.Links() {
c := lnk.Cid
if visit(c) {
live++
cids = append(cids, c)

View File

@ -38,13 +38,13 @@ func TestNode(t *testing.T) {
t.Error(err)
}
printn := func(name string, n *Node) {
printn := func(name string, n *ProtoNode) {
fmt.Println(">", name)
fmt.Println("data:", string(n.Data()))
fmt.Println("links:")
for _, l := range n.Links {
fmt.Println("-", l.Name, l.Size, l.Hash)
for _, l := range n.Links() {
fmt.Println("-", l.Name, l.Size, l.Cid)
}
e, err := n.EncodeProtobuf(false)
@ -70,7 +70,7 @@ func TestNode(t *testing.T) {
printn("beep boop", n3)
}
func SubtestNodeStat(t *testing.T, n *Node) {
func SubtestNodeStat(t *testing.T, n *ProtoNode) {
enc, err := n.EncodeProtobuf(true)
if err != nil {
t.Error("n.EncodeProtobuf(true) failed")
@ -86,7 +86,7 @@ func SubtestNodeStat(t *testing.T, n *Node) {
k := n.Key()
expected := NodeStat{
NumLinks: len(n.Links),
NumLinks: len(n.Links()),
BlockSize: len(enc),
LinksSize: len(enc) - len(n.Data()), // includes framing.
DataSize: len(n.Data()),
@ -174,7 +174,12 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
}
fmt.Println("Got first node back.")
read, err := uio.NewDagReader(ctx, first, dagservs[i])
firstpb, ok := first.(*ProtoNode)
if !ok {
errs <- ErrNotProtobuf
}
read, err := uio.NewDagReader(ctx, firstpb, dagservs[i])
if err != nil {
errs <- err
}
@ -201,7 +206,7 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
}
}
func assertCanGet(t *testing.T, ds DAGService, n *Node) {
func assertCanGet(t *testing.T, ds DAGService, n Node) {
if _, err := ds.Get(context.Background(), n.Cid()); err != nil {
t.Fatal(err)
}
@ -263,13 +268,13 @@ func TestEnumerateChildren(t *testing.T) {
t.Fatal(err)
}
var traverse func(n *Node)
traverse = func(n *Node) {
var traverse func(n Node)
traverse = func(n Node) {
// traverse dag and check
for _, lnk := range n.Links {
c := cid.NewCidV0(lnk.Hash)
for _, lnk := range n.Links() {
c := lnk.Cid
if !set.Has(c) {
t.Fatal("missing key in set! ", lnk.Hash.B58String())
t.Fatal("missing key in set! ", lnk.Cid.String())
}
child, err := ds.Get(context.Background(), c)
if err != nil {
@ -286,7 +291,7 @@ func TestFetchFailure(t *testing.T) {
ds := dstest.Mock()
ds_bad := dstest.Mock()
top := new(Node)
top := new(ProtoNode)
for i := 0; i < 10; i++ {
nd := NodeWithData([]byte{byte('a' + i)})
_, err := ds.Add(nd)
@ -345,13 +350,13 @@ func TestUnmarshalFailure(t *testing.T) {
t.Fatal("should have failed to parse node with bad link")
}
n := &Node{}
n := &ProtoNode{}
n.Marshal()
}
func TestBasicAddGet(t *testing.T) {
ds := dstest.Mock()
nd := new(Node)
nd := new(ProtoNode)
c, err := ds.Add(nd)
if err != nil {

View File

@ -14,8 +14,8 @@ var ErrLinkNotFound = fmt.Errorf("no link by that name")
// Node represents a node in the IPFS Merkle DAG.
// nodes have opaque data and a set of navigable links.
type Node struct {
Links []*Link
type ProtoNode struct {
links []*Link
data []byte
// cache encoded/marshaled value
@ -48,7 +48,7 @@ type Link struct {
Size uint64
// multihash of the target object
Hash mh.Multihash
Cid *cid.Cid
}
type LinkSlice []*Link
@ -58,31 +58,29 @@ func (ls LinkSlice) Swap(a, b int) { ls[a], ls[b] = ls[b], ls[a] }
func (ls LinkSlice) Less(a, b int) bool { return ls[a].Name < ls[b].Name }
// MakeLink creates a link to the given node
func MakeLink(n *Node) (*Link, error) {
func MakeLink(n Node) (*Link, error) {
s, err := n.Size()
if err != nil {
return nil, err
}
h := n.Multihash()
return &Link{
Size: s,
Hash: h,
Cid: n.Cid(),
}, nil
}
// GetNode returns the MDAG Node that this link points to
func (l *Link) GetNode(ctx context.Context, serv DAGService) (*Node, error) {
return serv.Get(ctx, legacyCidFromLink(l))
func (l *Link) GetNode(ctx context.Context, serv DAGService) (Node, error) {
return serv.Get(ctx, l.Cid)
}
func NodeWithData(d []byte) *Node {
return &Node{data: d}
func NodeWithData(d []byte) *ProtoNode {
return &ProtoNode{data: d}
}
// AddNodeLink adds a link to another node.
func (n *Node) AddNodeLink(name string, that *Node) error {
func (n *ProtoNode) AddNodeLink(name string, that *ProtoNode) error {
n.encoded = nil
lnk, err := MakeLink(that)
@ -99,7 +97,7 @@ func (n *Node) AddNodeLink(name string, that *Node) error {
// AddNodeLinkClean adds a link to another node. without keeping a reference to
// the child node
func (n *Node) AddNodeLinkClean(name string, that *Node) error {
func (n *ProtoNode) AddNodeLinkClean(name string, that Node) error {
n.encoded = nil
lnk, err := MakeLink(that)
if err != nil {
@ -111,31 +109,31 @@ func (n *Node) AddNodeLinkClean(name string, that *Node) error {
}
// AddRawLink adds a copy of a link to this node
func (n *Node) AddRawLink(name string, l *Link) error {
func (n *ProtoNode) AddRawLink(name string, l *Link) error {
n.encoded = nil
n.Links = append(n.Links, &Link{
n.links = append(n.links, &Link{
Name: name,
Size: l.Size,
Hash: l.Hash,
Cid: l.Cid,
})
return nil
}
// Remove a link on this node by the given name
func (n *Node) RemoveNodeLink(name string) error {
func (n *ProtoNode) RemoveNodeLink(name string) error {
n.encoded = nil
good := make([]*Link, 0, len(n.Links))
good := make([]*Link, 0, len(n.links))
var found bool
for _, l := range n.Links {
for _, l := range n.links {
if l.Name != name {
good = append(good, l)
} else {
found = true
}
}
n.Links = good
n.links = good
if !found {
return ErrNotFound
@ -145,20 +143,36 @@ func (n *Node) RemoveNodeLink(name string) error {
}
// Return a copy of the link with given name
func (n *Node) GetNodeLink(name string) (*Link, error) {
for _, l := range n.Links {
func (n *ProtoNode) GetNodeLink(name string) (*Link, error) {
for _, l := range n.links {
if l.Name == name {
return &Link{
Name: l.Name,
Size: l.Size,
Hash: l.Hash,
Cid: l.Cid,
}, nil
}
}
return nil, ErrLinkNotFound
}
func (n *Node) GetLinkedNode(ctx context.Context, ds DAGService, name string) (*Node, error) {
var ErrNotProtobuf = fmt.Errorf("expected protobuf dag node")
func (n *ProtoNode) GetLinkedProtoNode(ctx context.Context, ds DAGService, name string) (*ProtoNode, error) {
nd, err := n.GetLinkedNode(ctx, ds, name)
if err != nil {
return nil, err
}
pbnd, ok := nd.(*ProtoNode)
if !ok {
return nil, ErrNotProtobuf
}
return pbnd, nil
}
func (n *ProtoNode) GetLinkedNode(ctx context.Context, ds DAGService, name string) (Node, error) {
lnk, err := n.GetNodeLink(name)
if err != nil {
return nil, err
@ -169,30 +183,30 @@ func (n *Node) GetLinkedNode(ctx context.Context, ds DAGService, name string) (*
// Copy returns a copy of the node.
// NOTE: Does not make copies of Node objects in the links.
func (n *Node) Copy() *Node {
nnode := new(Node)
func (n *ProtoNode) Copy() *ProtoNode {
nnode := new(ProtoNode)
if len(n.data) > 0 {
nnode.data = make([]byte, len(n.data))
copy(nnode.data, n.data)
}
if len(n.Links) > 0 {
nnode.Links = make([]*Link, len(n.Links))
copy(nnode.Links, n.Links)
if len(n.links) > 0 {
nnode.links = make([]*Link, len(n.links))
copy(nnode.links, n.links)
}
return nnode
}
func (n *Node) RawData() []byte {
func (n *ProtoNode) RawData() []byte {
out, _ := n.EncodeProtobuf(false)
return out
}
func (n *Node) Data() []byte {
func (n *ProtoNode) Data() []byte {
return n.data
}
func (n *Node) SetData(d []byte) {
func (n *ProtoNode) SetData(d []byte) {
n.encoded = nil
n.cached = nil
n.data = d
@ -200,7 +214,7 @@ func (n *Node) SetData(d []byte) {
// UpdateNodeLink return a copy of the node with the link name set to point to
// that. If a link of the same name existed, it is removed.
func (n *Node) UpdateNodeLink(name string, that *Node) (*Node, error) {
func (n *ProtoNode) UpdateNodeLink(name string, that *ProtoNode) (*ProtoNode, error) {
newnode := n.Copy()
err := newnode.RemoveNodeLink(name)
err = nil // ignore error
@ -210,21 +224,21 @@ func (n *Node) UpdateNodeLink(name string, that *Node) (*Node, error) {
// Size returns the total size of the data addressed by node,
// including the total sizes of references.
func (n *Node) Size() (uint64, error) {
func (n *ProtoNode) Size() (uint64, error) {
b, err := n.EncodeProtobuf(false)
if err != nil {
return 0, err
}
s := uint64(len(b))
for _, l := range n.Links {
for _, l := range n.links {
s += l.Size
}
return s, nil
}
// Stat returns statistics on the node.
func (n *Node) Stat() (*NodeStat, error) {
func (n *ProtoNode) Stat() (*NodeStat, error) {
enc, err := n.EncodeProtobuf(false)
if err != nil {
return nil, err
@ -237,7 +251,7 @@ func (n *Node) Stat() (*NodeStat, error) {
return &NodeStat{
Hash: n.Key().B58String(),
NumLinks: len(n.Links),
NumLinks: len(n.links),
BlockSize: len(enc),
LinksSize: len(enc) - len(n.data), // includes framing.
DataSize: len(n.data),
@ -245,28 +259,28 @@ func (n *Node) Stat() (*NodeStat, error) {
}, nil
}
func (n *Node) Key() key.Key {
func (n *ProtoNode) Key() key.Key {
return key.Key(n.Multihash())
}
func (n *Node) Loggable() map[string]interface{} {
func (n *ProtoNode) Loggable() map[string]interface{} {
return map[string]interface{}{
"node": n.String(),
}
}
func (n *Node) Cid() *cid.Cid {
func (n *ProtoNode) Cid() *cid.Cid {
h := n.Multihash()
return cid.NewCidV0(h)
}
func (n *Node) String() string {
func (n *ProtoNode) String() string {
return n.Cid().String()
}
// Multihash hashes the encoded data of this node.
func (n *Node) Multihash() mh.Multihash {
func (n *ProtoNode) Multihash() mh.Multihash {
// NOTE: EncodeProtobuf generates the hash and puts it in n.cached.
_, err := n.EncodeProtobuf(false)
if err != nil {
@ -276,3 +290,32 @@ func (n *Node) Multihash() mh.Multihash {
return n.cached.Hash()
}
func (n *ProtoNode) Links() []*Link {
return n.links
}
func (n *ProtoNode) SetLinks(links []*Link) {
n.links = links
}
func (n *ProtoNode) Resolve(path []string) (*Link, []string, error) {
if len(path) == 0 {
return nil, nil, fmt.Errorf("end of path, no more links to resolve")
}
lnk, err := n.GetNodeLink(path[0])
if err != nil {
return nil, nil, err
}
return lnk, path[1:], nil
}
func (n *ProtoNode) Tree() []string {
out := make([]string, 0, len(n.links))
for _, lnk := range n.links {
out = append(out, lnk.Name)
}
return out
}

View File

@ -10,31 +10,30 @@ import (
)
func TestRemoveLink(t *testing.T) {
nd := &Node{
Links: []*Link{
&Link{Name: "a"},
&Link{Name: "b"},
&Link{Name: "a"},
&Link{Name: "a"},
&Link{Name: "c"},
&Link{Name: "a"},
},
}
nd := &ProtoNode{}
nd.SetLinks([]*Link{
&Link{Name: "a"},
&Link{Name: "b"},
&Link{Name: "a"},
&Link{Name: "a"},
&Link{Name: "c"},
&Link{Name: "a"},
})
err := nd.RemoveNodeLink("a")
if err != nil {
t.Fatal(err)
}
if len(nd.Links) != 2 {
if len(nd.Links()) != 2 {
t.Fatal("number of links incorrect")
}
if nd.Links[0].Name != "b" {
if nd.Links()[0].Name != "b" {
t.Fatal("link order wrong")
}
if nd.Links[1].Name != "c" {
if nd.Links()[1].Name != "c" {
t.Fatal("link order wrong")
}
@ -45,33 +44,32 @@ func TestRemoveLink(t *testing.T) {
}
// ensure nothing else got touched
if len(nd.Links) != 2 {
if len(nd.Links()) != 2 {
t.Fatal("number of links incorrect")
}
if nd.Links[0].Name != "b" {
if nd.Links()[0].Name != "b" {
t.Fatal("link order wrong")
}
if nd.Links[1].Name != "c" {
if nd.Links()[1].Name != "c" {
t.Fatal("link order wrong")
}
}
func TestFindLink(t *testing.T) {
ds := mdtest.Mock()
k, err := ds.Add(new(Node))
k, err := ds.Add(new(ProtoNode))
if err != nil {
t.Fatal(err)
}
nd := &Node{
Links: []*Link{
&Link{Name: "a", Hash: k.Hash()},
&Link{Name: "c", Hash: k.Hash()},
&Link{Name: "b", Hash: k.Hash()},
},
}
nd := &ProtoNode{}
nd.SetLinks([]*Link{
&Link{Name: "a", Cid: k},
&Link{Name: "c", Cid: k},
&Link{Name: "b", Cid: k},
})
_, err = ds.Add(nd)
if err != nil {
@ -107,19 +105,19 @@ func TestFindLink(t *testing.T) {
t.Fatal(err)
}
if olnk.Hash.B58String() == k.String() {
if olnk.Cid.String() == k.String() {
t.Fatal("new link should have different hash")
}
}
func TestNodeCopy(t *testing.T) {
nd := &Node{
Links: []*Link{
&Link{Name: "a"},
&Link{Name: "c"},
&Link{Name: "b"},
},
}
nd := &ProtoNode{}
nd.SetLinks([]*Link{
&Link{Name: "a"},
&Link{Name: "c"},
&Link{Name: "b"},
})
nd.SetData([]byte("testing"))
ond := nd.Copy()

View File

@ -30,7 +30,7 @@ type Options struct {
// State is a current traversal state
type State struct {
Node *mdag.Node
Node mdag.Node
Depth int
}
@ -39,13 +39,13 @@ type traversal struct {
seen map[string]struct{}
}
func (t *traversal) shouldSkip(n *mdag.Node) (bool, error) {
func (t *traversal) shouldSkip(n mdag.Node) (bool, error) {
if t.opts.SkipDuplicates {
k := n.Key()
if _, found := t.seen[string(k)]; found {
k := n.Cid()
if _, found := t.seen[k.KeyString()]; found {
return true, nil
}
t.seen[string(k)] = struct{}{}
t.seen[k.KeyString()] = struct{}{}
}
return false, nil
@ -59,9 +59,9 @@ func (t *traversal) callFunc(next State) error {
// stop processing. if it returns a nil node, just skip it.
//
// the error handling is a little complicated.
func (t *traversal) getNode(link *mdag.Link) (*mdag.Node, error) {
func (t *traversal) getNode(link *mdag.Link) (mdag.Node, error) {
getNode := func(l *mdag.Link) (*mdag.Node, error) {
getNode := func(l *mdag.Link) (mdag.Node, error) {
next, err := l.GetNode(context.TODO(), t.opts.DAG)
if err != nil {
return nil, err
@ -99,7 +99,7 @@ type Func func(current State) error
//
type ErrFunc func(err error) error
func Traverse(root *mdag.Node, o Options) error {
func Traverse(root mdag.Node, o Options) error {
t := traversal{
opts: o,
seen: map[string]struct{}{},
@ -145,7 +145,7 @@ func dfsPostTraverse(state State, t *traversal) error {
}
func dfsDescend(df dfsFunc, curr State, t *traversal) error {
for _, l := range curr.Node.Links {
for _, l := range curr.Node.Links() {
node, err := t.getNode(l)
if err != nil {
return err
@ -184,7 +184,7 @@ func bfsTraverse(root State, t *traversal) error {
return err
}
for _, l := range curr.Node.Links {
for _, l := range curr.Node.Links() {
node, err := t.getNode(l)
if err != nil {
return err

View File

@ -321,12 +321,12 @@ func TestBFSSkip(t *testing.T) {
`))
}
func testWalkOutputs(t *testing.T, root *mdag.Node, opts Options, expect []byte) {
func testWalkOutputs(t *testing.T, root mdag.Node, opts Options, expect []byte) {
expect = bytes.TrimLeft(expect, "\n")
buf := new(bytes.Buffer)
walk := func(current State) error {
s := fmt.Sprintf("%d %s\n", current.Depth, current.Node.Data())
s := fmt.Sprintf("%d %s\n", current.Depth, current.Node.(*mdag.ProtoNode).Data())
t.Logf("walk: %s", s)
buf.Write([]byte(s))
return nil
@ -348,7 +348,7 @@ func testWalkOutputs(t *testing.T, root *mdag.Node, opts Options, expect []byte)
}
}
func newFan(t *testing.T, ds mdag.DAGService) *mdag.Node {
func newFan(t *testing.T, ds mdag.DAGService) mdag.Node {
a := mdag.NodeWithData([]byte("/a"))
addLink(t, ds, a, child(t, ds, a, "aa"))
addLink(t, ds, a, child(t, ds, a, "ab"))
@ -357,7 +357,7 @@ func newFan(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
func newLinkedList(t *testing.T, ds mdag.DAGService) *mdag.Node {
func newLinkedList(t *testing.T, ds mdag.DAGService) mdag.Node {
a := mdag.NodeWithData([]byte("/a"))
aa := child(t, ds, a, "aa")
aaa := child(t, ds, aa, "aaa")
@ -370,7 +370,7 @@ func newLinkedList(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
func newBinaryTree(t *testing.T, ds mdag.DAGService) *mdag.Node {
func newBinaryTree(t *testing.T, ds mdag.DAGService) mdag.Node {
a := mdag.NodeWithData([]byte("/a"))
aa := child(t, ds, a, "aa")
ab := child(t, ds, a, "ab")
@ -383,7 +383,7 @@ func newBinaryTree(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
func newBinaryDAG(t *testing.T, ds mdag.DAGService) *mdag.Node {
func newBinaryDAG(t *testing.T, ds mdag.DAGService) mdag.Node {
a := mdag.NodeWithData([]byte("/a"))
aa := child(t, ds, a, "aa")
aaa := child(t, ds, aa, "aaa")
@ -400,16 +400,16 @@ func newBinaryDAG(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
func addLink(t *testing.T, ds mdag.DAGService, a, b *mdag.Node) {
to := string(a.Data()) + "2" + string(b.Data())
func addLink(t *testing.T, ds mdag.DAGService, a, b mdag.Node) {
to := string(a.(*mdag.ProtoNode).Data()) + "2" + string(b.(*mdag.ProtoNode).Data())
if _, err := ds.Add(b); err != nil {
t.Error(err)
}
if err := a.AddNodeLink(to, b); err != nil {
if err := a.(*mdag.ProtoNode).AddNodeLink(to, b.(*mdag.ProtoNode)); err != nil {
t.Error(err)
}
}
func child(t *testing.T, ds mdag.DAGService, a *mdag.Node, name string) *mdag.Node {
return mdag.NodeWithData([]byte(string(a.Data()) + "/" + name))
func child(t *testing.T, ds mdag.DAGService, a mdag.Node, name string) mdag.Node {
return mdag.NodeWithData([]byte(string(a.(*mdag.ProtoNode).Data()) + "/" + name))
}

View File

@ -1,7 +1,6 @@
package dagutils
import (
"bytes"
"fmt"
"path"
@ -37,7 +36,7 @@ func (c *Change) String() string {
}
}
func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Change) (*dag.Node, error) {
func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.ProtoNode, cs []*Change) (*dag.ProtoNode, error) {
e := NewDagEditor(nd, ds)
for _, c := range cs {
switch c.Type {
@ -46,7 +45,13 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha
if err != nil {
return nil, err
}
err = e.InsertNodeAtPath(ctx, c.Path, child, nil)
childpb, ok := child.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil)
if err != nil {
return nil, err
}
@ -66,7 +71,13 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha
if err != nil {
return nil, err
}
err = e.InsertNodeAtPath(ctx, c.Path, child, nil)
childpb, ok := child.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil)
if err != nil {
return nil, err
}
@ -76,8 +87,8 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha
return e.Finalize(ds)
}
func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, error) {
if len(a.Links) == 0 && len(b.Links) == 0 {
func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.ProtoNode) ([]*Change, error) {
if len(a.Links()) == 0 && len(b.Links()) == 0 {
return []*Change{
&Change{
Type: Mod,
@ -92,10 +103,10 @@ func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, er
clean_b := b.Copy()
// strip out unchanged stuff
for _, lnk := range a.Links {
for _, lnk := range a.Links() {
l, err := b.GetNodeLink(lnk.Name)
if err == nil {
if bytes.Equal(l.Hash, lnk.Hash) {
if l.Cid.Equals(lnk.Cid) {
// no change... ignore it
} else {
anode, err := lnk.GetNode(ctx, ds)
@ -108,7 +119,17 @@ func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, er
return nil, err
}
sub, err := Diff(ctx, ds, anode, bnode)
anodepb, ok := anode.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
bnodepb, ok := bnode.(*dag.ProtoNode)
if !ok {
return nil, dag.ErrNotProtobuf
}
sub, err := Diff(ctx, ds, anodepb, bnodepb)
if err != nil {
return nil, err
}
@ -123,18 +144,18 @@ func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, er
}
}
for _, lnk := range clean_a.Links {
for _, lnk := range clean_a.Links() {
out = append(out, &Change{
Type: Remove,
Path: lnk.Name,
Before: cid.NewCidV0(lnk.Hash),
Before: lnk.Cid,
})
}
for _, lnk := range clean_b.Links {
for _, lnk := range clean_b.Links() {
out = append(out, &Change{
Type: Add,
Path: lnk.Name,
After: cid.NewCidV0(lnk.Hash),
After: lnk.Cid,
})
}

View File

@ -15,7 +15,7 @@ import (
)
type Editor struct {
root *dag.Node
root *dag.ProtoNode
// tmp is a temporary in memory (for now) dagstore for all of the
// intermediary nodes to be stored in
@ -34,7 +34,7 @@ func NewMemoryDagService() dag.DAGService {
}
// root is the node to be modified, source is the dagstore to pull nodes from (optional)
func NewDagEditor(root *dag.Node, source dag.DAGService) *Editor {
func NewDagEditor(root *dag.ProtoNode, source dag.DAGService) *Editor {
return &Editor{
root: root,
tmp: NewMemoryDagService(),
@ -42,7 +42,7 @@ func NewDagEditor(root *dag.Node, source dag.DAGService) *Editor {
}
}
func (e *Editor) GetNode() *dag.Node {
func (e *Editor) GetNode() *dag.ProtoNode {
return e.root.Copy()
}
@ -50,7 +50,7 @@ func (e *Editor) GetDagService() dag.DAGService {
return e.tmp
}
func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname string, childnd *dag.Node) (*dag.Node, error) {
func addLink(ctx context.Context, ds dag.DAGService, root *dag.ProtoNode, childname string, childnd *dag.ProtoNode) (*dag.ProtoNode, error) {
if childname == "" {
return nil, errors.New("cannot create link with no name!")
}
@ -76,7 +76,7 @@ func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname s
return root, nil
}
func (e *Editor) InsertNodeAtPath(ctx context.Context, pth string, toinsert *dag.Node, create func() *dag.Node) error {
func (e *Editor) InsertNodeAtPath(ctx context.Context, pth string, toinsert *dag.ProtoNode, create func() *dag.ProtoNode) error {
splpath := path.SplitList(pth)
nd, err := e.insertNodeAtPath(ctx, e.root, splpath, toinsert, create)
if err != nil {
@ -86,12 +86,12 @@ func (e *Editor) InsertNodeAtPath(ctx context.Context, pth string, toinsert *dag
return nil
}
func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.Node, path []string, toinsert *dag.Node, create func() *dag.Node) (*dag.Node, error) {
func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.ProtoNode, path []string, toinsert *dag.ProtoNode, create func() *dag.ProtoNode) (*dag.ProtoNode, error) {
if len(path) == 1 {
return addLink(ctx, e.tmp, root, path[0], toinsert)
}
nd, err := root.GetLinkedNode(ctx, e.tmp, path[0])
nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0])
if err != nil {
// if 'create' is true, we create directories on the way down as needed
if err == dag.ErrLinkNotFound && create != nil {
@ -99,7 +99,7 @@ func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.Node, path []st
err = nil // no longer an error case
} else if err == dag.ErrNotFound {
// try finding it in our source dagstore
nd, err = root.GetLinkedNode(ctx, e.src, path[0])
nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0])
}
// if we receive an ErrNotFound, then our second 'GetLinkedNode' call
@ -140,7 +140,7 @@ func (e *Editor) RmLink(ctx context.Context, pth string) error {
return nil
}
func (e *Editor) rmLink(ctx context.Context, root *dag.Node, path []string) (*dag.Node, error) {
func (e *Editor) rmLink(ctx context.Context, root *dag.ProtoNode, path []string) (*dag.ProtoNode, error) {
if len(path) == 1 {
// base case, remove node in question
err := root.RemoveNodeLink(path[0])
@ -157,9 +157,9 @@ func (e *Editor) rmLink(ctx context.Context, root *dag.Node, path []string) (*da
}
// search for node in both tmp dagstore and source dagstore
nd, err := root.GetLinkedNode(ctx, e.tmp, path[0])
nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0])
if err == dag.ErrNotFound {
nd, err = root.GetLinkedNode(ctx, e.src, path[0])
nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0])
}
if err != nil {
@ -187,19 +187,19 @@ func (e *Editor) rmLink(ctx context.Context, root *dag.Node, path []string) (*da
return root, nil
}
func (e *Editor) Finalize(ds dag.DAGService) (*dag.Node, error) {
func (e *Editor) Finalize(ds dag.DAGService) (*dag.ProtoNode, error) {
nd := e.GetNode()
err := copyDag(nd, e.tmp, ds)
return nd, err
}
func copyDag(nd *dag.Node, from, to dag.DAGService) error {
func copyDag(nd *dag.ProtoNode, from, to dag.DAGService) error {
_, err := to.Add(nd)
if err != nil {
return err
}
for _, lnk := range nd.Links {
for _, lnk := range nd.Links() {
child, err := lnk.GetNode(context.Background(), from)
if err != nil {
if err == dag.ErrNotFound {
@ -210,7 +210,12 @@ func copyDag(nd *dag.Node, from, to dag.DAGService) error {
return err
}
err = copyDag(child, from, to)
childpb, ok := child.(*dag.ProtoNode)
if !ok {
return dag.ErrNotProtobuf
}
err = copyDag(childpb, from, to)
if err != nil {
return err
}

View File

@ -20,7 +20,7 @@ func TestAddLink(t *testing.T) {
t.Fatal(err)
}
nd := new(dag.Node)
nd := new(dag.ProtoNode)
nnode, err := addLink(context.Background(), ds, nd, "fish", fishnode)
if err != nil {
t.Fatal(err)
@ -37,11 +37,11 @@ func TestAddLink(t *testing.T) {
}
}
func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, pth string, exp *cid.Cid) {
func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.ProtoNode, pth string, exp *cid.Cid) {
parts := path.SplitList(pth)
cur := root
for _, e := range parts {
nxt, err := cur.GetLinkedNode(context.Background(), ds, e)
nxt, err := cur.GetLinkedProtoNode(context.Background(), ds, e)
if err != nil {
t.Fatal(err)
}
@ -56,7 +56,7 @@ func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, pth strin
}
func TestInsertNode(t *testing.T) {
root := new(dag.Node)
root := new(dag.ProtoNode)
e := NewDagEditor(root, nil)
testInsert(t, e, "a", "anodefortesting", false, "")
@ -83,10 +83,10 @@ func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr
t.Fatal(err)
}
var c func() *dag.Node
var c func() *dag.ProtoNode
if create {
c = func() *dag.Node {
return &dag.Node{}
c = func() *dag.ProtoNode {
return &dag.ProtoNode{}
}
}

View File

@ -28,7 +28,7 @@ type Directory struct {
files map[string]*File
lock sync.Mutex
node *dag.Node
node *dag.ProtoNode
ctx context.Context
modTime time.Time
@ -36,7 +36,7 @@ type Directory struct {
name string
}
func NewDirectory(ctx context.Context, name string, node *dag.Node, parent childCloser, dserv dag.DAGService) *Directory {
func NewDirectory(ctx context.Context, name string, node *dag.ProtoNode, parent childCloser, dserv dag.DAGService) *Directory {
return &Directory{
dserv: dserv,
ctx: ctx,
@ -51,7 +51,7 @@ func NewDirectory(ctx context.Context, name string, node *dag.Node, parent child
// closeChild updates the child by the given name to the dag node 'nd'
// and changes its own dag node
func (d *Directory) closeChild(name string, nd *dag.Node, sync bool) error {
func (d *Directory) closeChild(name string, nd *dag.ProtoNode, sync bool) error {
mynd, err := d.closeChildUpdate(name, nd, sync)
if err != nil {
return err
@ -64,7 +64,7 @@ func (d *Directory) closeChild(name string, nd *dag.Node, sync bool) error {
}
// closeChildUpdate is the portion of closeChild that needs to be locked around
func (d *Directory) closeChildUpdate(name string, nd *dag.Node, sync bool) (*dag.Node, error) {
func (d *Directory) closeChildUpdate(name string, nd *dag.ProtoNode, sync bool) (*dag.ProtoNode, error) {
d.lock.Lock()
defer d.lock.Unlock()
@ -79,7 +79,7 @@ func (d *Directory) closeChildUpdate(name string, nd *dag.Node, sync bool) (*dag
return nil, nil
}
func (d *Directory) flushCurrentNode() (*dag.Node, error) {
func (d *Directory) flushCurrentNode() (*dag.ProtoNode, error) {
_, err := d.dserv.Add(d.node)
if err != nil {
return nil, err
@ -88,7 +88,7 @@ func (d *Directory) flushCurrentNode() (*dag.Node, error) {
return d.node.Copy(), nil
}
func (d *Directory) updateChild(name string, nd *dag.Node) error {
func (d *Directory) updateChild(name string, nd *dag.ProtoNode) error {
err := d.node.RemoveNodeLink(name)
if err != nil && err != dag.ErrNotFound {
return err
@ -120,7 +120,7 @@ func (d *Directory) childNode(name string) (FSNode, error) {
}
// cacheNode caches a node into d.childDirs or d.files and returns the FSNode.
func (d *Directory) cacheNode(name string, nd *dag.Node) (FSNode, error) {
func (d *Directory) cacheNode(name string, nd *dag.ProtoNode) (FSNode, error) {
i, err := ft.FromBytes(nd.Data())
if err != nil {
return nil, err
@ -161,14 +161,16 @@ func (d *Directory) Uncache(name string) {
// childFromDag searches through this directories dag node for a child link
// with the given name
func (d *Directory) childFromDag(name string) (*dag.Node, error) {
for _, lnk := range d.node.Links {
if lnk.Name == name {
return lnk.GetNode(d.ctx, d.dserv)
}
func (d *Directory) childFromDag(name string) (*dag.ProtoNode, error) {
pbn, err := d.node.GetLinkedProtoNode(d.ctx, d.dserv, name)
switch err {
case nil:
return pbn, nil
case dag.ErrLinkNotFound:
return nil, os.ErrNotExist
default:
return nil, err
}
return nil, os.ErrNotExist
}
// childUnsync returns the child under this directory by the given name
@ -206,7 +208,7 @@ func (d *Directory) ListNames() []string {
names[n] = struct{}{}
}
for _, l := range d.node.Links {
for _, l := range d.node.Links() {
names[l.Name] = struct{}{}
}
@ -224,7 +226,7 @@ func (d *Directory) List() ([]NodeListing, error) {
defer d.lock.Unlock()
var out []NodeListing
for _, l := range d.node.Links {
for _, l := range d.node.Links() {
child := NodeListing{}
child.Name = l.Name
@ -270,7 +272,7 @@ func (d *Directory) Mkdir(name string) (*Directory, error) {
}
}
ndir := new(dag.Node)
ndir := new(dag.ProtoNode)
ndir.SetData(ft.FolderPBData())
_, err = d.dserv.Add(ndir)
@ -321,7 +323,7 @@ func (d *Directory) Flush() error {
}
// AddChild adds the node 'nd' under this directory giving it the name 'name'
func (d *Directory) AddChild(name string, nd *dag.Node) error {
func (d *Directory) AddChild(name string, nd *dag.ProtoNode) error {
d.lock.Lock()
defer d.lock.Unlock()
@ -382,7 +384,7 @@ func (d *Directory) Path() string {
return out
}
func (d *Directory) GetNode() (*dag.Node, error) {
func (d *Directory) GetNode() (*dag.ProtoNode, error) {
d.lock.Lock()
defer d.lock.Unlock()

View File

@ -19,12 +19,12 @@ type File struct {
desclock sync.RWMutex
dserv dag.DAGService
node *dag.Node
node *dag.ProtoNode
nodelk sync.Mutex
}
// NewFile returns a NewFile object with the given parameters
func NewFile(name string, node *dag.Node, parent childCloser, dserv dag.DAGService) (*File, error) {
func NewFile(name string, node *dag.ProtoNode, parent childCloser, dserv dag.DAGService) (*File, error) {
return &File{
dserv: dserv,
parent: parent,
@ -94,7 +94,7 @@ func (fi *File) Size() (int64, error) {
}
// GetNode returns the dag node associated with this file
func (fi *File) GetNode() (*dag.Node, error) {
func (fi *File) GetNode() (*dag.ProtoNode, error) {
fi.nodelk.Lock()
defer fi.nodelk.Unlock()
return fi.node, nil

View File

@ -30,7 +30,7 @@ import (
dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)
func emptyDirNode() *dag.Node {
func emptyDirNode() *dag.ProtoNode {
return dag.NodeWithData(ft.FolderPBData())
}
@ -41,12 +41,12 @@ func getDagserv(t *testing.T) dag.DAGService {
return dag.NewDAGService(blockserv)
}
func getRandFile(t *testing.T, ds dag.DAGService, size int64) *dag.Node {
func getRandFile(t *testing.T, ds dag.DAGService, size int64) *dag.ProtoNode {
r := io.LimitReader(u.NewTimeSeededRand(), size)
return fileNodeFromReader(t, ds, r)
}
func fileNodeFromReader(t *testing.T, ds dag.DAGService, r io.Reader) *dag.Node {
func fileNodeFromReader(t *testing.T, ds dag.DAGService, r io.Reader) *dag.ProtoNode {
nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r))
if err != nil {
t.Fatal(err)
@ -124,7 +124,7 @@ func compStrArrs(a, b []string) bool {
return true
}
func assertFileAtPath(ds dag.DAGService, root *Directory, exp *dag.Node, pth string) error {
func assertFileAtPath(ds dag.DAGService, root *Directory, exp *dag.ProtoNode, pth string) error {
parts := path.SplitList(pth)
cur := root
for i, d := range parts[:len(parts)-1] {
@ -173,7 +173,7 @@ func assertFileAtPath(ds dag.DAGService, root *Directory, exp *dag.Node, pth str
return nil
}
func catNode(ds dag.DAGService, nd *dag.Node) ([]byte, error) {
func catNode(ds dag.DAGService, nd *dag.ProtoNode) ([]byte, error) {
r, err := uio.NewDagReader(context.TODO(), nd, ds)
if err != nil {
return nil, err
@ -280,7 +280,7 @@ func TestDirectoryLoadFromDag(t *testing.T) {
t.Fatal(err)
}
fihash := nd.Multihash()
fihash := nd.Cid()
dir := emptyDirNode()
_, err = ds.Add(dir)
@ -288,19 +288,19 @@ func TestDirectoryLoadFromDag(t *testing.T) {
t.Fatal(err)
}
dirhash := dir.Multihash()
dirhash := dir.Cid()
top := emptyDirNode()
top.Links = []*dag.Link{
top.SetLinks([]*dag.Link{
&dag.Link{
Name: "a",
Hash: fihash,
Cid: fihash,
},
&dag.Link{
Name: "b",
Hash: dirhash,
Cid: dirhash,
},
}
})
err = rootdir.AddChild("foo", top)
if err != nil {

View File

@ -87,7 +87,7 @@ func lookupDir(r *Root, path string) (*Directory, error) {
}
// PutNode inserts 'nd' at 'path' in the given mfs
func PutNode(r *Root, path string, nd *dag.Node) error {
func PutNode(r *Root, path string, nd *dag.ProtoNode) error {
dirp, filename := gopath.Split(path)
if filename == "" {
return fmt.Errorf("cannot create file with empty name")

View File

@ -29,7 +29,7 @@ var log = logging.Logger("mfs")
var ErrIsDirectory = errors.New("error: is a directory")
type childCloser interface {
closeChild(string, *dag.Node, bool) error
closeChild(string, *dag.ProtoNode, bool) error
}
type NodeType int
@ -41,7 +41,7 @@ const (
// FSNode represents any node (directory, root, or file) in the mfs filesystem
type FSNode interface {
GetNode() (*dag.Node, error)
GetNode() (*dag.ProtoNode, error)
Flush() error
Type() NodeType
}
@ -49,7 +49,7 @@ type FSNode interface {
// Root represents the root of a filesystem tree
type Root struct {
// node is the merkledag root
node *dag.Node
node *dag.ProtoNode
// val represents the node. It can either be a File or a Directory
val FSNode
@ -64,7 +64,7 @@ type Root struct {
type PubFunc func(context.Context, *cid.Cid) error
// newRoot creates a new Root and starts up a republisher routine for it
func NewRoot(parent context.Context, ds dag.DAGService, node *dag.Node, pf PubFunc) (*Root, error) {
func NewRoot(parent context.Context, ds dag.DAGService, node *dag.ProtoNode, pf PubFunc) (*Root, error) {
var repub *Republisher
if pf != nil {
@ -118,7 +118,7 @@ func (kr *Root) Flush() error {
// closeChild implements the childCloser interface, and signals to the publisher that
// there are changes ready to be published
func (kr *Root) closeChild(name string, nd *dag.Node, sync bool) error {
func (kr *Root) closeChild(name string, nd *dag.ProtoNode, sync bool) error {
c, err := kr.dserv.Add(nd)
if err != nil {
return err

View File

@ -2,14 +2,13 @@
package path
import (
"context"
"errors"
"fmt"
"time"
"context"
mh "gx/ipfs/QmYDds3421prZgqKbLpEK7T9Aa2eVdQ7o3YarX1LVLdP2J/go-multihash"
merkledag "github.com/ipfs/go-ipfs/merkledag"
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
)
@ -23,11 +22,11 @@ var ErrNoComponents = errors.New(
// ErrNoLink is returned when a link is not found in a path
type ErrNoLink struct {
Name string
Node mh.Multihash
Node *cid.Cid
}
func (e ErrNoLink) Error() string {
return fmt.Sprintf("no link named %q under %s", e.Name, e.Node.B58String())
return fmt.Sprintf("no link named %q under %s", e.Name, e.Node.String())
}
// Resolver provides path resolution to IPFS
@ -62,7 +61,7 @@ func SplitAbsPath(fpath Path) (*cid.Cid, []string, error) {
// ResolvePath fetches the node for given path. It returns the last item
// returned by ResolvePathComponents.
func (s *Resolver) ResolvePath(ctx context.Context, fpath Path) (*merkledag.Node, error) {
func (s *Resolver) ResolvePath(ctx context.Context, fpath Path) (merkledag.Node, error) {
// validate path
if err := fpath.IsValid(); err != nil {
return nil, err
@ -78,7 +77,7 @@ func (s *Resolver) ResolvePath(ctx context.Context, fpath Path) (*merkledag.Node
// ResolvePathComponents fetches the nodes for each segment of the given path.
// It uses the first path component as a hash (key) of the first node, then
// resolves all other components walking the links, with ResolveLinks.
func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*merkledag.Node, error) {
func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]merkledag.Node, error) {
h, parts, err := SplitAbsPath(fpath)
if err != nil {
return nil, err
@ -100,28 +99,33 @@ func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*me
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names []string) ([]*merkledag.Node, error) {
func (s *Resolver) ResolveLinks(ctx context.Context, ndd merkledag.Node, names []string) ([]merkledag.Node, error) {
result := make([]*merkledag.Node, 0, len(names)+1)
result := make([]merkledag.Node, 0, len(names)+1)
result = append(result, ndd)
nd := ndd // dup arg workaround
// for each of the path components
for _, name := range names {
for len(names) > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Minute)
defer cancel()
nextnode, err := nd.GetLinkedNode(ctx, s.DAG, name)
lnk, rest, err := nd.Resolve(names)
if err == merkledag.ErrLinkNotFound {
n := nd.Multihash()
return result, ErrNoLink{Name: name, Node: n}
n := nd.Cid()
return result, ErrNoLink{Name: names[0], Node: n}
} else if err != nil {
return append(result, nextnode), err
return result, err
}
nextnode, err := s.DAG.Get(ctx, lnk.Cid)
if err != nil {
return result, err
}
nd = nextnode
names = rest
result = append(result, nextnode)
}
return result, nil

View File

@ -13,8 +13,8 @@ import (
util "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
func randNode() (*merkledag.Node, key.Key) {
node := new(merkledag.Node)
func randNode() (*merkledag.ProtoNode, key.Key) {
node := new(merkledag.ProtoNode)
node.SetData(make([]byte, 32))
util.NewTimeSeededRand().Read(node.Data())
k := node.Key()
@ -39,7 +39,7 @@ func TestRecurivePathResolution(t *testing.T) {
t.Fatal(err)
}
for _, n := range []*merkledag.Node{a, b, c} {
for _, n := range []merkledag.Node{a, b, c} {
_, err = dagService.Add(n)
if err != nil {
t.Fatal(err)
@ -60,7 +60,7 @@ func TestRecurivePathResolution(t *testing.T) {
t.Fatal(err)
}
key := node.Key()
key := node.Cid()
if key.String() != cKey.String() {
t.Fatal(fmt.Errorf(
"recursive path resolution failed for %s: %s != %s",

View File

@ -83,7 +83,7 @@ func StringToPinMode(s string) (PinMode, bool) {
type Pinner interface {
IsPinned(*cid.Cid) (string, bool, error)
IsPinnedWithType(*cid.Cid, PinMode) (string, bool, error)
Pin(context.Context, *mdag.Node, bool) error
Pin(context.Context, mdag.Node, bool) error
Unpin(context.Context, *cid.Cid, bool) error
// Check if a set of keys are pinned, more efficient than
@ -162,7 +162,7 @@ func NewPinner(dstore ds.Datastore, serv, internal mdag.DAGService) Pinner {
}
// Pin the given node, optionally recursive
func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error {
func (p *pinner) Pin(ctx context.Context, node mdag.Node, recurse bool) error {
p.lock.Lock()
defer p.lock.Unlock()
c := node.Cid()
@ -317,7 +317,7 @@ func (p *pinner) CheckIfPinned(cids ...*cid.Cid) ([]Pinned, error) {
return err
}
for _, lnk := range links {
c := cid.NewCidV0(lnk.Hash)
c := lnk.Cid
if toCheck.Has(c) {
pinned = append(pinned,
@ -403,12 +403,17 @@ func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error)
return nil, fmt.Errorf("cannot find pinning root object: %v", err)
}
rootpb, ok := root.(*mdag.ProtoNode)
if !ok {
return nil, mdag.ErrNotProtobuf
}
internalset := cid.NewSet()
internalset.Add(rootCid)
recordInternal := internalset.Add
{ // load recursive set
recurseKeys, err := loadSet(ctx, internal, root, linkRecursive, recordInternal)
recurseKeys, err := loadSet(ctx, internal, rootpb, linkRecursive, recordInternal)
if err != nil {
return nil, fmt.Errorf("cannot load recursive pins: %v", err)
}
@ -416,7 +421,7 @@ func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error)
}
{ // load direct set
directKeys, err := loadSet(ctx, internal, root, linkDirect, recordInternal)
directKeys, err := loadSet(ctx, internal, rootpb, linkDirect, recordInternal)
if err != nil {
return nil, fmt.Errorf("cannot load direct pins: %v", err)
}
@ -453,7 +458,7 @@ func (p *pinner) Flush() error {
internalset := cid.NewSet()
recordInternal := internalset.Add
root := &mdag.Node{}
root := &mdag.ProtoNode{}
{
n, err := storeSet(ctx, p.internal, p.directPin.Keys(), recordInternal)
if err != nil {
@ -475,7 +480,7 @@ func (p *pinner) Flush() error {
}
// add the empty node, its referenced by the pin sets but never created
_, err := p.internal.Add(new(mdag.Node))
_, err := p.internal.Add(new(mdag.ProtoNode))
if err != nil {
return err
}
@ -522,7 +527,7 @@ func hasChild(ds mdag.LinkService, root *cid.Cid, child key.Key) (bool, error) {
return false, err
}
for _, lnk := range links {
c := cid.NewCidV0(lnk.Hash)
c := lnk.Cid
if key.Key(c.Hash()) == child {
return true, nil
}

View File

@ -16,8 +16,8 @@ import (
dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)
func randNode() (*mdag.Node, *cid.Cid) {
nd := new(mdag.Node)
func randNode() (*mdag.ProtoNode, *cid.Cid) {
nd := new(mdag.ProtoNode)
nd.SetData(make([]byte, 32))
util.NewTimeSeededRand().Read(nd.Data())
k := nd.Cid()

View File

@ -55,25 +55,27 @@ func (s sortByHash) Len() int {
}
func (s sortByHash) Less(a, b int) bool {
return bytes.Compare(s.links[a].Hash, s.links[b].Hash) == -1
return bytes.Compare(s.links[a].Cid.Bytes(), s.links[b].Cid.Bytes()) == -1
}
func (s sortByHash) Swap(a, b int) {
s.links[a], s.links[b] = s.links[b], s.links[a]
}
func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint64, iter itemIterator, internalKeys keyObserver) (*merkledag.Node, error) {
func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint64, iter itemIterator, internalKeys keyObserver) (*merkledag.ProtoNode, error) {
seed, err := randomSeed()
if err != nil {
return nil, err
}
n := &merkledag.Node{Links: make([]*merkledag.Link, 0, defaultFanout+maxItems)}
links := make([]*merkledag.Link, 0, defaultFanout+maxItems)
for i := 0; i < defaultFanout; i++ {
n.Links = append(n.Links, &merkledag.Link{Hash: emptyKey.Hash()})
links = append(links, &merkledag.Link{Cid: emptyKey})
}
// add emptyKey to our set of internal pinset objects
n := &merkledag.ProtoNode{}
n.SetLinks(links)
internalKeys(emptyKey)
hdr := &pb.Set{
@ -87,17 +89,22 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint
if estimatedLen < maxItems {
// it'll probably fit
links := n.Links()
for i := 0; i < maxItems; i++ {
k, ok := iter()
if !ok {
// all done
break
}
n.Links = append(n.Links, &merkledag.Link{Hash: k.Hash()})
links = append(links, &merkledag.Link{Cid: k})
}
n.SetLinks(links)
// sort by hash, also swap item Data
s := sortByHash{
links: n.Links[defaultFanout:],
links: n.Links()[defaultFanout:],
}
sort.Stable(s)
}
@ -152,15 +159,15 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint
internalKeys(childKey)
// overwrite the 'empty key' in the existing links array
n.Links[h] = &merkledag.Link{
Hash: childKey.Hash(),
n.Links()[h] = &merkledag.Link{
Cid: childKey,
Size: size,
}
}
return n, nil
}
func readHdr(n *merkledag.Node) (*pb.Set, error) {
func readHdr(n *merkledag.ProtoNode) (*pb.Set, error) {
hdrLenRaw, consumed := binary.Uvarint(n.Data())
if consumed <= 0 {
return nil, errors.New("invalid Set header length")
@ -180,13 +187,13 @@ func readHdr(n *merkledag.Node) (*pb.Set, error) {
if v := hdr.GetVersion(); v != 1 {
return nil, fmt.Errorf("unsupported Set version: %d", v)
}
if uint64(hdr.GetFanout()) > uint64(len(n.Links)) {
if uint64(hdr.GetFanout()) > uint64(len(n.Links())) {
return nil, errors.New("impossibly large Fanout")
}
return &hdr, nil
}
func writeHdr(n *merkledag.Node, hdr *pb.Set) error {
func writeHdr(n *merkledag.ProtoNode, hdr *pb.Set) error {
hdrData, err := proto.Marshal(hdr)
if err != nil {
return err
@ -207,20 +214,20 @@ func writeHdr(n *merkledag.Node, hdr *pb.Set) error {
type walkerFunc func(idx int, link *merkledag.Link) error
func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node, fn walkerFunc, children keyObserver) error {
func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.ProtoNode, fn walkerFunc, children keyObserver) error {
hdr, err := readHdr(n)
if err != nil {
return err
}
// readHdr guarantees fanout is a safe value
fanout := hdr.GetFanout()
for i, l := range n.Links[fanout:] {
for i, l := range n.Links()[fanout:] {
if err := fn(i, l); err != nil {
return err
}
}
for _, l := range n.Links[:fanout] {
c := cid.NewCidV0(l.Hash)
for _, l := range n.Links()[:fanout] {
c := l.Cid
children(c)
if c.Equals(emptyKey) {
continue
@ -229,20 +236,26 @@ func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node,
if err != nil {
return err
}
if err := walkItems(ctx, dag, subtree, fn, children); err != nil {
stpb, ok := subtree.(*merkledag.ProtoNode)
if !ok {
return merkledag.ErrNotProtobuf
}
if err := walkItems(ctx, dag, stpb, fn, children); err != nil {
return err
}
}
return nil
}
func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node, name string, internalKeys keyObserver) ([]*cid.Cid, error) {
func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.ProtoNode, name string, internalKeys keyObserver) ([]*cid.Cid, error) {
l, err := root.GetNodeLink(name)
if err != nil {
return nil, err
}
lnkc := cid.NewCidV0(l.Hash)
lnkc := l.Cid
internalKeys(lnkc)
n, err := l.GetNode(ctx, dag)
@ -250,12 +263,18 @@ func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node
return nil, err
}
pbn, ok := n.(*merkledag.ProtoNode)
if !ok {
return nil, merkledag.ErrNotProtobuf
}
var res []*cid.Cid
walk := func(idx int, link *merkledag.Link) error {
res = append(res, cid.NewCidV0(link.Hash))
res = append(res, link.Cid)
return nil
}
if err := walkItems(ctx, dag, n, walk, internalKeys); err != nil {
if err := walkItems(ctx, dag, pbn, walk, internalKeys); err != nil {
return nil, err
}
return res, nil
@ -273,7 +292,7 @@ func getCidListIterator(cids []*cid.Cid) itemIterator {
}
}
func storeSet(ctx context.Context, dag merkledag.DAGService, cids []*cid.Cid, internalKeys keyObserver) (*merkledag.Node, error) {
func storeSet(ctx context.Context, dag merkledag.DAGService, cids []*cid.Cid, internalKeys keyObserver) (*merkledag.ProtoNode, error) {
iter := getCidListIterator(cids)
n, err := storeItems(ctx, dag, uint64(len(cids)), iter, internalKeys)

View File

@ -38,7 +38,7 @@ func TestSet(t *testing.T) {
// weird wrapper node because loadSet expects us to pass an
// object pointing to multiple named sets
setroot := &dag.Node{}
setroot := &dag.ProtoNode{}
err = setroot.AddNodeLinkClean("foo", out)
if err != nil {
t.Fatal(err)

View File

@ -34,7 +34,7 @@ func marshalHeader(h *tar.Header) ([]byte, error) {
return buf.Bytes(), nil
}
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.ProtoNode, error) {
rall, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
@ -44,7 +44,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
tr := tar.NewReader(r)
root := new(dag.Node)
root := new(dag.ProtoNode)
root.SetData([]byte("ipfs/tar"))
e := dagutil.NewDagEditor(root, ds)
@ -58,7 +58,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
return nil, err
}
header := new(dag.Node)
header := new(dag.ProtoNode)
headerBytes, err := marshalHeader(h)
if err != nil {
@ -86,7 +86,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
}
path := escapePath(h.Name)
err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.Node { return new(dag.Node) })
err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.ProtoNode { return new(dag.ProtoNode) })
if err != nil {
return nil, err
}
@ -170,9 +170,14 @@ func (tr *tarReader) Read(b []byte) (int, error) {
return 0, err
}
tr.hdrBuf = bytes.NewReader(headerNd.Data())
hndpb, ok := headerNd.(*dag.ProtoNode)
if !ok {
return 0, dag.ErrNotProtobuf
}
dataNd, err := headerNd.GetLinkedNode(tr.ctx, tr.ds, "data")
tr.hdrBuf = bytes.NewReader(hndpb.Data())
dataNd, err := hndpb.GetLinkedProtoNode(tr.ctx, tr.ds, "data")
if err != nil && err != dag.ErrLinkNotFound {
return 0, err
}
@ -185,9 +190,9 @@ func (tr *tarReader) Read(b []byte) (int, error) {
}
tr.fileRead = &countReader{r: dr}
} else if len(headerNd.Links) > 0 {
} else if len(headerNd.Links()) > 0 {
tr.childRead = &tarReader{
links: headerNd.Links,
links: headerNd.Links(),
ds: tr.ds,
ctx: tr.ctx,
}
@ -196,12 +201,12 @@ func (tr *tarReader) Read(b []byte) (int, error) {
return tr.Read(b)
}
func ExportTar(ctx context.Context, root *dag.Node, ds dag.DAGService) (io.Reader, error) {
func ExportTar(ctx context.Context, root *dag.ProtoNode, ds dag.DAGService) (io.Reader, error) {
if string(root.Data()) != "ipfs/tar" {
return nil, errors.New("not an ipfs tarchive")
}
return &tarReader{
links: root.Links,
links: root.Links(),
ds: ds,
ctx: ctx,
}, nil

View File

@ -30,7 +30,7 @@ func (i *identityWriteCloser) Close() error {
}
// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
func DagArchive(ctx cxt.Context, nd *mdag.ProtoNode, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
_, filename := path.Split(name)

View File

@ -34,7 +34,7 @@ func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression i
}, nil
}
func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
func (w *Writer) writeDir(nd *mdag.ProtoNode, fpath string) error {
if err := writeDirHeader(w.TarW, fpath); err != nil {
return err
}
@ -45,8 +45,13 @@ func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
return err
}
npath := path.Join(fpath, nd.Links[i].Name)
if err := w.WriteNode(child, npath); err != nil {
childpb, ok := child.(*mdag.ProtoNode)
if !ok {
return mdag.ErrNotProtobuf
}
npath := path.Join(fpath, nd.Links()[i].Name)
if err := w.WriteNode(childpb, npath); err != nil {
return err
}
}
@ -54,7 +59,7 @@ func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
return nil
}
func (w *Writer) writeFile(nd *mdag.Node, pb *upb.Data, fpath string) error {
func (w *Writer) writeFile(nd *mdag.ProtoNode, pb *upb.Data, fpath string) error {
if err := writeFileHeader(w.TarW, fpath, pb.GetFilesize()); err != nil {
return err
}
@ -67,7 +72,7 @@ func (w *Writer) writeFile(nd *mdag.Node, pb *upb.Data, fpath string) error {
return nil
}
func (w *Writer) WriteNode(nd *mdag.Node, fpath string) error {
func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error {
pb := new(upb.Data)
if err := proto.Unmarshal(nd.Data(), pb); err != nil {
return err

View File

@ -224,6 +224,6 @@ func BytesForMetadata(m *Metadata) ([]byte, error) {
return proto.Marshal(pbd)
}
func EmptyDirNode() *dag.Node {
func EmptyDirNode() *dag.ProtoNode {
return dag.NodeWithData(FolderPBData())
}

View File

@ -24,7 +24,7 @@ type DagReader struct {
serv mdag.DAGService
// the node being read
node *mdag.Node
node *mdag.ProtoNode
// cached protobuf structure from node.Data
pbdata *ftpb.Data
@ -58,7 +58,7 @@ type ReadSeekCloser interface {
// NewDagReader creates a new reader object that reads the data represented by
// the given node, using the passed in DAGService for data retreival
func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*DagReader, error) {
func NewDagReader(ctx context.Context, n *mdag.ProtoNode, serv mdag.DAGService) (*DagReader, error) {
pb := new(ftpb.Data)
if err := proto.Unmarshal(n.Data(), pb); err != nil {
return nil, err
@ -71,14 +71,19 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*Dag
case ftpb.Data_File, ftpb.Data_Raw:
return NewDataFileReader(ctx, n, pb, serv), nil
case ftpb.Data_Metadata:
if len(n.Links) == 0 {
if len(n.Links()) == 0 {
return nil, errors.New("incorrectly formatted metadata object")
}
child, err := n.Links[0].GetNode(ctx, serv)
child, err := n.Links()[0].GetNode(ctx, serv)
if err != nil {
return nil, err
}
return NewDagReader(ctx, child, serv)
childpb, ok := child.(*mdag.ProtoNode)
if !ok {
return nil, mdag.ErrNotProtobuf
}
return NewDagReader(ctx, childpb, serv)
case ftpb.Data_Symlink:
return nil, ErrCantReadSymlinks
default:
@ -86,7 +91,7 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*Dag
}
}
func NewDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
func NewDataFileReader(ctx context.Context, n *mdag.ProtoNode, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
fctx, cancel := context.WithCancel(ctx)
promises := mdag.GetDAG(fctx, serv, n)
return &DagReader{
@ -114,8 +119,13 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
}
dr.linkPosition++
nxtpb, ok := nxt.(*mdag.ProtoNode)
if !ok {
return mdag.ErrNotProtobuf
}
pb := new(ftpb.Data)
err = proto.Unmarshal(nxt.Data(), pb)
err = proto.Unmarshal(nxtpb.Data(), pb)
if err != nil {
return fmt.Errorf("incorrectly formatted protobuf: %s", err)
}
@ -125,7 +135,7 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
// A directory should not exist within a file
return ft.ErrInvalidDirLocation
case ftpb.Data_File:
dr.buf = NewDataFileReader(dr.ctx, nxt, pb, dr.serv)
dr.buf = NewDataFileReader(dr.ctx, nxtpb, pb, dr.serv)
return nil
case ftpb.Data_Raw:
dr.buf = NewRSNCFromBytes(pb.GetData())

View File

@ -10,12 +10,12 @@ import (
type directoryBuilder struct {
dserv mdag.DAGService
dirnode *mdag.Node
dirnode *mdag.ProtoNode
}
// NewEmptyDirectory returns an empty merkledag Node with a folder Data chunk
func NewEmptyDirectory() *mdag.Node {
nd := new(mdag.Node)
func NewEmptyDirectory() *mdag.ProtoNode {
nd := new(mdag.ProtoNode)
nd.SetData(format.FolderPBData())
return nd
}
@ -35,10 +35,15 @@ func (d *directoryBuilder) AddChild(ctx context.Context, name string, c *cid.Cid
return err
}
return d.dirnode.AddNodeLinkClean(name, cnode)
cnpb, ok := cnode.(*mdag.ProtoNode)
if !ok {
return mdag.ErrNotProtobuf
}
return d.dirnode.AddNodeLinkClean(name, cnpb)
}
// GetNode returns the root of this directoryBuilder
func (d *directoryBuilder) GetNode() *mdag.Node {
func (d *directoryBuilder) GetNode() *mdag.ProtoNode {
return d.dirnode
}

View File

@ -10,7 +10,7 @@ import (
func TestEmptyNode(t *testing.T) {
n := NewEmptyDirectory()
if len(n.Links) != 0 {
if len(n.Links()) != 0 {
t.Fatal("empty node should have 0 links")
}
}
@ -27,7 +27,7 @@ func TestDirBuilder(t *testing.T) {
b.AddChild(ctx, "random", key)
dir := b.GetNode()
outn, err := dir.GetLinkedNode(ctx, dserv, "random")
outn, err := dir.GetLinkedProtoNode(ctx, dserv, "random")
if err != nil {
t.Fatal(err)
}

View File

@ -32,7 +32,7 @@ var log = logging.Logger("dagio")
// Dear god, please rename this to something more pleasant
type DagModifier struct {
dagserv mdag.DAGService
curNode *mdag.Node
curNode *mdag.ProtoNode
splitter chunk.SplitterGen
ctx context.Context
@ -45,7 +45,7 @@ type DagModifier struct {
read *uio.DagReader
}
func NewDagModifier(ctx context.Context, from *mdag.Node, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) {
func NewDagModifier(ctx context.Context, from *mdag.ProtoNode, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) {
return &DagModifier{
curNode: from.Copy(),
dagserv: serv,
@ -178,11 +178,16 @@ func (dm *DagModifier) Sync() error {
return err
}
dm.curNode = nd
pbnd, ok := nd.(*mdag.ProtoNode)
if !ok {
return mdag.ErrNotProtobuf
}
dm.curNode = pbnd
// need to write past end of current dag
if !done {
nd, err = dm.appendData(dm.curNode, dm.splitter(dm.wrBuf))
nd, err := dm.appendData(dm.curNode, dm.splitter(dm.wrBuf))
if err != nil {
return err
}
@ -204,14 +209,14 @@ func (dm *DagModifier) Sync() error {
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
// returns the new key of the passed in node and whether or not all the data in the reader
// has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
func (dm *DagModifier) modifyDag(node *mdag.ProtoNode, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
f, err := ft.FromBytes(node.Data())
if err != nil {
return nil, false, err
}
// If we've reached a leaf node.
if len(node.Links) == 0 {
if len(node.Links()) == 0 {
n, err := data.Read(f.Data[offset:])
if err != nil && err != io.EOF {
return nil, false, err
@ -223,7 +228,7 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
return nil, false, err
}
nd := new(mdag.Node)
nd := new(mdag.ProtoNode)
nd.SetData(b)
k, err := dm.dagserv.Add(nd)
if err != nil {
@ -244,17 +249,23 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
for i, bs := range f.GetBlocksizes() {
// We found the correct child to write into
if cur+bs > offset {
child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv)
child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv)
if err != nil {
return nil, false, err
}
k, sdone, err := dm.modifyDag(child, offset-cur, data)
childpb, ok := child.(*mdag.ProtoNode)
if !ok {
return nil, false, mdag.ErrNotProtobuf
}
k, sdone, err := dm.modifyDag(childpb, offset-cur, data)
if err != nil {
return nil, false, err
}
offset += bs
node.Links[i].Hash = k.Hash()
node.Links()[i].Cid = k
// Recache serialized node
_, err = node.EncodeProtobuf(true)
@ -277,7 +288,7 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
}
// appendData appends the blocks from the given chan to the end of this dag
func (dm *DagModifier) appendData(node *mdag.Node, spl chunk.Splitter) (*mdag.Node, error) {
func (dm *DagModifier) appendData(node *mdag.ProtoNode, spl chunk.Splitter) (*mdag.ProtoNode, error) {
dbp := &help.DagBuilderParams{
Dagserv: dm.dagserv,
Maxlinks: help.DefaultLinksPerBlock,
@ -340,7 +351,7 @@ func (dm *DagModifier) CtxReadFull(ctx context.Context, b []byte) (int, error) {
}
// GetNode gets the modified DAG Node
func (dm *DagModifier) GetNode() (*mdag.Node, error) {
func (dm *DagModifier) GetNode() (*mdag.ProtoNode, error) {
err := dm.Sync()
if err != nil {
return nil, err
@ -425,8 +436,8 @@ func (dm *DagModifier) Truncate(size int64) error {
}
// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
if len(nd.Links) == 0 {
func dagTruncate(ctx context.Context, nd *mdag.ProtoNode, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) {
if len(nd.Links()) == 0 {
// TODO: this can likely be done without marshaling and remarshaling
pbn, err := ft.FromBytes(nd.Data())
if err != nil {
@ -439,22 +450,27 @@ func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGSer
var cur uint64
end := 0
var modified *mdag.Node
var modified *mdag.ProtoNode
ndata := new(ft.FSNode)
for i, lnk := range nd.Links {
for i, lnk := range nd.Links() {
child, err := lnk.GetNode(ctx, ds)
if err != nil {
return nil, err
}
childsize, err := ft.DataSize(child.Data())
childpb, ok := child.(*mdag.ProtoNode)
if !ok {
return nil, err
}
childsize, err := ft.DataSize(childpb.Data())
if err != nil {
return nil, err
}
// found the child we want to cut
if size < cur+childsize {
nchild, err := dagTruncate(ctx, child, size-cur, ds)
nchild, err := dagTruncate(ctx, childpb, size-cur, ds)
if err != nil {
return nil, err
}
@ -474,7 +490,7 @@ func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGSer
return nil, err
}
nd.Links = nd.Links[:end]
nd.SetLinks(nd.Links()[:end])
err = nd.AddNodeLinkClean("", modified)
if err != nil {
return nil, err

View File

@ -27,7 +27,7 @@ func GetDAGServ() mdag.DAGService {
return mdagmock.Mock()
}
func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) *mdag.Node {
func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) *mdag.ProtoNode {
in := bytes.NewReader(data)
node, err := imp.BuildTrickleDagFromReader(dserv, SizeSplitterGen(500)(in))
if err != nil {
@ -37,11 +37,11 @@ func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) *mdag.Node {
return node
}
func GetEmptyNode(t testing.TB, dserv mdag.DAGService) *mdag.Node {
func GetEmptyNode(t testing.TB, dserv mdag.DAGService) *mdag.ProtoNode {
return GetNode(t, dserv, []byte{})
}
func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, *mdag.Node) {
func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, *mdag.ProtoNode) {
in := io.LimitReader(u.NewTimeSeededRand(), size)
buf, err := ioutil.ReadAll(in)
if err != nil {
@ -64,7 +64,7 @@ func ArrComp(a, b []byte) error {
return nil
}
func PrintDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
func PrintDag(nd *mdag.ProtoNode, ds mdag.DAGService, indent int) {
pbd, err := ft.FromBytes(nd.Data())
if err != nil {
panic(err)
@ -74,17 +74,17 @@ func PrintDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
fmt.Print(" ")
}
fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
if len(nd.Links) > 0 {
if len(nd.Links()) > 0 {
fmt.Println()
}
for _, lnk := range nd.Links {
for _, lnk := range nd.Links() {
child, err := lnk.GetNode(context.Background(), ds)
if err != nil {
panic(err)
}
PrintDag(child, ds, indent+1)
PrintDag(child.(*mdag.ProtoNode), ds, indent+1)
}
if len(nd.Links) > 0 {
if len(nd.Links()) > 0 {
for i := 0; i < indent; i++ {
fmt.Print(" ")
}