diff --git a/core/commands/get.go b/core/commands/get.go
index 9093bc5da..4b49e4658 100644
--- a/core/commands/get.go
+++ b/core/commands/get.go
@@ -70,22 +70,24 @@ may also specify the level of compression by specifying '-l=<1-9>'.
 			return
 		}
 
-		pbnd, ok := dn.(*dag.ProtoNode)
-		if !ok {
-			res.SetError(err, cmds.ErrNormal)
+		switch dn := dn.(type) {
+		case *dag.ProtoNode:
+			size, err := dn.Size()
+			if err != nil {
+				res.SetError(err, cmds.ErrNormal)
+				return
+			}
+
+			res.SetLength(size)
+		case *dag.RawNode:
+			res.SetLength(uint64(len(dn.RawData())))
+		default:
+			res.SetError(fmt.Errorf("'ipfs get' only supports unixfs nodes"), cmds.ErrNormal)
 			return
 		}
 
-		size, err := dn.Size()
-		if err != nil {
-			res.SetError(err, cmds.ErrNormal)
-			return
-		}
-
-		res.SetLength(size)
-
 		archive, _, _ := req.Option("archive").Bool()
-		reader, err := uarchive.DagArchive(ctx, pbnd, p.String(), node.DAG, archive, cmplvl)
+		reader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)
 		if err != nil {
 			res.SetError(err, cmds.ErrNormal)
 			return
diff --git a/test/sharness/t0090-get.sh b/test/sharness/t0090-get.sh
index 193bd56d2..a79827957 100755
--- a/test/sharness/t0090-get.sh
+++ b/test/sharness/t0090-get.sh
@@ -64,6 +64,20 @@ test_get_cmd() {
     rm "$HASH"
   '
 
+  test_expect_success "ipfs get works with raw leaves" '
+    HASH2=$(ipfs add --raw-leaves -q data) &&
+    ipfs get "$HASH2" >actual2
+  '
+
+  test_expect_success "ipfs get output looks good" '
+    printf "%s\n\n" "Saving file(s) to $HASH2" >expected2 &&
+    test_cmp expected2 actual2
+  '
+
+  test_expect_success "ipfs get file output looks good" '
+    test_cmp "$HASH2" data
+  '
+
   test_ipfs_get_flag ".tar" "-xf" -a
 
   test_ipfs_get_flag ".tar.gz" "-zxf" -a -C
diff --git a/unixfs/archive/archive.go b/unixfs/archive/archive.go
index a94c9f7af..b39c71560 100644
--- a/unixfs/archive/archive.go
+++ b/unixfs/archive/archive.go
@@ -3,14 +3,15 @@ package archive
 import (
 	"bufio"
 	"compress/gzip"
+	"context"
 	"io"
 	"path"
 
-	cxt "context"
-
 	mdag "github.com/ipfs/go-ipfs/merkledag"
 	tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
+
+	node "gx/ipfs/QmYDscK7dmdo2GZ9aumS8s5auUUAH5mR1jvj5pYhWusfK7/go-ipld-node"
 )
 
 // DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
@@ -30,7 +31,7 @@ func (i *identityWriteCloser) Close() error {
 }
 
 // DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
-func DagArchive(ctx cxt.Context, nd *mdag.ProtoNode, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
+func DagArchive(ctx context.Context, nd node.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
 
 	_, filename := path.Split(name)
 
diff --git a/unixfs/archive/tar/writer.go b/unixfs/archive/tar/writer.go
index 17c43e717..26492d897 100644
--- a/unixfs/archive/tar/writer.go
+++ b/unixfs/archive/tar/writer.go
@@ -2,17 +2,19 @@ package tar
 
 import (
 	"archive/tar"
+	"context"
+	"fmt"
 	"io"
 	"path"
 	"time"
 
-	cxt "context"
-	proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
-
 	mdag "github.com/ipfs/go-ipfs/merkledag"
 	ft "github.com/ipfs/go-ipfs/unixfs"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
 	upb "github.com/ipfs/go-ipfs/unixfs/pb"
+
+	node "gx/ipfs/QmYDscK7dmdo2GZ9aumS8s5auUUAH5mR1jvj5pYhWusfK7/go-ipld-node"
+	proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
 )
 
 // Writer is a utility structure that helps to write
@@ -22,11 +24,11 @@ type Writer struct {
 	Dag  mdag.DAGService
 	TarW *tar.Writer
 
-	ctx cxt.Context
+	ctx context.Context
 }
 
 // NewWriter wraps given io.Writer.
-func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression int, w io.Writer) (*Writer, error) {
+func NewWriter(ctx context.Context, dag mdag.DAGService, archive bool, compression int, w io.Writer) (*Writer, error) {
 	return &Writer{
 		Dag:  dag,
 		TarW: tar.NewWriter(w),
@@ -45,13 +47,8 @@ func (w *Writer) writeDir(nd *mdag.ProtoNode, fpath string) error {
 			return err
 		}
 
-		childpb, ok := child.(*mdag.ProtoNode)
-		if !ok {
-			return mdag.ErrNotProtobuf
-		}
-
 		npath := path.Join(fpath, nd.Links()[i].Name)
-		if err := w.WriteNode(childpb, npath); err != nil {
+		if err := w.WriteNode(child, npath); err != nil {
 			return err
 		}
 	}
@@ -72,25 +69,40 @@ func (w *Writer) writeFile(nd *mdag.ProtoNode, pb *upb.Data, fpath string) error
 	return nil
 }
 
-func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error {
-	pb := new(upb.Data)
-	if err := proto.Unmarshal(nd.Data(), pb); err != nil {
-		return err
-	}
+func (w *Writer) WriteNode(nd node.Node, fpath string) error {
+	switch nd := nd.(type) {
+	case *mdag.ProtoNode:
+		pb := new(upb.Data)
+		if err := proto.Unmarshal(nd.Data(), pb); err != nil {
+			return err
+		}
 
-	switch pb.GetType() {
-	case upb.Data_Metadata:
-		fallthrough
-	case upb.Data_Directory:
-		return w.writeDir(nd, fpath)
-	case upb.Data_Raw:
-		fallthrough
-	case upb.Data_File:
-		return w.writeFile(nd, pb, fpath)
-	case upb.Data_Symlink:
-		return writeSymlinkHeader(w.TarW, string(pb.GetData()), fpath)
+		switch pb.GetType() {
+		case upb.Data_Metadata:
+			fallthrough
+		case upb.Data_Directory:
+			return w.writeDir(nd, fpath)
+		case upb.Data_Raw:
+			fallthrough
+		case upb.Data_File:
+			return w.writeFile(nd, pb, fpath)
+		case upb.Data_Symlink:
+			return writeSymlinkHeader(w.TarW, string(pb.GetData()), fpath)
+		default:
+			return ft.ErrUnrecognizedType
+		}
+	case *mdag.RawNode:
+		if err := writeFileHeader(w.TarW, fpath, uint64(len(nd.RawData()))); err != nil {
+			return err
+		}
+
+		if _, err := w.TarW.Write(nd.RawData()); err != nil {
+			return err
+		}
+		w.TarW.Flush()
+		return nil
 	default:
-		return ft.ErrUnrecognizedType
+		return fmt.Errorf("nodes of type %T are not supported in unixfs", nd)
 	}
 }
 