1
0
mirror of https://github.com/ipfs/kubo.git synced 2025-07-02 03:28:25 +08:00

Merge pull request #3757 from ipfs/fix/get-raw-leaves

make raw leaves work with 'ipfs get'
This commit is contained in:
Jeromy Johnson
2017-03-07 15:40:18 -08:00
committed by GitHub
4 changed files with 72 additions and 43 deletions

View File

@@ -70,22 +70,24 @@ may also specify the level of compression by specifying '-l=<1-9>'.
return
}
pbnd, ok := dn.(*dag.ProtoNode)
if !ok {
res.SetError(err, cmds.ErrNormal)
switch dn := dn.(type) {
case *dag.ProtoNode:
size, err := dn.Size()
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
res.SetLength(size)
case *dag.RawNode:
res.SetLength(uint64(len(dn.RawData())))
default:
res.SetError(fmt.Errorf("'ipfs get' only supports unixfs nodes"), cmds.ErrNormal)
return
}
size, err := dn.Size()
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
res.SetLength(size)
archive, _, _ := req.Option("archive").Bool()
reader, err := uarchive.DagArchive(ctx, pbnd, p.String(), node.DAG, archive, cmplvl)
reader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return

View File

@@ -64,6 +64,20 @@ test_get_cmd() {
rm "$HASH"
'
test_expect_success "ipfs get works with raw leaves" '
HASH2=$(ipfs add --raw-leaves -q data) &&
ipfs get "$HASH2" >actual2
'
test_expect_success "ipfs get output looks good" '
printf "%s\n\n" "Saving file(s) to $HASH2" >expected2 &&
test_cmp expected2 actual2
'
test_expect_success "ipfs get file output looks good" '
test_cmp "$HASH2" data
'
test_ipfs_get_flag ".tar" "-xf" -a
test_ipfs_get_flag ".tar.gz" "-zxf" -a -C

View File

@@ -3,14 +3,15 @@ package archive
import (
"bufio"
"compress/gzip"
"context"
"io"
"path"
cxt "context"
mdag "github.com/ipfs/go-ipfs/merkledag"
tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
uio "github.com/ipfs/go-ipfs/unixfs/io"
node "gx/ipfs/QmYDscK7dmdo2GZ9aumS8s5auUUAH5mR1jvj5pYhWusfK7/go-ipld-node"
)
// DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
@@ -30,7 +31,7 @@ func (i *identityWriteCloser) Close() error {
}
// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
func DagArchive(ctx cxt.Context, nd *mdag.ProtoNode, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
func DagArchive(ctx context.Context, nd node.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
_, filename := path.Split(name)

View File

@@ -2,17 +2,19 @@ package tar
import (
"archive/tar"
"context"
"fmt"
"io"
"path"
"time"
cxt "context"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
mdag "github.com/ipfs/go-ipfs/merkledag"
ft "github.com/ipfs/go-ipfs/unixfs"
uio "github.com/ipfs/go-ipfs/unixfs/io"
upb "github.com/ipfs/go-ipfs/unixfs/pb"
node "gx/ipfs/QmYDscK7dmdo2GZ9aumS8s5auUUAH5mR1jvj5pYhWusfK7/go-ipld-node"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
)
// Writer is a utility structure that helps to write
@@ -22,11 +24,11 @@ type Writer struct {
Dag mdag.DAGService
TarW *tar.Writer
ctx cxt.Context
ctx context.Context
}
// NewWriter wraps given io.Writer.
func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression int, w io.Writer) (*Writer, error) {
func NewWriter(ctx context.Context, dag mdag.DAGService, archive bool, compression int, w io.Writer) (*Writer, error) {
return &Writer{
Dag: dag,
TarW: tar.NewWriter(w),
@@ -45,13 +47,8 @@ func (w *Writer) writeDir(nd *mdag.ProtoNode, fpath string) error {
return err
}
childpb, ok := child.(*mdag.ProtoNode)
if !ok {
return mdag.ErrNotProtobuf
}
npath := path.Join(fpath, nd.Links()[i].Name)
if err := w.WriteNode(childpb, npath); err != nil {
if err := w.WriteNode(child, npath); err != nil {
return err
}
}
@@ -72,25 +69,40 @@ func (w *Writer) writeFile(nd *mdag.ProtoNode, pb *upb.Data, fpath string) error
return nil
}
func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error {
pb := new(upb.Data)
if err := proto.Unmarshal(nd.Data(), pb); err != nil {
return err
}
func (w *Writer) WriteNode(nd node.Node, fpath string) error {
switch nd := nd.(type) {
case *mdag.ProtoNode:
pb := new(upb.Data)
if err := proto.Unmarshal(nd.Data(), pb); err != nil {
return err
}
switch pb.GetType() {
case upb.Data_Metadata:
fallthrough
case upb.Data_Directory:
return w.writeDir(nd, fpath)
case upb.Data_Raw:
fallthrough
case upb.Data_File:
return w.writeFile(nd, pb, fpath)
case upb.Data_Symlink:
return writeSymlinkHeader(w.TarW, string(pb.GetData()), fpath)
switch pb.GetType() {
case upb.Data_Metadata:
fallthrough
case upb.Data_Directory:
return w.writeDir(nd, fpath)
case upb.Data_Raw:
fallthrough
case upb.Data_File:
return w.writeFile(nd, pb, fpath)
case upb.Data_Symlink:
return writeSymlinkHeader(w.TarW, string(pb.GetData()), fpath)
default:
return ft.ErrUnrecognizedType
}
case *mdag.RawNode:
if err := writeFileHeader(w.TarW, fpath, uint64(len(nd.RawData()))); err != nil {
return err
}
if _, err := w.TarW.Write(nd.RawData()); err != nil {
return err
}
w.TarW.Flush()
return nil
default:
return ft.ErrUnrecognizedType
return fmt.Errorf("nodes of type %T are not supported in unixfs", nd)
}
}