Mirror of https://github.com/ipfs/kubo.git, synced 2025-07-03 13:00:37 +08:00
Merge pull request #3757 from ipfs/fix/get-raw-leaves
make raw leaves work with 'ipfs get'
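The change can be exercised end to end much like the sharness test added below; the following is a rough session sketch (the sample file name and a working local ipfs repo are assumptions, the flags are the ones used in the test):

    echo "some data" > data
    HASH=$(ipfs add --raw-leaves -q data)   # store the file content as a raw leaf block
    ipfs get "$HASH"                        # did not work for raw leaf blocks before this change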
@@ -70,12 +70,8 @@ may also specify the level of compression by specifying '-l=<1-9>'.
 		return
 	}
 
-	pbnd, ok := dn.(*dag.ProtoNode)
-	if !ok {
-		res.SetError(err, cmds.ErrNormal)
-		return
-	}
-
+	switch dn := dn.(type) {
+	case *dag.ProtoNode:
 	size, err := dn.Size()
 	if err != nil {
 		res.SetError(err, cmds.ErrNormal)
@@ -83,9 +79,15 @@ may also specify the level of compression by specifying '-l=<1-9>'.
 	}
 
 	res.SetLength(size)
+	case *dag.RawNode:
+		res.SetLength(uint64(len(dn.RawData())))
+	default:
+		res.SetError(fmt.Errorf("'ipfs get' only supports unixfs nodes"), cmds.ErrNormal)
+		return
+	}
 
 	archive, _, _ := req.Option("archive").Bool()
-	reader, err := uarchive.DagArchive(ctx, pbnd, p.String(), node.DAG, archive, cmplvl)
+	reader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)
 	if err != nil {
 		res.SetError(err, cmds.ErrNormal)
 		return
@@ -64,6 +64,20 @@ test_get_cmd() {
    rm "$HASH"
  '
 
+  test_expect_success "ipfs get works with raw leaves" '
+    HASH2=$(ipfs add --raw-leaves -q data) &&
+    ipfs get "$HASH2" >actual2
+  '
+
+  test_expect_success "ipfs get output looks good" '
+    printf "%s\n\n" "Saving file(s) to $HASH2" >expected2 &&
+    test_cmp expected2 actual2
+  '
+
+  test_expect_success "ipfs get file output looks good" '
+    test_cmp "$HASH2" data
+  '
+
   test_ipfs_get_flag ".tar" "-xf" -a
 
   test_ipfs_get_flag ".tar.gz" "-zxf" -a -C
@@ -3,14 +3,15 @@ package archive
 import (
 	"bufio"
 	"compress/gzip"
+	"context"
 	"io"
 	"path"
 
-	cxt "context"
-
 	mdag "github.com/ipfs/go-ipfs/merkledag"
 	tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
+
+	node "gx/ipfs/QmYDscK7dmdo2GZ9aumS8s5auUUAH5mR1jvj5pYhWusfK7/go-ipld-node"
 )
 
 // DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
@@ -30,7 +31,7 @@ func (i *identityWriteCloser) Close() error {
 }
 
 // DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
-func DagArchive(ctx cxt.Context, nd *mdag.ProtoNode, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
+func DagArchive(ctx context.Context, nd node.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
 
 	_, filename := path.Split(name)
 
@@ -2,17 +2,19 @@ package tar
 
 import (
 	"archive/tar"
+	"context"
+	"fmt"
 	"io"
 	"path"
 	"time"
 
-	cxt "context"
-	proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
-
 	mdag "github.com/ipfs/go-ipfs/merkledag"
 	ft "github.com/ipfs/go-ipfs/unixfs"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
 	upb "github.com/ipfs/go-ipfs/unixfs/pb"
+
+	node "gx/ipfs/QmYDscK7dmdo2GZ9aumS8s5auUUAH5mR1jvj5pYhWusfK7/go-ipld-node"
+	proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
 )
 
 // Writer is a utility structure that helps to write
@@ -22,11 +24,11 @@ type Writer struct {
 	Dag  mdag.DAGService
 	TarW *tar.Writer
 
-	ctx cxt.Context
+	ctx context.Context
 }
 
 // NewWriter wraps given io.Writer.
-func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression int, w io.Writer) (*Writer, error) {
+func NewWriter(ctx context.Context, dag mdag.DAGService, archive bool, compression int, w io.Writer) (*Writer, error) {
 	return &Writer{
 		Dag:  dag,
 		TarW: tar.NewWriter(w),
@@ -45,13 +47,8 @@ func (w *Writer) writeDir(nd *mdag.ProtoNode, fpath string) error {
 			return err
 		}
 
-		childpb, ok := child.(*mdag.ProtoNode)
-		if !ok {
-			return mdag.ErrNotProtobuf
-		}
-
 		npath := path.Join(fpath, nd.Links()[i].Name)
-		if err := w.WriteNode(childpb, npath); err != nil {
+		if err := w.WriteNode(child, npath); err != nil {
 			return err
 		}
 	}
@@ -72,7 +69,9 @@ func (w *Writer) writeFile(nd *mdag.ProtoNode, pb *upb.Data, fpath string) error
 	return nil
 }
 
-func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error {
+func (w *Writer) WriteNode(nd node.Node, fpath string) error {
+	switch nd := nd.(type) {
+	case *mdag.ProtoNode:
 	pb := new(upb.Data)
 	if err := proto.Unmarshal(nd.Data(), pb); err != nil {
 		return err
@@ -92,6 +91,19 @@ func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error {
 	default:
 		return ft.ErrUnrecognizedType
 	}
+	case *mdag.RawNode:
+		if err := writeFileHeader(w.TarW, fpath, uint64(len(nd.RawData()))); err != nil {
+			return err
+		}
+
+		if _, err := w.TarW.Write(nd.RawData()); err != nil {
+			return err
+		}
+		w.TarW.Flush()
+		return nil
+	default:
+		return fmt.Errorf("nodes of type %T are not supported in unixfs", nd)
+	}
 }
 
 func (w *Writer) Close() error {