
Refactor ipfs get

License: MIT
Signed-off-by: rht <rhtbot@gmail.com>
Author: rht <rhtbot@gmail.com>
Date:   2015-08-10 04:05:41 +07:00
Parent: 3dfe02aa95
Commit: dfa0351df9
4 changed files with 70 additions and 103 deletions


@@ -58,8 +58,7 @@ type ReadSeekCloser interface {
 // node, using the passed in DAGService for data retreival
 func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*DagReader, error) {
     pb := new(ftpb.Data)
-    err := proto.Unmarshal(n.Data, pb)
-    if err != nil {
+    if err := proto.Unmarshal(n.Data, pb); err != nil {
         return nil, err
     }
@@ -70,7 +69,7 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*Dag
     case ftpb.Data_Raw:
         fallthrough
     case ftpb.Data_File:
-        return newDataFileReader(ctx, n, pb, serv), nil
+        return NewDataFileReader(ctx, n, pb, serv), nil
     case ftpb.Data_Metadata:
         if len(n.Links) == 0 {
             return nil, errors.New("incorrectly formatted metadata object")
@@ -85,7 +84,7 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*Dag
     }
 }
 
-func newDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
+func NewDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
     fctx, cancel := context.WithCancel(ctx)
     promises := serv.GetDAG(fctx, n)
     return &DagReader{
@@ -124,7 +123,7 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
         // A directory should not exist within a file
         return ft.ErrInvalidDirLocation
     case ftpb.Data_File:
-        dr.buf = newDataFileReader(dr.ctx, nxt, pb, dr.serv)
+        dr.buf = NewDataFileReader(dr.ctx, nxt, pb, dr.serv)
         return nil
     case ftpb.Data_Raw:
         dr.buf = NewRSNCFromBytes(pb.GetData())
@@ -137,8 +136,8 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
 }
 
 // Size return the total length of the data from the DAG structured file.
-func (dr *DagReader) Size() int64 {
-    return int64(dr.pbdata.GetFilesize())
+func (dr *DagReader) Size() uint64 {
+    return dr.pbdata.GetFilesize()
 }
 
 // Read reads data from the DAG structured file
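
For orientation, here is a minimal caller sketch (not part of this commit) showing the reader API after the change. Only NewDagReader, Size, WriteTo, and the import paths are taken from the hunks above; the helper name `cat`, the package name, and the use of os.Stdout are illustrative.

package example

import (
    "fmt"
    "os"

    cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
    mdag "github.com/ipfs/go-ipfs/merkledag"
    uio "github.com/ipfs/go-ipfs/unixfs/io"
)

// cat streams a unixfs file node to stdout using the refactored reader API.
func cat(ctx cxt.Context, nd *mdag.Node, dag mdag.DAGService) error {
    dagr, err := uio.NewDagReader(ctx, nd, dag)
    if err != nil {
        return err
    }
    // Size now returns uint64 directly, with no int64 conversion.
    fmt.Fprintf(os.Stderr, "size: %d bytes\n", dagr.Size())
    // WriteTo is the streaming path this commit switches writeFile to.
    _, err = dagr.WriteTo(os.Stdout)
    return err
}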


@@ -4,7 +4,6 @@ import (
     "archive/tar"
     "bufio"
     "compress/gzip"
-    "fmt"
     "io"
     "path"
     "time"
@@ -13,6 +12,7 @@ import (
     cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
     mdag "github.com/ipfs/go-ipfs/merkledag"
     ft "github.com/ipfs/go-ipfs/unixfs"
+    uio "github.com/ipfs/go-ipfs/unixfs/io"
     upb "github.com/ipfs/go-ipfs/unixfs/pb"
 )
@@ -21,7 +21,8 @@ import (
 // TODO: does this need to be configurable?
 var DefaultBufSize = 1048576
 
-func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, compression int) (io.Reader, error) {
+// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
+func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
     _, filename := path.Split(name)
@@ -31,17 +32,44 @@ func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGServic
     // use a buffered writer to parallelize task
     bufw := bufio.NewWriterSize(pipew, DefaultBufSize)
 
+    // compression determines whether to use gzip compression.
+    var maybeGzw io.Writer
+    if compression != gzip.NoCompression {
+        var err error
+        maybeGzw, err = gzip.NewWriterLevel(bufw, compression)
+        if err != nil {
+            return nil, err
+        }
+    } else {
+        maybeGzw = bufw
+    }
+
     // construct the tar writer
-    w, err := NewWriter(bufw, dag, compression)
+    w, err := NewWriter(ctx, dag, archive, compression, maybeGzw)
     if err != nil {
         return nil, err
     }
 
     // write all the nodes recursively
     go func() {
-        if err := w.WriteNode(ctx, nd, filename); err != nil {
-            pipew.CloseWithError(err)
-            return
+        if !archive && compression != gzip.NoCompression {
+            // the case when the node is a file
+            dagr, err := uio.NewDagReader(w.ctx, nd, w.Dag)
+            if err != nil {
+                pipew.CloseWithError(err)
+                return
+            }
+            if _, err := dagr.WriteTo(maybeGzw); err != nil {
+                pipew.CloseWithError(err)
+                return
+            }
+        } else {
+            // the case for 1. archive, and 2. not archived and not compressed, in which tar is used anyway as a transport format
+            if err := w.WriteNode(nd, filename); err != nil {
+                pipew.CloseWithError(err)
+                return
+            }
         }
 
         if err := bufw.Flush(); err != nil {
@@ -49,6 +77,7 @@ func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGServic
             return
         }
 
         w.Close()
+        pipew.Close() // everything seems to be ok.
     }()
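
The new maybeGzw block decides once, up front, whether bytes flow through a gzip layer or straight into the buffered pipe; when archive is false and a compression level is set, the tar writer is bypassed entirely and the file bytes are gzipped raw. The wrap-or-passthrough pattern in isolation, as a stdlib-only sketch (not from the commit; all names here are illustrative):

package main

import (
    "bufio"
    "compress/gzip"
    "io"
    "os"
)

// wrapMaybeGzip mirrors the maybeGzw logic: wrap w in a gzip writer only when
// a real compression level is requested, otherwise pass w through unchanged.
func wrapMaybeGzip(w io.Writer, compression int) (io.Writer, error) {
    if compression != gzip.NoCompression {
        return gzip.NewWriterLevel(w, compression)
    }
    return w, nil
}

func main() {
    bufw := bufio.NewWriterSize(os.Stdout, 1048576) // same size as DefaultBufSize above
    out, err := wrapMaybeGzip(bufw, gzip.BestSpeed)
    if err != nil {
        panic(err)
    }
    io.WriteString(out, "hello\n")
    // Close the gzip layer (if any) before flushing the buffer beneath it.
    if gzw, ok := out.(*gzip.Writer); ok {
        gzw.Close()
    }
    bufw.Flush()
}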
@@ -61,39 +90,32 @@ func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGServic
 type Writer struct {
     Dag  mdag.DAGService
     TarW *tar.Writer
+
+    ctx cxt.Context
 }
 
-// NewWriter wraps given io.Writer.
-// compression determines whether to use gzip compression.
-func NewWriter(w io.Writer, dag mdag.DAGService, compression int) (*Writer, error) {
-    if compression != gzip.NoCompression {
-        var err error
-        w, err = gzip.NewWriterLevel(w, compression)
-        if err != nil {
-            return nil, err
-        }
-    }
-
+func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression int, w io.Writer) (*Writer, error) {
     return &Writer{
         Dag:  dag,
         TarW: tar.NewWriter(w),
+        ctx:  ctx,
     }, nil
 }
 
-func (w *Writer) WriteDir(ctx cxt.Context, nd *mdag.Node, fpath string) error {
+func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
     if err := writeDirHeader(w.TarW, fpath); err != nil {
         return err
     }
 
-    for i, ng := range w.Dag.GetDAG(ctx, nd) {
-        child, err := ng.Get(ctx)
+    for i, ng := range w.Dag.GetDAG(w.ctx, nd) {
+        child, err := ng.Get(w.ctx)
         if err != nil {
             return err
         }
 
         npath := path.Join(fpath, nd.Links[i].Name)
-        if err := w.WriteNode(ctx, child, npath); err != nil {
+        if err := w.WriteNode(child, npath); err != nil {
             return err
         }
     }
@@ -101,46 +123,33 @@ func (w *Writer) WriteDir(ctx cxt.Context, nd *mdag.Node, fpath string) error {
     return nil
 }
 
-func (w *Writer) WriteFile(ctx cxt.Context, nd *mdag.Node, fpath string) error {
-    pb := new(upb.Data)
-    if err := proto.Unmarshal(nd.Data, pb); err != nil {
-        return err
-    }
-
-    return w.writeFile(ctx, nd, pb, fpath)
-}
-
-func (w *Writer) writeFile(ctx cxt.Context, nd *mdag.Node, pb *upb.Data, fpath string) error {
+func (w *Writer) writeFile(nd *mdag.Node, pb *upb.Data, fpath string) error {
     if err := writeFileHeader(w.TarW, fpath, pb.GetFilesize()); err != nil {
         return err
     }
 
-    dagr, err := uio.NewDagReader(ctx, nd, w.Dag)
-    if err != nil {
-        return err
-    }
-
-    _, err = io.Copy(w.TarW, dagr)
-    if err != nil && err != io.EOF {
-        return err
-    }
-
-    return nil
+    dagr := uio.NewDataFileReader(w.ctx, nd, pb, w.Dag)
+    _, err := dagr.WriteTo(w.TarW)
+    return err
 }
 
-func (w *Writer) WriteNode(ctx cxt.Context, nd *mdag.Node, fpath string) error {
+func (w *Writer) WriteNode(nd *mdag.Node, fpath string) error {
     pb := new(upb.Data)
     if err := proto.Unmarshal(nd.Data, pb); err != nil {
         return err
     }
 
     switch pb.GetType() {
     case upb.Data_Metadata:
         fallthrough
     case upb.Data_Directory:
-        return w.WriteDir(ctx, nd, fpath)
+        return w.writeDir(nd, fpath)
     case upb.Data_Raw:
         fallthrough
     case upb.Data_File:
-        return w.writeFile(ctx, nd, pb, fpath)
+        return w.writeFile(nd, pb, fpath)
     default:
-        return fmt.Errorf("unixfs type not supported: %s", pb.GetType())
+        return ft.ErrUnrecognizedType
     }
 }
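
Taken together, callers now pass the extra archive flag to DagArchive and consume the resulting io.Reader. A hedged caller sketch: only the DagArchive signature comes from the hunk above; the package name, the helper fetchGzippedFile, and the file handling are illustrative assumptions.

package tar // assumed package name; the file's actual package is not shown in this view

import (
    "compress/gzip"
    "io"
    "os"

    cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
    mdag "github.com/ipfs/go-ipfs/merkledag"
)

// fetchGzippedFile asks DagArchive for a bare gzip stream (archive=false),
// the new path that skips the tar wrapper for single files.
func fetchGzippedFile(ctx cxt.Context, nd *mdag.Node, dag mdag.DAGService) error {
    r, err := DagArchive(ctx, nd, "myfile.txt", dag, false, gzip.BestCompression)
    if err != nil {
        return err
    }
    out, err := os.Create("myfile.txt.gz")
    if err != nil {
        return err
    }
    defer out.Close()
    _, err = io.Copy(out, r)
    return err
}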