Decompose DagArchive from unixfs tar
License: MIT
Signed-off-by: rht <rhtbot@gmail.com>
unixfs/archive/archive.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package archive

import (
	"bufio"
	"compress/gzip"
	"io"
	"path"

	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	mdag "github.com/ipfs/go-ipfs/merkledag"
	tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
	uio "github.com/ipfs/go-ipfs/unixfs/io"
)

// DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
// TODO: does this need to be configurable?
var DefaultBufSize = 1048576

// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {

	_, filename := path.Split(name)

	// need to connect a writer to a reader
	piper, pipew := io.Pipe()

	// use a buffered writer to parallelize task
	bufw := bufio.NewWriterSize(pipew, DefaultBufSize)

	// compression determines whether to use gzip compression.
	var maybeGzw io.Writer
	if compression != gzip.NoCompression {
		var err error
		maybeGzw, err = gzip.NewWriterLevel(bufw, compression)
		if err != nil {
			return nil, err
		}
	} else {
		maybeGzw = bufw
	}

	if !archive && compression != gzip.NoCompression {
		// the case when the node is a file
		dagr, err := uio.NewDagReader(ctx, nd, dag)
		if err != nil {
			pipew.CloseWithError(err)
			return nil, err
		}

		go func() {
			if _, err := dagr.WriteTo(maybeGzw); err != nil {
				pipew.CloseWithError(err)
				return
			}
			pipew.Close() // everything seems to be ok.
		}()
	} else {
		// the case for 1. archive, and 2. not archived and not compressed, in which tar is used anyway as a transport format

		// construct the tar writer
		w, err := tar.NewWriter(ctx, dag, archive, compression, maybeGzw)
		if err != nil {
			return nil, err
		}

		go func() {
			// write all the nodes recursively
			if err := w.WriteNode(nd, filename); err != nil {
				pipew.CloseWithError(err)
				return
			}
			if err := bufw.Flush(); err != nil {
				pipew.CloseWithError(err)
				return
			}
			w.Close()
			pipew.Close() // everything seems to be ok.
		}()
	}

	return piper, nil
}
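A minimal usage sketch (not part of the commit), assuming an existing DAGService dserv and root node root obtained elsewhere; the helper name writeArchive, the name "myfile", and the output path are illustrative only. With archive=true and gzip.NoCompression this exercises the tar branch above and streams a plain tarball to disk.

package example

import (
	"compress/gzip"
	"io"
	"os"

	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	mdag "github.com/ipfs/go-ipfs/merkledag"
	"github.com/ipfs/go-ipfs/unixfs/archive"
)

// writeArchive streams a tar of the given unixfs node to disk.
// dserv and root are assumed to come from an already-running node.
func writeArchive(ctx cxt.Context, root *mdag.Node, dserv mdag.DAGService) error {
	// archive=true, no gzip: DagArchive tars the node in a background
	// goroutine and hands back the read end of the pipe.
	r, err := archive.DagArchive(ctx, root, "myfile", dserv, true, gzip.NoCompression)
	if err != nil {
		return err
	}

	out, err := os.Create("myfile.tar")
	if err != nil {
		return err
	}
	defer out.Close()

	// io.Copy drives the pipe; it returns once the goroutine closes the
	// write end, or surfaces any error passed to CloseWithError.
	_, err = io.Copy(out, r)
	return err
}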
@@ -2,8 +2,6 @@ package tar
 
 import (
 	"archive/tar"
-	"bufio"
-	"compress/gzip"
 	"io"
 	"path"
 	"time"
@@ -17,73 +15,6 @@ import (
 	upb "github.com/ipfs/go-ipfs/unixfs/pb"
 )
 
-// DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
-// TODO: does this need to be configurable?
-var DefaultBufSize = 1048576
-
-// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
-func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
-
-	_, filename := path.Split(name)
-
-	// need to connect a writer to a reader
-	piper, pipew := io.Pipe()
-
-	// use a buffered writer to parallelize task
-	bufw := bufio.NewWriterSize(pipew, DefaultBufSize)
-
-	// compression determines whether to use gzip compression.
-	var maybeGzw io.Writer
-	if compression != gzip.NoCompression {
-		var err error
-		maybeGzw, err = gzip.NewWriterLevel(bufw, compression)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		maybeGzw = bufw
-	}
-
-	// construct the tar writer
-	w, err := NewWriter(ctx, dag, archive, compression, maybeGzw)
-	if err != nil {
-		return nil, err
-	}
-
-	// write all the nodes recursively
-	go func() {
-		if !archive && compression != gzip.NoCompression {
-			// the case when the node is a file
-			dagr, err := uio.NewDagReader(w.ctx, nd, w.Dag)
-			if err != nil {
-				pipew.CloseWithError(err)
-				return
-			}
-
-			if _, err := dagr.WriteTo(maybeGzw); err != nil {
-				pipew.CloseWithError(err)
-				return
-			}
-		} else {
-			// the case for 1. archive, and 2. not archived and not compressed, in which tar is used anyway as a transport format
-			if err := w.WriteNode(nd, filename); err != nil {
-				pipew.CloseWithError(err)
-				return
-			}
-		}
-
-		if err := bufw.Flush(); err != nil {
-			pipew.CloseWithError(err)
-			return
-		}
-
-		w.Close()
-		pipew.Close() // everything seems to be ok.
-	}()
-
-	return piper, nil
-}
-
 // Writer is a utility structure that helps to write
 // unixfs merkledag nodes as a tar archive format.
 // It wraps any io.Writer.
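For reference, a standard-library-only sketch of the pipe-and-goroutine pattern DagArchive builds on: io.Pipe does no buffering of its own, so the producer must run in its own goroutine, flush its bufio.Writer, and close the write end (using CloseWithError on failure) for the reading side to finish cleanly.

package main

import (
	"bufio"
	"fmt"
	"io"
)

func main() {
	piper, pipew := io.Pipe()
	bufw := bufio.NewWriterSize(pipew, 4096)

	// Producer runs concurrently: apart from bufw there is no buffer in
	// the pipe, so each write blocks until the consumer below reads it.
	go func() {
		if _, err := fmt.Fprintln(bufw, "hello from the producer"); err != nil {
			pipew.CloseWithError(err)
			return
		}
		if err := bufw.Flush(); err != nil {
			pipew.CloseWithError(err)
			return
		}
		pipew.Close() // signals EOF to the reader
	}()

	out, err := io.ReadAll(piper)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Print(string(out))
}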