mirror of
https://github.com/ipfs/kubo.git
synced 2025-08-06 19:44:01 +08:00

Also change existing 'Node' type to 'ProtoNode' and use that most everywhere for now. As we move forward with the integration we will try and use the Node interface in more places that we're currently using ProtoNode. License: MIT Signed-off-by: Jeromy <why@ipfs.io>
107 lines · 2.5 KiB · Go
package archive
|
|
|
|
import (
|
|
"bufio"
|
|
"compress/gzip"
|
|
"io"
|
|
"path"
|
|
|
|
cxt "context"
|
|
|
|
mdag "github.com/ipfs/go-ipfs/merkledag"
|
|
tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
|
|
uio "github.com/ipfs/go-ipfs/unixfs/io"
|
|
)
|
|
|
|
// DefaultBufSize is the size of the buffered writer used by DagArchive:
// 1 MiB for now, which is roughly four blocks.
// TODO: does this need to be configurable?
var DefaultBufSize = 1 << 20
|
|
|
|
type identityWriteCloser struct {
|
|
w io.Writer
|
|
}
|
|
|
|
func (i *identityWriteCloser) Write(p []byte) (int, error) {
|
|
return i.w.Write(p)
|
|
}
|
|
|
|
func (i *identityWriteCloser) Close() error {
|
|
return nil
|
|
}
|
|
|
|
// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
//
// It streams the DAG rooted at nd into the returned reader. When archive
// is true, or when no gzip compression is requested, the content is
// packed as a tar stream; compression is a gzip level (gzip.NoCompression
// disables gzip — see newMaybeGzWriter). The traversal runs in a
// goroutine, so reads from the returned reader drive the work; any error
// is delivered to the reader via the pipe's CloseWithError.
func DagArchive(ctx cxt.Context, nd *mdag.ProtoNode, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {

	// only the last path component is used as the entry name in the tar
	_, filename := path.Split(name)

	// need to connect a writer to a reader
	piper, pipew := io.Pipe()
	// checkErrAndClosePipe reports whether err is non-nil; if so it also
	// forwards err to the reading end so the consumer sees the failure.
	checkErrAndClosePipe := func(err error) bool {
		if err != nil {
			pipew.CloseWithError(err)
			return true
		}
		return false
	}

	// use a buffered writer to parallelize task
	bufw := bufio.NewWriterSize(pipew, DefaultBufSize)

	// compression determines whether to use gzip compression.
	maybeGzw, err := newMaybeGzWriter(bufw, compression)
	if checkErrAndClosePipe(err) {
		return nil, err
	}

	// closeGzwAndPipe finalizes the stream; the order matters: gzip
	// footer first, then the buffered bytes, then EOF on the pipe.
	closeGzwAndPipe := func() {
		if err := maybeGzw.Close(); checkErrAndClosePipe(err) {
			return
		}
		if err := bufw.Flush(); checkErrAndClosePipe(err) {
			return
		}
		pipew.Close() // everything seems to be ok.
	}

	if !archive && compression != gzip.NoCompression {
		// the case when the node is a file
		dagr, err := uio.NewDagReader(ctx, nd, dag)
		if checkErrAndClosePipe(err) {
			return nil, err
		}

		go func() {
			if _, err := dagr.WriteTo(maybeGzw); checkErrAndClosePipe(err) {
				return
			}
			closeGzwAndPipe() // everything seems to be ok
		}()
	} else {
		// the case for 1. archive, and 2. not archived and not compressed, in which tar is used anyway as a transport format

		// construct the tar writer
		w, err := tar.NewWriter(ctx, dag, archive, compression, maybeGzw)
		if checkErrAndClosePipe(err) {
			return nil, err
		}

		go func() {
			// write all the nodes recursively
			if err := w.WriteNode(nd, filename); checkErrAndClosePipe(err) {
				return
			}
			w.Close()         // close tar writer
			closeGzwAndPipe() // everything seems to be ok
		}()
	}

	return piper, nil
}
|
|
|
|
func newMaybeGzWriter(w io.Writer, compression int) (io.WriteCloser, error) {
|
|
if compression != gzip.NoCompression {
|
|
return gzip.NewWriterLevel(w, compression)
|
|
}
|
|
return &identityWriteCloser{w}, nil
|
|
}
|