kubo/unixfs/archive/archive.go
Jeromy (0e312f5caf): initial vendoring of libp2p outside of the repo with gx
License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
2016-01-30 09:34:06 -08:00


package archive

import (
	"bufio"
	"compress/gzip"
	"io"
	"path"

	cxt "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"

	mdag "github.com/ipfs/go-ipfs/merkledag"
	tar "github.com/ipfs/go-ipfs/unixfs/archive/tar"
	uio "github.com/ipfs/go-ipfs/unixfs/io"
)

// DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
// TODO: does this need to be configurable?
var DefaultBufSize = 1048576

type identityWriteCloser struct {
	w io.Writer
}

func (i *identityWriteCloser) Write(p []byte) (int, error) {
	return i.w.Write(p)
}

func (i *identityWriteCloser) Close() error {
	return nil
}

// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
	_, filename := path.Split(name)

	// need to connect a writer to a reader
	piper, pipew := io.Pipe()
	checkErrAndClosePipe := func(err error) bool {
		if err != nil {
			pipew.CloseWithError(err)
			return true
		}
		return false
	}

	// use a buffered writer to parallelize task
	bufw := bufio.NewWriterSize(pipew, DefaultBufSize)

	// compression determines whether to use gzip compression.
	maybeGzw, err := newMaybeGzWriter(bufw, compression)
	if checkErrAndClosePipe(err) {
		return nil, err
	}

	closeGzwAndPipe := func() {
		if err := maybeGzw.Close(); checkErrAndClosePipe(err) {
			return
		}
		if err := bufw.Flush(); checkErrAndClosePipe(err) {
			return
		}
		pipew.Close() // everything seems to be ok.
	}

	if !archive && compression != gzip.NoCompression {
		// no archive requested, but gzip compression is: the node is a plain
		// file, so stream its bytes straight through the gzip writer
		dagr, err := uio.NewDagReader(ctx, nd, dag)
		if checkErrAndClosePipe(err) {
			return nil, err
		}

		go func() {
			if _, err := dagr.WriteTo(maybeGzw); checkErrAndClosePipe(err) {
				return
			}
			closeGzwAndPipe() // everything seems to be ok
		}()
	} else {
		// two cases end up here: 1. an archive was requested, or 2. neither an
		// archive nor compression was requested; either way tar is used as the
		// transport format

		// construct the tar writer
		w, err := tar.NewWriter(ctx, dag, archive, compression, maybeGzw)
		if checkErrAndClosePipe(err) {
			return nil, err
		}

		go func() {
			// write all the nodes recursively
			if err := w.WriteNode(nd, filename); checkErrAndClosePipe(err) {
				return
			}
			w.Close()         // close tar writer
			closeGzwAndPipe() // everything seems to be ok
		}()
	}

	return piper, nil
}

func newMaybeGzWriter(w io.Writer, compression int) (io.WriteCloser, error) {
	if compression != gzip.NoCompression {
		return gzip.NewWriterLevel(w, compression)
	}
	return &identityWriteCloser{w}, nil
}
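
For orientation, below is a minimal caller-side sketch of how DagArchive might be driven. It is not part of archive.go: the writeGzippedTar helper, the output file, and the *mdag.Node / mdag.DAGService values are all assumptions, standing in for objects a real go-ipfs node would resolve and provide.

// Usage sketch (hypothetical helper, not part of archive.go).
package example

import (
	"compress/gzip"
	"io"
	"os"

	cxt "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"

	mdag "github.com/ipfs/go-ipfs/merkledag"
	archive "github.com/ipfs/go-ipfs/unixfs/archive"
)

// writeGzippedTar streams the DAG rooted at nd as a gzip-compressed tar
// archive into out. nd and dag stand in for values obtained from an IPFS node.
func writeGzippedTar(nd *mdag.Node, dag mdag.DAGService, out *os.File) error {
	r, err := archive.DagArchive(cxt.Background(), nd, "mydir", dag, true, gzip.BestCompression)
	if err != nil {
		return err
	}
	// Drain the read side of the pipe; the tar and gzip writers run in a
	// goroutine inside DagArchive and close the pipe when they finish.
	_, err = io.Copy(out, r)
	return err
}

Reading from the returned io.Reader is what makes progress: DagArchive only wires up the pipe and starts a writer goroutine, so the caller must consume the reader (as io.Copy does here) for the archive to be produced.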