Merge pull request #4873 from ipfs/fix/4871
make the tar writer handle sharded ipfs directories
@@ -137,6 +137,10 @@ possible, please use 'ipfs ls' instead.
 		switch t {
 		case unixfspb.Data_File:
 			break
+		case unixfspb.Data_HAMTShard:
+			// We need a streaming ls API for this.
+			res.SetError(fmt.Errorf("cannot list large directories yet"), cmdkit.ErrNormal)
+			return
 		case unixfspb.Data_Directory:
 			links := make([]LsLink, len(merkleNode.Links()))
 			output.Objects[hash].Links = links
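For context: the new Data_HAMTShard arm above keys off the unixfs type tag stored in the node's protobuf data. A minimal sketch of that check as a standalone helper, assuming the post-split module paths (the PR itself used go-ipfs' in-tree packages) and a hypothetical isShardedDir name:

package example

import (
	mdag "github.com/ipfs/go-merkledag"
	ft "github.com/ipfs/go-unixfs"
	unixfspb "github.com/ipfs/go-unixfs/pb"
)

// isShardedDir reports whether nd encodes a HAMT-sharded unixfs
// directory rather than a basic one. (Hypothetical helper; the ls
// command performs this switch inline.)
func isShardedDir(nd *mdag.ProtoNode) (bool, error) {
	// The unixfs type tag lives in the node's opaque Data field.
	fsn, err := ft.FSNodeFromBytes(nd.Data())
	if err != nil {
		return false, err
	}
	return fsn.Type() == unixfspb.Data_HAMTShard, nil
}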
@@ -4,7 +4,7 @@
 # MIT Licensed; see the LICENSE file in this repository.
 #
 
-test_description="Test global enable sharding flag"
+test_description="Test directory sharding"
 
 . lib/test-lib.sh
 
@@ -23,6 +23,10 @@ test_add_large_dir() {
 		echo "$exphash" > sharddir_exp &&
 		test_cmp sharddir_exp sharddir_out
 	'
+	test_expect_success "ipfs get on very large directory succeeds" '
+		ipfs get -o testdata-out "$exphash" &&
+		test_cmp testdata testdata-out
+	'
 }
 
 test_init_ipfs
@@ -39,23 +39,22 @@ func NewWriter(ctx context.Context, dag ipld.DAGService, archive bool, compressi
 }
 
 func (w *Writer) writeDir(nd *mdag.ProtoNode, fpath string) error {
+	dir, err := uio.NewDirectoryFromNode(w.Dag, nd)
+	if err != nil {
+		return err
+	}
 	if err := writeDirHeader(w.TarW, fpath); err != nil {
 		return err
 	}
 
-	for i, ng := range ipld.GetDAG(w.ctx, w.Dag, nd) {
-		child, err := ng.Get(w.ctx)
+	return dir.ForEachLink(w.ctx, func(l *ipld.Link) error {
+		child, err := w.Dag.Get(w.ctx, l.Cid)
 		if err != nil {
 			return err
 		}
-
-		npath := path.Join(fpath, nd.Links()[i].Name)
-		if err := w.WriteNode(child, npath); err != nil {
-			return err
-		}
-	}
-
-	return nil
+		npath := path.Join(fpath, l.Name)
+		return w.WriteNode(child, npath)
+	})
 }
 
 func (w *Writer) writeFile(nd *mdag.ProtoNode, pb *upb.Data, fpath string) error {
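This is the heart of the fix: instead of ranging over nd.Links() directly (which, on a HAMT shard, yields shard-internal links rather than directory entries), the writer goes through the unixfs Directory abstraction, which resolves shard internals and hands back logical entries for both encodings. A hedged sketch of the same pattern in a standalone walker, assuming post-split module paths and a hypothetical countEntries helper:

package example

import (
	"context"

	ipld "github.com/ipfs/go-ipld-format"
	uio "github.com/ipfs/go-unixfs/io"
)

// countEntries walks a unixfs directory node, basic or HAMT-sharded,
// and counts its logical entries.
func countEntries(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (int, error) {
	// NewDirectoryFromNode picks the right implementation based on
	// the node's unixfs type.
	dir, err := uio.NewDirectoryFromNode(dserv, nd)
	if err != nil {
		return 0, err
	}
	n := 0
	err = dir.ForEachLink(ctx, func(l *ipld.Link) error {
		n++ // l.Name and l.Cid describe one logical entry
		return nil
	})
	return n, err
}

Returning dir.ForEachLink(...) directly, as the new writeDir does, also propagates any error from the per-entry callback, replacing the old loop's manual error handling and trailing return nil.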
@@ -83,7 +82,7 @@ func (w *Writer) WriteNode(nd ipld.Node, fpath string) error {
 	switch pb.GetType() {
 	case upb.Data_Metadata:
 		fallthrough
-	case upb.Data_Directory:
+	case upb.Data_Directory, upb.Data_HAMTShard:
 		return w.writeDir(nd, fpath)
 	case upb.Data_Raw:
 		fallthrough
@@ -112,7 +112,7 @@ func (dr *PBDagReader) precalcNextBuf(ctx context.Context) error {
 	}
 
 	switch pb.GetType() {
-	case ftpb.Data_Directory:
+	case ftpb.Data_Directory, ftpb.Data_HAMTShard:
 		// A directory should not exist within a file
 		return ft.ErrInvalidDirLocation
 	case ftpb.Data_File:
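The reader gets the same treatment: while streaming a file's bytes, a child of either directory encoding means the DAG is malformed. A small sketch of that invariant, with validateChildType as a hypothetical helper (the real check lives inline in precalcNextBuf):

package example

import (
	"fmt"

	ft "github.com/ipfs/go-unixfs"
	ftpb "github.com/ipfs/go-unixfs/pb"
)

// validateChildType rejects directory-typed children encountered
// inside a file stream.
func validateChildType(t ftpb.Data_DataType) error {
	switch t {
	case ftpb.Data_Directory, ftpb.Data_HAMTShard:
		// A directory should not exist within a file.
		return ft.ErrInvalidDirLocation
	case ftpb.Data_File, ftpb.Data_Raw:
		return nil
	default:
		return fmt.Errorf("unexpected unixfs type %v inside a file", t)
	}
}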