
core/commands/unixfs/ls: Don't recurse into chunked files

Folks operating at the Unix-filesystem level shouldn't care about that
level of Merkle-DAG detail.  Before this commit we had:

  $ ipfs unixfs ls /ipfs/QmSRCHG21Sbqm3EJG9aEBo4vS7Fqu86pAjqf99MyCdNxZ4/busybox
  /ipfs/QmSRCHG21Sbqm3EJG9aEBo4vS7Fqu86pAjqf99MyCdNxZ4/busybox:
  ... several lines of empty-string names ...

And with this commit we have:

  $ ipfs unixfs ls /ipfs/QmSRCHG21Sbqm3EJG9aEBo4vS7Fqu86pAjqf99MyCdNxZ4/busybox
  /ipfs/QmSRCHG21Sbqm3EJG9aEBo4vS7Fqu86pAjqf99MyCdNxZ4/busybox

I also reworked the argument-prefixing (object.Argument) in the output
marshaller to avoid redundancies like:

  $ ipfs unixfs ls /ipfs/QmSRCHG21Sbqm3EJG9aEBo4vS7Fqu86pAjqf99MyCdNxZ4/busybox
  /ipfs/QmSRCHG21Sbqm3EJG9aEBo4vS7Fqu86pAjqf99MyCdNxZ4/busybox:
  /ipfs/QmSRCHG21Sbqm3EJG9aEBo4vS7Fqu86pAjqf99MyCdNxZ4/busybox

As a side-effect of this rework, we no longer have the trailing blank
line that we used to have after the final directory listing.

The new ErrImplementation is like Python's NotImplementedError, and is
mostly a way to guard against external changes that would need
associated updates in this code.  For example, once we see something
that's neither a file nor a directory, we'll have to update the switch
statement to handle those objects.

License: MIT
Signed-off-by: W. Trevor King <wking@tremily.us>
W. Trevor King
2015-06-09 14:06:33 -07:00
parent 434871ba18
commit 663f37cb99
3 changed files with 76 additions and 31 deletions

View File

@@ -15,8 +15,9 @@ type ErrorType uint
 // ErrorTypes convey what category of error ocurred
 const (
 	ErrNormal ErrorType = iota // general errors
 	ErrClient                  // error was caused by the client, (e.g. invalid CLI usage)
+	ErrImplementation          // programmer error in the server
 	// TODO: add more types of errors for better error-specific handling
 )
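
The new error category is meant to be used the way the commit message describes: as a guard for cases the implementation does not handle yet. Below is a minimal, self-contained sketch of that pattern using stand-in types rather than the real cmds/unixfs APIs; the actual use is in the ls handler diff that follows.

package main

import "fmt"

// Stand-ins for the command error categories above (illustrative only).
type ErrorType uint

const (
	ErrNormal         ErrorType = iota // general errors
	ErrClient                          // error caused by the client
	ErrImplementation                  // programmer error in the server
)

// Stand-in for the unixfs node types the ls handler switches over.
// DataSymlink stands for any type the switch does not handle.
type DataType int

const (
	DataFile DataType = iota
	DataDirectory
	DataSymlink
)

// describe mirrors the shape of the handler's new switch: known types are
// handled, and anything unrecognized is reported as an implementation error
// so the switch gets updated when new unixfs types appear upstream.
func describe(t DataType) (string, ErrorType, error) {
	switch t {
	case DataFile:
		return "file", ErrNormal, nil
	case DataDirectory:
		return "directory", ErrNormal, nil
	default:
		return "", ErrImplementation, fmt.Errorf("unrecognized type: %d", t)
	}
}

func main() {
	for _, t := range []DataType{DataFile, DataDirectory, DataSymlink} {
		kind, errType, err := describe(t)
		if err != nil {
			fmt.Printf("error (category %d): %v\n", errType, err)
			continue
		}
		fmt.Println(kind)
	}
}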

View File

@@ -59,40 +59,66 @@ directories, the child size is the IPFS link size.
 		output := make([]*LsObject, len(paths))
 		for i, fpath := range paths {
-			dagnode, err := core.Resolve(req.Context().Context, node, path.Path(fpath))
+			ctx := req.Context().Context
+			merkleNode, err := core.Resolve(ctx, node, path.Path(fpath))
 			if err != nil {
 				res.SetError(err, cmds.ErrNormal)
 				return
 			}
-			output[i] = &LsObject{
-				Argument: fpath,
-				Links:    make([]LsLink, len(dagnode.Links)),
+
+			unixFSNode, err := unixfs.FromBytes(merkleNode.Data)
+			if err != nil {
+				res.SetError(err, cmds.ErrNormal)
+				return
 			}
-			for j, link := range dagnode.Links {
-				ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
-				defer cancel()
-				link.Node, err = link.GetNode(ctx, node.DAG)
+
+			output[i] = &LsObject{}
+
+			t := unixFSNode.GetType()
+			switch t {
+			default:
+				res.SetError(fmt.Errorf("unrecognized type: %s", t), cmds.ErrImplementation)
+				return
+			case unixfspb.Data_File:
+				key, err := merkleNode.Key()
 				if err != nil {
 					res.SetError(err, cmds.ErrNormal)
 					return
 				}
-				d, err := unixfs.FromBytes(link.Node.Data)
-				if err != nil {
-					res.SetError(err, cmds.ErrNormal)
-					return
+				output[i].Links = []LsLink{LsLink{
+					Name: fpath,
+					Hash: key.String(),
+					Type: t,
+					Size: unixFSNode.GetFilesize(),
+				}}
+			case unixfspb.Data_Directory:
+				output[i].Argument = fpath
+				output[i].Links = make([]LsLink, len(merkleNode.Links))
+				for j, link := range merkleNode.Links {
+					getCtx, cancel := context.WithTimeout(context.TODO(), time.Minute)
+					defer cancel()
+					link.Node, err = link.GetNode(getCtx, node.DAG)
+					if err != nil {
+						res.SetError(err, cmds.ErrNormal)
+						return
+					}
+					d, err := unixfs.FromBytes(link.Node.Data)
+					if err != nil {
+						res.SetError(err, cmds.ErrNormal)
+						return
+					}
+					lsLink := LsLink{
+						Name: link.Name,
+						Hash: link.Hash.B58String(),
+						Type: d.GetType(),
+					}
+					if lsLink.Type == unixfspb.Data_File {
+						lsLink.Size = d.GetFilesize()
+					} else {
+						lsLink.Size = link.Size
+					}
+					output[i].Links[j] = lsLink
 				}
-				lsLink := LsLink{
-					Name: link.Name,
-					Hash: link.Hash.B58String(),
-					Type: d.GetType(),
-				}
-				if lsLink.Type == unixfspb.Data_File {
-					lsLink.Size = d.GetFilesize()
-				} else {
-					lsLink.Size = link.Size
-				}
-				output[i].Links[j] = lsLink
 			}
 		}
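
To make the file/directory split above concrete: a file argument now produces an LsObject whose Argument is left empty and whose single link describes the file itself, sized from the unixfs metadata, while a directory argument keeps the old shape of one link per child. A rough illustration with simplified stand-in structs and placeholder hashes (not the package's actual declarations):

package main

import "fmt"

// Simplified stand-ins for the output types in this file; the real LsLink
// carries a unixfs protobuf type rather than a string.
type LsLink struct {
	Name, Hash, Type string
	Size             uint64
}

type LsObject struct {
	Argument string
	Links    []LsLink
}

func main() {
	// File argument: Argument stays empty, one link for the file itself.
	fileObj := &LsObject{
		Links: []LsLink{{
			Name: "/ipfs/<hash>/busybox", // placeholder path
			Hash: "<file multihash>",     // placeholder
			Type: "File",
			Size: 1000000, // placeholder unixfs filesize
		}},
	}

	// Directory argument: Argument becomes the "path:" header and there is
	// one link per child entry.
	dirObj := &LsObject{
		Argument: "/ipfs/<hash>", // placeholder path
		Links: []LsLink{
			{Name: "128", Hash: "<child multihash>", Type: "File", Size: 128},
			{Name: "a", Hash: "<child multihash>", Type: "Directory"},
		},
	}

	fmt.Printf("file: %+v\ndir:  %+v\n", *fileObj, *dirObj)
}

The marshaller hunk below keys off exactly this difference: an empty Argument means "print the links bare", a non-empty Argument means "print a header first".
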
@@ -104,16 +130,23 @@ directories, the child size is the IPFS link size.
 		output := res.Output().(*LsOutput)
 		buf := new(bytes.Buffer)
 		w := tabwriter.NewWriter(buf, 1, 2, 1, ' ', 0)
-		for _, object := range output.Objects {
-			if len(output.Objects) > 1 {
+		lastObjectDirHeader := false
+		for i, object := range output.Objects {
+			if len(output.Objects) > 1 && object.Argument != "" {
+				if i > 0 {
+					fmt.Fprintln(w)
+				}
 				fmt.Fprintf(w, "%s:\n", object.Argument)
+				lastObjectDirHeader = true
+			} else {
+				if lastObjectDirHeader {
+					fmt.Fprintln(w)
+				}
+				lastObjectDirHeader = false
 			}
 			for _, link := range object.Links {
 				fmt.Fprintf(w, "%s\n", link.Name)
 			}
-			if len(output.Objects) > 1 {
-				fmt.Fprintln(w)
-			}
 		}
 		w.Flush()
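
The marshalling rework described in the commit message (a "path:" header only for directory arguments, a single blank line only between listings, and no trailing blank line) can be exercised on its own. Here is a self-contained sketch that copies the new loop's logic onto stand-in types and fake data:

package main

import (
	"bytes"
	"fmt"
	"os"
	"text/tabwriter"
)

// Simplified stand-ins for the marshaller's input types.
type LsLink struct{ Name string }

type LsObject struct {
	Argument string
	Links    []LsLink
}

type LsOutput struct{ Objects []*LsObject }

func main() {
	// One file argument (empty Argument) followed by one directory argument.
	output := &LsOutput{Objects: []*LsObject{
		{Links: []LsLink{{Name: "/ipfs/<hash>/busybox"}}},
		{Argument: "/ipfs/<hash>", Links: []LsLink{{Name: "128"}, {Name: "a"}}},
	}}

	buf := new(bytes.Buffer)
	w := tabwriter.NewWriter(buf, 1, 2, 1, ' ', 0)
	lastObjectDirHeader := false
	for i, object := range output.Objects {
		if len(output.Objects) > 1 && object.Argument != "" {
			// Directory header, separated from any previous listing by a
			// single blank line.
			if i > 0 {
				fmt.Fprintln(w)
			}
			fmt.Fprintf(w, "%s:\n", object.Argument)
			lastObjectDirHeader = true
		} else {
			if lastObjectDirHeader {
				fmt.Fprintln(w)
			}
			lastObjectDirHeader = false
		}
		for _, link := range object.Links {
			fmt.Fprintf(w, "%s\n", link.Name)
		}
	}
	w.Flush()
	os.Stdout.Write(buf.Bytes()) // ends after the last listing, no trailing blank line
}

Run against these two fake objects, the loop prints the bare file line, a blank line, then the directory header and its children, with nothing after the last entry.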

View File

@@ -57,10 +57,21 @@ test_ls_cmd() {
 		QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss:
 		128
 		a
 		EOF
 		test_cmp expected_ls actual_ls
 	'
+
+	test_expect_success "'ipfs unixfs ls <file hashes>' succeeds" '
+		ipfs unixfs ls /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe >actual_ls_file
+	'
+	test_expect_success "'ipfs unixfs ls <file hashes>' output looks good" '
+		cat <<-\EOF >expected_ls_file &&
+		/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024
+		QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe
+		EOF
+		test_cmp expected_ls_file actual_ls_file
+	'
 }