Mirror of https://github.com/ipfs/kubo.git, synced 2025-07-09 11:44:54 +08:00
core/commands/unixfs/ls: Hash-map for Objects
Discussion with Juan on IRC ([1] through [2]) led to this adjusted JSON output. Benefits over the old output include:

* deduplication (we only check the children of a given Merkle node once, even if multiple arguments resolve to that hash)
* alphabetized output (like POSIX ls)

As a side effect of this change, I'm also matching GNU Coreutils' ls output (maybe specified by POSIX?) by printing an alphabetized list of non-directories (one per line) first, with alphabetized directory listings afterwards.

[1]: https://botbot.me/freenode/ipfs/2015-06-12/?msg=41725570&page=5
[2]: https://botbot.me/freenode/ipfs/2015-06-12/?msg=41726547&page=5

License: MIT
Signed-off-by: W. Trevor King <wking@tremily.us>
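As a rough illustration of the deduplication, here is a minimal standalone sketch (toy stand-ins for hash resolution and child listing, truncated hashes; not the command's real code): two arguments that resolve to the same Merkle node produce two Arguments entries but only one listed object.

    package main

    import (
    	"fmt"
    	"sort"
    )

    func main() {
    	// Toy resolver: two different arguments resolve to the same node.
    	resolve := map[string]string{
    		"/ipfs/QmfNy.../d1": "QmSix...",
    		"/ipfs/QmSix...":    "QmSix...",
    	}
    	children := map[string][]string{
    		"QmSix...": {"a", "128"},
    	}

    	arguments := map[string]string{}
    	objects := map[string][]string{}
    	for _, arg := range []string{"/ipfs/QmfNy.../d1", "/ipfs/QmSix..."} {
    		hash := resolve[arg]
    		arguments[arg] = hash
    		if _, ok := objects[hash]; ok {
    			continue // duplicate argument for an already-listed node
    		}
    		links := append([]string{}, children[hash]...)
    		sort.Strings(links) // alphabetized output, like POSIX ls
    		objects[hash] = links
    	}

    	fmt.Println(len(arguments), len(objects)) // 2 1: two arguments, one listed object
    }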
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"sort"
 	"text/tabwriter"
 	"time"
@@ -23,12 +24,12 @@ type LsLink struct {
 }
 
 type LsObject struct {
-	Argument string
-	Links    []LsLink
+	Links []LsLink
 }
 
 type LsOutput struct {
-	Objects []*LsObject
+	Arguments map[string]string
+	Objects   map[string]*LsObject
 }
 
 var LsCmd = &cmds.Command{
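With these types, a request now marshals to two hash-keyed maps instead of an array of per-argument objects. A standalone sketch, using local copies of the new types, that reproduces the single-file JSON expectation from the tests below (the real command wires the value through res.SetOutput instead):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Local copies of the types this commit introduces; LsLink's field
    // order follows the JSON expectations in the tests below.
    type LsLink struct {
    	Name string
    	Hash string
    	Size uint64
    	Type string
    }

    type LsObject struct {
    	Links []LsLink
    }

    type LsOutput struct {
    	Arguments map[string]string
    	Objects   map[string]*LsObject
    }

    func main() {
    	out := LsOutput{
    		Arguments: map[string]string{
    			"/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024": "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd",
    		},
    		Objects: map[string]*LsObject{
    			"QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd": {
    				Links: []LsLink{{
    					Name: "/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024",
    					Hash: "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd",
    					Size: 1024,
    					Type: "File",
    				}},
    			},
    		},
    	}
    	b, _ := json.MarshalIndent(out, "", "  ")
    	fmt.Println(string(b)) // same shape as expected_json_ls_file below
    }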
@@ -57,8 +58,12 @@ directories, the child size is the IPFS link size.
 
 		paths := req.Arguments()
 
-		output := make([]*LsObject, len(paths))
-		for i, fpath := range paths {
+		output := LsOutput{
+			Arguments: map[string]string{},
+			Objects:   map[string]*LsObject{},
+		}
+
+		for _, fpath := range paths {
 			ctx := req.Context().Context
 			merkleNode, err := core.Resolve(ctx, node, path.Path(fpath))
 			if err != nil {
@@ -66,13 +71,27 @@ directories, the child size is the IPFS link size.
 				return
 			}
 
-			unixFSNode, err := unixfs.FromBytes(merkleNode.Data)
+			key, err := merkleNode.Key()
 			if err != nil {
 				res.SetError(err, cmds.ErrNormal)
 				return
 			}
 
-			output[i] = &LsObject{Argument: fpath}
+			hash := key.B58String()
+			output.Arguments[fpath] = hash
+
+			if _, ok := output.Objects[hash]; ok {
+				// duplicate argument for an already-listed node
+				continue
+			}
+
+			output.Objects[hash] = &LsObject{}
+
+			unixFSNode, err := unixfs.FromBytes(merkleNode.Data)
+			if err != nil {
+				res.SetError(err, cmds.ErrNormal)
+				return
+			}
 
 			t := unixFSNode.GetType()
 			switch t {
@@ -85,15 +104,16 @@ directories, the child size is the IPFS link size.
 				res.SetError(err, cmds.ErrNormal)
 				return
 			}
-			output[i].Links = []LsLink{LsLink{
+			output.Objects[hash].Links = []LsLink{LsLink{
 				Name: fpath,
 				Hash: key.String(),
 				Type: t.String(),
 				Size: unixFSNode.GetFilesize(),
 			}}
 		case unixfspb.Data_Directory:
-			output[i].Links = make([]LsLink, len(merkleNode.Links))
-			for j, link := range merkleNode.Links {
+			links := make([]LsLink, len(merkleNode.Links))
+			output.Objects[hash].Links = links
+			for i, link := range merkleNode.Links {
 				getCtx, cancel := context.WithTimeout(ctx, time.Minute)
 				defer cancel()
 				link.Node, err = link.GetNode(getCtx, node.DAG)
@@ -117,12 +137,12 @@ directories, the child size is the IPFS link size.
 				} else {
 					lsLink.Size = link.Size
 				}
-				output[i].Links[j] = lsLink
+				links[i] = lsLink
 			}
 		}
 	}
 
-	res.SetOutput(&LsOutput{Objects: output})
+	res.SetOutput(&output)
 },
 Marshalers: cmds.MarshalerMap{
 	cmds.Text: func(res cmds.Response) (io.Reader, error) {
@@ -130,21 +150,44 @@ directories, the child size is the IPFS link size.
 		output := res.Output().(*LsOutput)
 		buf := new(bytes.Buffer)
 		w := tabwriter.NewWriter(buf, 1, 2, 1, ' ', 0)
-		lastObjectDirHeader := false
-		for i, object := range output.Objects {
-			singleObject := (len(object.Links) == 1 &&
-				object.Links[0].Name == object.Argument)
-			if len(output.Objects) > 1 && !singleObject {
-				if i > 0 {
-					fmt.Fprintln(w)
-				}
-				fmt.Fprintf(w, "%s:\n", object.Argument)
-				lastObjectDirHeader = true
-			} else {
-				if lastObjectDirHeader {
-					fmt.Fprintln(w)
-				}
-				lastObjectDirHeader = false
-			}
+
+		nonDirectories := []string{}
+		directories := []string{}
+		for argument, hash := range output.Arguments {
+			object, ok := output.Objects[hash]
+			if !ok {
+				return nil, fmt.Errorf("unresolved hash: %s", hash)
+			}
+
+			if len(object.Links) == 1 && object.Links[0].Hash == hash {
+				nonDirectories = append(nonDirectories, argument)
+			} else {
+				directories = append(directories, argument)
+			}
+		}
+		sort.Strings(nonDirectories)
+		sort.Strings(directories)
+
+		for _, argument := range nonDirectories {
+			fmt.Fprintf(w, "%s\n", argument)
+		}
+
+		seen := map[string]bool{}
+		for i, argument := range directories {
+			hash := output.Arguments[argument]
+			if _, ok := seen[hash]; ok {
+				continue
+			}
+			seen[hash] = true
+
+			object := output.Objects[hash]
+			if i > 0 || len(nonDirectories) > 0 {
+				fmt.Fprintln(w)
+			}
+			for _, arg := range directories[i:] {
+				if output.Arguments[arg] == hash {
+					fmt.Fprintf(w, "%s:\n", arg)
+				}
+			}
 			for _, link := range object.Links {
 				fmt.Fprintf(w, "%s\n", link.Name)
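The subtle part of the rewritten text marshaler is header grouping: an argument whose single link points back at its own hash is treated as a non-directory and printed bare; every other argument is a directory, and when several directory arguments resolve to the same node, all of their headers are printed consecutively (the directories[i:] scan) before the shared listing appears once. A minimal standalone sketch of that grouping loop (toy data, truncated hashes; not the patch itself):

    package main

    import (
    	"fmt"
    	"sort"
    )

    func main() {
    	// Two directory arguments that resolve to the same node.
    	arguments := map[string]string{
    		"/ipfs/QmfNy.../d1": "QmSix...",
    		"/ipfs/QmSix...":    "QmSix...",
    	}

    	directories := make([]string, 0, len(arguments))
    	for arg := range arguments {
    		directories = append(directories, arg)
    	}
    	sort.Strings(directories)

    	seen := map[string]bool{}
    	for i, argument := range directories {
    		hash := arguments[argument]
    		if seen[hash] {
    			continue // this node's headers and listing were already printed
    		}
    		seen[hash] = true
    		// Print every remaining argument sharing this hash as a header...
    		for _, arg := range directories[i:] {
    			if arguments[arg] == hash {
    				fmt.Printf("%s:\n", arg)
    			}
    		}
    		// ...followed by the node's listing, printed exactly once.
    		fmt.Println("128")
    		fmt.Println("a")
    	}
    }

Run, this prints both headers followed by a single "128 / a" listing, the same pattern the expected_ls_duplicates_file fixture below checks for.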
@@ -44,12 +44,6 @@ test_ls_cmd() {
 
 	test_expect_success "'ipfs file ls <three dir hashes>' output looks good" '
 		cat <<-\EOF >expected_ls &&
-			QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj:
-			d1
-			d2
-			f1
-			f2
-
 			QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy:
 			1024
 			a
@@ -57,6 +51,12 @@ test_ls_cmd() {
 			QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss:
 			128
 			a
+
+			QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj:
+			d1
+			d2
+			f1
+			f2
 		EOF
 		test_cmp expected_ls actual_ls
 	'
@@ -73,6 +73,23 @@ test_ls_cmd() {
 		test_cmp expected_ls_file actual_ls_file
 	'
 
+	test_expect_success "'ipfs file ls <duplicates>' succeeds" '
+		ipfs file ls /ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1 /ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 /ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd >actual_ls_duplicates_file
+	'
+
+	test_expect_success "'ipfs file ls <duplicates>' output looks good" '
+		cat <<-\EOF >expected_ls_duplicates_file &&
+			/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024
+			/ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd
+
+			/ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss:
+			/ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1:
+			128
+			a
+		EOF
+		test_cmp expected_ls_duplicates_file actual_ls_duplicates_file
+	'
+
 	test_expect_success "'ipfs --encoding=json file ls <file hashes>' succeeds" '
 		ipfs --encoding=json file ls /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 >actual_json_ls_file
 	'
@@ -80,9 +97,11 @@ test_ls_cmd() {
 	test_expect_success "'ipfs --encoding=json file ls <file hashes>' output looks good" '
 		cat <<-\EOF >expected_json_ls_file_trailing_newline &&
 			{
-			  "Objects": [
-			    {
-			      "Argument": "/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024",
+			  "Arguments": {
+			    "/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024": "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd"
+			  },
+			  "Objects": {
+			    "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd": {
 			      "Links": [
 			        {
 			          "Name": "/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024",
@@ -92,12 +111,59 @@ test_ls_cmd() {
 			        }
 			      ]
 			    }
-			  ]
+			  }
 			}
 		EOF
 		printf %s "$(cat expected_json_ls_file_trailing_newline)" >expected_json_ls_file &&
 		test_cmp expected_json_ls_file actual_json_ls_file
 	'
+
+	test_expect_success "'ipfs --encoding=json file ls <duplicates>' succeeds" '
+		ipfs --encoding=json file ls /ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1 /ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss /ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024 /ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd >actual_json_ls_duplicates_file
+	'
+
+	test_expect_success "'ipfs --encoding=json file ls <duplicates>' output looks good" '
+		cat <<-\EOF >expected_json_ls_duplicates_file_trailing_newline &&
+			{
+			  "Arguments": {
+			    "/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024": "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd",
+			    "/ipfs/QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss": "QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss",
+			    "/ipfs/QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd": "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd",
+			    "/ipfs/QmfNy183bXiRVyrhyWtq3TwHn79yHEkiAGFr18P7YNzESj/d1": "QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss"
+			  },
+			  "Objects": {
+			    "QmSix55yz8CzWXf5ZVM9vgEvijnEeeXiTSarVtsqiiCJss": {
+			      "Links": [
+			        {
+			          "Name": "128",
+			          "Hash": "QmQNd6ubRXaNG6Prov8o6vk3bn6eWsj9FxLGrAVDUAGkGe",
+			          "Size": 128,
+			          "Type": "File"
+			        },
+			        {
+			          "Name": "a",
+			          "Hash": "QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN",
+			          "Size": 6,
+			          "Type": "File"
+			        }
+			      ]
+			    },
+			    "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd": {
+			      "Links": [
+			        {
+			          "Name": "/ipfs/QmR3jhV4XpxxPjPT3Y8vNnWvWNvakdcT3H6vqpRBsX1MLy/1024",
+			          "Hash": "QmbQBUSRL9raZtNXfpTDeaxQapibJEG6qEY8WqAN22aUzd",
+			          "Size": 1024,
+			          "Type": "File"
+			        }
+			      ]
+			    }
+			  }
+			}
+		EOF
+		printf %s "$(cat expected_json_ls_duplicates_file_trailing_newline)" >expected_json_ls_duplicates_file &&
+		test_cmp expected_json_ls_duplicates_file actual_json_ls_duplicates_file
+	'
 }
 
 