Mirror of https://github.com/ipfs/kubo.git (synced 2025-06-29 09:34:03 +08:00)
Merge pull request #3938 from ipfs/kevina/filestore-verify-sort
filestore: add "--file-order" option to "filestore ls" and "verify"
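A rough usage sketch of the new flag (assuming a repo with the filestore feature enabled and content added with "ipfs add --nocopy", as in the sharness test updated below). The flag only changes the output order, not which blocks are listed or verified:

    ipfs filestore ls                   # default listing; order is not guaranteed, the tests sort it before comparing
    ipfs filestore ls --file-order      # entries grouped by the path of the backing file, then by offset
    ipfs filestore verify --file-order  # verify blocks in the same file-based order

The file-order listing groups all blocks backed by a given file together with ascending offsets, as the updated expected output in the test fixtures below shows.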
@@ -40,6 +40,9 @@ The output is:
 	Arguments: []cmds.Argument{
 		cmds.StringArg("obj", false, true, "Cid of objects to list."),
 	},
+	Options: []cmds.Option{
+		cmds.BoolOption("file-order", "sort the results based on the path of the backing file"),
+	},
 	Run: func(req cmds.Request, res cmds.Response) {
 		_, fs, err := getFilestore(req)
 		if err != nil {
@@ -53,7 +56,8 @@ The output is:
 			}, req.Context())
 			res.SetOutput(out)
 		} else {
-			next, err := filestore.ListAll(fs)
+			fileOrder, _, _ := req.Option("file-order").Bool()
+			next, err := filestore.ListAll(fs, fileOrder)
 			if err != nil {
 				res.SetError(err, cmds.ErrNormal)
 				return
@@ -114,6 +118,9 @@ For ERROR entries the error will also be printed to stderr.
 	Arguments: []cmds.Argument{
 		cmds.StringArg("obj", false, true, "Cid of objects to verify."),
 	},
+	Options: []cmds.Option{
+		cmds.BoolOption("file-order", "verify the objects based on the order of the backing file"),
+	},
 	Run: func(req cmds.Request, res cmds.Response) {
 		_, fs, err := getFilestore(req)
 		if err != nil {
@@ -127,7 +134,8 @@ For ERROR entries the error will also be printed to stderr.
 			}, req.Context())
 			res.SetOutput(out)
 		} else {
-			next, err := filestore.VerifyAll(fs)
+			fileOrder, _, _ := req.Option("file-order").Bool()
+			next, err := filestore.VerifyAll(fs, fileOrder)
 			if err != nil {
 				res.SetError(err, cmds.ErrNormal)
 				return
@@ -2,6 +2,7 @@ package filestore
 
 import (
 	"fmt"
+	"sort"
 
 	"github.com/ipfs/go-ipfs/blocks/blockstore"
 	pb "github.com/ipfs/go-ipfs/filestore/pb"
@@ -89,7 +90,10 @@ func List(fs *Filestore, key *cid.Cid) *ListRes {
 // one by one each block in the Filestore's FileManager.
 // ListAll does not verify that the references are valid or whether
 // the raw data is accessible. See VerifyAll().
-func ListAll(fs *Filestore) (func() *ListRes, error) {
+func ListAll(fs *Filestore, fileOrder bool) (func() *ListRes, error) {
+	if fileOrder {
+		return listAllFileOrder(fs, false)
+	}
 	return listAll(fs, false)
 }
 
@@ -105,7 +109,10 @@ func Verify(fs *Filestore, key *cid.Cid) *ListRes {
 // returns one by one each block in the Filestore's FileManager.
 // VerifyAll checks that the reference is valid and that the block data
 // can be read.
-func VerifyAll(fs *Filestore) (func() *ListRes, error) {
+func VerifyAll(fs *Filestore, fileOrder bool) (func() *ListRes, error) {
+	if fileOrder {
+		return listAllFileOrder(fs, true)
+	}
 	return listAll(fs, true)
 }
 
@@ -158,6 +165,93 @@ func next(qr dsq.Results) (*cid.Cid, *pb.DataObj, error) {
 	return c, dobj, nil
 }
 
+func listAllFileOrder(fs *Filestore, verify bool) (func() *ListRes, error) {
+	q := dsq.Query{}
+	qr, err := fs.fm.ds.Query(q)
+	if err != nil {
+		return nil, err
+	}
+
+	var entries listEntries
+
+	for {
+		v, ok := qr.NextSync()
+		if !ok {
+			break
+		}
+		dobj, err := unmarshalDataObj(v.Value)
+		if err != nil {
+			entries = append(entries, &listEntry{
+				dsKey: v.Key,
+				err:   err,
+			})
+		} else {
+			entries = append(entries, &listEntry{
+				dsKey:    v.Key,
+				filePath: dobj.GetFilePath(),
+				offset:   dobj.GetOffset(),
+				size:     dobj.GetSize_(),
+			})
+		}
+	}
+	sort.Sort(entries)
+
+	i := 0
+	return func() *ListRes {
+		if i >= len(entries) {
+			return nil
+		}
+		v := entries[i]
+		i++
+		// attempt to convert the datastore key to a CID,
+		// store the error but don't use it yet
+		cid, keyErr := dshelp.DsKeyToCid(ds.RawKey(v.dsKey))
+		// first if the listRes already had an error return that error
+		if v.err != nil {
+			return mkListRes(cid, nil, v.err)
+		}
+		// now reconstruct the DataObj
+		dobj := pb.DataObj{
+			FilePath: &v.filePath,
+			Offset:   &v.offset,
+			Size_:    &v.size,
+		}
+		// now if we could not convert the datastore key return that
+		// error
+		if keyErr != nil {
+			return mkListRes(cid, &dobj, keyErr)
+		}
+		// finally verify the dataobj if requested
+		var err error
+		if verify {
+			_, err = fs.fm.readDataObj(cid, &dobj)
+		}
+		return mkListRes(cid, &dobj, err)
+	}, nil
+}
+
+type listEntry struct {
+	filePath string
+	offset   uint64
+	dsKey    string
+	size     uint64
+	err      error
+}
+
+type listEntries []*listEntry
+
+func (l listEntries) Len() int      { return len(l) }
+func (l listEntries) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l listEntries) Less(i, j int) bool {
+	if l[i].filePath == l[j].filePath {
+		if l[i].offset == l[j].offset {
+			return l[i].dsKey < l[j].dsKey
+		}
+		return l[i].offset < l[j].offset
+	}
+	return l[i].filePath < l[j].filePath
+}
+
 func mkListRes(c *cid.Cid, d *pb.DataObj, err error) *ListRes {
 	status := StatusOk
 	errorMsg := ""
@@ -37,28 +37,32 @@ test_init() {
 
 EXPHASH="QmRueCuPMYYvdxWz1vWncF7wzCScEx4qasZXo5aVBb1R4V"
 
-cat <<EOF > ls_expect
-zb2rhaPkR7ZF9BzSC2BfqbcGivi9QMdauermW9YB6NvS7FZMo 10000 somedir/file2 0
-zb2rhav4wcdvNXtaKDTWHYAqtUHMEpygT1cxqMsfK7QrDuHxH 262144 somedir/file3 524288
+cat <<EOF > ls_expect_file_order
 zb2rhbcZ3aUXYcrbhhDH1JyrpDcpdw1KFJ5Xs5covjnvMpxDR 1000 somedir/file1 0
+zb2rhaPkR7ZF9BzSC2BfqbcGivi9QMdauermW9YB6NvS7FZMo 10000 somedir/file2 0
 zb2rhe28UqCDm7TFib7PRyQYEkvuq8iahcXA2AbgaxCLvNhfk 262144 somedir/file3 0
 zb2rhebtyTTuHKyTbJPnkDUSruU5Uma4DN8t2EkvYZ6fP36mm 262144 somedir/file3 262144
+zb2rhav4wcdvNXtaKDTWHYAqtUHMEpygT1cxqMsfK7QrDuHxH 262144 somedir/file3 524288
 zb2rhm9VTrX2mfatggYUk8mHLz78XBxVUTTzLvM2N3d6frdAU 213568 somedir/file3 786432
 EOF
 
+sort < ls_expect_file_order > ls_expect_key_order
+
 FILE1_HASH=zb2rhbcZ3aUXYcrbhhDH1JyrpDcpdw1KFJ5Xs5covjnvMpxDR
 FILE2_HASH=zb2rhaPkR7ZF9BzSC2BfqbcGivi9QMdauermW9YB6NvS7FZMo
 FILE3_HASH=QmfE4SDQazxTD7u8VTYs9AJqQL8rrJPUAorLeJXKSZrVf9
 
-cat <<EOF > verify_expect
-ok zb2rhaPkR7ZF9BzSC2BfqbcGivi9QMdauermW9YB6NvS7FZMo 10000 somedir/file2 0
-ok zb2rhav4wcdvNXtaKDTWHYAqtUHMEpygT1cxqMsfK7QrDuHxH 262144 somedir/file3 524288
+cat <<EOF > verify_expect_file_order
 ok zb2rhbcZ3aUXYcrbhhDH1JyrpDcpdw1KFJ5Xs5covjnvMpxDR 1000 somedir/file1 0
+ok zb2rhaPkR7ZF9BzSC2BfqbcGivi9QMdauermW9YB6NvS7FZMo 10000 somedir/file2 0
 ok zb2rhe28UqCDm7TFib7PRyQYEkvuq8iahcXA2AbgaxCLvNhfk 262144 somedir/file3 0
 ok zb2rhebtyTTuHKyTbJPnkDUSruU5Uma4DN8t2EkvYZ6fP36mm 262144 somedir/file3 262144
+ok zb2rhav4wcdvNXtaKDTWHYAqtUHMEpygT1cxqMsfK7QrDuHxH 262144 somedir/file3 524288
 ok zb2rhm9VTrX2mfatggYUk8mHLz78XBxVUTTzLvM2N3d6frdAU 213568 somedir/file3 786432
 EOF
 
+sort < verify_expect_file_order > verify_expect_key_order
+
 test_filestore_adds() {
 	test_expect_success "nocopy add succeeds" '
 		HASH=$(ipfs add --raw-leaves --nocopy -r -q somedir | tail -n1)
@@ -70,7 +74,12 @@ test_filestore_adds() {
 
 	test_expect_success "'ipfs filestore ls' output looks good'" '
 		ipfs filestore ls | sort > ls_actual &&
-		test_cmp ls_expect ls_actual
+		test_cmp ls_expect_key_order ls_actual
 	'
 
+	test_expect_success "'ipfs filestore ls --file-order' output looks good'" '
+		ipfs filestore ls --file-order > ls_actual &&
+		test_cmp ls_expect_file_order ls_actual
+	'
+
 	test_expect_success "'ipfs filestore ls HASH' works" '
@@ -88,13 +97,18 @@ test_filestore_adds() {
 test_filestore_state() {
 	test_expect_success "ipfs filestore verify' output looks good'" '
 		ipfs filestore verify | LC_ALL=C sort > verify_actual
-		test_cmp verify_expect verify_actual
+		test_cmp verify_expect_key_order verify_actual
 	'
 }
 
 test_filestore_verify() {
 	test_filestore_state
 
+	test_expect_success "ipfs filestore verify --file-order' output looks good'" '
+		ipfs filestore verify --file-order > verify_actual
+		test_cmp verify_expect_file_order verify_actual
+	'
+
 	test_expect_success "'ipfs filestore verify HASH' works" '
 		ipfs filestore verify $FILE1_HASH > verify_actual &&
 		grep -q somedir/file1 verify_actual