Merge pull request #1565 from ipfs/add-patch
cmds/add: use dagutils.Editor, like patch
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"fmt"
 	"os"
+	"path"
 	"runtime"
 	"strings"
 
@@ -47,7 +48,7 @@ func Parse(input []string, stdin *os.File, root *cmds.Command) (cmds.Request, *c
 	}
 	req.SetArguments(stringArgs)
 
-	file := files.NewSliceFile("", fileArgs)
+	file := files.NewSliceFile("", "", fileArgs)
 	req.SetFiles(file)
 
 	err = cmd.CheckArguments(req)
@@ -341,9 +342,17 @@ func appendStdinAsString(args []string, stdin *os.File) ([]string, *os.File, err
 }
 
 func appendFile(args []files.File, inputs []string, argDef *cmds.Argument, recursive bool) ([]files.File, []string, error) {
-	path := inputs[0]
+	fpath := inputs[0]
 
-	file, err := os.Open(path)
+	if fpath == "." {
+		cwd, err := os.Getwd()
+		if err != nil {
+			return nil, nil, err
+		}
+		fpath = cwd
+	}
+
+	file, err := os.Open(fpath)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -356,26 +365,25 @@ func appendFile(args []files.File, inputs []string, argDef *cmds.Argument, recur
 	if stat.IsDir() {
 		if !argDef.Recursive {
 			err = fmt.Errorf("Invalid path '%s', argument '%s' does not support directories",
-				path, argDef.Name)
+				fpath, argDef.Name)
 			return nil, nil, err
 		}
 		if !recursive {
 			err = fmt.Errorf("'%s' is a directory, use the '-%s' flag to specify directories",
-				path, cmds.RecShort)
+				fpath, cmds.RecShort)
 			return nil, nil, err
 		}
 	}
 
-	arg, err := files.NewSerialFile(path, file)
+	arg, err := files.NewSerialFile(path.Base(fpath), fpath, file)
 	if err != nil {
 		return nil, nil, err
 	}
 
	return append(args, arg), inputs[1:], nil
 }
 
 func appendStdinAsFile(args []files.File, stdin *os.File) ([]files.File, *os.File) {
-	arg := files.NewReaderFile("", stdin, nil)
+	arg := files.NewReaderFile("", "", stdin, nil)
 	return append(args, arg), nil
 }
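The appendFile hunks above do two related things: a bare "." argument is expanded to the current working directory before it is opened, and the on-disk path is now kept separately from the display name, which becomes path.Base of that path. A minimal standalone sketch of that split, using only the standard library (resolveInput is an illustrative helper, not part of the go-ipfs code):

    package main

    import (
        "fmt"
        "os"
        "path"
    )

    // resolveInput mirrors the logic added to appendFile above: "." is expanded
    // to the working directory, the base name is used for display, and the full
    // path is kept for opening the file. (Illustrative helper, not go-ipfs API.)
    func resolveInput(fpath string) (string, string, error) {
        if fpath == "." {
            cwd, err := os.Getwd()
            if err != nil {
                return "", "", err
            }
            fpath = cwd
        }
        return path.Base(fpath), fpath, nil
    }

    func main() {
        name, full, err := resolveInput(".")
        if err != nil {
            panic(err)
        }
        fmt.Printf("display name: %s\nfull path:    %s\n", name, full)
    }

Run from /home/me/mountdir, this would report "mountdir" as the display name and "/home/me/mountdir" as the full path, which is the pair the new NewSerialFile call receives.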
@@ -18,9 +18,12 @@ type File interface {
 	// Files implement ReadCloser, but can only be read from or closed if they are not directories
 	io.ReadCloser
 
-	// FileName returns a full filename path associated with this file
+	// FileName returns a filename path associated with this file
 	FileName() string
 
+	// FullPath returns the full path in the os associated with this file
+	FullPath() string
+
 	// IsDirectory returns true if the File is a directory (and therefore supports calling `NextFile`)
 	// and false if the File is a normal file (and therefor supports calling `Read` and `Close`)
 	IsDirectory() bool
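The interface now distinguishes a file's display name (FileName) from its location on disk (FullPath), and every constructor in this diff gains a second string argument so both can be supplied. A self-contained toy mirroring that split (namedFile is an illustrative stand-in, not the real commands/files types):

    package main

    import "fmt"

    // namedFile is a toy stand-in for the commands/files types touched above:
    // it carries a short display name and the full on-disk path separately.
    type namedFile struct {
        filename string // what `ipfs add` prints, e.g. "planets/mars.txt"
        fullpath string // where the bytes live, e.g. "/home/me/mountdir/planets/mars.txt"
    }

    func (f namedFile) FileName() string { return f.filename }
    func (f namedFile) FullPath() string { return f.fullpath }

    func main() {
        // Mirrors calls such as NewReaderFile("file.txt", "file.txt", ...) and
        // NewSerialFile(path.Base(fpath), fpath, file) in the hunks above.
        f := namedFile{
            filename: "planets/mars.txt",
            fullpath: "/home/me/mountdir/planets/mars.txt",
        }
        fmt.Println(f.FileName(), "<-", f.FullPath())
    }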
@@ -11,13 +11,13 @@ import (
 func TestSliceFiles(t *testing.T) {
 	name := "testname"
 	files := []File{
-		NewReaderFile("file.txt", ioutil.NopCloser(strings.NewReader("Some text!\n")), nil),
-		NewReaderFile("beep.txt", ioutil.NopCloser(strings.NewReader("beep")), nil),
-		NewReaderFile("boop.txt", ioutil.NopCloser(strings.NewReader("boop")), nil),
+		NewReaderFile("file.txt", "file.txt", ioutil.NopCloser(strings.NewReader("Some text!\n")), nil),
+		NewReaderFile("beep.txt", "beep.txt", ioutil.NopCloser(strings.NewReader("beep")), nil),
+		NewReaderFile("boop.txt", "boop.txt", ioutil.NopCloser(strings.NewReader("boop")), nil),
 	}
 	buf := make([]byte, 20)
 
-	sf := NewSliceFile(name, files)
+	sf := NewSliceFile(name, name, files)
 
 	if !sf.IsDirectory() {
 		t.Error("SliceFile should always be a directory")
@@ -55,7 +55,7 @@ func TestSliceFiles(t *testing.T) {
 
 func TestReaderFiles(t *testing.T) {
 	message := "beep boop"
-	rf := NewReaderFile("file.txt", ioutil.NopCloser(strings.NewReader(message)), nil)
+	rf := NewReaderFile("file.txt", "file.txt", ioutil.NopCloser(strings.NewReader(message)), nil)
 	buf := make([]byte, len(message))
 
 	if rf.IsDirectory() {
@@ -80,6 +80,10 @@ func (f *MultipartFile) FileName() string {
 	return filename
 }
 
+func (f *MultipartFile) FullPath() string {
+	return f.FileName()
+}
+
 func (f *MultipartFile) Read(p []byte) (int, error) {
 	if f.IsDirectory() {
 		return 0, ErrNotReader
@@ -10,12 +10,13 @@ import (
 // ReaderFiles are never directories, and can be read from and closed.
 type ReaderFile struct {
 	filename string
+	fullpath string
 	reader   io.ReadCloser
 	stat     os.FileInfo
 }
 
-func NewReaderFile(filename string, reader io.ReadCloser, stat os.FileInfo) *ReaderFile {
-	return &ReaderFile{filename, reader, stat}
+func NewReaderFile(filename, path string, reader io.ReadCloser, stat os.FileInfo) *ReaderFile {
+	return &ReaderFile{filename, path, reader, stat}
 }
 
 func (f *ReaderFile) IsDirectory() bool {
@@ -30,6 +31,10 @@ func (f *ReaderFile) FileName() string {
 	return f.filename
 }
 
+func (f *ReaderFile) FullPath() string {
+	return f.fullpath
+}
+
 func (f *ReaderFile) Read(p []byte) (int, error) {
 	return f.reader.Read(p)
 }
@@ -18,25 +18,26 @@ func (es sortFIByName) Less(i, j int) bool { return es[i].Name() < es[j].Name()
 // No more than one file will be opened at a time (directories will advance
 // to the next file when NextFile() is called).
 type serialFile struct {
+	name    string
 	path    string
 	files   []os.FileInfo
 	stat    os.FileInfo
 	current *os.File
 }
 
-func NewSerialFile(path string, file *os.File) (File, error) {
+func NewSerialFile(name, path string, file *os.File) (File, error) {
 	stat, err := file.Stat()
 	if err != nil {
 		return nil, err
 	}
 
-	return newSerialFile(path, file, stat)
+	return newSerialFile(name, path, file, stat)
 }
 
-func newSerialFile(path string, file *os.File, stat os.FileInfo) (File, error) {
+func newSerialFile(name, path string, file *os.File, stat os.FileInfo) (File, error) {
 	// for non-directories, return a ReaderFile
 	if !stat.IsDir() {
-		return &ReaderFile{path, file, stat}, nil
+		return &ReaderFile{name, path, file, stat}, nil
 	}
 
 	// for directories, stat all of the contents first, so we know what files to
@@ -56,7 +57,7 @@ func newSerialFile(path string, file *os.File, stat os.FileInfo) (File, error) {
 	// make sure contents are sorted so -- repeatably -- we get the same inputs.
 	sort.Sort(sortFIByName(contents))
 
-	return &serialFile{path, contents, stat, nil}, nil
+	return &serialFile{name, path, contents, stat, nil}, nil
 }
 
 func (f *serialFile) IsDirectory() bool {
@@ -81,6 +82,7 @@ func (f *serialFile) NextFile() (File, error) {
 	f.files = f.files[1:]
 
 	// open the next file
+	fileName := fp.Join(f.name, stat.Name())
 	filePath := fp.Join(f.path, stat.Name())
 	file, err := os.Open(filePath)
 	if err != nil {
@@ -91,10 +93,14 @@ func (f *serialFile) NextFile() (File, error) {
 	// recursively call the constructor on the next file
 	// if it's a regular file, we will open it as a ReaderFile
 	// if it's a directory, files in it will be opened serially
-	return newSerialFile(filePath, file, stat)
+	return newSerialFile(fileName, filePath, file, stat)
 }
 
 func (f *serialFile) FileName() string {
+	return f.name
+}
+
+func (f *serialFile) FullPath() string {
 	return f.path
 }
 
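serialFile now carries the parent's display name and on-disk path side by side, and NextFile joins the child's name onto both before recursing. A standalone sketch of that parallel bookkeeping, standard library only (listEntries is an illustrative helper, not the go-ipfs implementation):

    package main

    import (
        "fmt"
        "os"
        fp "path/filepath"
    )

    // listEntries mirrors the bookkeeping added to serialFile.NextFile above:
    // for every child, a display name is joined onto the parent's name and a
    // full path onto the parent's path. (Illustrative helper, not go-ipfs API.)
    func listEntries(name, path string) error {
        entries, err := os.ReadDir(path)
        if err != nil {
            return err
        }
        for _, e := range entries {
            fileName := fp.Join(name, e.Name()) // roughly what FileName() will report
            filePath := fp.Join(path, e.Name()) // what FullPath() reports and what gets opened
            fmt.Printf("%-30s -> %s\n", fileName, filePath)
        }
        return nil
    }

    func main() {
        cwd, err := os.Getwd()
        if err != nil {
            panic(err)
        }
        if err := listEntries(fp.Base(cwd), cwd); err != nil {
            panic(err)
        }
    }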
@@ -10,12 +10,13 @@ import (
 // SliceFiles are always directories, and can't be read from or closed.
 type SliceFile struct {
 	filename string
+	path     string
 	files    []File
 	n        int
 }
 
-func NewSliceFile(filename string, files []File) *SliceFile {
-	return &SliceFile{filename, files, 0}
+func NewSliceFile(filename, path string, files []File) *SliceFile {
+	return &SliceFile{filename, path, files, 0}
 }
 
 func (f *SliceFile) IsDirectory() bool {
@@ -35,6 +36,10 @@ func (f *SliceFile) FileName() string {
 	return f.filename
 }
 
+func (f *SliceFile) FullPath() string {
+	return f.path
+}
+
 func (f *SliceFile) Read(p []byte) (int, error) {
 	return 0, ErrNotReader
 }
@@ -13,14 +13,14 @@ import (
 func TestOutput(t *testing.T) {
 	text := "Some text! :)"
 	fileset := []files.File{
-		files.NewReaderFile("file.txt", ioutil.NopCloser(strings.NewReader(text)), nil),
-		files.NewSliceFile("boop", []files.File{
-			files.NewReaderFile("boop/a.txt", ioutil.NopCloser(strings.NewReader("bleep")), nil),
-			files.NewReaderFile("boop/b.txt", ioutil.NopCloser(strings.NewReader("bloop")), nil),
+		files.NewReaderFile("file.txt", "file.txt", ioutil.NopCloser(strings.NewReader(text)), nil),
+		files.NewSliceFile("boop", "boop", []files.File{
+			files.NewReaderFile("boop/a.txt", "boop/a.txt", ioutil.NopCloser(strings.NewReader("bleep")), nil),
+			files.NewReaderFile("boop/b.txt", "boop/b.txt", ioutil.NopCloser(strings.NewReader("bloop")), nil),
 		}),
-		files.NewReaderFile("beep.txt", ioutil.NopCloser(strings.NewReader("beep")), nil),
+		files.NewReaderFile("beep.txt", "beep.txt", ioutil.NopCloser(strings.NewReader("beep")), nil),
 	}
-	sf := files.NewSliceFile("", fileset)
+	sf := files.NewSliceFile("", "", fileset)
 	buf := make([]byte, 20)
 
 	// testing output by reading it with the go stdlib "mime/multipart" Reader
@@ -6,6 +6,7 @@ import (
 	"path"
 
 	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/cheggaaa/pb"
+	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
 
 	cmds "github.com/ipfs/go-ipfs/commands"
 	files "github.com/ipfs/go-ipfs/commands/files"
@@ -13,6 +14,7 @@ import (
 	importer "github.com/ipfs/go-ipfs/importer"
 	"github.com/ipfs/go-ipfs/importer/chunk"
 	dag "github.com/ipfs/go-ipfs/merkledag"
+	dagutils "github.com/ipfs/go-ipfs/merkledag/utils"
 	pin "github.com/ipfs/go-ipfs/pin"
 	ft "github.com/ipfs/go-ipfs/unixfs"
 	u "github.com/ipfs/go-ipfs/util"
@@ -102,7 +104,7 @@ remains to be implemented.
 	chunker, _, _ := req.Option(chunkerOptionName).String()
 
 	if hash {
-		nilnode, err := core.NewNodeBuilder().NilRepo().Build(n.Context())
+		nilnode, err := core.NewNodeBuilder().Build(n.Context())
 		if err != nil {
 			res.SetError(err, cmds.ErrNormal)
 			return
@@ -113,22 +115,37 @@ remains to be implemented.
 	outChan := make(chan interface{}, 8)
 	res.SetOutput((<-chan interface{})(outChan))
 
-	// addSingleFile is a function that adds a file given as a param.
-	addSingleFile := func(file files.File) error {
-		addParams := adder{
-			node:     n,
-			out:      outChan,
-			progress: progress,
-			hidden:   hidden,
-			trickle:  trickle,
-			chunker:  chunker,
-		}
+	fileAdder := adder{
+		ctx:      req.Context(),
+		node:     n,
+		editor:   dagutils.NewDagEditor(n.DAG, newDirNode()),
+		out:      outChan,
+		chunker:  chunker,
+		progress: progress,
+		hidden:   hidden,
+		trickle:  trickle,
+		wrap:     wrap,
+	}
 
-		rootnd, err := addParams.addFile(file)
-		if err != nil {
-			return err
-		}
+	// addAllFiles loops over a convenience slice file to
+	// add each file individually. e.g. 'ipfs add a b c'
+	addAllFiles := func(sliceFile files.File) error {
+		for {
+			file, err := sliceFile.NextFile()
+			if err != nil && err != io.EOF {
+				return err
+			}
+			if file == nil {
+				return nil // done
+			}
+
+			if _, err := fileAdder.addFile(file); err != nil {
+				return err
+			}
+		}
+	}
 
+	pinRoot := func(rootnd *dag.Node) error {
 		rnk, err := rootnd.Key()
 		if err != nil {
 			return err
@@ -140,37 +157,22 @@ remains to be implemented.
 		return n.Pinning.Flush()
 	}
 
-	// addFilesSeparately loops over a convenience slice file to
-	// add each file individually. e.g. 'ipfs add a b c'
-	addFilesSeparately := func(sliceFile files.File) error {
-		for {
-			file, err := sliceFile.NextFile()
-			if err != nil && err != io.EOF {
-				return err
-			}
-			if file == nil {
-				return nil // done
-			}
-
-			if err := addSingleFile(file); err != nil {
-				return err
-			}
+	addAllAndPin := func(f files.File) error {
+		if err := addAllFiles(f); err != nil {
+			return err
+		}
+
+		rootnd, err := fileAdder.RootNode()
+		if err != nil {
+			return err
 		}
+
+		return pinRoot(rootnd)
 	}
 
 	go func() {
 		defer close(outChan)
 
-		// really, we're unrapping, if !wrap, because
-		// req.Files() is already a SliceFile() with all of them,
-		// so can just use that slice as the wrapper.
-		var err error
-		if wrap {
-			err = addSingleFile(req.Files())
-		} else {
-			err = addFilesSeparately(req.Files())
-		}
-		if err != nil {
+		if err := addAllAndPin(req.Files()); err != nil {
 			res.SetError(err, cmds.ErrNormal)
 			return
 		}
@@ -264,12 +266,17 @@ remains to be implemented.
 
 // Internal structure for holding the switches passed to the `add` call
 type adder struct {
+	ctx      cxt.Context
 	node     *core.IpfsNode
+	editor   *dagutils.Editor
 	out      chan interface{}
 	progress bool
 	hidden   bool
 	trickle  bool
+	wrap     bool
 	chunker  string
+
+	nextUntitled int
 }
 
 // Perform the actual add & pin locally, outputting results to reader
@@ -301,6 +308,40 @@ func add(n *core.IpfsNode, reader io.Reader, useTrickle bool, chunker string) (*
 	return node, nil
 }
 
+func (params *adder) RootNode() (*dag.Node, error) {
+	r := params.editor.GetNode()
+
+	// if not wrapping, AND one root file, use that hash as root.
+	if !params.wrap && len(r.Links) == 1 {
+		var err error
+		r, err = r.Links[0].GetNode(params.ctx, params.node.DAG)
+		// no need to output, as we've already done so.
+		return r, err
+	}
+
+	// otherwise need to output, as we have not.
+	err := outputDagnode(params.out, "", r)
+	return r, err
+}
+
+func (params *adder) addNode(node *dag.Node, path string) error {
+	// patch it into the root
+	key, err := node.Key()
+	if err != nil {
+		return err
+	}
+
+	if path == "" {
+		path = key.Pretty()
+	}
+
+	if err := params.editor.InsertNodeAtPath(params.ctx, path, key, newDirNode); err != nil {
+		return err
+	}
+
+	return outputDagnode(params.out, path, node)
+}
+
 // Add the given file while respecting the params.
 func (params *adder) addFile(file files.File) (*dag.Node, error) {
 	// Check if file is hidden
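RootNode above is where the wrap behaviour lives: every input is patched into one wrapper directory through the editor, and if wrapping was not requested and exactly one entry ended up in it, that entry is promoted to be the root. A self-contained toy model of that decision (rootNode, the entry map, and the placeholder hashes are illustrative, not the dagutils API):

    package main

    import "fmt"

    // rootNode models adder.RootNode from the hunk above: entries are gathered
    // under a single wrapper, and the lone child is promoted to root when the
    // user did not ask to wrap and only one thing was added.
    func rootNode(entries map[string]string, wrap bool) string {
        if !wrap && len(entries) == 1 {
            for _, child := range entries {
                return child // promote the single child, as the real code does via r.Links[0]
            }
        }
        return "wrapper-directory" // otherwise the wrapper itself is the root
    }

    func main() {
        one := map[string]string{"hello.txt": "QmHashOfHello"} // illustrative values
        many := map[string]string{"a.txt": "QmA", "b.txt": "QmB"}

        fmt.Println(rootNode(one, false))  // QmHashOfHello
        fmt.Println(rootNode(one, true))   // wrapper-directory
        fmt.Println(rootNode(many, false)) // wrapper-directory
    }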
@@ -326,11 +367,10 @@ func (params *adder) addFile(file files.File) (*dag.Node, error) {
 		return nil, err
 	}
 
+	// patch it into the root
 	log.Infof("adding file: %s", file.FileName())
-	if err := outputDagnode(params.out, file.FileName(), dagnode); err != nil {
-		return nil, err
-	}
-	return dagnode, nil
+	err = params.addNode(dagnode, file.FileName())
+	return dagnode, err
 }
 
 func (params *adder) addDir(file files.File) (*dag.Node, error) {
@@ -364,8 +404,7 @@ func (params *adder) addDir(file files.File) (*dag.Node, error) {
 		}
 	}
 
-	err := outputDagnode(params.out, file.FileName(), tree)
-	if err != nil {
+	if err := params.addNode(tree, file.FileName()); err != nil {
 		return nil, err
 	}
 
@@ -431,3 +470,8 @@ func (i *progressReader) Read(p []byte) (int, error) {
 
 	return n, err
 }
+
+// TODO: generalize this to more than unix-fs nodes.
+func newDirNode() *dag.Node {
+	return &dag.Node{Data: ft.FolderPBData()}
+}
@@ -50,7 +50,7 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) {
 	}
 	defer f.Close()
 
-	ff, err := files.NewSerialFile(root, f)
+	ff, err := files.NewSerialFile(root, root, f)
 	if err != nil {
 		return "", err
 	}
@@ -79,8 +79,8 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) {
 // Returns the path of the added file ("<dir hash>/filename"), the DAG node of
 // the directory, and and error if any.
 func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *merkledag.Node, error) {
-	file := files.NewReaderFile(filename, ioutil.NopCloser(r), nil)
-	dir := files.NewSliceFile("", []files.File{file})
+	file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil)
+	dir := files.NewSliceFile("", "", []files.File{file})
 	dagnode, err := addDir(n, dir)
 	if err != nil {
 		return "", nil, err
@ -35,7 +35,7 @@ test_expect_success "ipfs add succeeds" '
|
||||
|
||||
test_expect_success "ipfs add output looks good" '
|
||||
HASH="QmVr26fY1tKyspEJBniVhqxQeEjhF78XerGiqWAwraVLQH" &&
|
||||
echo "added $HASH mountdir/hello.txt" >expected &&
|
||||
echo "added $HASH hello.txt" >expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
@ -116,7 +116,7 @@ test_expect_success "'ipfs add' with stdin input succeeds" '
|
||||
|
||||
test_expect_success "'ipfs add' output looks good" '
|
||||
HASH="QmZDhWpi8NvKrekaYYhxKCdNVGWsFFe1CREnAjP1QbPaB3" &&
|
||||
echo "added $HASH " >expected &&
|
||||
echo "added $HASH $HASH" >expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
@ -140,9 +140,9 @@ test_expect_success "'ipfs add -r' output looks good" '
|
||||
PLANETS="QmWSgS32xQEcXMeqd3YPJLrNBLSdsfYCep2U7CFkyrjXwY" &&
|
||||
MARS="QmPrrHqJzto9m7SyiRzarwkqPcCSsKR2EB1AyqJfe8L8tN" &&
|
||||
VENUS="QmU5kp3BH3B8tnWUU2Pikdb2maksBNkb92FHRr56hyghh4" &&
|
||||
echo "added $MARS mountdir/planets/mars.txt" >expected &&
|
||||
echo "added $VENUS mountdir/planets/venus.txt" >>expected &&
|
||||
echo "added $PLANETS mountdir/planets" >>expected &&
|
||||
echo "added $MARS planets/mars.txt" >expected &&
|
||||
echo "added $VENUS planets/venus.txt" >>expected &&
|
||||
echo "added $PLANETS planets" >>expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
@ -201,7 +201,7 @@ test_expect_success "'ipfs add bigfile' succeeds" '
|
||||
|
||||
test_expect_success "'ipfs add bigfile' output looks good" '
|
||||
HASH="QmSr7FqYkxYWGoSfy8ZiaMWQ5vosb18DQGCzjwEQnVHkTb" &&
|
||||
echo "added $HASH mountdir/bigfile" >expected &&
|
||||
echo "added $HASH bigfile" >expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
test_expect_success "'ipfs cat' succeeds" '
|
||||
@ -236,7 +236,7 @@ test_expect_success EXPENSIVE "ipfs add bigfile succeeds" '
|
||||
|
||||
test_expect_success EXPENSIVE "ipfs add bigfile output looks good" '
|
||||
HASH="QmU9SWAPPmNEKZB8umYMmjYvN7VyHqABNvdA6GUi4MMEz3" &&
|
||||
echo "added $HASH mountdir/bigfile" >expected &&
|
||||
echo "added $HASH bigfile" >expected &&
|
||||
test_cmp expected actual
|
||||
'
|
||||
|
||||
|
@@ -22,9 +22,9 @@ test_add_skip() {
 '
 
 test_expect_success "'ipfs add -r' did not include . files" '
-  echo "added QmZy3khu7qf696i5HtkgL2NotsCZ8wzvNZJ1eUdA5n8KaV mountdir/planets/mars.txt
-added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz mountdir/planets/venus.txt
-added QmR8nD1Vzk5twWVC6oShTHvv7mMYkVh6dApCByBJyV2oj3 mountdir/planets" >expected
+  echo "added QmZy3khu7qf696i5HtkgL2NotsCZ8wzvNZJ1eUdA5n8KaV planets/mars.txt
+added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz planets/venus.txt
+added QmR8nD1Vzk5twWVC6oShTHvv7mMYkVh6dApCByBJyV2oj3 planets" >expected
   test_cmp expected actual
 '
 
@@ -33,14 +33,14 @@ added QmR8nD1Vzk5twWVC6oShTHvv7mMYkVh6dApCByBJyV2oj3 mountdir/planets" >expected
 '
 
 test_expect_success "'ipfs add -r --hidden' did include . files" '
-  echo "added QmcAREBcjgnUpKfyFmUGnfajA1NQS5ydqRp7WfqZ6JF8Dx mountdir/planets/.asteroids/ceres.txt
-added QmZ5eaLybJ5GUZBNwy24AA9EEDTDpA4B8qXnuN3cGxu2uF mountdir/planets/.asteroids/pallas.txt
-added Qmf6rbs5GF85anDuoxpSAdtuZPM9D2Yt3HngzjUVSQ7kDV mountdir/planets/.asteroids
-added QmaowqjedBkUrMUXgzt9c2ZnAJncM9jpJtkFfgdFstGr5a mountdir/planets/.charon.txt
-added QmU4zFD5eJtRBsWC63AvpozM9Atiadg9kPVTuTrnCYJiNF mountdir/planets/.pluto.txt
-added QmZy3khu7qf696i5HtkgL2NotsCZ8wzvNZJ1eUdA5n8KaV mountdir/planets/mars.txt
-added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz mountdir/planets/venus.txt
-added QmetajtFdmzhWYodAsZoVZSiqpeJDAiaw2NwbM3xcWcpDj mountdir/planets" >expected &&
+  echo "added QmcAREBcjgnUpKfyFmUGnfajA1NQS5ydqRp7WfqZ6JF8Dx planets/.asteroids/ceres.txt
+added QmZ5eaLybJ5GUZBNwy24AA9EEDTDpA4B8qXnuN3cGxu2uF planets/.asteroids/pallas.txt
+added Qmf6rbs5GF85anDuoxpSAdtuZPM9D2Yt3HngzjUVSQ7kDV planets/.asteroids
+added QmaowqjedBkUrMUXgzt9c2ZnAJncM9jpJtkFfgdFstGr5a planets/.charon.txt
+added QmU4zFD5eJtRBsWC63AvpozM9Atiadg9kPVTuTrnCYJiNF planets/.pluto.txt
+added QmZy3khu7qf696i5HtkgL2NotsCZ8wzvNZJ1eUdA5n8KaV planets/mars.txt
+added QmQnv4m3Q5512zgVtpbJ9z85osQrzZzGRn934AGh6iVEXz planets/venus.txt
+added QmetajtFdmzhWYodAsZoVZSiqpeJDAiaw2NwbM3xcWcpDj planets" >expected &&
   test_cmp expected actual
 '
 
@ -8,42 +8,42 @@ test_description="Test add -w"
|
||||
|
||||
add_w_m='QmazHkwx6mPmmCEi1jR5YzjjQd1g5XzKfYQLzRAg7x5uUk'
|
||||
|
||||
add_w_1='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu m/4r93
|
||||
add_w_1='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
|
||||
added Qmf82PSsMpUHcrqxa69KG6Qp5yeK7K9BTizXgG3nvzWcNG '
|
||||
|
||||
add_w_12='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu m/4r93
|
||||
added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs m/4u6ead
|
||||
add_w_12='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
|
||||
added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead
|
||||
added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ '
|
||||
|
||||
add_w_21='added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs m/4u6ead
|
||||
added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu m/4r93
|
||||
add_w_21='added QmVb4ntSZZnT2J2zvCmXKMJc52cmZYH6AB37MzeYewnkjs 4u6ead
|
||||
added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
|
||||
added QmZPASVB6EsADrLN8S2sak34zEHL8mx4TAVsPJU9cNnQQJ '
|
||||
|
||||
add_w_d1='added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb m/t_1wp-8a2/_jo7/-s782qgs
|
||||
added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K m/t_1wp-8a2/_jo7/15totauzkak-
|
||||
added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR m/t_1wp-8a2/_jo7/galecuirrj4r
|
||||
added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 m/t_1wp-8a2/_jo7/mzo50r-1xidf5zx
|
||||
added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ m/t_1wp-8a2/_jo7/wzvsihy
|
||||
added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by m/t_1wp-8a2/_jo7
|
||||
add_w_d1='added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs
|
||||
added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K _jo7/15totauzkak-
|
||||
added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR _jo7/galecuirrj4r
|
||||
added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 _jo7/mzo50r-1xidf5zx
|
||||
added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy
|
||||
added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7
|
||||
added QmNQoesMj1qp8ApE51NbtTjFYksyzkezPD4cat7V2kzbKN '
|
||||
|
||||
add_w_d2='added QmVaKAt2eVftNKFfKhiBV7Mu5HjCugffuLqWqobSSFgiA7 m/t_1wp-8a2/h3qpecj0
|
||||
added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ m/ha6f0x7su6/gnz66h/1k0xpx34
|
||||
added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke m/ha6f0x7su6/gnz66h/9cwudvacx
|
||||
added QmfYmpCCAMU9nLe7xbrYsHf5z2R2GxeQnsm4zavUhX9vq2 m/ha6f0x7su6/gnz66h/9ximv51cbo8
|
||||
added QmWgEE4e2kfx3b8HZcBk5cLrfhoi8kTMQP2MipgPhykuV3 m/ha6f0x7su6/gnz66h/b54ygh6gs
|
||||
added QmcLbqEqhREGednc6mrVtanee4WHKp5JnUfiwTTHCJwuDf m/ha6f0x7su6/gnz66h/lbl5
|
||||
added QmVPwNy8pZegpsNmsjjZvdTQn4uCeuZgtzhgWhRSQWjK9x m/ha6f0x7su6/gnz66h
|
||||
added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb m/t_1wp-8a2/_jo7/-s782qgs
|
||||
added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K m/t_1wp-8a2/_jo7/15totauzkak-
|
||||
added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR m/t_1wp-8a2/_jo7/galecuirrj4r
|
||||
added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 m/t_1wp-8a2/_jo7/mzo50r-1xidf5zx
|
||||
added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ m/t_1wp-8a2/_jo7/wzvsihy
|
||||
added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by m/t_1wp-8a2/_jo7
|
||||
added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu m/4r93
|
||||
add_w_d2='added QmVaKAt2eVftNKFfKhiBV7Mu5HjCugffuLqWqobSSFgiA7 h3qpecj0
|
||||
added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ gnz66h/1k0xpx34
|
||||
added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke gnz66h/9cwudvacx
|
||||
added QmfYmpCCAMU9nLe7xbrYsHf5z2R2GxeQnsm4zavUhX9vq2 gnz66h/9ximv51cbo8
|
||||
added QmWgEE4e2kfx3b8HZcBk5cLrfhoi8kTMQP2MipgPhykuV3 gnz66h/b54ygh6gs
|
||||
added QmcLbqEqhREGednc6mrVtanee4WHKp5JnUfiwTTHCJwuDf gnz66h/lbl5
|
||||
added QmVPwNy8pZegpsNmsjjZvdTQn4uCeuZgtzhgWhRSQWjK9x gnz66h
|
||||
added QmPcaX84tDiTfzdTn8GQxexodgeWH6mHjSss5Zfr5ojssb _jo7/-s782qgs
|
||||
added QmaVBqquUuXKjkyWHXaXfsaQUxAnsCKS95VRDHU8PzGA4K _jo7/15totauzkak-
|
||||
added QmaAHFG8cmhW3WLjofx5siSp44VV25ETN6ThzrU8iAqpkR _jo7/galecuirrj4r
|
||||
added QmeuSfhJNKwBESp1W9H8cfoMdBfW3AeHQDWXbNXQJYWp53 _jo7/mzo50r-1xidf5zx
|
||||
added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy
|
||||
added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7
|
||||
added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
|
||||
added QmTmc46fhKC8Liuh5soy1VotdnHcqLu3r6HpPGwDZCnqL1 '
|
||||
|
||||
add_w_r='QmWpSjVaMts6cXr4g4uQ9AVadunLKxW7Fhyxk3TXo36hEf'
|
||||
add_w_r='QmcCksBMDuuyuyfAMMNzEAx6Z7jTrdRy9a23WpufAhG9ji'
|
||||
|
||||
. lib/test-lib.sh
|
||||
|
||||
|
@ -11,6 +11,15 @@ test_description="Test ipfs repo operations"
|
||||
test_init_ipfs
|
||||
test_launch_ipfs_daemon
|
||||
|
||||
test_expect_success "'ipfs repo gc' succeeds" '
|
||||
ipfs repo gc >gc_out_actual
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs repo gc' looks good (empty)" '
|
||||
true >empty &&
|
||||
test_cmp empty gc_out_actual
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs add afile' succeeds" '
|
||||
echo "some text" >afile &&
|
||||
HASH=`ipfs add -q afile`
|
||||
@ -25,9 +34,10 @@ test_expect_success "'ipfs repo gc' succeeds" '
|
||||
ipfs repo gc >gc_out_actual
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs repo gc' looks good (empty)" '
|
||||
true >empty &&
|
||||
test_cmp empty gc_out_actual
|
||||
test_expect_success "'ipfs repo gc' looks good (patch root)" '
|
||||
PATCH_ROOT=QmQXirSbubiySKnqaFyfs5YzziXRB5JEVQVjU6xsd7innr &&
|
||||
echo "removed $PATCH_ROOT" >patch_root &&
|
||||
test_cmp patch_root gc_out_actual
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs repo gc' doesnt remove file" '
|
||||
@ -60,7 +70,8 @@ test_expect_success "file no longer pinned" '
|
||||
ipfs refs -r "$HASH_WELCOME_DOCS" >>expected2 &&
|
||||
echo "$HASH_GATEWAY_ASSETS" >>expected2 &&
|
||||
ipfs refs -r "$HASH_GATEWAY_ASSETS" >>expected2 &&
|
||||
echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn >> expected2 &&
|
||||
EMPTY_DIR=QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn &&
|
||||
echo "$EMPTY_DIR" >>expected2 &&
|
||||
ipfs pin ls --type=recursive --quiet >actual2 &&
|
||||
test_sort_cmp expected2 actual2
|
||||
'
|
||||
@ -96,14 +107,16 @@ test_expect_success "remove direct pin" '
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs repo gc' removes file" '
|
||||
echo "removed $HASH" >expected7 &&
|
||||
echo "removed $PATCH_ROOT" >expected7 &&
|
||||
echo "removed $HASH" >>expected7 &&
|
||||
ipfs repo gc >actual7 &&
|
||||
test_cmp expected7 actual7
|
||||
test_sort_cmp expected7 actual7
|
||||
'
|
||||
|
||||
# TODO: there seems to be a serious bug with leveldb not returning a key.
|
||||
test_expect_failure "'ipfs refs local' no longer shows file" '
|
||||
echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn >expected8 &&
|
||||
EMPTY_DIR=QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn &&
|
||||
echo "$EMPTY_DIR" >expected8 &&
|
||||
echo "$HASH_WELCOME_DOCS" >>expected8 &&
|
||||
ipfs refs -r "$HASH_WELCOME_DOCS" >>expected8 &&
|
||||
ipfs refs local >actual8 &&
|
||||
@ -146,7 +159,7 @@ test_expect_success "'ipfs pin ls --type=recursive' is correct" '
|
||||
echo "$MBLOCKHASH" >rp_expected &&
|
||||
echo "$HASH_WELCOME_DOCS" >>rp_expected &&
|
||||
echo "$HASH_GATEWAY_ASSETS" >>rp_expected &&
|
||||
echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn >>rp_expected &&
|
||||
echo "$EMPTY_DIR" >>rp_expected &&
|
||||
ipfs refs -r "$HASH_WELCOME_DOCS" >>rp_expected &&
|
||||
ipfs refs -r "$HASH_GATEWAY_ASSETS" >>rp_expected &&
|
||||
sed -i="" "s/\(.*\)/\1 recursive/g" rp_expected &&
|
||||
|
@ -100,6 +100,7 @@ test_expect_success "'ipfs add dir' succeeds" '
|
||||
echo "some text 5" >dir1/dir3/file5 &&
|
||||
ipfs add -q -r dir1 | tail -n1 >actual1 &&
|
||||
echo "$HASH_DIR1" >expected1 &&
|
||||
ipfs repo gc && # remove the patch chaff
|
||||
test_cmp actual1 expected1
|
||||
'
|
||||
|
||||
|