Mirror of https://github.com/ipfs/kubo.git (synced 2025-05-17 06:57:40 +08:00)
core/commands!: remove deprecated object APIs (#10375)
@@ -9,9 +9,7 @@ import (
	"github.com/ipfs/kubo/core/coreapi"

	"github.com/ipfs/boxo/files"
	"github.com/ipfs/boxo/path"
	cid "github.com/ipfs/go-cid"
	options "github.com/ipfs/kubo/core/coreiface/options"
)

//go:embed init-doc
@@ -39,12 +37,7 @@ func addAssetList(nd *core.IpfsNode, l []string) (cid.Cid, error) {
		return cid.Cid{}, err
	}

	dirb, err := api.Object().New(nd.Context(), options.Object.Type("unixfs-dir"))
	if err != nil {
		return cid.Cid{}, err
	}

	basePath := path.FromCid(dirb.Cid())
	dirMap := map[string]files.Node{}

	for _, p := range l {
		d, err := Asset.ReadFile(p)
@@ -52,17 +45,12 @@ func addAssetList(nd *core.IpfsNode, l []string) (cid.Cid, error) {
			return cid.Cid{}, fmt.Errorf("assets: could load Asset '%s': %s", p, err)
		}

		fp, err := api.Unixfs().Add(nd.Context(), files.NewBytesFile(d))
		if err != nil {
			return cid.Cid{}, err
		}
		dirMap[gopath.Base(p)] = files.NewBytesFile(d)
	}

		fname := gopath.Base(p)

		basePath, err = api.Object().AddLink(nd.Context(), basePath, fname, fp)
		if err != nil {
			return cid.Cid{}, err
		}
	basePath, err := api.Unixfs().Add(nd.Context(), files.NewMapDirectory(dirMap))
	if err != nil {
		return cid.Cid{}, err
	}

	if err := api.Pin().Add(nd.Context(), basePath); err != nil {
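
Illustrative sketch (not part of the diff): the pattern the updated addAssetList uses — build the directory through the UnixFS API instead of patching dag-pb objects via the removed Object API. Function, package and variable names below are invented for the example; the boxo/kubo packages and calls are the ones shown in the hunk above.

package assetexample

import (
	"context"

	"github.com/ipfs/boxo/files"
	"github.com/ipfs/boxo/path"
	iface "github.com/ipfs/kubo/core/coreiface"
)

// addAssetsDir mirrors the new addAssetList flow: build an in-memory UnixFS
// directory from raw bytes, add it with a single Unixfs().Add call, then pin it.
// Sketch only; 'api' is assumed to be an already-constructed CoreAPI.
func addAssetsDir(ctx context.Context, api iface.CoreAPI, assets map[string][]byte) (path.ImmutablePath, error) {
	dirMap := map[string]files.Node{}
	for name, data := range assets {
		dirMap[name] = files.NewBytesFile(data)
	}

	// One Unixfs().Add of a map directory replaces the removed
	// Object().New("unixfs-dir") + Object().AddLink loop.
	dirPath, err := api.Unixfs().Add(ctx, files.NewMapDirectory(dirMap))
	if err != nil {
		return path.ImmutablePath{}, err
	}
	if err := api.Pin().Add(ctx, dirPath); err != nil {
		return path.ImmutablePath{}, err
	}
	return dirPath, nil
}
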
@@ -19,7 +19,7 @@ if [ $? -ne 0 ]; then
fi

# check the object is there
ipfs object stat "$1" >/dev/null
ipfs dag stat "$1" >/dev/null
if [ $? -ne 0 ]; then
  echo "error: ipfs cannot find $1"
  exit 1
@@ -1,16 +1,10 @@
package rpc

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"github.com/ipfs/boxo/ipld/merkledag"
	ft "github.com/ipfs/boxo/ipld/unixfs"
	"github.com/ipfs/boxo/path"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
	iface "github.com/ipfs/kubo/core/coreiface"
	caopts "github.com/ipfs/kubo/core/coreiface/options"
)
@ -21,138 +15,6 @@ type objectOut struct {
|
||||
Hash string
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) New(ctx context.Context, opts ...caopts.ObjectNewOption) (ipld.Node, error) {
|
||||
options, err := caopts.ObjectNewOptions(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var n ipld.Node
|
||||
switch options.Type {
|
||||
case "empty":
|
||||
n = new(merkledag.ProtoNode)
|
||||
case "unixfs-dir":
|
||||
n = ft.EmptyDirNode()
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown object type: %s", options.Type)
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Put(ctx context.Context, r io.Reader, opts ...caopts.ObjectPutOption) (path.ImmutablePath, error) {
|
||||
options, err := caopts.ObjectPutOptions(opts...)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
var out objectOut
|
||||
err = api.core().Request("object/put").
|
||||
Option("inputenc", options.InputEnc).
|
||||
Option("datafieldenc", options.DataType).
|
||||
Option("pin", options.Pin).
|
||||
FileBody(r).
|
||||
Exec(ctx, &out)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
c, err := cid.Parse(out.Hash)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
return path.FromCid(c), nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Get(ctx context.Context, p path.Path) (ipld.Node, error) {
|
||||
r, err := api.core().Block().Get(ctx, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return merkledag.DecodeProtobuf(b)
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Data(ctx context.Context, p path.Path) (io.Reader, error) {
|
||||
resp, err := api.core().Request("object/data", p.String()).Send(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.Error != nil {
|
||||
return nil, resp.Error
|
||||
}
|
||||
|
||||
// TODO: make Data return ReadCloser to avoid copying
|
||||
defer resp.Close()
|
||||
b := new(bytes.Buffer)
|
||||
if _, err := io.Copy(b, resp.Output); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Links(ctx context.Context, p path.Path) ([]*ipld.Link, error) {
|
||||
var out struct {
|
||||
Links []struct {
|
||||
Name string
|
||||
Hash string
|
||||
Size uint64
|
||||
}
|
||||
}
|
||||
if err := api.core().Request("object/links", p.String()).Exec(ctx, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := make([]*ipld.Link, len(out.Links))
|
||||
for i, l := range out.Links {
|
||||
c, err := cid.Parse(l.Hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res[i] = &ipld.Link{
|
||||
Cid: c,
|
||||
Name: l.Name,
|
||||
Size: l.Size,
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Stat(ctx context.Context, p path.Path) (*iface.ObjectStat, error) {
|
||||
var out struct {
|
||||
Hash string
|
||||
NumLinks int
|
||||
BlockSize int
|
||||
LinksSize int
|
||||
DataSize int
|
||||
CumulativeSize int
|
||||
}
|
||||
if err := api.core().Request("object/stat", p.String()).Exec(ctx, &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c, err := cid.Parse(out.Hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &iface.ObjectStat{
|
||||
Cid: c,
|
||||
NumLinks: out.NumLinks,
|
||||
BlockSize: out.BlockSize,
|
||||
LinksSize: out.LinksSize,
|
||||
DataSize: out.DataSize,
|
||||
CumulativeSize: out.CumulativeSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, child path.Path, opts ...caopts.ObjectAddLinkOption) (path.ImmutablePath, error) {
|
||||
options, err := caopts.ObjectAddLinkOptions(opts...)
|
||||
if err != nil {
|
||||
@ -191,40 +53,6 @@ func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string) (
|
||||
return path.FromCid(c), nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) AppendData(ctx context.Context, p path.Path, r io.Reader) (path.ImmutablePath, error) {
|
||||
var out objectOut
|
||||
err := api.core().Request("object/patch/append-data", p.String()).
|
||||
FileBody(r).
|
||||
Exec(ctx, &out)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
c, err := cid.Parse(out.Hash)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
return path.FromCid(c), nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) SetData(ctx context.Context, p path.Path, r io.Reader) (path.ImmutablePath, error) {
|
||||
var out objectOut
|
||||
err := api.core().Request("object/patch/set-data", p.String()).
|
||||
FileBody(r).
|
||||
Exec(ctx, &out)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
c, err := cid.Parse(out.Hash)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
return path.FromCid(c), nil
|
||||
}
|
||||
|
||||
type change struct {
|
||||
Type iface.ChangeType
|
||||
Path string
|
||||
|
@@ -61,6 +61,11 @@ func TestCommands(t *testing.T) {
		"/dag/stat",
		"/dht",
		"/dht/query",
		"/dht/findprovs",
		"/dht/findpeer",
		"/dht/get",
		"/dht/provide",
		"/dht/put",
		"/routing",
		"/routing/put",
		"/routing/get",
@@ -15,13 +15,19 @@ import (
var ErrNotDHT = errors.New("routing service is not a DHT")

var DhtCmd = &cmds.Command{
	Status: cmds.Deprecated,
	Helptext: cmds.HelpText{
		Tagline:          "Issue commands directly through the DHT.",
		ShortDescription: ``,
	},

	Subcommands: map[string]*cmds.Command{
		"query":     queryDhtCmd,
		"query":     queryDhtCmd,
		"findprovs": RemovedDHTCmd,
		"findpeer":  RemovedDHTCmd,
		"get":       RemovedDHTCmd,
		"put":       RemovedDHTCmd,
		"provide":   RemovedDHTCmd,
	},
}

@@ -32,6 +38,7 @@ type kademlia interface {
}

var queryDhtCmd = &cmds.Command{
	Status: cmds.Deprecated,
	Helptext: cmds.HelpText{
		Tagline:          "Find the closest Peer IDs to a given Peer ID by querying the DHT.",
		ShortDescription: "Outputs a list of newline-delimited Peer IDs.",
@@ -114,3 +121,12 @@ var queryDhtCmd = &cmds.Command{
	},
	Type: routing.QueryEvent{},
}
var RemovedDHTCmd = &cmds.Command{
	Status: cmds.Removed,
	Helptext: cmds.HelpText{
		Tagline: "Removed, use 'ipfs routing' instead.",
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		return errors.New("removed, use 'ipfs routing' instead")
	},
}
@@ -1,28 +1,11 @@
package objectcmd

import (
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"text/tabwriter"

	cmds "github.com/ipfs/go-ipfs-cmds"
	"github.com/ipfs/kubo/core/commands/cmdenv"
	"github.com/ipfs/kubo/core/commands/cmdutils"

	humanize "github.com/dustin/go-humanize"
	dag "github.com/ipfs/boxo/ipld/merkledag"
	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/ipfs/kubo/core/coreiface/options"
)

type Node struct {
|
||||
Links []Link
|
||||
Data string
|
||||
}
|
||||
|
||||
type Link struct {
|
||||
Name, Hash string
|
||||
Size uint64
|
||||
@ -35,16 +18,6 @@ type Object struct {
|
||||
|
||||
var ErrDataEncoding = errors.New("unknown data field encoding")
|
||||
|
||||
const (
|
||||
headersOptionName = "headers"
|
||||
encodingOptionName = "data-encoding"
|
||||
inputencOptionName = "inputenc"
|
||||
datafieldencOptionName = "datafieldenc"
|
||||
pinOptionName = "pin"
|
||||
quietOptionName = "quiet"
|
||||
humanOptionName = "human"
|
||||
)
|
||||
|
||||
var ObjectCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
@@ -55,516 +28,23 @@ directly. Deprecated, use more modern 'ipfs dag' and 'ipfs files' instead.`,
	},

	Subcommands: map[string]*cmds.Command{
		"data":  ObjectDataCmd,
		"data":  RemovedObjectCmd,
		"diff":  ObjectDiffCmd,
		"get":   ObjectGetCmd,
		"links": ObjectLinksCmd,
		"new":   ObjectNewCmd,
		"get":   RemovedObjectCmd,
		"links": RemovedObjectCmd,
		"new":   RemovedObjectCmd,
		"patch": ObjectPatchCmd,
		"put":   ObjectPutCmd,
		"stat":  ObjectStatCmd,
		"put":   RemovedObjectCmd,
		"stat":  RemovedObjectCmd,
	},
}

// ObjectDataCmd object data command
|
||||
var ObjectDataCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
var RemovedObjectCmd = &cmds.Command{
|
||||
Status: cmds.Removed,
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to read the raw bytes of a dag-pb object: use 'dag get' instead.",
|
||||
ShortDescription: `
|
||||
'ipfs object data' is a deprecated plumbing command for retrieving the raw
|
||||
bytes stored in a dag-pb node. It outputs to stdout, and <key> is a base58
|
||||
encoded multihash. Provided for legacy reasons. Use 'ipfs dag get' instead.
|
||||
`,
|
||||
LongDescription: `
|
||||
'ipfs object data' is a deprecated plumbing command for retrieving the raw
|
||||
bytes stored in a dag-pb node. It outputs to stdout, and <key> is a base58
|
||||
encoded multihash. Provided for legacy reasons. Use 'ipfs dag get' instead.
|
||||
|
||||
Note that the "--encoding" option does not affect the output, since the output
|
||||
is the raw data of the object.
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("key", true, false, "Key of the object to retrieve, in base58-encoded multihash format.").EnableStdin(),
|
||||
Tagline: "Removed, use 'ipfs dag' or 'ipfs files' instead.",
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path, err := cmdutils.PathOrCidPath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := api.Object().Data(req.Context, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return res.Emit(data)
|
||||
return errors.New("removed, use 'ipfs dag' or 'ipfs files' instead")
|
||||
},
|
||||
}
|
||||
|
||||
// ObjectLinksCmd object links command
|
||||
var ObjectLinksCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to output links in the specified dag-pb object: use 'dag get' instead.",
|
||||
ShortDescription: `
|
||||
'ipfs object links' is a plumbing command for retrieving the links from
|
||||
a dag-pb node. It outputs to stdout, and <key> is a base58 encoded
|
||||
multihash. Provided for legacy reasons. Use 'ipfs dag get' instead.
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("key", true, false, "Key of the dag-pb object to retrieve, in base58-encoded multihash format.").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(headersOptionName, "v", "Print table headers (Hash, Size, Name)."),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path, err := cmdutils.PathOrCidPath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rp, _, err := api.ResolvePath(req.Context, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
links, err := api.Object().Links(req.Context, rp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
outLinks := make([]Link, len(links))
|
||||
for i, link := range links {
|
||||
outLinks[i] = Link{
|
||||
Hash: enc.Encode(link.Cid),
|
||||
Name: link.Name,
|
||||
Size: link.Size,
|
||||
}
|
||||
}
|
||||
|
||||
out := &Object{
|
||||
Hash: enc.Encode(rp.RootCid()),
|
||||
Links: outLinks,
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, out)
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Object) error {
|
||||
tw := tabwriter.NewWriter(w, 1, 2, 1, ' ', 0)
|
||||
headers, _ := req.Options[headersOptionName].(bool)
|
||||
if headers {
|
||||
fmt.Fprintln(tw, "Hash\tSize\tName")
|
||||
}
|
||||
for _, link := range out.Links {
|
||||
fmt.Fprintf(tw, "%s\t%v\t%s\n", link.Hash, link.Size, cmdenv.EscNonPrint(link.Name))
|
||||
}
|
||||
tw.Flush()
|
||||
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
Type: &Object{},
|
||||
}
|
||||
|
||||
// ObjectGetCmd object get command
|
||||
var ObjectGetCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to get and serialize the dag-pb node. Use 'dag get' instead",
|
||||
ShortDescription: `
|
||||
'ipfs object get' is a plumbing command for retrieving dag-pb nodes.
|
||||
It serializes the DAG node to the format specified by the "--encoding"
|
||||
flag. It outputs to stdout, and <key> is a base58 encoded multihash.
|
||||
|
||||
DEPRECATED and provided for legacy reasons. Use 'ipfs dag get' instead.
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("key", true, false, "Key of the dag-pb object to retrieve, in base58-encoded multihash format.").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption(encodingOptionName, "Encoding type of the data field, either \"text\" or \"base64\".").WithDefault("text"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path, err := cmdutils.PathOrCidPath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
datafieldenc, _ := req.Options[encodingOptionName].(string)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nd, err := api.Object().Get(req.Context, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := api.Object().Data(req.Context, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := encodeData(data, datafieldenc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
node := &Node{
|
||||
Links: make([]Link, len(nd.Links())),
|
||||
Data: out,
|
||||
}
|
||||
|
||||
for i, link := range nd.Links() {
|
||||
node.Links[i] = Link{
|
||||
Hash: enc.Encode(link.Cid),
|
||||
Name: link.Name,
|
||||
Size: link.Size,
|
||||
}
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, node)
|
||||
},
|
||||
Type: Node{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Protobuf: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Node) error {
|
||||
// deserialize the Data field as text as this was the standard behaviour
|
||||
object, err := deserializeNode(out, "text")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
marshaled, err := object.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(marshaled)
|
||||
return err
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
// ObjectStatCmd object stat command
|
||||
var ObjectStatCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to read stats for the dag-pb node. Use 'files stat' instead.",
|
||||
ShortDescription: `
|
||||
'ipfs object stat' is a plumbing command to print dag-pb node statistics.
|
||||
<key> is a base58 encoded multihash.
|
||||
|
||||
DEPRECATED: modern replacements are 'files stat' and 'dag stat'
|
||||
`,
|
||||
LongDescription: `
|
||||
'ipfs object stat' is a plumbing command to print dag-pb node statistics.
|
||||
<key> is a base58 encoded multihash. It outputs to stdout:
|
||||
|
||||
NumLinks int number of links in link table
|
||||
BlockSize int size of the raw, encoded data
|
||||
LinksSize int size of the links segment
|
||||
DataSize int size of the data segment
|
||||
CumulativeSize int cumulative size of object and its references
|
||||
|
||||
DEPRECATED: Provided for legacy reasons. Modern replacements:
|
||||
|
||||
For unixfs, 'ipfs files stat' can be used:
|
||||
|
||||
$ ipfs files stat --with-local /ipfs/QmWfVY9y3xjsixTgbd9AorQxH7VtMpzfx2HaWtsoUYecaX
|
||||
QmWfVY9y3xjsixTgbd9AorQxH7VtMpzfx2HaWtsoUYecaX
|
||||
Size: 5
|
||||
CumulativeSize: 13
|
||||
ChildBlocks: 0
|
||||
Type: file
|
||||
Local: 13 B of 13 B (100.00%)
|
||||
|
||||
Reported sizes are based on metadata present in root block, and should not be
|
||||
trusted. A slower, but more secure alternative is 'ipfs dag stat', which
|
||||
will work for every DAG type. It comes with a benefit of calculating the
|
||||
size by walking the DAG:
|
||||
|
||||
$ ipfs dag stat /ipfs/QmWfVY9y3xjsixTgbd9AorQxH7VtMpzfx2HaWtsoUYecaX
|
||||
Size: 13, NumBlocks: 1
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("key", true, false, "Key of the object to retrieve, in base58-encoded multihash format.").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.BoolOption(humanOptionName, "Print sizes in human readable format (e.g., 1K 234M 2G)"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, err := cmdutils.PathOrCidPath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns, err := api.Object().Stat(req.Context, p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oldStat := &ipld.NodeStat{
|
||||
Hash: enc.Encode(ns.Cid),
|
||||
NumLinks: ns.NumLinks,
|
||||
BlockSize: ns.BlockSize,
|
||||
LinksSize: ns.LinksSize,
|
||||
DataSize: ns.DataSize,
|
||||
CumulativeSize: ns.CumulativeSize,
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, oldStat)
|
||||
},
|
||||
Type: ipld.NodeStat{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ipld.NodeStat) error {
|
||||
wtr := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
|
||||
defer wtr.Flush()
|
||||
fw := func(s string, n int) {
|
||||
fmt.Fprintf(wtr, "%s:\t%d\n", s, n)
|
||||
}
|
||||
human, _ := req.Options[humanOptionName].(bool)
|
||||
fw("NumLinks", out.NumLinks)
|
||||
fw("BlockSize", out.BlockSize)
|
||||
fw("LinksSize", out.LinksSize)
|
||||
fw("DataSize", out.DataSize)
|
||||
if human {
|
||||
fmt.Fprintf(wtr, "%s:\t%s\n", "CumulativeSize", humanize.Bytes(uint64(out.CumulativeSize)))
|
||||
} else {
|
||||
fw("CumulativeSize", out.CumulativeSize)
|
||||
}
|
||||
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
// ObjectPutCmd object put command
|
||||
var ObjectPutCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to store input as a DAG object. Use 'dag put' instead.",
|
||||
ShortDescription: `
|
||||
'ipfs object put' is a plumbing command for storing dag-pb nodes.
|
||||
It reads from stdin, and the output is a base58 encoded multihash.
|
||||
|
||||
DEPRECATED and provided for legacy reasons. Use 'ipfs dag put' instead.
|
||||
`,
|
||||
},
|
||||
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.FileArg("data", true, false, "Data to be stored as a dag-pb object.").EnableStdin(),
|
||||
},
|
||||
Options: []cmds.Option{
|
||||
cmds.StringOption(inputencOptionName, "Encoding type of input data. One of: {\"protobuf\", \"json\"}.").WithDefault("json"),
|
||||
cmds.StringOption(datafieldencOptionName, "Encoding type of the data field, either \"text\" or \"base64\".").WithDefault("text"),
|
||||
cmds.BoolOption(pinOptionName, "Pin this object when adding."),
|
||||
cmds.BoolOption(quietOptionName, "q", "Write minimal output."),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
file, err := cmdenv.GetFileArg(req.Files.Entries())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
inputenc, _ := req.Options[inputencOptionName].(string)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
datafieldenc, _ := req.Options[datafieldencOptionName].(string)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dopin, _ := req.Options[pinOptionName].(bool)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, err := api.Object().Put(req.Context, file,
|
||||
options.Object.DataType(datafieldenc),
|
||||
options.Object.InputEnc(inputenc),
|
||||
options.Object.Pin(dopin))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &Object{Hash: enc.Encode(p.RootCid())})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Object) error {
|
||||
quiet, _ := req.Options[quietOptionName].(bool)
|
||||
|
||||
o := out.Hash
|
||||
if !quiet {
|
||||
o = "added " + o
|
||||
}
|
||||
|
||||
fmt.Fprintln(w, o)
|
||||
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
Type: Object{},
|
||||
}
|
||||
|
||||
// ObjectNewCmd object new command
|
||||
var ObjectNewCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to create a new dag-pb object from a template.",
|
||||
ShortDescription: `
|
||||
'ipfs object new' is a plumbing command for creating new dag-pb nodes.
|
||||
DEPRECATED and provided for legacy reasons. Use 'dag put' and 'files' instead.
|
||||
`,
|
||||
LongDescription: `
|
||||
'ipfs object new' is a plumbing command for creating new dag-pb nodes.
|
||||
By default it creates and returns a new empty merkledag node, but
|
||||
you may pass an optional template argument to create a preformatted
|
||||
node.
|
||||
|
||||
Available templates:
|
||||
* unixfs-dir
|
||||
|
||||
DEPRECATED and provided for legacy reasons. Use 'dag put' and 'files' instead.
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("template", false, false, "Template to use. Optional."),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enc, err := cmdenv.GetLowLevelCidEncoder(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
template := "empty"
|
||||
if len(req.Arguments) == 1 {
|
||||
template = req.Arguments[0]
|
||||
}
|
||||
|
||||
nd, err := api.Object().New(req.Context, options.Object.Type(template))
|
||||
if err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &Object{Hash: enc.Encode(nd.Cid())})
|
||||
},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Object) error {
|
||||
fmt.Fprintln(w, out.Hash)
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
Type: Object{},
|
||||
}
|
||||
|
||||
// converts the Node object into a real dag.ProtoNode
|
||||
func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error) {
|
||||
dagnode := new(dag.ProtoNode)
|
||||
switch dataFieldEncoding {
|
||||
case "text":
|
||||
dagnode.SetData([]byte(nd.Data))
|
||||
case "base64":
|
||||
data, err := base64.StdEncoding.DecodeString(nd.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dagnode.SetData(data)
|
||||
default:
|
||||
return nil, ErrDataEncoding
|
||||
}
|
||||
|
||||
links := make([]*ipld.Link, len(nd.Links))
|
||||
for i, link := range nd.Links {
|
||||
c, err := cid.Decode(link.Hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
links[i] = &ipld.Link{
|
||||
Name: link.Name,
|
||||
Size: link.Size,
|
||||
Cid: c,
|
||||
}
|
||||
}
|
||||
if err := dagnode.SetLinks(links); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dagnode, nil
|
||||
}
|
||||
|
||||
func encodeData(data []byte, encoding string) (string, error) {
|
||||
switch encoding {
|
||||
case "text":
|
||||
return string(data), nil
|
||||
case "base64":
|
||||
return base64.StdEncoding.EncodeToString(data), nil
|
||||
}
|
||||
|
||||
return "", ErrDataEncoding
|
||||
}
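
Illustrative sketch (not part of the diff): with 'ipfs object put/get/data' removed, dag-pb nodes are created and read directly through the DAG API, as the putDagPbNode test helper added later in this commit does. The package and helper names below are invented for the example; the imports and calls mirror those used elsewhere in this diff.

package objectexample

import (
	"context"
	"fmt"

	dag "github.com/ipfs/boxo/ipld/merkledag"
	"github.com/ipfs/boxo/path"
	ipld "github.com/ipfs/go-ipld-format"
	iface "github.com/ipfs/kubo/core/coreiface"
)

// putDagPb stores a dag-pb node through the DAG API (roughly what
// 'ipfs object put' used to do); modelled on the putDagPbNode test helper.
func putDagPb(ctx context.Context, api iface.CoreAPI, data []byte, links []*ipld.Link) (path.ImmutablePath, error) {
	nd := new(dag.ProtoNode)
	nd.SetData(data)
	if links != nil {
		if err := nd.SetLinks(links); err != nil {
			return path.ImmutablePath{}, err
		}
	}
	if err := api.Dag().Add(ctx, nd); err != nil {
		return path.ImmutablePath{}, err
	}
	return path.FromCid(nd.Cid()), nil
}

// readDagPbData reads the raw data segment back (roughly what
// 'ipfs object data' exposed) by resolving the node through the DAG API.
func readDagPbData(ctx context.Context, api iface.CoreAPI, p path.ImmutablePath) ([]byte, error) {
	nd, err := api.Dag().Get(ctx, p.RootCid())
	if err != nil {
		return nil, err
	}
	pbnd, ok := nd.(*dag.ProtoNode)
	if !ok {
		return nil, fmt.Errorf("%s is not a dag-pb node", p)
	}
	return pbnd.Data(), nil
}
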
|
||||
|
@@ -37,128 +37,16 @@ For modern use cases, use MFS with 'files' commands: 'ipfs files --help'.
	},
	Arguments: []cmds.Argument{},
	Subcommands: map[string]*cmds.Command{
		"append-data": patchAppendDataCmd,
		"append-data": RemovedObjectCmd,
		"add-link":    patchAddLinkCmd,
		"rm-link":     patchRmLinkCmd,
		"set-data":    patchSetDataCmd,
		"set-data":    RemovedObjectCmd,
	},
	Options: []cmds.Option{
		cmdutils.AllowBigBlockOption,
	},
}

var patchAppendDataCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to append data to the data segment of a DAG node.",
|
||||
ShortDescription: `
|
||||
Append data to what already exists in the data segment in the given object.
|
||||
|
||||
Example:
|
||||
|
||||
$ echo "hello" | ipfs object patch $HASH append-data
|
||||
|
||||
NOTE: This does not append data to a file - it modifies the actual raw
|
||||
data within a dag-pb object. Blocks have a max size of 1MiB and objects larger than
|
||||
the limit will not be respected by the network.
|
||||
|
||||
DEPRECATED and provided for legacy reasons. Use 'ipfs add' or 'ipfs files' instead.
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("root", true, false, "The hash of the node to modify."),
|
||||
cmds.FileArg("data", true, false, "Data to append.").EnableStdin(),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
root, err := cmdutils.PathOrCidPath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
file, err := cmdenv.GetFileArg(req.Files.Entries())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, err := api.Object().AppendData(req.Context, root, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cmdutils.CheckCIDSize(req, p.RootCid(), api.Dag()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &Object{Hash: p.RootCid().String()})
|
||||
},
|
||||
Type: &Object{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, obj *Object) error {
|
||||
_, err := fmt.Fprintln(w, obj.Hash)
|
||||
return err
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
var patchSetDataCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
Tagline: "Deprecated way to set the data field of dag-pb object.",
|
||||
ShortDescription: `
|
||||
Set the data of an IPFS object from stdin or with the contents of a file.
|
||||
|
||||
Example:
|
||||
|
||||
$ echo "my data" | ipfs object patch $MYHASH set-data
|
||||
|
||||
DEPRECATED and provided for legacy reasons. Use 'files cp' and 'dag put' instead.
|
||||
`,
|
||||
},
|
||||
Arguments: []cmds.Argument{
|
||||
cmds.StringArg("root", true, false, "The hash of the node to modify."),
|
||||
cmds.FileArg("data", true, false, "The data to set the object to.").EnableStdin(),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
api, err := cmdenv.GetApi(env, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
root, err := cmdutils.PathOrCidPath(req.Arguments[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
file, err := cmdenv.GetFileArg(req.Files.Entries())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, err := api.Object().SetData(req.Context, root, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cmdutils.CheckCIDSize(req, p.RootCid(), api.Dag()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return cmds.EmitOnce(res, &Object{Hash: p.RootCid().String()})
|
||||
},
|
||||
Type: Object{},
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Object) error {
|
||||
fmt.Fprintln(w, out.Hash)
|
||||
return nil
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
var patchRmLinkCmd = &cmds.Command{
|
||||
Status: cmds.Deprecated, // https://github.com/ipfs/kubo/issues/7936
|
||||
Helptext: cmds.HelpText{
|
||||
|
@@ -1,22 +1,12 @@
package coreapi

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io"

	dag "github.com/ipfs/boxo/ipld/merkledag"
	"github.com/ipfs/boxo/ipld/merkledag/dagutils"
	ft "github.com/ipfs/boxo/ipld/unixfs"
	"github.com/ipfs/boxo/path"
	pin "github.com/ipfs/boxo/pinning/pinner"
	cid "github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
	coreiface "github.com/ipfs/kubo/core/coreiface"
	caopts "github.com/ipfs/kubo/core/coreiface/options"
	"go.opentelemetry.io/otel/attribute"
@@ -25,8 +15,6 @@ import (
	"github.com/ipfs/kubo/tracing"
)

const inputLimit = 2 << 20

type ObjectAPI CoreAPI

type Link struct {
@ -39,180 +27,6 @@ type Node struct {
|
||||
Data string
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) New(ctx context.Context, opts ...caopts.ObjectNewOption) (ipld.Node, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "New")
|
||||
defer span.End()
|
||||
|
||||
options, err := caopts.ObjectNewOptions(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var n ipld.Node
|
||||
switch options.Type {
|
||||
case "empty":
|
||||
n = new(dag.ProtoNode)
|
||||
case "unixfs-dir":
|
||||
n = ft.EmptyDirNode()
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown node type: %s", options.Type)
|
||||
}
|
||||
|
||||
err = api.dag.Add(ctx, n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Put(ctx context.Context, src io.Reader, opts ...caopts.ObjectPutOption) (path.ImmutablePath, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "Put")
|
||||
defer span.End()
|
||||
|
||||
options, err := caopts.ObjectPutOptions(opts...)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
span.SetAttributes(
|
||||
attribute.Bool("pin", options.Pin),
|
||||
attribute.String("datatype", options.DataType),
|
||||
attribute.String("inputenc", options.InputEnc),
|
||||
)
|
||||
|
||||
data, err := io.ReadAll(io.LimitReader(src, inputLimit+10))
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
var dagnode *dag.ProtoNode
|
||||
switch options.InputEnc {
|
||||
case "json":
|
||||
node := new(Node)
|
||||
decoder := json.NewDecoder(bytes.NewReader(data))
|
||||
decoder.DisallowUnknownFields()
|
||||
err = decoder.Decode(node)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
dagnode, err = deserializeNode(node, options.DataType)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
case "protobuf":
|
||||
dagnode, err = dag.DecodeProtobuf(data)
|
||||
|
||||
case "xml":
|
||||
node := new(Node)
|
||||
err = xml.Unmarshal(data, node)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
dagnode, err = deserializeNode(node, options.DataType)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
default:
|
||||
return path.ImmutablePath{}, errors.New("unknown object encoding")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
if options.Pin {
|
||||
defer api.blockstore.PinLock(ctx).Unlock(ctx)
|
||||
}
|
||||
|
||||
err = api.dag.Add(ctx, dagnode)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
if options.Pin {
|
||||
if err := api.pinning.PinWithMode(ctx, dagnode.Cid(), pin.Recursive, ""); err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
err = api.pinning.Flush(ctx)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return path.FromCid(dagnode.Cid()), nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Get(ctx context.Context, path path.Path) (ipld.Node, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "Get", trace.WithAttributes(attribute.String("path", path.String())))
|
||||
defer span.End()
|
||||
return api.core().ResolveNode(ctx, path)
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Data(ctx context.Context, path path.Path) (io.Reader, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "Data", trace.WithAttributes(attribute.String("path", path.String())))
|
||||
defer span.End()
|
||||
|
||||
nd, err := api.core().ResolveNode(ctx, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pbnd, ok := nd.(*dag.ProtoNode)
|
||||
if !ok {
|
||||
return nil, dag.ErrNotProtobuf
|
||||
}
|
||||
|
||||
return bytes.NewReader(pbnd.Data()), nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Links(ctx context.Context, path path.Path) ([]*ipld.Link, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "Links", trace.WithAttributes(attribute.String("path", path.String())))
|
||||
defer span.End()
|
||||
|
||||
nd, err := api.core().ResolveNode(ctx, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
links := nd.Links()
|
||||
out := make([]*ipld.Link, len(links))
|
||||
for n, l := range links {
|
||||
out[n] = (*ipld.Link)(l)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Stat(ctx context.Context, path path.Path) (*coreiface.ObjectStat, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "Stat", trace.WithAttributes(attribute.String("path", path.String())))
|
||||
defer span.End()
|
||||
|
||||
nd, err := api.core().ResolveNode(ctx, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stat, err := nd.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := &coreiface.ObjectStat{
|
||||
Cid: nd.Cid(),
|
||||
NumLinks: stat.NumLinks,
|
||||
BlockSize: stat.BlockSize,
|
||||
LinksSize: stat.LinksSize,
|
||||
DataSize: stat.DataSize,
|
||||
CumulativeSize: stat.CumulativeSize,
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) AddLink(ctx context.Context, base path.Path, name string, child path.Path, opts ...caopts.ObjectAddLinkOption) (path.ImmutablePath, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "AddLink", trace.WithAttributes(
|
||||
attribute.String("base", base.String()),
|
||||
@ -294,49 +108,6 @@ func (api *ObjectAPI) RmLink(ctx context.Context, base path.Path, link string) (
|
||||
return path.FromCid(nnode.Cid()), nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) AppendData(ctx context.Context, path path.Path, r io.Reader) (path.ImmutablePath, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "AppendData", trace.WithAttributes(attribute.String("path", path.String())))
|
||||
defer span.End()
|
||||
|
||||
return api.patchData(ctx, path, r, true)
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) SetData(ctx context.Context, path path.Path, r io.Reader) (path.ImmutablePath, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "SetData", trace.WithAttributes(attribute.String("path", path.String())))
|
||||
defer span.End()
|
||||
|
||||
return api.patchData(ctx, path, r, false)
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) patchData(ctx context.Context, p path.Path, r io.Reader, appendData bool) (path.ImmutablePath, error) {
|
||||
nd, err := api.core().ResolveNode(ctx, p)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
pbnd, ok := nd.(*dag.ProtoNode)
|
||||
if !ok {
|
||||
return path.ImmutablePath{}, dag.ErrNotProtobuf
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
if appendData {
|
||||
data = append(pbnd.Data(), data...)
|
||||
}
|
||||
pbnd.SetData(data)
|
||||
|
||||
err = api.dag.Add(ctx, pbnd)
|
||||
if err != nil {
|
||||
return path.ImmutablePath{}, err
|
||||
}
|
||||
|
||||
return path.FromCid(pbnd.Cid()), nil
|
||||
}
|
||||
|
||||
func (api *ObjectAPI) Diff(ctx context.Context, before path.Path, after path.Path) ([]coreiface.ObjectChange, error) {
|
||||
ctx, span := tracing.Span(ctx, "CoreAPI.ObjectAPI", "Diff", trace.WithAttributes(
|
||||
attribute.String("before", before.String()),
|
||||
@ -381,37 +152,3 @@ func (api *ObjectAPI) Diff(ctx context.Context, before path.Path, after path.Pat
|
||||
func (api *ObjectAPI) core() coreiface.CoreAPI {
|
||||
return (*CoreAPI)(api)
|
||||
}
|
||||
|
||||
func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error) {
|
||||
dagnode := new(dag.ProtoNode)
|
||||
switch dataFieldEncoding {
|
||||
case "text":
|
||||
dagnode.SetData([]byte(nd.Data))
|
||||
case "base64":
|
||||
data, err := base64.StdEncoding.DecodeString(nd.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dagnode.SetData(data)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown data field encoding")
|
||||
}
|
||||
|
||||
links := make([]*ipld.Link, len(nd.Links))
|
||||
for i, link := range nd.Links {
|
||||
c, err := cid.Decode(link.Hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
links[i] = &ipld.Link{
|
||||
Name: link.Name,
|
||||
Size: link.Size,
|
||||
Cid: c,
|
||||
}
|
||||
}
|
||||
if err := dagnode.SetLinks(links); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dagnode, nil
|
||||
}
|
||||
|
@@ -2,36 +2,11 @@ package iface

import (
	"context"
	"io"

	"github.com/ipfs/boxo/path"
	"github.com/ipfs/kubo/core/coreiface/options"

	"github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
)

// ObjectStat provides information about dag nodes
type ObjectStat struct {
	// Cid is the CID of the node
	Cid cid.Cid

	// NumLinks is number of links the node contains
	NumLinks int

	// BlockSize is size of the raw serialized node
	BlockSize int

	// LinksSize is size of the links block section
	LinksSize int

	// DataSize is the size of data block section
	DataSize int

	// CumulativeSize is size of the tree (BlockSize + link sizes)
	CumulativeSize int
}

// ChangeType denotes type of change in ObjectChange
type ChangeType int

@@ -69,24 +44,6 @@ type ObjectChange struct {
// ObjectAPI specifies the interface to MerkleDAG and contains useful utilities
// for manipulating MerkleDAG data structures.
type ObjectAPI interface {
	// New creates a new, empty (by default) dag-node.
	New(context.Context, ...options.ObjectNewOption) (ipld.Node, error)

	// Put imports the data into merkledag
	Put(context.Context, io.Reader, ...options.ObjectPutOption) (path.ImmutablePath, error)

	// Get returns the node for the path
	Get(context.Context, path.Path) (ipld.Node, error)

	// Data returns a reader for the data of the node
	Data(context.Context, path.Path) (io.Reader, error)

	// Links returns the list of links the node contains
	Links(context.Context, path.Path) ([]*ipld.Link, error)

	// Stat returns information about the node
	Stat(context.Context, path.Path) (*ObjectStat, error)

	// AddLink adds a link under the specified path. The child path can point to a
	// subdirectory within the parent, which must be present (can be overridden
	// with the WithCreate option).
@@ -95,12 +52,6 @@ type ObjectAPI interface {
	// RmLink removes a link from the node
	RmLink(ctx context.Context, base path.Path, link string) (path.ImmutablePath, error)

	// AppendData appends data to the node
	AppendData(context.Context, path.Path, io.Reader) (path.ImmutablePath, error)

	// SetData sets the data contained in the node
	SetData(context.Context, path.Path, io.Reader) (path.ImmutablePath, error)

	// Diff returns a set of changes needed to transform the first object into the
	// second.
	Diff(context.Context, path.Path, path.Path) ([]ObjectChange, error)
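
Illustrative sketch (not part of the diff): the calls that stay on ObjectAPI after this commit — AddLink, RmLink and Diff — are still used as before. The package and helper names below are invented for the example, and p1/p2 are assumed to already point at dag-pb nodes (for instance, ones added through the DAG API).

package ifaceexample

import (
	"context"

	"github.com/ipfs/boxo/path"
	iface "github.com/ipfs/kubo/core/coreiface"
	"github.com/ipfs/kubo/core/coreiface/options"
)

// remainingObjectAPI exercises the surviving ObjectAPI surface: AddLink,
// RmLink and Diff.
func remainingObjectAPI(ctx context.Context, api iface.CoreAPI, p1, p2 path.ImmutablePath) ([]iface.ObjectChange, error) {
	withLink, err := api.Object().AddLink(ctx, p2, "child", p1)
	if err != nil {
		return nil, err
	}
	// Create intermediate directories when linking under a nested name.
	nested, err := api.Object().AddLink(ctx, p2, "a/b", p1, options.Object.Create(true))
	if err != nil {
		return nil, err
	}
	trimmed, err := api.Object().RmLink(ctx, withLink, "child")
	if err != nil {
		return nil, err
	}
	return api.Object().Diff(ctx, nested, trimmed)
}
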
@ -1,55 +1,13 @@
|
||||
package options
|
||||
|
||||
type ObjectNewSettings struct {
|
||||
Type string
|
||||
}
|
||||
|
||||
type ObjectPutSettings struct {
|
||||
InputEnc string
|
||||
DataType string
|
||||
Pin bool
|
||||
}
|
||||
|
||||
type ObjectAddLinkSettings struct {
|
||||
Create bool
|
||||
}
|
||||
|
||||
type (
|
||||
ObjectNewOption func(*ObjectNewSettings) error
|
||||
ObjectPutOption func(*ObjectPutSettings) error
|
||||
ObjectAddLinkOption func(*ObjectAddLinkSettings) error
|
||||
)
|
||||
|
||||
func ObjectNewOptions(opts ...ObjectNewOption) (*ObjectNewSettings, error) {
|
||||
options := &ObjectNewSettings{
|
||||
Type: "empty",
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
err := opt(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return options, nil
|
||||
}
|
||||
|
||||
func ObjectPutOptions(opts ...ObjectPutOption) (*ObjectPutSettings, error) {
|
||||
options := &ObjectPutSettings{
|
||||
InputEnc: "json",
|
||||
DataType: "text",
|
||||
Pin: false,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
err := opt(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return options, nil
|
||||
}
|
||||
|
||||
func ObjectAddLinkOptions(opts ...ObjectAddLinkOption) (*ObjectAddLinkSettings, error) {
|
||||
options := &ObjectAddLinkSettings{
|
||||
Create: false,
|
||||
@ -68,54 +26,6 @@ type objectOpts struct{}
|
||||
|
||||
var Object objectOpts
|
||||
|
||||
// Type is an option for Object.New which allows to change the type of created
|
||||
// dag node.
|
||||
//
|
||||
// Supported types:
|
||||
// * 'empty' - Empty node
|
||||
// * 'unixfs-dir' - Empty UnixFS directory
|
||||
func (objectOpts) Type(t string) ObjectNewOption {
|
||||
return func(settings *ObjectNewSettings) error {
|
||||
settings.Type = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// InputEnc is an option for Object.Put which specifies the input encoding of the
|
||||
// data. Default is "json".
|
||||
//
|
||||
// Supported encodings:
|
||||
// * "protobuf"
|
||||
// * "json"
|
||||
func (objectOpts) InputEnc(e string) ObjectPutOption {
|
||||
return func(settings *ObjectPutSettings) error {
|
||||
settings.InputEnc = e
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DataType is an option for Object.Put which specifies the encoding of data
|
||||
// field when using Json or XML input encoding.
|
||||
//
|
||||
// Supported types:
|
||||
// * "text" (default)
|
||||
// * "base64"
|
||||
func (objectOpts) DataType(t string) ObjectPutOption {
|
||||
return func(settings *ObjectPutSettings) error {
|
||||
settings.DataType = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Pin is an option for Object.Put which specifies whether to pin the added
|
||||
// objects, default is false
|
||||
func (objectOpts) Pin(pin bool) ObjectPutOption {
|
||||
return func(settings *ObjectPutSettings) error {
|
||||
settings.Pin = pin
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Create is an option for Object.AddLink which specifies whether create required
|
||||
// directories for the child
|
||||
func (objectOpts) Create(create bool) ObjectAddLinkOption {
|
||||
|
@@ -1,15 +1,15 @@
package tests

import (
	"bytes"
	"context"
	"encoding/hex"
	"io"
	"strings"
	"testing"

	dag "github.com/ipfs/boxo/ipld/merkledag"
	"github.com/ipfs/boxo/path"
	ipld "github.com/ipfs/go-ipld-format"
	iface "github.com/ipfs/kubo/core/coreiface"
	opt "github.com/ipfs/kubo/core/coreiface/options"
	"github.com/stretchr/testify/require"
)

func (tp *TestSuite) TestObject(t *testing.T) {
@@ -20,448 +20,125 @@ func (tp *TestSuite) TestObject(t *testing.T) {
		return nil
	})

	t.Run("TestNew", tp.TestNew)
	t.Run("TestObjectPut", tp.TestObjectPut)
	t.Run("TestObjectGet", tp.TestObjectGet)
	t.Run("TestObjectData", tp.TestObjectData)
	t.Run("TestObjectLinks", tp.TestObjectLinks)
	t.Run("TestObjectStat", tp.TestObjectStat)
	t.Run("TestObjectAddLink", tp.TestObjectAddLink)
	t.Run("TestObjectAddLinkCreate", tp.TestObjectAddLinkCreate)
	t.Run("TestObjectRmLink", tp.TestObjectRmLink)
	t.Run("TestObjectAddData", tp.TestObjectAddData)
	t.Run("TestObjectSetData", tp.TestObjectSetData)
	t.Run("TestDiffTest", tp.TestDiffTest)
}

func (tp *TestSuite) TestNew(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
api, err := tp.makeAPI(t, ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
func putDagPbNode(t *testing.T, ctx context.Context, api iface.CoreAPI, data string, links []*ipld.Link) path.ImmutablePath {
|
||||
dagnode := new(dag.ProtoNode)
|
||||
|
||||
if data != "" {
|
||||
dagnode.SetData([]byte(data))
|
||||
}
|
||||
|
||||
emptyNode, err := api.Object().New(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
if links != nil {
|
||||
err := dagnode.SetLinks(links)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
dirNode, err := api.Object().New(ctx, opt.Object.Type("unixfs-dir"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err := api.Dag().Add(ctx, dagnode)
|
||||
require.NoError(t, err)
|
||||
|
||||
if emptyNode.String() != "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" {
|
||||
t.Errorf("Unexpected emptyNode path: %s", emptyNode.String())
|
||||
}
|
||||
|
||||
if dirNode.String() != "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" {
|
||||
t.Errorf("Unexpected dirNode path: %s", dirNode.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *TestSuite) TestObjectPut(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
api, err := tp.makeAPI(t, ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"YmFy"}`), opt.Object.DataType("base64")) // bar
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pbBytes, err := hex.DecodeString("0a0362617a")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p3, err := api.Object().Put(ctx, bytes.NewReader(pbBytes), opt.Object.InputEnc("protobuf"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if p1.String() != "/ipfs/QmQeGyS87nyijii7kFt1zbe4n2PsXTFimzsdxyE9qh9TST" {
|
||||
t.Errorf("unexpected path: %s", p1.String())
|
||||
}
|
||||
|
||||
if p2.String() != "/ipfs/QmNeYRbCibmaMMK6Du6ChfServcLqFvLJF76PzzF76SPrZ" {
|
||||
t.Errorf("unexpected path: %s", p2.String())
|
||||
}
|
||||
|
||||
if p3.String() != "/ipfs/QmZreR7M2t7bFXAdb1V5FtQhjk4t36GnrvueLJowJbQM9m" {
|
||||
t.Errorf("unexpected path: %s", p3.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *TestSuite) TestObjectGet(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
api, err := tp.makeAPI(t, ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
nd, err := api.Object().Get(ctx, p1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if string(nd.RawData()[len(nd.RawData())-3:]) != "foo" {
|
||||
t.Fatal("got non-matching data")
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *TestSuite) TestObjectData(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
api, err := tp.makeAPI(t, ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r, err := api.Object().Data(ctx, p1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if string(data) != "foo" {
|
||||
t.Fatal("got non-matching data")
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *TestSuite) TestObjectLinks(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
api, err := tp.makeAPI(t, ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p2, err := api.Object().Put(ctx, strings.NewReader(`{"Links":[{"Name":"bar", "Hash":"`+p1.RootCid().String()+`"}]}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
links, err := api.Object().Links(ctx, p2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(links) != 1 {
|
||||
t.Errorf("unexpected number of links: %d", len(links))
|
||||
}
|
||||
|
||||
if links[0].Cid.String() != p1.RootCid().String() {
|
||||
t.Fatal("cids didn't batch")
|
||||
}
|
||||
|
||||
if links[0].Name != "bar" {
|
||||
t.Fatal("unexpected link name")
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *TestSuite) TestObjectStat(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
api, err := tp.makeAPI(t, ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.RootCid().String()+`", "Size":3}]}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stat, err := api.Object().Stat(ctx, p2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if stat.Cid.String() != p2.RootCid().String() {
|
||||
t.Error("unexpected stat.Cid")
|
||||
}
|
||||
|
||||
if stat.NumLinks != 1 {
|
||||
t.Errorf("unexpected stat.NumLinks")
|
||||
}
|
||||
|
||||
if stat.BlockSize != 51 {
|
||||
t.Error("unexpected stat.BlockSize")
|
||||
}
|
||||
|
||||
if stat.LinksSize != 47 {
|
||||
t.Errorf("unexpected stat.LinksSize: %d", stat.LinksSize)
|
||||
}
|
||||
|
||||
if stat.DataSize != 4 {
|
||||
t.Error("unexpected stat.DataSize")
|
||||
}
|
||||
|
||||
if stat.CumulativeSize != 54 {
|
||||
t.Error("unexpected stat.DataSize")
|
||||
}
|
||||
return path.FromCid(dagnode.Cid())
|
||||
}
|
||||
|
||||
func (tp *TestSuite) TestObjectAddLink(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
api, err := tp.makeAPI(t, ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.RootCid().String()+`", "Size":3}]}`))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p1 := putDagPbNode(t, ctx, api, "foo", nil)
|
||||
p2 := putDagPbNode(t, ctx, api, "bazz", []*ipld.Link{
|
||||
{
|
||||
Name: "bar",
|
||||
Cid: p1.RootCid(),
|
||||
Size: 3,
|
||||
},
|
||||
})
|
||||
|
||||
p3, err := api.Object().AddLink(ctx, p2, "abc", p2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
links, err := api.Object().Links(ctx, p3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
nd, err := api.Dag().Get(ctx, p3.RootCid())
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(links) != 2 {
|
||||
t.Errorf("unexpected number of links: %d", len(links))
|
||||
}
|
||||
|
||||
if links[0].Name != "abc" {
|
||||
t.Errorf("unexpected link 0 name: %s", links[0].Name)
|
||||
}
|
||||
|
||||
if links[1].Name != "bar" {
|
||||
t.Errorf("unexpected link 1 name: %s", links[1].Name)
|
||||
}
|
||||
links := nd.Links()
|
||||
require.Len(t, links, 2)
|
||||
require.Equal(t, "abc", links[0].Name)
|
||||
require.Equal(t, "bar", links[1].Name)
|
||||
}
|
||||
|
func (tp *TestSuite) TestObjectAddLinkCreate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
api, err := tp.makeAPI(t, ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
if err != nil {
t.Fatal(err)
}

p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.RootCid().String()+`", "Size":3}]}`))
if err != nil {
t.Fatal(err)
}
p1 := putDagPbNode(t, ctx, api, "foo", nil)
p2 := putDagPbNode(t, ctx, api, "bazz", []*ipld.Link{
{
Name: "bar",
Cid: p1.RootCid(),
Size: 3,
},
})

_, err = api.Object().AddLink(ctx, p2, "abc/d", p2)
if err == nil {
t.Fatal("expected an error")
}
if !strings.Contains(err.Error(), "no link by that name") {
t.Fatalf("unexpected error: %s", err.Error())
}
require.ErrorContains(t, err, "no link by that name")

p3, err := api.Object().AddLink(ctx, p2, "abc/d", p2, opt.Object.Create(true))
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

links, err := api.Object().Links(ctx, p3)
if err != nil {
t.Fatal(err)
}
nd, err := api.Dag().Get(ctx, p3.RootCid())
require.NoError(t, err)

if len(links) != 2 {
t.Errorf("unexpected number of links: %d", len(links))
}

if links[0].Name != "abc" {
t.Errorf("unexpected link 0 name: %s", links[0].Name)
}

if links[1].Name != "bar" {
t.Errorf("unexpected link 1 name: %s", links[1].Name)
}
links := nd.Links()
require.Len(t, links, 2)
require.Equal(t, "abc", links[0].Name)
require.Equal(t, "bar", links[1].Name)
}

func (tp *TestSuite) TestObjectRmLink(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
api, err := tp.makeAPI(t, ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
if err != nil {
t.Fatal(err)
}

p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.RootCid().String()+`", "Size":3}]}`))
if err != nil {
t.Fatal(err)
}
p1 := putDagPbNode(t, ctx, api, "foo", nil)
p2 := putDagPbNode(t, ctx, api, "bazz", []*ipld.Link{
{
Name: "bar",
Cid: p1.RootCid(),
Size: 3,
},
})

p3, err := api.Object().RmLink(ctx, p2, "bar")
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

links, err := api.Object().Links(ctx, p3)
if err != nil {
t.Fatal(err)
}
nd, err := api.Dag().Get(ctx, p3.RootCid())
require.NoError(t, err)

if len(links) != 0 {
t.Errorf("unexpected number of links: %d", len(links))
}
}

func (tp *TestSuite) TestObjectAddData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
api, err := tp.makeAPI(t, ctx)
if err != nil {
t.Fatal(err)
}

p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
if err != nil {
t.Fatal(err)
}

p2, err := api.Object().AppendData(ctx, p1, strings.NewReader("bar"))
if err != nil {
t.Fatal(err)
}

r, err := api.Object().Data(ctx, p2)
if err != nil {
t.Fatal(err)
}

data, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}

if string(data) != "foobar" {
t.Error("unexpected data")
}
}

func (tp *TestSuite) TestObjectSetData(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
api, err := tp.makeAPI(t, ctx)
if err != nil {
t.Fatal(err)
}

p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
if err != nil {
t.Fatal(err)
}

p2, err := api.Object().SetData(ctx, p1, strings.NewReader("bar"))
if err != nil {
t.Fatal(err)
}

r, err := api.Object().Data(ctx, p2)
if err != nil {
t.Fatal(err)
}

data, err := io.ReadAll(r)
if err != nil {
t.Fatal(err)
}

if string(data) != "bar" {
t.Error("unexpected data")
}
links := nd.Links()
require.Len(t, links, 0)
}

func (tp *TestSuite) TestDiffTest(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
api, err := tp.makeAPI(t, ctx)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`))
if err != nil {
t.Fatal(err)
}

p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bar"}`))
if err != nil {
t.Fatal(err)
}
p1 := putDagPbNode(t, ctx, api, "foo", nil)
p2 := putDagPbNode(t, ctx, api, "bar", nil)

changes, err := api.Object().Diff(ctx, p1, p2)
if err != nil {
t.Fatal(err)
}

if len(changes) != 1 {
t.Fatal("unexpected changes len")
}

if changes[0].Type != iface.DiffMod {
t.Fatal("unexpected change type")
}

if changes[0].Before.String() != p1.String() {
t.Fatal("unexpected before path")
}

if changes[0].After.String() != p2.String() {
t.Fatal("unexpected before path")
}
require.NoError(t, err)
require.Len(t, changes, 1)
require.Equal(t, iface.DiffMod, changes[0].Type)
require.Equal(t, p1.String(), changes[0].Before.String())
require.Equal(t, p2.String(), changes[0].After.String())
}

@ -630,16 +630,11 @@ func (tp *TestSuite) TestGetDir(t *testing.T) {
}
p := path.FromCid(edir.Cid())

emptyDir, err := api.Object().New(ctx, options.Object.Type("unixfs-dir"))
if err != nil {
t.Fatal(err)
if p.String() != path.FromCid(edir.Cid()).String() {
t.Fatalf("expected path %s, got: %s", edir.Cid(), p.String())
}

if p.String() != path.FromCid(emptyDir.Cid()).String() {
t.Fatalf("expected path %s, got: %s", emptyDir.Cid(), p.String())
}

r, err := api.Unixfs().Get(ctx, path.FromCid(emptyDir.Cid()))
r, err := api.Unixfs().Get(ctx, path.FromCid(edir.Cid()))
if err != nil {
t.Fatal(err)
}
@ -779,17 +774,12 @@ func (tp *TestSuite) TestLsEmptyDir(t *testing.T) {
t.Fatal(err)
}

_, err = api.Unixfs().Add(ctx, files.NewSliceDirectory([]files.DirEntry{}))
p, err := api.Unixfs().Add(ctx, files.NewSliceDirectory([]files.DirEntry{}))
if err != nil {
t.Fatal(err)
}

emptyDir, err := api.Object().New(ctx, options.Object.Type("unixfs-dir"))
if err != nil {
t.Fatal(err)
}

links, err := api.Unixfs().Ls(ctx, path.FromCid(emptyDir.Cid()))
links, err := api.Unixfs().Ls(ctx, p)
if err != nil {
t.Fatal(err)
}

@ -8,6 +8,7 @@
- [🔦 Highlights](#-highlights)
- [RPC client: removed deprecated DHT API](#rpc-client-removed-deprecated-dht-api)
- [Gateway: `/api/v0` is removed](#gateway-apiv0-is-removed)
- [Removed deprecated Object API commands](#removed-deprecated-object-api-commands)
- [📝 Changelog](#-changelog)
- [👨👩👧👦 Contributors](#-contributors)

@ -23,6 +24,10 @@ The legacy subset of the Kubo RPC that was available via the Gateway port and wa

If you have legacy software that relies on this behavior and want to expose parts of `/api/v0` next to `/ipfs`, use a reverse proxy in front of Kubo to mount both the Gateway and the RPC on the same port. NOTE: exposing the RPC to the internet comes with a security risk: make sure to specify access control via [API.Authorizations](https://github.com/ipfs/kubo/blob/master/docs/config.md#apiauthorizations).

#### Removed deprecated Object API commands

The Object API commands deprecated back in [2021](https://github.com/ipfs/kubo/issues/7936) have been removed, except for `object diff`, `object patch add-link`, and `object patch rm-link`, whose alternatives have not yet been built (see issues [4801](https://github.com/ipfs/kubo/issues/4801) and [4782](https://github.com/ipfs/kubo/issues/4782)).
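As a rough migration sketch drawn from the replacements used throughout this changeset (not an exhaustive mapping; `<cid>` is a placeholder), the DAG API covers the common cases:

ipfs dag get <cid>                                       # instead of: ipfs object get <cid>
ipfs dag stat <cid>                                      # instead of: ipfs object stat <cid>
ipfs dag get <cid> | jq -r '.Links[] | .Hash | .["/"]'   # instead of: ipfs object links <cid>
echo '{"Data":{"/":{"bytes":"CAE"}},"Links":[]}' | ipfs dag put --store-codec dag-pb   # instead of: ipfs object new unixfs-dir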
### 📝 Changelog

### 👨👩👧👦 Contributors

@ -39,10 +39,9 @@ function calls. For example:
#### CLI API Transport

In the commandline, IPFS uses a traditional flag and arg-based mapping, where:
- the first argument selects the command, as in git - e.g. `ipfs object get`
- the first argument selects the command, as in git - e.g. `ipfs dag get`
- the flags specify options - e.g. `--enc=protobuf -q`
- the rest are positional arguments - e.g.
`ipfs object patch <hash1> add-link foo <hash2>`
- the rest are positional arguments - e.g. `ipfs key rename <name> <newName>`
- files are specified by filename, or through stdin

(NOTE: When kubo runs the daemon, the CLI API is actually converted to HTTP

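To make the conversion mentioned in the NOTE above concrete, here is an illustrative sketch (the daemon address and CID are placeholders, and the exact wire format is described by the RPC documentation): a CLI call such as

ipfs dag get --output-codec=dag-json <cid>

is sent to the local daemon as an HTTP RPC request roughly of the form

curl -X POST "http://127.0.0.1:5001/api/v0/dag/get?arg=<cid>&output-codec=dag-json"

where the subcommand path becomes the `/api/v0/...` endpoint, flags become query parameters, and positional arguments are passed via `arg`.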
@ -147,7 +147,6 @@ func TestCommandDocsWidth(t *testing.T) {
"ipfs swarm addrs listen": true,
"ipfs dag resolve": true,
"ipfs dag get": true,
"ipfs object stat": true,
"ipfs pin remote add": true,
"ipfs config show": true,
"ipfs config edit": true,
@ -164,8 +163,6 @@ func TestCommandDocsWidth(t *testing.T) {
"ipfs object diff": true,
"ipfs object patch add-link": true,
"ipfs name": true,
"ipfs object patch append-data": true,
"ipfs object patch set-data": true,
"ipfs diag profile": true,
"ipfs diag cmds": true,
"ipfs swarm addrs local": true,

@ -42,12 +42,12 @@ test_expect_success "'ipfs block put' output looks good" '
'

test_expect_success "can set cid codec on block put" '
CODEC_HASH=$(ipfs block put --cid-codec=dag-pb ../t0051-object-data/testPut.pb)
CODEC_HASH=$(ipfs block put --cid-codec=dag-pb ../t0050-block-data/testPut.pb)
'

test_expect_success "block get output looks right" '
ipfs block get $CODEC_HASH > pb_block_out &&
test_cmp pb_block_out ../t0051-object-data/testPut.pb
test_cmp pb_block_out ../t0050-block-data/testPut.pb
'

#
@ -210,33 +210,33 @@ test_expect_success "multi-block 'ipfs block rm -q' produces no output" '
# --format used 'protobuf' for 'dag-pb' which was invalid, but we keep
# for backward-compatibility
test_expect_success "can set deprecated --format=protobuf on block put" '
HASH=$(ipfs block put --format=protobuf ../t0051-object-data/testPut.pb)
HASH=$(ipfs block put --format=protobuf ../t0050-block-data/testPut.pb)
'

test_expect_success "created an object correctly!" '
ipfs object get $HASH > obj_out &&
echo "{\"Links\":[],\"Data\":\"test json for sharness test\"}" > obj_exp &&
ipfs dag get $HASH > obj_out &&
echo -n "{\"Data\":{\"/\":{\"bytes\":\"dGVzdCBqc29uIGZvciBzaGFybmVzcyB0ZXN0\"}},\"Links\":[]}" > obj_exp &&
test_cmp obj_out obj_exp
'

test_expect_success "block get output looks right" '
ipfs block get $HASH > pb_block_out &&
test_cmp pb_block_out ../t0051-object-data/testPut.pb
test_cmp pb_block_out ../t0050-block-data/testPut.pb
'

test_expect_success "can set --cid-codec=dag-pb on block put" '
HASH=$(ipfs block put --cid-codec=dag-pb ../t0051-object-data/testPut.pb)
HASH=$(ipfs block put --cid-codec=dag-pb ../t0050-block-data/testPut.pb)
'

test_expect_success "created an object correctly!" '
ipfs object get $HASH > obj_out &&
echo "{\"Links\":[],\"Data\":\"test json for sharness test\"}" > obj_exp &&
ipfs dag get $HASH > obj_out &&
echo -n "{\"Data\":{\"/\":{\"bytes\":\"dGVzdCBqc29uIGZvciBzaGFybmVzcyB0ZXN0\"}},\"Links\":[]}" > obj_exp &&
test_cmp obj_out obj_exp
'

test_expect_success "block get output looks right" '
ipfs block get $HASH > pb_block_out &&
test_cmp pb_block_out ../t0051-object-data/testPut.pb
test_cmp pb_block_out ../t0050-block-data/testPut.pb
'

test_expect_success "can set multihash type and length on block put with --format=raw (deprecated)" '
@ -248,7 +248,7 @@ test_expect_success "output looks good" '
'

test_expect_success "can't use both legacy format and custom cid-codec at the same time" '
test_expect_code 1 ipfs block put --format=dag-cbor --cid-codec=dag-json < ../t0051-object-data/testPut.pb 2> output &&
test_expect_code 1 ipfs block put --format=dag-cbor --cid-codec=dag-json < ../t0050-block-data/testPut.pb 2> output &&
test_should_contain "unable to use \"format\" (deprecated) and a custom \"cid-codec\" at the same time" output
'

Binary file not shown.
@ -1,5 +0,0 @@
{
"this": "should",
"return": "an",
"error":"not valid dag object"
}
@ -1 +0,0 @@
<Noodles><Spaghetti>This is not a valid dag object fail</Spaghetti></Noodles>
@ -1 +0,0 @@
{"Links":[],"Data":"\b\u0002\u0012\nHello Mars\u0018\n"}
@ -1,5 +0,0 @@
{"Data": "another",
"Links": [
{"Name": "some link", "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V", "Size": 8},
{"Name": "inlined", "Hash": "z4CrgyEyhm4tAw1pgzQtNNuP7", "Size": 14}
]}
@ -1,3 +0,0 @@
{
"Data": "test json for sharness test"
}
@ -1 +0,0 @@
<Node><Data>Test xml for sharness test</Data></Node>
@ -27,204 +27,21 @@ test_patch_create_path() {
}

test_object_cmd() {

test_expect_success "'ipfs add testData' succeeds" '
printf "Hello Mars" >expected_in &&
ipfs add expected_in >actual_Addout
'

test_expect_success "'ipfs add testData' output looks good" '
HASH="QmWkHFpYBZ9mpPRreRbMhhYWXfUhBAue3JkbbpFqwowSRb" &&
echo "added $HASH expected_in" >expected_Addout &&
test_cmp expected_Addout actual_Addout
'

test_expect_success "'ipfs object get' succeeds" '
ipfs object get $HASH >actual_getOut
'

test_expect_success "'ipfs object get' output looks good" '
test_cmp ../t0051-object-data/expected_getOut actual_getOut
'

test_expect_success "'ipfs object get' can specify data encoding as base64" '
ipfs object get --data-encoding base64 $HASH > obj_out &&
echo "{\"Links\":[],\"Data\":\"CAISCkhlbGxvIE1hcnMYCg==\"}" > obj_exp &&
test_cmp obj_out obj_exp
'

test_expect_success "'ipfs object get' can specify data encoding as text" '
echo "{\"Links\":[],\"Data\":\"Hello Mars\"}" | ipfs object put &&
ipfs object get --data-encoding text QmS3hVY6eYrMQ6L22agwrx3YHBEsc3LJxVXCtyQHqRBukH > obj_out &&
echo "{\"Links\":[],\"Data\":\"Hello Mars\"}" > obj_exp &&
test_cmp obj_out obj_exp
'

test_expect_failure "'ipfs object get' requires known data encoding" '
ipfs object get --data-encoding nonsensical-encoding $HASH
'

test_expect_success "'ipfs object stat' succeeds" '
ipfs object stat $HASH >actual_stat
'

test_expect_success "'ipfs object get' output looks good" '
echo "NumLinks: 0" > expected_stat &&
echo "BlockSize: 18" >> expected_stat &&
echo "LinksSize: 2" >> expected_stat &&
echo "DataSize: 16" >> expected_stat &&
echo "CumulativeSize: 18" >> expected_stat &&
test_cmp expected_stat actual_stat
'

test_expect_success "'ipfs object put file.json' succeeds" '
ipfs object put ../t0051-object-data/testPut.json > actual_putOut
'

test_expect_success "'ipfs object put file.json' output looks good" '
HASH="QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD" &&
printf "added $HASH\n" > expected_putOut &&
test_cmp expected_putOut actual_putOut
'

test_expect_success "'ipfs object put --quiet file.json' succeeds" '
ipfs object put --quiet ../t0051-object-data/testPut.json > actual_putOut
'

test_expect_success "'ipfs object put --quiet file.json' output looks good" '
HASH="QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD" &&
printf "$HASH\n" > expected_putOut &&
test_cmp expected_putOut actual_putOut
'

test_expect_success "'ipfs object put file.xml' succeeds" '
ipfs object put ../t0051-object-data/testPut.xml --inputenc=xml > actual_putOut
'

test_expect_success "'ipfs object put file.xml' output looks good" '
HASH="QmQzNKUHy4HyEUGkqKe3q3t796ffPLQXYCkHCcXUNT5JNK" &&
printf "added $HASH\n" > expected_putOut &&
test_cmp expected_putOut actual_putOut
'

test_expect_success "'ipfs object put' from stdin succeeds" '
cat ../t0051-object-data/testPut.xml | ipfs object put --inputenc=xml > actual_putStdinOut
'

test_expect_failure "'ipfs object put broken.xml' should fail" '
test_expect_code 1 ipfs object put ../t0051-object-data/brokenPut.xml --inputenc=xml 2>actual_putBrokenErr >actual_putBroken
'

test_expect_failure "'ipfs object put broken.hxml' output looks good" '
touch expected_putBroken &&
printf "Error: no data or links in this node\n" > expected_putBrokenErr &&
test_cmp expected_putBroken actual_putBroken &&
test_cmp expected_putBrokenErr actual_putBrokenErr
'
test_expect_success "'ipfs object get --enc=xml' succeeds" '
ipfs object get --enc=xml $HASH >utf8_xml
'

test_expect_success "'ipfs object put --inputenc=xml' succeeds" '
ipfs object put --inputenc=xml <utf8_xml >actual
'

test_expect_failure "'ipfs object put --inputenc=xml' output looks good" '
echo "added $HASH\n" >expected &&
test_cmp expected actual
'

test_expect_success "'ipfs object put file.pb' succeeds" '
ipfs object put --inputenc=protobuf ../t0051-object-data/testPut.pb > actual_putOut
'

test_expect_success "'ipfs object put file.pb' output looks good" '
HASH="QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD" &&
printf "added $HASH\n" > expected_putOut &&
test_cmp expected_putOut actual_putOut
'

test_expect_success "'ipfs object put' from stdin succeeds" '
cat ../t0051-object-data/testPut.json | ipfs object put > actual_putStdinOut
'

test_expect_success "'ipfs object put' from stdin output looks good" '
HASH="QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD" &&
printf "added $HASH\n" > expected_putStdinOut &&
test_cmp expected_putStdinOut actual_putStdinOut
'

test_expect_success "'ipfs object put' from stdin (pb) succeeds" '
cat ../t0051-object-data/testPut.pb | ipfs object put --inputenc=protobuf > actual_putPbStdinOut
'

test_expect_success "'ipfs object put' from stdin (pb) output looks good" '
HASH="QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD" &&
printf "added $HASH\n" > expected_putStdinOut &&
test_cmp expected_putStdinOut actual_putPbStdinOut
'

test_expect_success "'ipfs object put broken.json' should fail" '
test_expect_code 1 ipfs object put ../t0051-object-data/brokenPut.json 2>actual_putBrokenErr >actual_putBroken
'

test_expect_success "'ipfs object put broken.hjson' output looks good" '
touch expected_putBroken &&
printf "Error: json: unknown field \"this\"\n" > expected_putBrokenErr &&
test_cmp expected_putBroken actual_putBroken &&
test_cmp expected_putBrokenErr actual_putBrokenErr
'

test_expect_success "setup: add UTF-8 test file" '
HASH="QmNY5sQeH9ttVCg24sizH71dNbcZTpGd7Yb3YwsKZ4jiFP" &&
ipfs add ../t0051-object-data/UTF-8-test.txt >actual &&
echo "added $HASH UTF-8-test.txt" >expected &&
test_cmp expected actual
'

test_expect_success "'ipfs object get --enc=json' succeeds" '
ipfs object get --enc=json $HASH >utf8_json
'

test_expect_success "'ipfs object put --inputenc=json' succeeds" '
ipfs object put --inputenc=json <utf8_json >actual
'

test_expect_failure "'ipfs object put --inputenc=json' output looks good" '
echo "added $HASH" >expected &&
test_cmp expected actual
'

test_expect_success "'ipfs object put --pin' succeeds" '
HASH="QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V" &&
echo "added $HASH" >expected &&
echo "{ \"Data\": \"abc\" }" | ipfs object put --pin >actual
'

test_expect_success "'ipfs object put --pin' output looks good" '
echo "added $HASH" >expected &&
test_cmp expected actual
'

test_expect_success "after gc, objects still accessible" '
ipfs repo gc > /dev/null &&
ipfs refs -r --timeout=2s $HASH > /dev/null
'
EMPTY_DIR=$(echo '{"Links":[]}' | ipfs dag put --store-codec dag-pb)
EMPTY_UNIXFS_DIR=$(echo '{"Data":{"/":{"bytes":"CAE"}},"Links":[]}' | ipfs dag put --store-codec dag-pb)

test_expect_success "'ipfs object patch' should work (no unixfs-dir)" '
EMPTY_DIR=$(ipfs object new) &&
OUTPUT=$(ipfs object patch $EMPTY_DIR add-link foo $EMPTY_DIR) &&
ipfs object stat $OUTPUT
ipfs dag stat $OUTPUT
'

test_expect_success "'ipfs object patch' should work" '
EMPTY_DIR=$(ipfs object new unixfs-dir) &&
OUTPUT=$(ipfs object patch $EMPTY_DIR add-link foo $EMPTY_DIR) &&
ipfs object stat $OUTPUT
OUTPUT=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link foo $EMPTY_UNIXFS_DIR) &&
ipfs dag stat $OUTPUT
'

test_expect_success "'ipfs object patch' check output block size" '
DIR=$(ipfs object new unixfs-dir)
DIR=$EMPTY_UNIXFS_DIR
for i in {1..13}
do
DIR=$(ipfs object patch "$DIR" add-link "$DIR.jpg" "$DIR")
@ -241,32 +58,20 @@ test_object_cmd() {
test_expect_code 0 ipfs object patch --allow-big-block=true "$DIR" add-link "$DIR.jpg" "$DIR"
'

test_expect_success "'ipfs object new foo' shouldn't crash" '
test_expect_code 1 ipfs object new foo
'

test_expect_success "'ipfs object links' gives the correct results" '
echo "$EMPTY_DIR" 4 foo > expected &&
ipfs object links "$OUTPUT" > actual &&
test_cmp expected actual
'

test_expect_success "'ipfs object patch add-link' should work with paths" '
EMPTY_DIR=$(ipfs object new unixfs-dir) &&
N1=$(ipfs object patch $EMPTY_DIR add-link baz $EMPTY_DIR) &&
N2=$(ipfs object patch $EMPTY_DIR add-link bar $N1) &&
N3=$(ipfs object patch $EMPTY_DIR add-link foo /ipfs/$N2/bar) &&
ipfs object stat /ipfs/$N3 > /dev/null &&
ipfs object stat $N3/foo > /dev/null &&
ipfs object stat /ipfs/$N3/foo/baz > /dev/null
N1=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link baz $EMPTY_UNIXFS_DIR) &&
N2=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link bar $N1) &&
N3=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link foo /ipfs/$N2/bar) &&
ipfs dag stat /ipfs/$N3 > /dev/null &&
ipfs dag stat $N3/foo > /dev/null &&
ipfs dag stat /ipfs/$N3/foo/baz > /dev/null
'

test_expect_success "'ipfs object patch add-link' allow linking IPLD objects" '
EMPTY_DIR=$(ipfs object new unixfs-dir) &&
OBJ=$(echo "123" | ipfs dag put) &&
N1=$(ipfs object patch $EMPTY_DIR add-link foo $OBJ) &&
N1=$(ipfs object patch $EMPTY_UNIXFS_DIR add-link foo $OBJ) &&

ipfs object stat /ipfs/$N1 > /dev/null &&
ipfs dag stat /ipfs/$N1 > /dev/null &&
ipfs resolve /ipfs/$N1/foo > actual &&
echo /ipfs/$OBJ > expected &&

@ -274,7 +79,7 @@ test_object_cmd() {
'

test_expect_success "object patch creation looks right" '
echo "QmPc73aWK9dgFBXe86P4PvQizHo9e5Qt7n7DAMXWuigFuG" > hash_exp &&
echo "bafybeiakusqwohnt7bs75kx6jhmt4oi47l634bmudxfv4qxhpco6xuvgna" > hash_exp &&
echo $N3 > hash_actual &&
test_cmp hash_exp hash_actual
'
@ -282,7 +87,7 @@ test_object_cmd() {
test_expect_success "multilayer ipfs patch works" '
echo "hello world" > hwfile &&
FILE=$(ipfs add -q hwfile) &&
EMPTY=$(ipfs object new unixfs-dir) &&
EMPTY=$EMPTY_UNIXFS_DIR &&
ONE=$(ipfs object patch $EMPTY add-link b $EMPTY) &&
TWO=$(ipfs object patch $EMPTY add-link a $ONE) &&
ipfs object patch $TWO add-link a/b/c $FILE > multi_patch
@ -293,49 +98,12 @@ test_object_cmd() {
test_cmp hwfile hwfile_out
'

test_expect_success "ipfs object stat path succeeds" '
ipfs object stat $(cat multi_patch)/a > obj_stat_out
'

test_expect_success "ipfs object stat output looks good" '
echo "NumLinks: 1" > obj_stat_exp &&
echo "BlockSize: 47" >> obj_stat_exp &&
echo "LinksSize: 45" >> obj_stat_exp &&
echo "DataSize: 2" >> obj_stat_exp &&
echo "CumulativeSize: 114" >> obj_stat_exp &&

test_cmp obj_stat_exp obj_stat_out
'

test_expect_success "'ipfs object stat --human' succeeds" '
ipfs object stat $(cat multi_patch)/a --human > obj_stat_human_out
'

test_expect_success "ipfs object stat --human output looks good" '
echo "NumLinks: 1" > obj_stat_human_exp &&
echo "BlockSize: 47" >> obj_stat_human_exp &&
echo "LinksSize: 45" >> obj_stat_human_exp &&
echo "DataSize: 2" >> obj_stat_human_exp &&
echo "CumulativeSize: 114 B" >> obj_stat_human_exp &&

test_cmp obj_stat_human_exp obj_stat_human_out
'

test_expect_success "should have created dir within a dir" '
ipfs ls $OUTPUT > patched_output
'

test_expect_success "output looks good" '
echo "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn - foo/" > patched_exp &&
test_cmp patched_exp patched_output
'

test_expect_success "can remove the directory" '
ipfs object patch $OUTPUT rm-link foo > rmlink_output
'

test_expect_success "output should be empty" '
echo QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn > rmlink_exp &&
echo bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354 > rmlink_exp &&
test_cmp rmlink_exp rmlink_output
'

@ -344,7 +112,7 @@ test_object_cmd() {
'

test_expect_success "output looks good" '
echo "QmZD3r9cZjzU8huNY2JS9TC6n8daDfT8TmE8zBSqG31Wvq" > multi_link_rm_exp &&
echo "bafybeicourxysmtbe5hacxqico4d5hyvh7gqkrwlmqa4ew7zufn3pj3juu" > multi_link_rm_exp &&
test_cmp multi_link_rm_exp multi_link_rm_out
'

@ -355,7 +123,7 @@ test_object_cmd() {
test_patch_create_path $EMPTY a/b/b/b/b $FILE

test_expect_success "can create blank object" '
|
||||
BLANK=$(ipfs object new)
|
||||
BLANK=$EMPTY_DIR
|
||||
'
|
||||
|
||||
test_patch_create_path $BLANK a $FILE
|
||||
@ -363,98 +131,6 @@ test_object_cmd() {
|
||||
test_expect_success "create bad path fails" '
|
||||
test_must_fail ipfs object patch $EMPTY add-link --create / $FILE
|
||||
'
|
||||
|
||||
test_expect_success "patch set-data works" '
|
||||
EMPTY=$(ipfs object new) &&
|
||||
HASH=$(printf "foo" | ipfs object patch $EMPTY set-data)
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo "{\"Links\":[],\"Data\":\"foo\"}" > exp_data_set &&
|
||||
ipfs object get $HASH > actual_data_set &&
|
||||
test_cmp exp_data_set actual_data_set
|
||||
'
|
||||
|
||||
test_expect_success "patch append-data works" '
|
||||
HASH=$(printf "bar" | ipfs object patch $HASH append-data)
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo "{\"Links\":[],\"Data\":\"foobar\"}" > exp_data_append &&
|
||||
ipfs object get $HASH > actual_data_append &&
|
||||
test_cmp exp_data_append actual_data_append
|
||||
'
|
||||
|
||||
#
|
||||
# CidBase Tests
|
||||
#
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32' succeeds" '
|
||||
ipfs object put --cid-base=base32 ../t0051-object-data/testPut.json > actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32' output looks good" '
|
||||
HASH="QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD" &&
|
||||
printf "added $HASH\n" > expected_putOut &&
|
||||
test_cmp expected_putOut actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32 --upgrade-cidv0-in-output=true' succeeds" '
|
||||
ipfs object put --cid-base=base32 --upgrade-cidv0-in-output=true ../t0051-object-data/testPut.json > actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object put file.json --cid-base=base32 --upgrade-cidv0-in-output=true' output looks good" '
|
||||
HASH=$(ipfs cid base32 "QmUTSAdDi2xsNkDtLqjFgQDMEn5di3Ab9eqbrt4gaiNbUD") &&
|
||||
printf "added $HASH\n" > expected_putOut &&
|
||||
test_cmp expected_putOut actual_putOut
|
||||
'
|
||||
|
||||
test_expect_success "'insert json dag with both CidV0 and CidV1 links'" '
|
||||
MIXED=$(ipfs object put ../t0051-object-data/mixed.json -q) &&
|
||||
echo $MIXED
|
||||
'
|
||||
|
||||
test_expect_success "ipfs object get then put creates identical object with --cid-base=base32" '
|
||||
ipfs object get --cid-base=base32 $MIXED > mixedv2.json &&
|
||||
MIXED2=$(ipfs object put -q mixedv2.json) &&
|
||||
echo "$MIXED =? $MIXED2" &&
|
||||
test "$MIXED" = "$MIXED2"
|
||||
'
|
||||
|
||||
HASHv0=QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V
|
||||
HASHv1=bafkqadsimvwgy3zajb2w2yloeefau
|
||||
|
||||
test_expect_success "ipfs object get with --cid-base=base32 uses base32 for CidV1 link only" '
|
||||
ipfs object get --cid-base=base32 $MIXED > mixed.actual &&
|
||||
grep -q $HASHv0 mixed.actual &&
|
||||
grep -q $(ipfs cid base32 $HASHv1) mixed.actual
|
||||
'
|
||||
|
||||
test_expect_success "ipfs object links --cid-base=base32 --upgrade-cidv0-in-output=true converts both links" '
|
||||
ipfs object links --cid-base=base32 --upgrade-cidv0-in-output=true $MIXED | awk "{print \$1}" | sort > links.actual &&
|
||||
echo $(ipfs cid base32 $HASHv1) > links.expected
|
||||
echo $(ipfs cid base32 $HASHv0) >> links.expected
|
||||
test_cmp links.actual links.expected
|
||||
'
|
||||
}
|
||||
|
||||
test_object_content_type() {
|
||||
|
||||
test_expect_success "'ipfs object get --encoding=protobuf' returns the correct content type" '
|
||||
curl -X POST -sI "http://$API_ADDR/api/v0/object/get?arg=$HASH&encoding=protobuf" | grep -q "^Content-Type: application/protobuf"
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object get --encoding=json' returns the correct content type" '
|
||||
curl -X POST -sI "http://$API_ADDR/api/v0/object/get?arg=$HASH&encoding=json" | grep -q "^Content-Type: application/json"
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object get --encoding=text' returns the correct content type" '
|
||||
curl -X POST -sI "http://$API_ADDR/api/v0/object/get?arg=$HASH&encoding=text" | grep -q "^Content-Type: text/plain"
|
||||
'
|
||||
|
||||
test_expect_success "'ipfs object get --encoding=xml' returns the correct content type" '
|
||||
curl -X POST -sI "http://$API_ADDR/api/v0/object/get?arg=$HASH&encoding=xml" | grep -q "^Content-Type: application/xml"
|
||||
'
|
||||
}
|
||||
|
||||
# should work offline
|
||||
@ -463,7 +139,6 @@ test_object_cmd
|
||||
# should work online
|
||||
test_launch_ipfs_daemon
|
||||
test_object_cmd
|
||||
test_object_content_type
|
||||
test_kill_ipfs_daemon
|
||||
|
||||
test_done
|
||||
|
@ -114,8 +114,8 @@ test_expect_success "objects are there" '
|
||||
'
|
||||
|
||||
# saving this output for later
|
||||
test_expect_success "ipfs object links $HASH_DIR1 works" '
|
||||
ipfs object links $HASH_DIR1 > DIR1_objlink
|
||||
test_expect_success "ipfs dag get $HASH_DIR1 works" '
|
||||
ipfs dag get $HASH_DIR1 | jq -r ".Links[] | .Hash | .[\"/\"]" > DIR1_objlink
|
||||
'
|
||||
|
||||
|
||||
@ -224,7 +224,7 @@ test_expect_success "some objects are still there" '
|
||||
ipfs cat "$HASH_FILE1" >>actual8 &&
|
||||
ipfs ls "$HASH_DIR4" >>actual8 &&
|
||||
ipfs ls "$HASH_DIR2" >>actual8 &&
|
||||
ipfs object links "$HASH_DIR1" >>actual8 &&
|
||||
ipfs dag get "$HASH_DIR1" | jq -r ".Links[] | .Hash | .[\"/\"]" >>actual8 &&
|
||||
test_cmp expected8 actual8
|
||||
'
|
||||
|
||||
|
@ -157,13 +157,13 @@ test_get_cmd() {
|
||||
test_get_fail() {
|
||||
test_expect_success "create an object that has unresolvable links" '
|
||||
cat <<-\EOF >bad_object &&
|
||||
{ "Links": [ { "Name": "foo", "Hash": "QmZzaC6ydNXiR65W8VjGA73ET9MZ6VFAqUT1ngYMXcpihn", "Size": 1897 }, { "Name": "bar", "Hash": "Qmd4mG6pDFDmDTn6p3hX1srP8qTbkyXKj5yjpEsiHDX3u8", "Size": 56 }, { "Name": "baz", "Hash": "QmUTjwRnG28dSrFFVTYgbr6LiDLsBmRr2SaUSTGheK2YqG", "Size": 24266 } ], "Data": "\b\u0001" }
|
||||
{"Data":{"/":{"bytes":"CAE"}},"Links":[{"Hash":{"/":"Qmd4mG6pDFDmDTn6p3hX1srP8qTbkyXKj5yjpEsiHDX3u8"},"Name":"bar","Tsize":56},{"Hash":{"/":"QmUTjwRnG28dSrFFVTYgbr6LiDLsBmRr2SaUSTGheK2YqG"},"Name":"baz","Tsize":24266},{"Hash":{"/":"QmZzaC6ydNXiR65W8VjGA73ET9MZ6VFAqUT1ngYMXcpihn"},"Name":"foo","Tsize":1897}]}
|
||||
EOF
|
||||
cat bad_object | ipfs object put > put_out
|
||||
cat bad_object | ipfs dag put --store-codec dag-pb > put_out
|
||||
'
|
||||
|
||||
test_expect_success "output looks good" '
|
||||
echo "added QmaGidyrnX8FMbWJoxp8HVwZ1uRKwCyxBJzABnR1S2FVUr" > put_exp &&
|
||||
echo "bafybeifrjjol3gixedca6etdwccnvwfvhurc4wb3i5mnk2rvwvyfcgwxd4" > put_exp &&
|
||||
test_cmp put_exp put_out
|
||||
'
|
||||
|
||||
|
@ -38,9 +38,9 @@ test_expect_success "gc okay after adding incomplete node -- prep" '
|
||||
'
|
||||
|
||||
test_expect_success "gc okay after adding incomplete node" '
|
||||
ipfs object stat $ADIR_HASH &&
|
||||
ipfs dag get $ADIR_HASH &&
|
||||
ipfs repo gc &&
|
||||
ipfs object stat $ADIR_HASH
|
||||
ipfs dag get $ADIR_HASH
|
||||
'
|
||||
|
||||
test_expect_success "add directory with direct pin" '
|
||||
|