Mirror of https://github.com/ipfs/kubo.git
core: add context.Context param to core.Resolve()
commands/object: remove objectData() and objectLinks() helpers
resolver: added context parameters
sharness: $HASH carried the \r from the http protocol with it
sharness: write curl output to individual files
http gw: break PUT handler until PR#1191
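The gist of the API change, sketched from the hunks below (the `req.Context().Context` form is how command handlers in this diff obtain a context; other call sites pass their own, e.g. `n.Context()` or a FUSE request context):

    // Before: core.Resolve derived its own context internally (context.TODO()).
    dagnode, err := core.Resolve(node, path.Path(fpath))

    // After: the caller supplies a context.Context explicitly.
    dagnode, err := core.Resolve(req.Context().Context, node, path.Path(fpath))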
@@ -9,6 +9,7 @@ import (
     "strings"
 
     context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+
     cmds "github.com/ipfs/go-ipfs/commands"
     u "github.com/ipfs/go-ipfs/util"
 )
@@ -48,11 +49,6 @@ func NewHandler(ctx cmds.Context, root *cmds.Command, origin string) *Handler {
 }
 
 func (i Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-    // create a context.Context to pass into the commands.
-    ctx, cancel := context.WithCancel(context.TODO())
-    defer cancel()
-    i.ctx.Context = ctx
-
     log.Debug("Incoming API request: ", r.URL)
 
     // error on external referers (to prevent CSRF attacks)
@@ -84,6 +80,27 @@ func (i Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
         w.Write([]byte(err.Error()))
         return
     }
+
+    // get the node's context to pass into the commands.
+    node, err := i.ctx.GetNode()
+    if err != nil {
+        err = fmt.Errorf("cmds/http: couldn't GetNode(): %s", err)
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+    ctx, cancel := context.WithCancel(node.Context())
+    defer cancel()
+    /*
+        TODO(cryptix): the next line looks very fishy to me..
+        It looks like the the context for the command request beeing prepared here is shared across all incoming requests..
+
+        I assume it really isn't because ServeHTTP() doesn't take a pointer receiver, but it's really subtule..
+
+        Shouldn't the context be just put on the command request?
+
+        ps: take note of the name clash - commands.Context != context.Context
+    */
+    i.ctx.Context = ctx
     req.SetContext(i.ctx)
 
     // call the command
@@ -62,7 +62,7 @@ func cat(ctx context.Context, node *core.IpfsNode, paths []string) ([]io.Reader,
     readers := make([]io.Reader, 0, len(paths))
     length := uint64(0)
     for _, fpath := range paths {
-        dagnode, err := core.Resolve(node, path.Path(fpath))
+        dagnode, err := core.Resolve(ctx, node, path.Path(fpath))
         if err != nil {
             return nil, 0, err
         }
@@ -9,13 +9,14 @@ import (
     gopath "path"
     "strings"
 
+    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/cheggaaa/pb"
+    context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+
     cmds "github.com/ipfs/go-ipfs/commands"
     core "github.com/ipfs/go-ipfs/core"
     path "github.com/ipfs/go-ipfs/path"
     tar "github.com/ipfs/go-ipfs/thirdparty/tar"
     utar "github.com/ipfs/go-ipfs/unixfs/tar"
-
-    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/cheggaaa/pb"
 )
 
 var ErrInvalidCompressionLevel = errors.New("Compression level must be between 1 and 9")
@@ -62,7 +63,7 @@ may also specify the level of compression by specifying '-l=<1-9>'.
             return
         }
 
-        reader, err := get(node, req.Arguments()[0], cmplvl)
+        reader, err := get(req.Context().Context, node, req.Arguments()[0], cmplvl)
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
@@ -165,9 +166,9 @@ func getCompressOptions(req cmds.Request) (int, error) {
     return gzip.NoCompression, nil
 }
 
-func get(node *core.IpfsNode, p string, compression int) (io.Reader, error) {
+func get(ctx context.Context, node *core.IpfsNode, p string, compression int) (io.Reader, error) {
     pathToResolve := path.Path(p)
-    dagnode, err := core.Resolve(node, pathToResolve)
+    dagnode, err := core.Resolve(ctx, node, pathToResolve)
     if err != nil {
         return nil, err
     }
@@ -66,7 +66,7 @@ it contains, with the following format:
 
         dagnodes := make([]*merkledag.Node, 0)
         for _, fpath := range paths {
-            dagnode, err := core.Resolve(node, path.Path(fpath))
+            dagnode, err := core.Resolve(req.Context().Context, node, path.Path(fpath))
             if err != nil {
                 res.SetError(err, cmds.ErrNormal)
                 return
@@ -91,12 +91,12 @@ output is the raw data of the object.
         }
 
         fpath := path.Path(req.Arguments()[0])
-        output, err := objectData(n, fpath)
+        node, err := core.Resolve(req.Context().Context, n, fpath)
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
         }
-        res.SetOutput(output)
+        res.SetOutput(bytes.NewReader(node.Data))
     },
 }
 
@@ -121,7 +121,12 @@ multihash.
         }
 
         fpath := path.Path(req.Arguments()[0])
-        output, err := objectLinks(n, fpath)
+        node, err := core.Resolve(req.Context().Context, n, fpath)
+        if err != nil {
+            res.SetError(err, cmds.ErrNormal)
+            return
+        }
+        output, err := getOutput(node)
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
@@ -176,7 +181,7 @@ This command outputs data in the following encodings:
 
         fpath := path.Path(req.Arguments()[0])
 
-        object, err := objectGet(n, fpath)
+        object, err := core.Resolve(req.Context().Context, n, fpath)
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
@@ -242,7 +247,7 @@ var objectStatCmd = &cmds.Command{
 
         fpath := path.Path(req.Arguments()[0])
 
-        object, err := objectGet(n, fpath)
+        object, err := core.Resolve(req.Context().Context, n, fpath)
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
|
|||||||
Type: Object{},
|
Type: Object{},
|
||||||
}
|
}
|
||||||
|
|
||||||
// objectData takes a key string and writes out the raw bytes of that node (if there is one)
|
|
||||||
func objectData(n *core.IpfsNode, fpath path.Path) (io.Reader, error) {
|
|
||||||
dagnode, err := core.Resolve(n, fpath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("objectData: found dagnode %s (# of bytes: %d - # links: %d)", fpath, len(dagnode.Data), len(dagnode.Links))
|
|
||||||
|
|
||||||
return bytes.NewReader(dagnode.Data), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// objectLinks takes a key string and lists the links it points to
|
|
||||||
func objectLinks(n *core.IpfsNode, fpath path.Path) (*Object, error) {
|
|
||||||
dagnode, err := core.Resolve(n, fpath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("objectLinks: found dagnode %s (# of bytes: %d - # links: %d)", fpath, len(dagnode.Data), len(dagnode.Links))
|
|
||||||
|
|
||||||
return getOutput(dagnode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// objectGet takes a key string from args and a format option and serializes the dagnode to that format
|
|
||||||
func objectGet(n *core.IpfsNode, fpath path.Path) (*dag.Node, error) {
|
|
||||||
dagnode, err := core.Resolve(n, fpath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("objectGet: found dagnode %s (# of bytes: %d - # links: %d)", fpath, len(dagnode.Data), len(dagnode.Links))
|
|
||||||
|
|
||||||
return dagnode, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrEmptyNode is returned when the input to 'ipfs object put' contains no data
|
// ErrEmptyNode is returned when the input to 'ipfs object put' contains no data
|
||||||
var ErrEmptyNode = errors.New("no data or links in this node")
|
var ErrEmptyNode = errors.New("no data or links in this node")
|
||||||
|
|
||||||
|
@@ -6,6 +6,8 @@ import (
     "io"
     "strings"
 
+    context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+
     cmds "github.com/ipfs/go-ipfs/commands"
     core "github.com/ipfs/go-ipfs/core"
     crypto "github.com/ipfs/go-ipfs/p2p/crypto"
@@ -89,7 +91,8 @@ Publish an <ipfs-path> to another public key (not implemented):
         }
 
         // TODO n.Keychain.Get(name).PrivKey
-        output, err := publish(n, n.PrivateKey, p)
+        // TODO(cryptix): is req.Context().Context a child of n.Context()?
+        output, err := publish(req.Context().Context, n, n.PrivateKey, p)
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
@@ -106,14 +109,14 @@ Publish an <ipfs-path> to another public key (not implemented):
     Type: IpnsEntry{},
 }
 
-func publish(n *core.IpfsNode, k crypto.PrivKey, ref path.Path) (*IpnsEntry, error) {
+func publish(ctx context.Context, n *core.IpfsNode, k crypto.PrivKey, ref path.Path) (*IpnsEntry, error) {
     // First, verify the path exists
-    _, err := core.Resolve(n, ref)
+    _, err := core.Resolve(ctx, n, ref)
     if err != nil {
         return nil, err
     }
 
-    err = n.Namesys.Publish(n.Context(), k, ref)
+    err = n.Namesys.Publish(ctx, k, ref)
     if err != nil {
         return nil, err
     }
@@ -85,7 +85,7 @@ Note: list all refs recursively with -r.
             return
         }
 
-        objs, err := objectsForPaths(n, req.Arguments())
+        objs, err := objectsForPaths(ctx, n, req.Arguments())
         if err != nil {
             res.SetError(err, cmds.ErrNormal)
             return
@@ -161,10 +161,10 @@ Displays the hashes of all local objects.
     },
 }
 
-func objectsForPaths(n *core.IpfsNode, paths []string) ([]*dag.Node, error) {
+func objectsForPaths(ctx context.Context, n *core.IpfsNode, paths []string) ([]*dag.Node, error) {
     objects := make([]*dag.Node, len(paths))
     for i, p := range paths {
-        o, err := core.Resolve(n, path.Path(p))
+        o, err := core.Resolve(ctx, n, path.Path(p))
         if err != nil {
             return nil, err
         }
@@ -1,6 +1,7 @@
 package corehttp
 
 import (
+    "errors"
     "fmt"
     "html/template"
     "io"
@@ -101,7 +102,7 @@ func (i *gatewayHandler) ResolvePath(ctx context.Context, p string) (*dag.Node,
         return nil, "", err
     }
 
-    node, err := i.node.Resolver.ResolvePath(path.Path(p))
+    node, err := i.node.Resolver.ResolvePath(ctx, path.Path(p))
     if err != nil {
         return nil, "", err
     }
@@ -309,6 +310,9 @@ func (i *gatewayHandler) putEmptyDirHandler(w http.ResponseWriter, r *http.Reque
 }
 
 func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
+    // TODO(cryptix): will be resolved in PR#1191
+    webErrorWithCode(w, "Sorry, PUT is bugged right now, closing request", errors.New("handler disabled"), http.StatusInternalServerError)
+    return
     urlPath := r.URL.Path
     pathext := urlPath[5:]
     var err error
@@ -362,7 +366,7 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
 
     // resolving path components into merkledag nodes. if a component does not
     // resolve, create empty directories (which will be linked and populated below.)
-    path_nodes, err := i.node.Resolver.ResolveLinks(rootnd, components[:len(components)-1])
+    path_nodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
     if _, ok := err.(path.ErrNoLink); ok {
         // Create empty directories, links will be made further down the code
         for len(path_nodes) < len(components) {
@@ -424,7 +428,7 @@ func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
         return
     }
 
-    path_nodes, err := i.node.Resolver.ResolveLinks(rootnd, components[:len(components)-1])
+    path_nodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
     if err != nil {
         webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
         return
@@ -26,10 +26,12 @@ import (
 )
 
 func Pin(n *core.IpfsNode, paths []string, recursive bool) ([]u.Key, error) {
+    // TODO(cryptix): do we want a ctx as first param for (Un)Pin() as well, just like core.Resolve?
+    ctx := n.Context()
 
     dagnodes := make([]*merkledag.Node, 0)
     for _, fpath := range paths {
-        dagnode, err := core.Resolve(n, path.Path(fpath))
+        dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
         if err != nil {
             return nil, fmt.Errorf("pin: %s", err)
         }
@@ -43,7 +45,7 @@ func Pin(n *core.IpfsNode, paths []string, recursive bool) ([]u.Key, error) {
             return nil, err
         }
 
-        ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
+        ctx, cancel := context.WithTimeout(ctx, time.Minute)
         defer cancel()
         err = n.Pinning.Pin(ctx, dagnode, recursive)
         if err != nil {
@@ -61,10 +63,12 @@ func Pin(n *core.IpfsNode, paths []string, recursive bool) ([]u.Key, error) {
 }
 
 func Unpin(n *core.IpfsNode, paths []string, recursive bool) ([]u.Key, error) {
+    // TODO(cryptix): do we want a ctx as first param for (Un)Pin() as well, just like core.Resolve?
+    ctx := n.Context()
 
     dagnodes := make([]*merkledag.Node, 0)
     for _, fpath := range paths {
-        dagnode, err := core.Resolve(n, path.Path(fpath))
+        dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
         if err != nil {
             return nil, err
         }
@@ -75,7 +79,7 @@ func Unpin(n *core.IpfsNode, paths []string, recursive bool) ([]u.Key, error) {
     for _, dagnode := range dagnodes {
         k, _ := dagnode.Key()
 
-        ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
+        ctx, cancel := context.WithTimeout(ctx, time.Minute)
         defer cancel()
         err := n.Pinning.Unpin(ctx, k, recursive)
         if err != nil {
@@ -10,7 +10,7 @@ import (
 
 func Cat(n *core.IpfsNode, pstr string) (io.Reader, error) {
     p := path.FromString(pstr)
-    dagNode, err := n.Resolver.ResolvePath(p)
+    dagNode, err := n.Resolver.ResolvePath(n.ContextGroup.Context(), p)
     if err != nil {
         return nil, err
     }
@@ -5,22 +5,35 @@ import (
     "fmt"
     "strings"
 
+    context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+
     merkledag "github.com/ipfs/go-ipfs/merkledag"
     path "github.com/ipfs/go-ipfs/path"
 )
 
 const maxLinks = 32
 
-var ErrTooManyLinks = errors.New("exceeded maximum number of links in ipns entry")
+// errors returned by Resolve function
+var (
+    ErrTooManyLinks = errors.New("core/resolve: exceeded maximum number of links in ipns entry")
+    ErrNoNamesys    = errors.New("core/resolve: no Namesys on IpfsNode - can't resolve ipns entry")
+)
 
-// Resolves the given path by parsing out /ipns/ entries and then going
+// Resolve resolves the given path by parsing out /ipns/ entries and then going
 // through the /ipfs/ entries and returning the final merkledage node.
 // Effectively enables /ipns/ in CLI commands.
-func Resolve(n *IpfsNode, p path.Path) (*merkledag.Node, error) {
-    return resolveRecurse(n, p, 0)
+func Resolve(ctx context.Context, n *IpfsNode, p path.Path) (*merkledag.Node, error) {
+    r := resolver{ctx, n, p}
+    return r.resolveRecurse(0)
 }
 
-func resolveRecurse(n *IpfsNode, p path.Path, depth int) (*merkledag.Node, error) {
+type resolver struct {
+    ctx context.Context
+    n   *IpfsNode
+    p   path.Path
+}
+
+func (r *resolver) resolveRecurse(depth int) (*merkledag.Node, error) {
     if depth >= maxLinks {
         return nil, ErrTooManyLinks
     }
@@ -29,29 +42,33 @@ func resolveRecurse(n *IpfsNode, p path.Path, depth int) (*merkledag.Node, error
     // emerges when resolving just a <hash>. Is it meant
     // to be an ipfs or an ipns resolution?
 
-    if strings.HasPrefix(p.String(), "/ipns/") {
+    if strings.HasPrefix(r.p.String(), "/ipns/") {
+        // TODO(cryptix): we sould be able to query the local cache for the path
+        if r.n.Namesys == nil {
+            return nil, ErrNoNamesys
+        }
         // if it's an ipns path, try to resolve it.
         // if we can't, we can give that error back to the user.
-        seg := p.Segments()
+        seg := r.p.Segments()
         if len(seg) < 2 || seg[1] == "" { // just "/ipns/"
-            return nil, fmt.Errorf("invalid path: %s", string(p))
+            return nil, fmt.Errorf("invalid path: %s", string(r.p))
         }
 
         ipnsPath := seg[1]
         extensions := seg[2:]
-        respath, err := n.Namesys.Resolve(n.Context(), ipnsPath)
+        respath, err := r.n.Namesys.Resolve(r.ctx, ipnsPath)
         if err != nil {
             return nil, err
         }
 
         segments := append(respath.Segments(), extensions...)
-        respath, err = path.FromSegments(segments...)
+        r.p, err = path.FromSegments(segments...)
         if err != nil {
             return nil, err
         }
-        return resolveRecurse(n, respath, depth+1)
+        return r.resolveRecurse(depth + 1)
     }
 
     // ok, we have an ipfs path now (or what we'll treat as one)
-    return n.Resolver.ResolvePath(p)
+    return r.n.Resolver.ResolvePath(r.ctx, r.p)
 }
@@ -56,7 +56,7 @@ func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
         return nil, fuse.ENOENT
     }
 
-    nd, err := s.Ipfs.Resolver.ResolvePath(path.Path(name))
+    nd, err := s.Ipfs.Resolver.ResolvePath(ctx, path.Path(name))
     if err != nil {
         // todo: make this error more versatile.
         return nil, fuse.ENOENT
@@ -124,7 +124,7 @@ func (s *Node) Attr() fuse.Attr {
 // Lookup performs a lookup under this node.
 func (s *Node) Lookup(ctx context.Context, name string) (fs.Node, error) {
     log.Debugf("Lookup '%s'", name)
-    nodes, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
+    nodes, err := s.Ipfs.Resolver.ResolveLinks(ctx, s.Nd, []string{name})
     if err != nil {
         // todo: make this error more versatile.
         return nil, fuse.ENOENT
@@ -163,7 +163,7 @@ func (fs *Filesystem) newKeyRoot(parent context.Context, k ci.PrivKey) (*KeyRoot
         }
     }
 
-    mnode, err := fs.resolver.ResolvePath(pointsTo)
+    mnode, err := fs.resolver.ResolvePath(ctx, pointsTo)
     if err != nil {
         log.Errorf("Failed to retrieve value '%s' for ipns entry: %s\n", pointsTo, err)
         return nil, err
@@ -1,4 +1,4 @@
-// package path implements utilities for resolving paths within ipfs.
+// Package path implements utilities for resolving paths within ipfs.
 package path
 
 import (
@@ -7,6 +7,7 @@ import (
 
     mh "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
     "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
+
     merkledag "github.com/ipfs/go-ipfs/merkledag"
     u "github.com/ipfs/go-ipfs/util"
 )
@@ -57,33 +58,32 @@ func SplitAbsPath(fpath Path) (mh.Multihash, []string, error) {
 
 // ResolvePath fetches the node for given path. It returns the last item
 // returned by ResolvePathComponents.
-func (s *Resolver) ResolvePath(fpath Path) (*merkledag.Node, error) {
-    nodes, err := s.ResolvePathComponents(fpath)
+func (s *Resolver) ResolvePath(ctx context.Context, fpath Path) (*merkledag.Node, error) {
+    nodes, err := s.ResolvePathComponents(ctx, fpath)
     if err != nil || nodes == nil {
         return nil, err
-    } else {
-        return nodes[len(nodes)-1], err
     }
+    return nodes[len(nodes)-1], err
 }
 
 // ResolvePathComponents fetches the nodes for each segment of the given path.
 // It uses the first path component as a hash (key) of the first node, then
 // resolves all other components walking the links, with ResolveLinks.
-func (s *Resolver) ResolvePathComponents(fpath Path) ([]*merkledag.Node, error) {
+func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*merkledag.Node, error) {
     h, parts, err := SplitAbsPath(fpath)
     if err != nil {
         return nil, err
     }
 
-    log.Debug("Resolve dag get.\n")
-    ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
+    log.Debug("Resolve dag get.")
+    ctx, cancel := context.WithTimeout(ctx, time.Minute)
     defer cancel()
     nd, err := s.DAG.Get(ctx, u.Key(h))
     if err != nil {
         return nil, err
     }
 
-    return s.ResolveLinks(nd, parts)
+    return s.ResolveLinks(ctx, nd, parts)
 }
 
 // ResolveLinks iteratively resolves names by walking the link hierarchy.
@@ -93,10 +93,9 @@ func (s *Resolver) ResolvePathComponents(fpath Path) ([]*merkledag.Node, error)
 //
 // ResolveLinks(nd, []string{"foo", "bar", "baz"})
 // would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
-func (s *Resolver) ResolveLinks(ndd *merkledag.Node, names []string) (
-    result []*merkledag.Node, err error) {
+func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names []string) ([]*merkledag.Node, error) {
 
-    result = make([]*merkledag.Node, 0, len(names)+1)
+    result := make([]*merkledag.Node, 0, len(names)+1)
     result = append(result, ndd)
     nd := ndd // dup arg workaround
 
|
|||||||
|
|
||||||
if nlink.Node == nil {
|
if nlink.Node == nil {
|
||||||
// fetch object for link and assign to nd
|
// fetch object for link and assign to nd
|
||||||
ctx, cancel := context.WithTimeout(context.TODO(), time.Minute)
|
ctx, cancel := context.WithTimeout(ctx, time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
nd, err = s.DAG.Get(ctx, next)
|
nd, err := s.DAG.Get(ctx, next)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return append(result, nd), err
|
return append(result, nd), err
|
||||||
}
|
}
|
||||||
@ -134,5 +133,5 @@ func (s *Resolver) ResolveLinks(ndd *merkledag.Node, names []string) (
|
|||||||
|
|
||||||
result = append(result, nlink.Node)
|
result = append(result, nlink.Node)
|
||||||
}
|
}
|
||||||
return
|
return result, nil
|
||||||
}
|
}
|
||||||
|
@@ -26,35 +26,31 @@ test_expect_success "HTTP gateway gives access to sample file" '
 test_expect_success "HTTP POST file gives Hash" '
   echo "$RANDOM" >infile &&
   URL="http://localhost:$port/ipfs/" &&
-  curl -svX POST --data-binary @infile "$URL" 2>curl.out &&
-  grep "HTTP/1.1 201 Created" curl.out &&
-  LOCATION=$(grep Location curl.out) &&
-  HASH=$(expr "$LOCATION" : "< Location: /ipfs/\(.*\)$")
+  curl -svX POST --data-binary @infile "$URL" 2>curl_post.out &&
+  grep "HTTP/1.1 201 Created" curl_post.out &&
+  LOCATION=$(grep Location curl_post.out) &&
+  HASH=$(echo $LOCATION | cut -d":" -f2- |tr -d " \n\r")
 '
 
-# this is failing on osx
-# claims "multihash too short. must be > 3 bytes" but the multihash is there.
-test_expect_failure "We can HTTP GET file just created" '
-  URL="http://localhost:$port/ipfs/$HASH" &&
+test_expect_success "We can HTTP GET file just created" '
+  URL="http://localhost:${port}${HASH}" &&
   curl -so outfile "$URL" &&
-  test_cmp infile outfile ||
-  echo $URL &&
-  test_fsh cat outfile
+  test_cmp infile outfile
 '
 
 test_expect_success "HTTP PUT empty directory" '
   URL="http://localhost:$port/ipfs/$HASH_EMPTY_DIR/" &&
   echo "PUT $URL" &&
-  curl -svX PUT "$URL" 2>curl.out &&
-  cat curl.out &&
-  grep "Ipfs-Hash: $HASH_EMPTY_DIR" curl.out &&
-  grep "Location: /ipfs/$HASH_EMPTY_DIR/" curl.out &&
-  grep "HTTP/1.1 201 Created" curl.out
+  curl -svX PUT "$URL" 2>curl_putEmpty.out &&
+  cat curl_putEmpty.out &&
+  grep "Ipfs-Hash: $HASH_EMPTY_DIR" curl_putEmpty.out &&
+  grep "Location: /ipfs/$HASH_EMPTY_DIR/" curl_putEmpty.out &&
+  grep "HTTP/1.1 201 Created" curl_putEmpty.out
 '
 
 test_expect_success "HTTP GET empty directory" '
   echo "GET $URL" &&
-  curl -so outfile "$URL" 2>curl.out &&
+  curl -so outfile "$URL" 2>curl_getEmpty.out &&
   grep "Index of /ipfs/$HASH_EMPTY_DIR/" outfile
 '
 
@@ -62,9 +58,9 @@ test_expect_success "HTTP PUT file to construct a hierarchy" '
   echo "$RANDOM" >infile &&
   URL="http://localhost:$port/ipfs/$HASH_EMPTY_DIR/test.txt" &&
   echo "PUT $URL" &&
-  curl -svX PUT --data-binary @infile "$URL" 2>curl.out &&
-  grep "HTTP/1.1 201 Created" curl.out &&
-  LOCATION=$(grep Location curl.out) &&
+  curl -svX PUT --data-binary @infile "$URL" 2>curl_put.out &&
+  grep "HTTP/1.1 201 Created" curl_put.out &&
+  LOCATION=$(grep Location curl_put.out) &&
   HASH=$(expr "$LOCATION" : "< Location: /ipfs/\(.*\)/test.txt")
 '
 
@@ -79,22 +75,18 @@ test_expect_success "HTTP PUT file to append to existing hierarchy" '
   echo "$RANDOM" >infile2 &&
   URL="http://localhost:$port/ipfs/$HASH/test/test.txt" &&
   echo "PUT $URL" &&
-  curl -svX PUT --data-binary @infile2 "$URL" 2>curl.out &&
-  grep "HTTP/1.1 201 Created" curl.out &&
-  LOCATION=$(grep Location curl.out) &&
+  curl -svX PUT --data-binary @infile2 "$URL" 2>curl_putAgain.out &&
+  grep "HTTP/1.1 201 Created" curl_putAgain.out &&
+  LOCATION=$(grep Location curl_putAgain.out) &&
   HASH=$(expr "$LOCATION" : "< Location: /ipfs/\(.*\)/test/test.txt")
 '
 
 
-test_expect_success "We can HTTP GET file just created" '
+test_expect_success "We can HTTP GET file just updated" '
   URL="http://localhost:$port/ipfs/$HASH/test/test.txt" &&
   echo "GET $URL" &&
-  curl -so outfile2 "$URL" &&
-  test_cmp infile2 outfile2 &&
-  URL="http://localhost:$port/ipfs/$HASH/test.txt" &&
-  echo "GET $URL" &&
-  curl -so outfile "$URL" &&
-  test_cmp infile outfile
+  curl -svo outfile2 "$URL" 2>curl_getAgain.out &&
+  test_cmp infile2 outfile2
 '
 
 test_kill_ipfs_daemon