Mirror of https://github.com/ipfs/kubo.git, synced 2025-07-03 21:08:17 +08:00
style: gofumpt and godot [skip changelog] (#10081)
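The diff below is a series of mechanical style fixes. As a rough, hedged illustration of the rules involved (the identifiers here are hypothetical and not taken from the diff): gofumpt groups adjacent top-level declarations and requires the 0o prefix on octal literals, while godot requires doc comments to end with a period.

// Package example sketches the style rules applied in this commit.
// The names are illustrative only; they do not come from kubo.
package example

// gofumpt: adjacent single-line declarations are grouped into one block.
var (
	defaultName = "example" // was: var defaultName = "example"
	defaultSize = 42        // was: var defaultSize = 42
)

// gofumpt: octal literals use the explicit 0o prefix.
const filePerm = 0o644 // was: 0644

// Greet says hello; godot requires this sentence to end with a period.
func Greet() string {
	return "hello"
}

Running the two linters with their write flags (e.g. gofumpt -w and godot -w) over the repository should reproduce rewrites of this kind.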
@ -17,7 +17,7 @@ import (
//go:embed init-doc
var Asset embed.FS

-// initDocPaths lists the paths for the docs we want to seed during --init
+// initDocPaths lists the paths for the docs we want to seed during --init.
var initDocPaths = []string{
gopath.Join("init-doc", "about"),
gopath.Join("init-doc", "readme"),
@ -28,7 +28,7 @@ var initDocPaths = []string{
gopath.Join("init-doc", "ping"),
}

-// SeedInitDocs adds the list of embedded init documentation to the passed node, pins it and returns the root key
+// SeedInitDocs adds the list of embedded init documentation to the passed node, pins it and returns the root key.
func SeedInitDocs(nd *core.IpfsNode) (cid.Cid, error) {
return addAssetList(nd, initDocPaths)
}
@ -48,7 +48,7 @@ type HttpApi struct {
// IPFS daemon
//
// Daemon api address is pulled from the $IPFS_PATH/api file.
-// If $IPFS_PATH env var is not present, it defaults to ~/.ipfs
+// If $IPFS_PATH env var is not present, it defaults to ~/.ipfs.
func NewLocalApi() (*HttpApi, error) {
baseDir := os.Getenv(EnvDir)
if baseDir == "" {
@ -59,7 +59,7 @@ func NewLocalApi() (*HttpApi, error) {
}

// NewPathApi constructs new HttpApi by pulling api address from specified
-// ipfspath. Api file should be located at $ipfspath/api
+// ipfspath. Api file should be located at $ipfspath/api.
func NewPathApi(ipfspath string) (*HttpApi, error) {
a, err := ApiAddr(ipfspath)
if err != nil {
@ -71,7 +71,7 @@ func NewPathApi(ipfspath string) (*HttpApi, error) {
return NewApi(a)
}

-// ApiAddr reads api file in specified ipfs path
+// ApiAddr reads api file in specified ipfs path.
func ApiAddr(ipfspath string) (ma.Multiaddr, error) {
baseDir, err := homedir.Expand(ipfspath)
if err != nil {
@ -88,7 +88,7 @@ func ApiAddr(ipfspath string) (ma.Multiaddr, error) {
return ma.NewMultiaddr(strings.TrimSpace(string(api)))
}

-// NewApi constructs HttpApi with specified endpoint
+// NewApi constructs HttpApi with specified endpoint.
func NewApi(a ma.Multiaddr) (*HttpApi, error) {
c := &http.Client{
Transport: &http.Transport{
@ -100,7 +100,7 @@ func NewApi(a ma.Multiaddr) (*HttpApi, error) {
return NewApiWithClient(a, c)
}

-// NewApiWithClient constructs HttpApi with specified endpoint and custom http client
+// NewApiWithClient constructs HttpApi with specified endpoint and custom http client.
func NewApiWithClient(a ma.Multiaddr, c *http.Client) (*HttpApi, error) {
_, url, err := manet.DialArgs(a)
if err != nil {
@ -12,7 +12,7 @@ import (
"github.com/ipfs/go-cid"
)

-const forwardSeekLimit = 1 << 14 //16k
+const forwardSeekLimit = 1 << 14 // 16k

func (api *UnixfsAPI) Get(ctx context.Context, p path.Path) (files.Node, error) {
if p.Mutable() { // use resolved path in case we are dealing with IPNS / MFS
@ -107,11 +107,11 @@ func (f *apiFile) Seek(offset int64, whence int) (int64, error) {
case io.SeekCurrent:
offset = f.at + offset
}
-if f.at == offset { //noop
+if f.at == offset { // noop
return offset, nil
}

-if f.at < offset && offset-f.at < forwardSeekLimit { //forward skip
+if f.at < offset && offset-f.at < forwardSeekLimit { // forward skip
r, err := io.CopyN(io.Discard, f.r.Output, offset-f.at)

f.at += r
@ -246,7 +246,6 @@ func (api *UnixfsAPI) getDir(ctx context.Context, p path.Path, size int64) (file
resp, err := api.core().Request("ls", p.String()).
Option("resolve-size", true).
Option("stream", true).Send(ctx)
-
if err != nil {
return nil, err
}
@ -266,5 +265,7 @@ func (api *UnixfsAPI) getDir(ctx context.Context, p path.Path, size int64) (file
return d, nil
}

-var _ files.File = &apiFile{}
-var _ files.Directory = &apiDir{}
+var (
+_ files.File = &apiFile{}
+_ files.Directory = &apiDir{}
+)
@ -83,7 +83,7 @@ func (api *BlockAPI) Get(ctx context.Context, p path.Path) (io.Reader, error) {
return nil, parseErrNotFoundWithFallbackToError(resp.Error)
}

-//TODO: make get return ReadCloser to avoid copying
+// TODO: make get return ReadCloser to avoid copying
defer resp.Close()
b := new(bytes.Buffer)
if _, err := io.Copy(b, resp.Output); err != nil {
@ -14,9 +14,11 @@ import (
multicodec "github.com/multiformats/go-multicodec"
)

-type httpNodeAdder HttpApi
-type HttpDagServ httpNodeAdder
-type pinningHttpNodeAdder httpNodeAdder
+type (
+httpNodeAdder HttpApi
+HttpDagServ httpNodeAdder
+pinningHttpNodeAdder httpNodeAdder
+)

func (api *HttpDagServ) Get(ctx context.Context, c cid.Cid) (format.Node, error) {
r, err := api.core().Block().Get(ctx, path.IpldPath(c))
@ -114,7 +116,7 @@ func (api *HttpDagServ) Pinning() format.NodeAdder {
}

func (api *HttpDagServ) Remove(ctx context.Context, c cid.Cid) error {
-return api.core().Block().Rm(ctx, path.IpldPath(c)) //TODO: should we force rm?
+return api.core().Block().Rm(ctx, path.IpldPath(c)) // TODO: should we force rm?
}

func (api *HttpDagServ) RemoveMany(ctx context.Context, cids []cid.Cid) error {
@ -68,7 +68,7 @@ func parseErrNotFound(msg string) (error, bool) {
// Assume CIDs break on:
// - Whitespaces: " \t\n\r\v\f"
// - Semicolon: ";" this is to parse ipld.ErrNotFound wrapped in multierr
-// - Double Quotes: "\"" this is for parsing %q and %#v formating
+// - Double Quotes: "\"" this is for parsing %q and %#v formating.
const cidBreakSet = " \t\n\r\v\f;\""

func parseIPLDErrNotFound(msg string) (error, bool) {
@ -139,7 +139,7 @@ func parseIPLDErrNotFound(msg string) (error, bool) {
// This is a simple error type that just return msg as Error().
// But that also match ipld.ErrNotFound when called with Is(err).
// That is needed to keep compatiblity with code that use string.Contains(err.Error(), "blockstore: block not found")
-// and code using ipld.ErrNotFound
+// and code using ipld.ErrNotFound.
type blockstoreNotFoundMatchingIPLDErrNotFound struct {
msg string
}
@ -87,7 +87,7 @@ func (api *ObjectAPI) Data(ctx context.Context, p path.Path) (io.Reader, error)
return nil, resp.Error
}

-//TODO: make Data return ReadCloser to avoid copying
+// TODO: make Data return ReadCloser to avoid copying
defer resp.Close()
b := new(bytes.Buffer)
if _, err := io.Copy(b, resp.Output); err != nil {
@ -15,7 +15,7 @@ func (api *HttpApi) ResolvePath(ctx context.Context, p path.Path) (path.Resolved
RemPath string
}

-//TODO: this is hacky, fixing https://github.com/ipfs/go-ipfs/issues/5703 would help
+// TODO: this is hacky, fixing https://github.com/ipfs/go-ipfs/issues/5703 would help

var err error
if p.Namespace() == "ipns" {
@ -112,7 +112,7 @@ func (api *PinAPI) Ls(ctx context.Context, opts ...caopts.PinLsOption) (<-chan i
}

// IsPinned returns whether or not the given cid is pinned
-// and an explanation of why its pinned
+// and an explanation of why its pinned.
func (api *PinAPI) IsPinned(ctx context.Context, p path.Path, opts ...caopts.PinIsPinnedOption) (string, bool, error) {
options, err := caopts.PinIsPinnedOptions(opts...)
if err != nil {
@ -152,7 +152,6 @@ func (api *PubsubAPI) Subscribe(ctx context.Context, topic string, opts ...caopt
}
*/
resp, err := api.core().Request("pubsub/sub", toMultibase([]byte(topic))).Send(ctx)
-
if err != nil {
return nil, err
}
@ -207,7 +206,7 @@ func (api *PubsubAPI) core() *HttpApi {
return (*HttpApi)(api)
}

-// Encodes bytes into URL-safe multibase that can be sent over HTTP RPC (URL or body)
+// Encodes bytes into URL-safe multibase that can be sent over HTTP RPC (URL or body).
func toMultibase(data []byte) string {
mb, _ := mbase.Encode(mbase.Base64url, data)
return mb
@ -54,7 +54,7 @@ func (r *Response) Close() error {
return nil
}

-// Cancel aborts running request (without draining request body)
+// Cancel aborts running request (without draining request body).
func (r *Response) Cancel() error {
if r.Output != nil {
return r.Output.Close()
@ -63,7 +63,7 @@ func (r *Response) Cancel() error {
return nil
}

-// Decode reads request body and decodes it as json
+// Decode reads request body and decodes it as json.
func (r *Response) decode(dec interface{}) error {
if r.Error != nil {
return r.Error
@ -157,7 +157,6 @@ func (r *Request) Send(c *http.Client) (*Response, error) {
}

func (r *Request) getURL() string {
-
values := make(url.Values)
for _, arg := range r.Args {
values.Add("arg", arg)
@ -49,7 +49,6 @@ func (api *RoutingAPI) Put(ctx context.Context, key string, value []byte, opts .
Option("allow-offline", cfg.AllowOffline).
FileBody(bytes.NewReader(value)).
Send(ctx)
-
if err != nil {
return err
}
@ -19,7 +19,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// addMigrations adds any migration downloaded by the fetcher to the IPFS node
|
||||
// addMigrations adds any migration downloaded by the fetcher to the IPFS node.
|
||||
func addMigrations(ctx context.Context, node *core.IpfsNode, fetcher migrations.Fetcher, pin bool) error {
|
||||
var fetchers []migrations.Fetcher
|
||||
if mf, ok := fetcher.(*migrations.MultiFetcher); ok {
|
||||
@ -63,7 +63,7 @@ func addMigrations(ctx context.Context, node *core.IpfsNode, fetcher migrations.
|
||||
return nil
|
||||
}
|
||||
|
||||
// addMigrationFiles adds the files at paths to IPFS, optionally pinning them
|
||||
// addMigrationFiles adds the files at paths to IPFS, optionally pinning them.
|
||||
func addMigrationFiles(ctx context.Context, node *core.IpfsNode, paths []string, pin bool) error {
|
||||
if len(paths) == 0 {
|
||||
return nil
|
||||
|
@ -73,7 +73,7 @@ const (
|
||||
enableMultiplexKwd = "enable-mplex-experiment"
|
||||
agentVersionSuffix = "agent-version-suffix"
|
||||
// apiAddrKwd = "address-api"
|
||||
// swarmAddrKwd = "address-swarm"
|
||||
// swarmAddrKwd = "address-swarm".
|
||||
)
|
||||
|
||||
var daemonCmd = &cmds.Command{
|
||||
@ -389,7 +389,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
|
||||
"pubsub": pubsub,
|
||||
"ipnsps": ipnsps,
|
||||
},
|
||||
//TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
|
||||
// TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
|
||||
}
|
||||
|
||||
routingOption, _ := req.Options[routingOptionKwd].(string)
|
||||
@ -552,7 +552,7 @@ take effect.
|
||||
}
|
||||
|
||||
// Add ipfs version info to prometheus metrics
|
||||
var ipfsInfoMetric = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
ipfsInfoMetric := promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "ipfs_info",
|
||||
Help: "IPFS version information.",
|
||||
}, []string{"version", "commit"})
|
||||
@ -607,7 +607,6 @@ take effect.
|
||||
log.Error("failed to bootstrap (no peers found): consider updating Bootstrap or Peering section of your config")
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// Hard deprecation notice if someone still uses IPFS_REUSEPORT
|
||||
@ -627,7 +626,7 @@ take effect.
|
||||
return errs
|
||||
}
|
||||
|
||||
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests
|
||||
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests.
|
||||
func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
|
||||
cfg, err := cctx.GetConfig()
|
||||
if err != nil {
|
||||
@ -690,7 +689,7 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
|
||||
gatewayOpt = corehttp.GatewayOption("/ipfs", "/ipns")
|
||||
}
|
||||
|
||||
var opts = []corehttp.ServeOption{
|
||||
opts := []corehttp.ServeOption{
|
||||
corehttp.MetricsCollectionOption("api"),
|
||||
corehttp.MetricsOpenCensusCollectionOption(),
|
||||
corehttp.MetricsOpenCensusDefaultPrometheusRegistry(),
|
||||
@ -752,7 +751,7 @@ func rewriteMaddrToUseLocalhostIfItsAny(maddr ma.Multiaddr) ma.Multiaddr {
|
||||
}
|
||||
}
|
||||
|
||||
// printSwarmAddrs prints the addresses of the host
|
||||
// printSwarmAddrs prints the addresses of the host.
|
||||
func printSwarmAddrs(node *core.IpfsNode) {
|
||||
if !node.IsOnline {
|
||||
fmt.Println("Swarm not listening, running in offline mode.")
|
||||
@ -781,10 +780,9 @@ func printSwarmAddrs(node *core.IpfsNode) {
|
||||
for _, addr := range addrs {
|
||||
fmt.Printf("Swarm announcing %s\n", addr)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// serveHTTPGateway collects options, creates listener, prints status message and starts serving requests
|
||||
// serveHTTPGateway collects options, creates listener, prints status message and starts serving requests.
|
||||
func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
|
||||
cfg, err := cctx.GetConfig()
|
||||
if err != nil {
|
||||
@ -837,7 +835,7 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
|
||||
cmdctx := *cctx
|
||||
cmdctx.Gateway = true
|
||||
|
||||
var opts = []corehttp.ServeOption{
|
||||
opts := []corehttp.ServeOption{
|
||||
corehttp.MetricsCollectionOption("gateway"),
|
||||
corehttp.HostnameOption(),
|
||||
corehttp.GatewayOption("/ipfs", "/ipns"),
|
||||
@ -891,7 +889,7 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
|
||||
return errc, nil
|
||||
}
|
||||
|
||||
// collects options and opens the fuse mountpoint
|
||||
// collects options and opens the fuse mountpoint.
|
||||
func mountFuse(req *cmds.Request, cctx *oldcmds.Context) error {
|
||||
cfg, err := cctx.GetConfig()
|
||||
if err != nil {
|
||||
|
@ -25,7 +25,8 @@ func makeResolver(t *testing.T, n uint8) *madns.Resolver {
|
||||
backend := &madns.MockResolver{
|
||||
IP: map[string][]net.IPAddr{
|
||||
"example.com": results,
|
||||
}}
|
||||
},
|
||||
}
|
||||
|
||||
resolver, err := madns.NewResolver(madns.WithDefaultResolver(backend))
|
||||
if err != nil {
|
||||
|
@ -194,7 +194,7 @@ func checkWritable(dir string) error {
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
// dir doesn't exist, check that we can create it
|
||||
return os.Mkdir(dir, 0775)
|
||||
return os.Mkdir(dir, 0o775)
|
||||
}
|
||||
|
||||
if os.IsPermission(err) {
|
||||
|
@ -14,7 +14,7 @@ var Root = &cmds.Command{
|
||||
Helptext: commands.Root.Helptext,
|
||||
}
|
||||
|
||||
// commandsClientCmd is the "ipfs commands" command for local cli
|
||||
// commandsClientCmd is the "ipfs commands" command for local cli.
|
||||
var commandsClientCmd = commands.CommandsCmd(Root)
|
||||
|
||||
// Commands in localCommands should always be run locally (even if daemon is running).
|
||||
|
@ -39,11 +39,13 @@ import (
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// log is the command logger
|
||||
var log = logging.Logger("cmd/ipfs")
|
||||
var tracer trace.Tracer
|
||||
// log is the command logger.
|
||||
var (
|
||||
log = logging.Logger("cmd/ipfs")
|
||||
tracer trace.Tracer
|
||||
)
|
||||
|
||||
// declared as a var for testing purposes
|
||||
// declared as a var for testing purposes.
|
||||
var dnsResolver = madns.DefaultResolver
|
||||
|
||||
const (
|
||||
@ -73,7 +75,7 @@ func loadPlugins(repoPath string) (*loader.PluginLoader, error) {
|
||||
// - if user requests help, print it and exit.
|
||||
// - run the command invocation
|
||||
// - output the response
|
||||
// - if anything fails, print error, maybe with help
|
||||
// - if anything fails, print error, maybe with help.
|
||||
func main() {
|
||||
os.Exit(mainRet())
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ import (
|
||||
"github.com/ipfs/kubo/core"
|
||||
)
|
||||
|
||||
// mfslog is the logger for remote mfs pinning
|
||||
// mfslog is the logger for remote mfs pinning.
|
||||
var mfslog = logging.Logger("remotepinning/mfs")
|
||||
|
||||
type lastPin struct {
|
||||
|
@ -12,7 +12,7 @@ import (
|
||||
|
||||
// this abuses go so much that I felt dirty writing this code
|
||||
// but it is the only way to do it without writing custom compiler that would
|
||||
// be a clone of go-build with go-test
|
||||
// be a clone of go-build with go-test.
|
||||
func TestRunMain(t *testing.T) {
|
||||
args := flag.Args()
|
||||
os.Args = append([]string{os.Args[0]}, args...)
|
||||
@ -20,7 +20,7 @@ func TestRunMain(t *testing.T) {
|
||||
|
||||
p := os.Getenv("IPFS_COVER_RET_FILE")
|
||||
if len(p) != 0 {
|
||||
os.WriteFile(p, []byte(fmt.Sprintf("%d\n", ret)), 0777)
|
||||
os.WriteFile(p, []byte(fmt.Sprintf("%d\n", ret)), 0o777)
|
||||
}
|
||||
|
||||
// close outputs so go testing doesn't print anything
|
||||
|
@ -14,19 +14,19 @@ var log = logging.Logger("ulimit")
|
||||
var (
|
||||
supportsFDManagement = false
|
||||
|
||||
// getlimit returns the soft and hard limits of file descriptors counts
|
||||
// getlimit returns the soft and hard limits of file descriptors counts.
|
||||
getLimit func() (uint64, uint64, error)
|
||||
// set limit sets the soft and hard limits of file descriptors counts
|
||||
// set limit sets the soft and hard limits of file descriptors counts.
|
||||
setLimit func(uint64, uint64) error
|
||||
)
|
||||
|
||||
// minimum file descriptor limit before we complain
|
||||
// minimum file descriptor limit before we complain.
|
||||
const minFds = 2048
|
||||
|
||||
// default max file descriptor limit.
|
||||
const maxFds = 8192
|
||||
|
||||
// userMaxFDs returns the value of IPFS_FD_MAX
|
||||
// userMaxFDs returns the value of IPFS_FD_MAX.
|
||||
func userMaxFDs() uint64 {
|
||||
// check if the IPFS_FD_MAX is set up and if it does
|
||||
// not have a valid fds number notify the user
|
||||
@ -42,7 +42,7 @@ func userMaxFDs() uint64 {
|
||||
}
|
||||
|
||||
// ManageFdLimit raise the current max file descriptor count
|
||||
// of the process based on the IPFS_FD_MAX value
|
||||
// of the process based on the IPFS_FD_MAX value.
|
||||
func ManageFdLimit() (changed bool, newLimit uint64, err error) {
|
||||
if !supportsFDManagement {
|
||||
return false, 0, nil
|
||||
|
@ -24,9 +24,11 @@ import (
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
var http = flag.Bool("http", false, "expose IPFS HTTP API")
|
||||
var repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
|
||||
var watchPath = flag.String("path", ".", "the path to watch")
|
||||
var (
|
||||
http = flag.Bool("http", false, "expose IPFS HTTP API")
|
||||
repoPath = flag.String("repo", os.Getenv("IPFS_PATH"), "IPFS_PATH to use")
|
||||
watchPath = flag.String("path", ".", "the path to watch")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
@ -52,7 +54,6 @@ func main() {
|
||||
}
|
||||
|
||||
func run(ipfsPath, watchPath string) error {
|
||||
|
||||
proc := process.WithParent(process.Background())
|
||||
log.Printf("running IPFSWatch on '%s' using repo at '%s'...", watchPath, ipfsPath)
|
||||
|
||||
@ -93,7 +94,7 @@ func run(ipfsPath, watchPath string) error {
|
||||
|
||||
if *http {
|
||||
addr := "/ip4/127.0.0.1/tcp/5001"
|
||||
var opts = []corehttp.ServeOption{
|
||||
opts := []corehttp.ServeOption{
|
||||
corehttp.GatewayOption("/ipfs", "/ipns"),
|
||||
corehttp.WebUIOption,
|
||||
corehttp.CommandsOption(cmdCtx(node, ipfsPath)),
|
||||
|
@ -19,7 +19,7 @@ import (
|
||||
|
||||
var log = logging.Logger("command")
|
||||
|
||||
// Context represents request context
|
||||
// Context represents request context.
|
||||
type Context struct {
|
||||
ConfigRoot string
|
||||
ReqLog *ReqLog
|
||||
@ -54,7 +54,7 @@ func (c *Context) GetNode() (*core.IpfsNode, error) {
|
||||
}
|
||||
|
||||
// GetAPI returns CoreAPI instance backed by ipfs node.
|
||||
// It may construct the node with the provided function
|
||||
// It may construct the node with the provided function.
|
||||
func (c *Context) GetAPI() (coreiface.CoreAPI, error) {
|
||||
if c.api == nil {
|
||||
n, err := c.GetNode()
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// ReqLogEntry is an entry in the request log
|
||||
// ReqLogEntry is an entry in the request log.
|
||||
type ReqLogEntry struct {
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
@ -18,14 +18,14 @@ type ReqLogEntry struct {
|
||||
log *ReqLog
|
||||
}
|
||||
|
||||
// Copy returns a copy of the ReqLogEntry
|
||||
// Copy returns a copy of the ReqLogEntry.
|
||||
func (r *ReqLogEntry) Copy() *ReqLogEntry {
|
||||
out := *r
|
||||
out.log = nil
|
||||
return &out
|
||||
}
|
||||
|
||||
// ReqLog is a log of requests
|
||||
// ReqLog is a log of requests.
|
||||
type ReqLog struct {
|
||||
Requests []*ReqLogEntry
|
||||
nextID int
|
||||
@ -33,7 +33,7 @@ type ReqLog struct {
|
||||
keep time.Duration
|
||||
}
|
||||
|
||||
// AddEntry adds an entry to the log
|
||||
// AddEntry adds an entry to the log.
|
||||
func (rl *ReqLog) AddEntry(rle *ReqLogEntry) {
|
||||
rl.lock.Lock()
|
||||
defer rl.lock.Unlock()
|
||||
@ -47,7 +47,7 @@ func (rl *ReqLog) AddEntry(rle *ReqLogEntry) {
|
||||
}
|
||||
}
|
||||
|
||||
// ClearInactive removes stale entries
|
||||
// ClearInactive removes stale entries.
|
||||
func (rl *ReqLog) ClearInactive() {
|
||||
rl.lock.Lock()
|
||||
defer rl.lock.Unlock()
|
||||
@ -79,14 +79,14 @@ func (rl *ReqLog) cleanup() {
|
||||
rl.Requests = rl.Requests[:i]
|
||||
}
|
||||
|
||||
// SetKeepTime sets a duration after which an entry will be considered inactive
|
||||
// SetKeepTime sets a duration after which an entry will be considered inactive.
|
||||
func (rl *ReqLog) SetKeepTime(t time.Duration) {
|
||||
rl.lock.Lock()
|
||||
defer rl.lock.Unlock()
|
||||
rl.keep = t
|
||||
}
|
||||
|
||||
// Report generates a copy of all the entries in the requestlog
|
||||
// Report generates a copy of all the entries in the requestlog.
|
||||
func (rl *ReqLog) Report() []*ReqLogEntry {
|
||||
rl.lock.Lock()
|
||||
defer rl.lock.Unlock()
|
||||
@ -99,7 +99,7 @@ func (rl *ReqLog) Report() []*ReqLogEntry {
|
||||
return out
|
||||
}
|
||||
|
||||
// Finish marks an entry in the log as finished
|
||||
// Finish marks an entry in the log as finished.
|
||||
func (rl *ReqLog) Finish(rle *ReqLogEntry) {
|
||||
rl.lock.Lock()
|
||||
defer rl.lock.Unlock()
|
||||
|
@ -64,7 +64,7 @@ type AutoNATConfig struct {
|
||||
Throttle *AutoNATThrottleConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
// AutoNATThrottleConfig configures the throttle limites
|
||||
// AutoNATThrottleConfig configures the throttle limites.
|
||||
type AutoNATThrottleConfig struct {
|
||||
// GlobalLimit and PeerLimit sets the global and per-peer dialback
|
||||
// limits. The AutoNAT service will only perform the specified number of
|
||||
|
@ -41,17 +41,17 @@ type Config struct {
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultPathName is the default config dir name
|
||||
// DefaultPathName is the default config dir name.
|
||||
DefaultPathName = ".ipfs"
|
||||
// DefaultPathRoot is the path to the default config dir location.
|
||||
DefaultPathRoot = "~/" + DefaultPathName
|
||||
// DefaultConfigFile is the filename of the configuration file
|
||||
// DefaultConfigFile is the filename of the configuration file.
|
||||
DefaultConfigFile = "config"
|
||||
// EnvDir is the environment variable used to change the path root.
|
||||
EnvDir = "IPFS_PATH"
|
||||
)
|
||||
|
||||
// PathRoot returns the default configuration root directory
|
||||
// PathRoot returns the default configuration root directory.
|
||||
func PathRoot() (string, error) {
|
||||
dir := os.Getenv(EnvDir)
|
||||
var err error
|
||||
@ -95,7 +95,7 @@ func Filename(configroot, userConfigFile string) (string, error) {
|
||||
return userConfigFile, nil
|
||||
}
|
||||
|
||||
// HumanOutput gets a config value ready for printing
|
||||
// HumanOutput gets a config value ready for printing.
|
||||
func HumanOutput(value interface{}) ([]byte, error) {
|
||||
s, ok := value.(string)
|
||||
if ok {
|
||||
@ -104,7 +104,7 @@ func HumanOutput(value interface{}) ([]byte, error) {
|
||||
return Marshal(value)
|
||||
}
|
||||
|
||||
// Marshal configuration with JSON
|
||||
// Marshal configuration with JSON.
|
||||
func Marshal(value interface{}) ([]byte, error) {
|
||||
// need to prettyprint, hence MarshalIndent, instead of Encoder
|
||||
return json.MarshalIndent(value, "", " ")
|
||||
|
@ -26,7 +26,7 @@ type Datastore struct {
|
||||
}
|
||||
|
||||
// DataStorePath returns the default data store path given a configuration root
|
||||
// (set an empty string to have the default configuration root)
|
||||
// (set an empty string to have the default configuration root).
|
||||
func DataStorePath(configroot string) (string, error) {
|
||||
return Path(configroot, DefaultDataStoreDirectory)
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
package config
|
||||
|
||||
// DNS specifies DNS resolution rules using custom resolvers
|
||||
// DNS specifies DNS resolution rules using custom resolvers.
|
||||
type DNS struct {
|
||||
// Resolvers is a map of FQDNs to URLs for custom DNS resolution.
|
||||
// URLs starting with `https://` indicate DoH endpoints.
|
||||
|
@ -37,7 +37,6 @@ type GatewaySpec struct {
|
||||
|
||||
// Gateway contains options for the HTTP gateway server.
|
||||
type Gateway struct {
|
||||
|
||||
// HTTPHeaders configures the headers that should be returned by this
|
||||
// gateway.
|
||||
HTTPHeaders map[string][]string // HTTP headers to return with the gateway
|
||||
|
@ -6,9 +6,11 @@ import (
|
||||
ic "github.com/libp2p/go-libp2p/core/crypto"
|
||||
)
|
||||
|
||||
const IdentityTag = "Identity"
|
||||
const PrivKeyTag = "PrivKey"
|
||||
const PrivKeySelector = IdentityTag + "." + PrivKeyTag
|
||||
const (
|
||||
IdentityTag = "Identity"
|
||||
PrivKeyTag = "PrivKey"
|
||||
PrivKeySelector = IdentityTag + "." + PrivKeyTag
|
||||
)
|
||||
|
||||
// Identity tracks the configuration of the local node's identity.
|
||||
type Identity struct {
|
||||
@ -16,7 +18,7 @@ type Identity struct {
|
||||
PrivKey string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DecodePrivateKey is a helper to decode the users PrivateKey
|
||||
// DecodePrivateKey is a helper to decode the users PrivateKey.
|
||||
func (i *Identity) DecodePrivateKey(passphrase string) (ic.PrivKey, error) {
|
||||
pkb, err := base64.StdEncoding.DecodeString(i.PrivKey)
|
||||
if err != nil {
|
||||
|
@ -90,15 +90,15 @@ func InitWithIdentity(identity Identity) (*Config, error) {
|
||||
}
|
||||
|
||||
// DefaultConnMgrHighWater is the default value for the connection managers
|
||||
// 'high water' mark
|
||||
// 'high water' mark.
|
||||
const DefaultConnMgrHighWater = 96
|
||||
|
||||
// DefaultConnMgrLowWater is the default value for the connection managers 'low
|
||||
// water' mark
|
||||
// water' mark.
|
||||
const DefaultConnMgrLowWater = 32
|
||||
|
||||
// DefaultConnMgrGracePeriod is the default value for the connection managers
|
||||
// grace period
|
||||
// grace period.
|
||||
const DefaultConnMgrGracePeriod = time.Second * 20
|
||||
|
||||
// DefaultConnMgrType is the default value for the connection managers
|
||||
|
@ -5,7 +5,7 @@ const DefaultMigrationKeep = "cache"
|
||||
var DefaultMigrationDownloadSources = []string{"HTTPS", "IPFS"}
|
||||
|
||||
// Migration configures how migrations are downloaded and if the downloads are
|
||||
// added to IPFS locally
|
||||
// added to IPFS locally.
|
||||
type Migration struct {
|
||||
// Sources in order of preference, where "IPFS" means use IPFS and "HTTPS"
|
||||
// means use default gateways. Any other values are interpreted as
|
||||
|
@ -1,6 +1,6 @@
|
||||
package config
|
||||
|
||||
// Mounts stores the (string) mount points
|
||||
// Mounts stores the (string) mount points.
|
||||
type Mounts struct {
|
||||
IPFS string
|
||||
IPNS string
|
||||
|
@ -6,10 +6,10 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Transformer is a function which takes configuration and applies some filter to it
|
||||
// Transformer is a function which takes configuration and applies some filter to it.
|
||||
type Transformer func(c *Config) error
|
||||
|
||||
// Profile contains the profile transformer the description of the profile
|
||||
// Profile contains the profile transformer the description of the profile.
|
||||
type Profile struct {
|
||||
// Description briefly describes the functionality of the profile.
|
||||
Description string
|
||||
@ -43,7 +43,7 @@ var defaultServerFilters = []string{
|
||||
"/ip6/fe80::/ipcidr/10",
|
||||
}
|
||||
|
||||
// Profiles is a map holding configuration transformers. Docs are in docs/config.md
|
||||
// Profiles is a map holding configuration transformers. Docs are in docs/config.md.
|
||||
var Profiles = map[string]Profile{
|
||||
"server": {
|
||||
Description: `Disables local host discovery, recommended when
|
||||
|
@ -2,8 +2,10 @@ package config
|
||||
|
||||
import "time"
|
||||
|
||||
const DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326
|
||||
const DefaultReproviderStrategy = "all"
|
||||
const (
|
||||
DefaultReproviderInterval = time.Hour * 22 // https://github.com/ipfs/kubo/pull/9326
|
||||
DefaultReproviderStrategy = "all"
|
||||
)
|
||||
|
||||
type Reprovider struct {
|
||||
Interval *OptionalDuration `json:",omitempty"` // Time period to reprovide locally stored objects to the network
|
||||
|
@ -6,7 +6,7 @@ import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Routing defines configuration options for libp2p routing
|
||||
// Routing defines configuration options for libp2p routing.
|
||||
type Routing struct {
|
||||
// Type sets default daemon routing mode.
|
||||
//
|
||||
@ -23,7 +23,6 @@ type Routing struct {
|
||||
}
|
||||
|
||||
type Router struct {
|
||||
|
||||
// Router type ID. See RouterType for more info.
|
||||
Type RouterType
|
||||
|
||||
@ -32,11 +31,12 @@ type Router struct {
|
||||
Parameters interface{}
|
||||
}
|
||||
|
||||
type Routers map[string]RouterParser
|
||||
type Methods map[MethodName]Method
|
||||
type (
|
||||
Routers map[string]RouterParser
|
||||
Methods map[MethodName]Method
|
||||
)
|
||||
|
||||
func (m Methods) Check() error {
|
||||
|
||||
// Check supported methods
|
||||
for _, mn := range MethodNameList {
|
||||
_, ok := m[mn]
|
||||
|
@ -23,42 +23,46 @@ func TestRouterParameters(t *testing.T) {
|
||||
PublicIPNetwork: false,
|
||||
},
|
||||
}},
|
||||
"router-parallel": {Router{
|
||||
Type: RouterTypeParallel,
|
||||
Parameters: ComposableRouterParams{
|
||||
Routers: []ConfigRouter{
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: true,
|
||||
},
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: false,
|
||||
ExecuteAfter: &OptionalDuration{&sec},
|
||||
"router-parallel": {
|
||||
Router{
|
||||
Type: RouterTypeParallel,
|
||||
Parameters: ComposableRouterParams{
|
||||
Routers: []ConfigRouter{
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: true,
|
||||
},
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: false,
|
||||
ExecuteAfter: &OptionalDuration{&sec},
|
||||
},
|
||||
},
|
||||
Timeout: &OptionalDuration{&min},
|
||||
},
|
||||
Timeout: &OptionalDuration{&min},
|
||||
}},
|
||||
},
|
||||
},
|
||||
"router-sequential": {Router{
|
||||
Type: RouterTypeSequential,
|
||||
Parameters: ComposableRouterParams{
|
||||
Routers: []ConfigRouter{
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: true,
|
||||
},
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: false,
|
||||
"router-sequential": {
|
||||
Router{
|
||||
Type: RouterTypeSequential,
|
||||
Parameters: ComposableRouterParams{
|
||||
Routers: []ConfigRouter{
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: true,
|
||||
},
|
||||
{
|
||||
RouterName: "router-dht",
|
||||
Timeout: Duration{10 * time.Second},
|
||||
IgnoreErrors: false,
|
||||
},
|
||||
},
|
||||
Timeout: &OptionalDuration{&min},
|
||||
},
|
||||
Timeout: &OptionalDuration{&min},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
Methods: Methods{
|
||||
|
@ -35,12 +35,12 @@ func ReadConfigFile(filename string, cfg interface{}) error {
|
||||
|
||||
// WriteConfigFile writes the config from `cfg` into `filename`.
|
||||
func WriteConfigFile(filename string, cfg interface{}) error {
|
||||
err := os.MkdirAll(filepath.Dir(filename), 0755)
|
||||
err := os.MkdirAll(filepath.Dir(filename), 0o755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := atomicfile.New(filename, 0600)
|
||||
f, err := atomicfile.New(filename, 0o600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -49,7 +49,7 @@ func WriteConfigFile(filename string, cfg interface{}) error {
|
||||
return encode(f, cfg)
|
||||
}
|
||||
|
||||
// encode configuration with JSON
|
||||
// encode configuration with JSON.
|
||||
func encode(w io.Writer, value interface{}) error {
|
||||
// need to prettyprint, hence MarshalIndent, instead of Encoder
|
||||
buf, err := config.Marshal(value)
|
||||
|
@ -30,7 +30,7 @@ func TestConfig(t *testing.T) {
|
||||
}
|
||||
|
||||
if runtime.GOOS != "windows" { // see https://golang.org/src/os/types_windows.go
|
||||
if g := st.Mode().Perm(); g&0117 != 0 {
|
||||
if g := st.Mode().Perm(); g&0o117 != 0 {
|
||||
t.Fatalf("config file should not be executable or accessible to world: %v", g)
|
||||
}
|
||||
}
|
||||
|
@ -127,7 +127,7 @@ type Transports struct {
|
||||
}
|
||||
}
|
||||
|
||||
// ConnMgr defines configuration options for the libp2p connection manager
|
||||
// ConnMgr defines configuration options for the libp2p connection manager.
|
||||
type ConnMgr struct {
|
||||
Type *OptionalString `json:",omitempty"`
|
||||
LowWater *OptionalInteger `json:",omitempty"`
|
||||
|
@ -42,8 +42,10 @@ func (o Strings) MarshalJSON() ([]byte, error) {
|
||||
}
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = (*Strings)(nil)
|
||||
var _ json.Marshaler = (*Strings)(nil)
|
||||
var (
|
||||
_ json.Unmarshaler = (*Strings)(nil)
|
||||
_ json.Marshaler = (*Strings)(nil)
|
||||
)
|
||||
|
||||
// Flag represents a ternary value: false (-1), default (0), or true (+1).
|
||||
//
|
||||
@ -113,8 +115,10 @@ func (f Flag) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = (*Flag)(nil)
|
||||
var _ json.Marshaler = (*Flag)(nil)
|
||||
var (
|
||||
_ json.Unmarshaler = (*Flag)(nil)
|
||||
_ json.Marshaler = (*Flag)(nil)
|
||||
)
|
||||
|
||||
// Priority represents a value with a priority where 0 means "default" and -1
|
||||
// means "disabled".
|
||||
@ -210,17 +214,19 @@ func (p Priority) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = (*Priority)(nil)
|
||||
var _ json.Marshaler = (*Priority)(nil)
|
||||
var (
|
||||
_ json.Unmarshaler = (*Priority)(nil)
|
||||
_ json.Marshaler = (*Priority)(nil)
|
||||
)
|
||||
|
||||
// OptionalDuration wraps time.Duration to provide json serialization and deserialization.
|
||||
//
|
||||
// NOTE: the zero value encodes to JSON nill
|
||||
// NOTE: the zero value encodes to JSON nill.
|
||||
type OptionalDuration struct {
|
||||
value *time.Duration
|
||||
}
|
||||
|
||||
// NewOptionalDuration returns an OptionalDuration from a string
|
||||
// NewOptionalDuration returns an OptionalDuration from a string.
|
||||
func NewOptionalDuration(d time.Duration) *OptionalDuration {
|
||||
return &OptionalDuration{value: &d}
|
||||
}
|
||||
@ -266,8 +272,10 @@ func (d OptionalDuration) String() string {
|
||||
return d.value.String()
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = (*OptionalDuration)(nil)
|
||||
var _ json.Marshaler = (*OptionalDuration)(nil)
|
||||
var (
|
||||
_ json.Unmarshaler = (*OptionalDuration)(nil)
|
||||
_ json.Marshaler = (*OptionalDuration)(nil)
|
||||
)
|
||||
|
||||
type Duration struct {
|
||||
time.Duration
|
||||
@ -298,17 +306,19 @@ func (d *Duration) UnmarshalJSON(b []byte) error {
|
||||
}
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = (*Duration)(nil)
|
||||
var _ json.Marshaler = (*Duration)(nil)
|
||||
var (
|
||||
_ json.Unmarshaler = (*Duration)(nil)
|
||||
_ json.Marshaler = (*Duration)(nil)
|
||||
)
|
||||
|
||||
// OptionalInteger represents an integer that has a default value
|
||||
//
|
||||
// When encoded in json, Default is encoded as "null"
|
||||
// When encoded in json, Default is encoded as "null".
|
||||
type OptionalInteger struct {
|
||||
value *int64
|
||||
}
|
||||
|
||||
// NewOptionalInteger returns an OptionalInteger from a int64
|
||||
// NewOptionalInteger returns an OptionalInteger from a int64.
|
||||
func NewOptionalInteger(v int64) *OptionalInteger {
|
||||
return &OptionalInteger{value: &v}
|
||||
}
|
||||
@ -321,7 +331,7 @@ func (p *OptionalInteger) WithDefault(defaultValue int64) (value int64) {
|
||||
return *p.value
|
||||
}
|
||||
|
||||
// IsDefault returns if this is a default optional integer
|
||||
// IsDefault returns if this is a default optional integer.
|
||||
func (p *OptionalInteger) IsDefault() bool {
|
||||
return p == nil || p.value == nil
|
||||
}
|
||||
@ -355,17 +365,19 @@ func (p OptionalInteger) String() string {
|
||||
return fmt.Sprintf("%d", *p.value)
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = (*OptionalInteger)(nil)
|
||||
var _ json.Marshaler = (*OptionalInteger)(nil)
|
||||
var (
|
||||
_ json.Unmarshaler = (*OptionalInteger)(nil)
|
||||
_ json.Marshaler = (*OptionalInteger)(nil)
|
||||
)
|
||||
|
||||
// OptionalString represents a string that has a default value
|
||||
//
|
||||
// When encoded in json, Default is encoded as "null"
|
||||
// When encoded in json, Default is encoded as "null".
|
||||
type OptionalString struct {
|
||||
value *string
|
||||
}
|
||||
|
||||
// NewOptionalString returns an OptionalString from a string
|
||||
// NewOptionalString returns an OptionalString from a string.
|
||||
func NewOptionalString(s string) *OptionalString {
|
||||
return &OptionalString{value: &s}
|
||||
}
|
||||
@ -378,7 +390,7 @@ func (p *OptionalString) WithDefault(defaultValue string) (value string) {
|
||||
return *p.value
|
||||
}
|
||||
|
||||
// IsDefault returns if this is a default optional integer
|
||||
// IsDefault returns if this is a default optional integer.
|
||||
func (p *OptionalString) IsDefault() bool {
|
||||
return p == nil || p.value == nil
|
||||
}
|
||||
@ -412,8 +424,10 @@ func (p OptionalString) String() string {
|
||||
return *p.value
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = (*OptionalInteger)(nil)
|
||||
var _ json.Marshaler = (*OptionalInteger)(nil)
|
||||
var (
|
||||
_ json.Unmarshaler = (*OptionalInteger)(nil)
|
||||
_ json.Marshaler = (*OptionalInteger)(nil)
|
||||
)
|
||||
|
||||
type swarmLimits doNotUse
|
||||
|
||||
|
@ -129,7 +129,6 @@ func TestOneStrings(t *testing.T) {
|
||||
out, err := json.Marshal(Strings{"one"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
}
|
||||
expected := "\"one\""
|
||||
if string(out) != expected {
|
||||
@ -141,7 +140,6 @@ func TestNoStrings(t *testing.T) {
|
||||
out, err := json.Marshal(Strings{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
}
|
||||
expected := "null"
|
||||
if string(out) != expected {
|
||||
@ -153,7 +151,6 @@ func TestManyStrings(t *testing.T) {
|
||||
out, err := json.Marshal(Strings{"one", "two"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
}
|
||||
expected := "[\"one\",\"two\"]"
|
||||
if string(out) != expected {
|
||||
|
@ -85,7 +85,6 @@ func BootstrapConfigWithPeers(pis []peer.AddrInfo) BootstrapConfig {
|
||||
// connections to well-known bootstrap peers. It also kicks off subsystem
|
||||
// bootstrapping (i.e. routing).
|
||||
func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConfig) (io.Closer, error) {
|
||||
|
||||
// make a signal to wait for one bootstrap round to complete.
|
||||
doneWithRound := make(chan struct{})
|
||||
|
||||
@ -219,7 +218,6 @@ func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host,
|
||||
// Peers can be original bootstrap or temporary ones (drawn from a list of
|
||||
// persisted previously connected peers).
|
||||
func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
|
||||
defer cancel()
|
||||
id := host.ID()
|
||||
|
@ -374,9 +374,7 @@ func bootstrapRemove(r repo.Repo, cfg *config.Config, toRemove []string) ([]stri
|
||||
removed = append(removed, p)
|
||||
continue
|
||||
}
|
||||
var (
|
||||
keptAddrs, removedAddrs []ma.Multiaddr
|
||||
)
|
||||
var keptAddrs, removedAddrs []ma.Multiaddr
|
||||
// remove specific addresses
|
||||
filter:
|
||||
for _, addr := range p.Addrs {
|
||||
|
@ -10,8 +10,10 @@ import (
|
||||
mbase "github.com/multiformats/go-multibase"
|
||||
)
|
||||
|
||||
var OptionCidBase = cmds.StringOption("cid-base", "Multibase encoding used for version 1 CIDs in output.")
|
||||
var OptionUpgradeCidV0InOutput = cmds.BoolOption("upgrade-cidv0-in-output", "Upgrade version 0 to version 1 CIDs in output.")
|
||||
var (
|
||||
OptionCidBase = cmds.StringOption("cid-base", "Multibase encoding used for version 1 CIDs in output.")
|
||||
OptionUpgradeCidV0InOutput = cmds.BoolOption("upgrade-cidv0-in-output", "Upgrade version 0 to version 1 CIDs in output.")
|
||||
)
|
||||
|
||||
// GetCidEncoder processes the `cid-base` and `output-cidv1` options and
|
||||
// returns a encoder to use based on those parameters.
|
||||
|
@ -47,5 +47,4 @@ func CheckBlockSize(req *cmds.Request, size uint64) error {
|
||||
return fmt.Errorf("produced block is over 1MiB: big blocks can't be exchanged with other peers. consider using UnixFS for automatic chunking of bigger files, or pass --allow-big-block to override")
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
@ -71,6 +71,7 @@ func TestROCommands(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommands(t *testing.T) {
|
||||
list := []string{
|
||||
"/add",
|
||||
|
@ -208,7 +208,6 @@ complete -c ipfs --keep-order --no-files
|
||||
|
||||
{{ template "command" . }}
|
||||
`))
|
||||
|
||||
}
|
||||
|
||||
// writeBashCompletions generates a bash completion script for the given command tree.
|
||||
|
@ -581,5 +581,4 @@ func getRemotePinningServices(r repo.Repo) (map[string]config.RemotePinningServi
|
||||
}
|
||||
}
|
||||
return oldServices, nil
|
||||
|
||||
}
|
||||
|
@ -12,6 +12,5 @@ func TestScrubMapInternalDelete(t *testing.T) {
|
||||
}
|
||||
if len(m) != 0 {
|
||||
t.Errorf("expecting an empty map, got a non-empty map")
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -13,9 +13,9 @@ import (
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cidenc "github.com/ipfs/go-cidutil/cidenc"
|
||||
cmds "github.com/ipfs/go-ipfs-cmds"
|
||||
//gipfree "github.com/ipld/go-ipld-prime/impl/free"
|
||||
//gipselector "github.com/ipld/go-ipld-prime/traversal/selector"
|
||||
//gipselectorbuilder "github.com/ipld/go-ipld-prime/traversal/selector/builder"
|
||||
// gipfree "github.com/ipld/go-ipld-prime/impl/free"
|
||||
// gipselector "github.com/ipld/go-ipld-prime/traversal/selector"
|
||||
// gipselectorbuilder "github.com/ipld/go-ipld-prime/traversal/selector/builder"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -209,7 +209,6 @@ Specification of CAR formats: https://ipld.io/specs/transport/car/
|
||||
Run: dagImport,
|
||||
Encoders: cmds.EncoderMap{
|
||||
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, event *CarImportOutput) error {
|
||||
|
||||
silent, _ := req.Options[silentOptionName].(bool)
|
||||
if silent {
|
||||
return nil
|
||||
@ -343,9 +342,11 @@ func (s *DagStatSummary) String() string {
|
||||
func (s *DagStatSummary) incrementTotalSize(size uint64) {
|
||||
s.TotalSize += size
|
||||
}
|
||||
|
||||
func (s *DagStatSummary) incrementRedundantSize(size uint64) {
|
||||
s.redundantSize += size
|
||||
}
|
||||
|
||||
func (s *DagStatSummary) appendStats(stats *DagStat) {
|
||||
s.DagStatsArray = append(s.DagStatsArray, stats)
|
||||
}
|
||||
|
@ -79,7 +79,6 @@ func dagExport(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment
|
||||
}
|
||||
|
||||
func finishCLIExport(res cmds.Response, re cmds.ResponseEmitter) error {
|
||||
|
||||
var showProgress bool
|
||||
val, specified := res.Request().Options[progressOptionName]
|
||||
if !specified {
|
||||
|
@ -41,7 +41,7 @@ func ExternalBinary(instructions string) *cmds.Command {
|
||||
cmd := exec.Command(binname, req.Arguments...)
|
||||
|
||||
// TODO: make commands lib be able to pass stdin through daemon
|
||||
//cmd.Stdin = req.Stdin()
|
||||
// cmd.Stdin = req.Stdin()
|
||||
cmd.Stdin = io.LimitReader(nil, 0)
|
||||
cmd.Stdout = w
|
||||
cmd.Stderr = w
|
||||
|
@ -88,8 +88,10 @@ const (
|
||||
filesHashOptionName = "hash"
|
||||
)
|
||||
|
||||
var cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "Cid version to use. (experimental)")
|
||||
var hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set Cid version to 1 if used. (experimental)")
|
||||
var (
|
||||
cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "Cid version to use. (experimental)")
|
||||
hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set Cid version to 1 if used. (experimental)")
|
||||
)
|
||||
|
||||
var errFormat = errors.New("format was set by multiple options. Only one format option is allowed")
|
||||
|
||||
@ -131,7 +133,6 @@ var filesStatCmd = &cmds.Command{
|
||||
cmds.BoolOption(filesWithLocalOptionName, "Compute the amount of the dag that is local, and if possible the total size"),
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
|
||||
_, err := statGetFormatOptions(req)
|
||||
if err != nil {
|
||||
return cmds.Errorf(cmds.ErrClient, err.Error())
|
||||
@ -225,7 +226,6 @@ func moreThanOne(a, b, c bool) bool {
|
||||
}
|
||||
|
||||
func statGetFormatOptions(req *cmds.Request) (string, error) {
|
||||
|
||||
hash, _ := req.Options[filesHashOptionName].(bool)
|
||||
size, _ := req.Options[filesSizeOptionName].(bool)
|
||||
format, _ := req.Options[filesFormatOptionName].(string)
|
||||
@ -307,7 +307,6 @@ func walkBlock(ctx context.Context, dagserv ipld.DAGService, nd ipld.Node) (bool
|
||||
}
|
||||
|
||||
childLocal, childLocalSize, err := walkBlock(ctx, dagserv, child)
|
||||
|
||||
if err != nil {
|
||||
return local, sizeLocal, err
|
||||
}
|
||||
|
@ -118,7 +118,6 @@ var keyGenCmd = &cmds.Command{
|
||||
}
|
||||
|
||||
key, err := api.Key().Generate(req.Context, name, opts...)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -211,7 +210,6 @@ elsewhere. For example, using openssl to get a PEM with public key:
|
||||
stdKey, err := crypto.PrivKeyToStdKey(sk)
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting libp2p private key to std Go key: %w", err)
|
||||
|
||||
}
|
||||
// For some reason the ed25519.PrivateKey does not use pointer
|
||||
// receivers, so we need to convert it for MarshalPKCS8PrivateKey.
|
||||
@ -375,7 +373,6 @@ The PEM format allows for key generation outside of the IPFS node:
|
||||
sk, _, err = crypto.KeyPairFromStdKey(stdKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting std Go key to libp2p key: %w", err)
|
||||
|
||||
}
|
||||
case keyFormatLibp2pCleartextOption:
|
||||
sk, err = crypto.UnmarshalPrivateKey(data)
|
||||
|
@ -15,9 +15,7 @@ import (
|
||||
ke "github.com/ipfs/kubo/core/commands/keyencode"
|
||||
)
|
||||
|
||||
var (
|
||||
errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override")
|
||||
)
|
||||
var errAllowOffline = errors.New("can't publish while offline: pass `--allow-offline` to override")
|
||||
|
||||
const (
|
||||
ipfsPathOptionName = "ipfs-path"
|
||||
|
@ -370,9 +370,7 @@ var p2pCloseCmd = &cmds.Command{
|
||||
|
||||
proto := protocol.ID(protoOpt)
|
||||
|
||||
var (
|
||||
target, listen ma.Multiaddr
|
||||
)
|
||||
var target, listen ma.Multiaddr
|
||||
|
||||
if l {
|
||||
listen, err = ma.NewMultiaddr(listenOpt)
|
||||
|
@ -54,16 +54,18 @@ var remotePinServiceCmd = &cmds.Command{
|
||||
},
|
||||
}
|
||||
|
||||
const pinNameOptionName = "name"
|
||||
const pinCIDsOptionName = "cid"
|
||||
const pinStatusOptionName = "status"
|
||||
const pinServiceNameOptionName = "service"
|
||||
const pinServiceNameArgName = pinServiceNameOptionName
|
||||
const pinServiceEndpointArgName = "endpoint"
|
||||
const pinServiceKeyArgName = "key"
|
||||
const pinServiceStatOptionName = "stat"
|
||||
const pinBackgroundOptionName = "background"
|
||||
const pinForceOptionName = "force"
|
||||
const (
|
||||
pinNameOptionName = "name"
|
||||
pinCIDsOptionName = "cid"
|
||||
pinStatusOptionName = "status"
|
||||
pinServiceNameOptionName = "service"
|
||||
pinServiceNameArgName = pinServiceNameOptionName
|
||||
pinServiceEndpointArgName = "endpoint"
|
||||
pinServiceKeyArgName = "key"
|
||||
pinServiceStatOptionName = "stat"
|
||||
pinBackgroundOptionName = "background"
|
||||
pinForceOptionName = "force"
|
||||
)
|
||||
|
||||
type RemotePinOutput struct {
|
||||
Status string
|
||||
|
@ -63,5 +63,4 @@ func TestNormalizeEndpoint(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -16,8 +16,10 @@ import (
|
||||
|
||||
var log = logging.Logger("core/commands")
|
||||
|
||||
var ErrNotOnline = errors.New("this command must be run in online mode. Try running 'ipfs daemon' first")
|
||||
var ErrSelfUnsupported = errors.New("finding your own node in the DHT is currently not supported")
|
||||
var (
|
||||
ErrNotOnline = errors.New("this command must be run in online mode. Try running 'ipfs daemon' first")
|
||||
ErrSelfUnsupported = errors.New("finding your own node in the DHT is currently not supported")
|
||||
)
|
||||
|
||||
const (
|
||||
RepoDirOption = "repo-dir"
|
||||
|
@ -21,9 +21,7 @@ import (
|
||||
routing "github.com/libp2p/go-libp2p/core/routing"
|
||||
)
|
||||
|
||||
var (
|
||||
errAllowOffline = errors.New("can't put while offline: pass `--allow-offline` to override")
|
||||
)
|
||||
var errAllowOffline = errors.New("can't put while offline: pass `--allow-offline` to override")
|
||||
|
||||
const (
|
||||
dhtVerboseOptionName = "verbose"
|
||||
@ -75,7 +73,6 @@ var findProvidersRoutingCmd = &cmds.Command{
|
||||
}
|
||||
|
||||
c, err := cid.Parse(req.Arguments[0])
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -495,8 +492,10 @@ identified by QmFoo.
|
||||
Type: routing.QueryEvent{},
|
||||
}
|
||||
|
||||
type printFunc func(obj *routing.QueryEvent, out io.Writer, verbose bool) error
|
||||
type pfuncMap map[routing.QueryEventType]printFunc
|
||||
type (
|
||||
printFunc func(obj *routing.QueryEvent, out io.Writer, verbose bool) error
|
||||
pfuncMap map[routing.QueryEventType]printFunc
|
||||
)
|
||||
|
||||
func printEvent(obj *routing.QueryEvent, out io.Writer, verbose bool, override pfuncMap) error {
|
||||
if verbose {
|
||||
|
@ -345,7 +345,8 @@ var swarmResourcesCmd = &cmds.Command{
|
||||
Get a summary of all resources accounted for by the libp2p Resource Manager.
|
||||
This includes the limits and the usage against those limits.
|
||||
This can output a human readable table and JSON encoding.
|
||||
`},
|
||||
`,
|
||||
},
|
||||
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
|
||||
node, err := cmdenv.GetNode(env)
|
||||
if err != nil {
|
||||
|
@ -64,7 +64,6 @@ var log = logging.Logger("core")
|
||||
|
||||
// IpfsNode is IPFS Core module. It represents an IPFS instance.
|
||||
type IpfsNode struct {
|
||||
|
||||
// Self
|
||||
Identity peer.ID // the local node's identity
|
||||
|
||||
|
@ -29,8 +29,10 @@ type connInfo struct {
|
||||
}
|
||||
|
||||
// tag used in the connection manager when explicitly connecting to a peer.
const connectionManagerTag = "user-connect"
const connectionManagerWeight = 100
const (
connectionManagerTag = "user-connect"
connectionManagerWeight = 100
)

func (api *SwarmAPI) Connect(ctx context.Context, pi peer.AddrInfo) error {
ctx, span := tracing.Span(ctx, "CoreAPI.SwarmAPI", "Connect", trace.WithAttributes(attribute.String("peerid", pi.ID.String())))
@ -32,8 +32,10 @@ import (

type UnixfsAPI CoreAPI

var nilNode *core.IpfsNode
var once sync.Once
var (
nilNode *core.IpfsNode
once sync.Once
)

func getOrCreateNilNode() (*core.IpfsNode, error) {
once.Do(func() {
@ -41,7 +43,7 @@ func getOrCreateNilNode() (*core.IpfsNode, error) {
return
}
node, err := core.NewNode(context.Background(), &core.BuildCfg{
//TODO: need this to be true or all files
// TODO: need this to be true or all files
// hashed will be stored in memory!
NilRepo: true,
})
@ -253,7 +255,6 @@ func (api *UnixfsAPI) processLink(ctx context.Context, linkres ft.LinkResult, se
defer span.End()
if linkres.Link != nil {
span.SetAttributes(attribute.String("linkname", linkres.Link.Name), attribute.String("cid", linkres.Link.Cid.String()))

}

if linkres.Err != nil {
@ -314,7 +315,7 @@ func (api *UnixfsAPI) lsFromLinksAsync(ctx context.Context, dir uio.Directory, s
defer close(out)
for l := range dir.EnumLinksAsync(ctx) {
select {
case out <- api.processLink(ctx, l, settings): //TODO: perf: processing can be done in background and in parallel
case out <- api.processLink(ctx, l, settings): // TODO: perf: processing can be done in background and in parallel
case <-ctx.Done():
return
}
@ -329,7 +330,7 @@ func (api *UnixfsAPI) lsFromLinks(ctx context.Context, ndlinks []*ipld.Link, set
for _, l := range ndlinks {
lr := ft.LinkResult{Link: &ipld.Link{Name: l.Name, Size: l.Size, Cid: l.Cid}}

links <- api.processLink(ctx, lr, settings) //TODO: can be parallel if settings.Async
links <- api.processLink(ctx, lr, settings) // TODO: can be parallel if settings.Async
}
close(links)
return links, nil
@ -20,12 +20,11 @@ import (
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

var (
errAPIVersionMismatch = errors.New("api version mismatch")
)
var errAPIVersionMismatch = errors.New("api version mismatch")

const originEnvKey = "API_ORIGIN"
const originEnvKeyDeprecate = `You are using the ` + originEnvKey + `ENV Variable.
const (
originEnvKey = "API_ORIGIN"
originEnvKeyDeprecate = `You are using the ` + originEnvKey + `ENV Variable.
This functionality is deprecated, and will be removed in future versions.
Instead, try either adding headers to the config, or passing them via
cli arguments:
@ -33,6 +32,7 @@ cli arguments:
ipfs config API.HTTPHeaders --json '{"Access-Control-Allow-Origin": ["*"]}'
ipfs daemon
`
)

// APIPath is the path at which the API is mounted.
const APIPath = "/api/v0"
@ -100,7 +100,6 @@ func addCORSDefaults(c *cmdsHttp.ServerConfig) {
}

func patchCORSVars(c *cmdsHttp.ServerConfig, addr net.Addr) {

// we have to grab the port from an addr, which may be an ip6 addr.
// TODO: this should take multiaddrs and derive port from there.
port := ""
@ -125,7 +124,6 @@ func patchCORSVars(c *cmdsHttp.ServerConfig, addr net.Addr) {

func commandsOption(cctx oldcmds.Context, command *cmds.Command, allowGet bool) ServeOption {
return func(n *core.IpfsNode, l net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {

cfg := cmdsHttp.NewServerConfig()
cfg.AllowGet = allowGet
corsAllowedMethods := []string{http.MethodPost}
@ -151,13 +151,11 @@ func MetricsCollectionOption(handlerName string) ServeOption {
}
}

var (
peersTotalMetric = prometheus.NewDesc(
prometheus.BuildFQName("ipfs", "p2p", "peers_total"),
"Number of connected peers",
[]string{"transport"},
nil,
)
var peersTotalMetric = prometheus.NewDesc(
prometheus.BuildFQName("ipfs", "p2p", "peers_total"),
"Number of connected peers",
[]string{"transport"},
nil,
)

type IpfsNodeCollector struct {
@ -179,11 +179,9 @@ func TestAddGCLive(t *testing.T) {
defer close(addDone)
defer close(out)
_, err := adder.AddAllAndPin(context.Background(), slf)

if err != nil {
t.Error(err)
}

}()

addedHashes := make(map[string]struct{})
@ -248,7 +248,6 @@ var IPNS = fx.Options(

// Online groups online-only units
func Online(bcfg *BuildCfg, cfg *config.Config, userResourceOverrides rcmgr.PartialLimitConfig) fx.Option {

// Namesys params

ipnsCacheSize := cfg.Ipns.ResolveCacheSize
@ -2,6 +2,7 @@ package helpers

import (
"context"

"go.uber.org/fx"
)

@ -118,7 +118,8 @@ filled in with autocomputed defaults.`)
lc.Append(fx.Hook{
OnStop: func(_ context.Context) error {
return manager.Close()
}})
},
})

return manager, opts, nil
}
@ -31,8 +31,10 @@ type loggingScope struct {
countErrs func(error)
}

var _ network.ResourceManager = (*loggingResourceManager)(nil)
var _ rcmgr.ResourceManagerState = (*loggingResourceManager)(nil)
var (
_ network.ResourceManager = (*loggingResourceManager)(nil)
_ rcmgr.ResourceManagerState = (*loggingResourceManager)(nil)
)

func (n *loggingResourceManager) start(ctx context.Context) {
logInterval := n.logInterval
@ -85,36 +87,43 @@ func (n *loggingResourceManager) countErrs(err error) {
func (n *loggingResourceManager) ViewSystem(f func(network.ResourceScope) error) error {
return n.delegate.ViewSystem(f)
}

func (n *loggingResourceManager) ViewTransient(f func(network.ResourceScope) error) error {
return n.delegate.ViewTransient(func(s network.ResourceScope) error {
return f(&loggingScope{logger: n.logger, delegate: s, countErrs: n.countErrs})
})
}

func (n *loggingResourceManager) ViewService(svc string, f func(network.ServiceScope) error) error {
return n.delegate.ViewService(svc, func(s network.ServiceScope) error {
return f(&loggingScope{logger: n.logger, delegate: s, countErrs: n.countErrs})
})
}

func (n *loggingResourceManager) ViewProtocol(p protocol.ID, f func(network.ProtocolScope) error) error {
return n.delegate.ViewProtocol(p, func(s network.ProtocolScope) error {
return f(&loggingScope{logger: n.logger, delegate: s, countErrs: n.countErrs})
})
}

func (n *loggingResourceManager) ViewPeer(p peer.ID, f func(network.PeerScope) error) error {
return n.delegate.ViewPeer(p, func(s network.PeerScope) error {
return f(&loggingScope{logger: n.logger, delegate: s, countErrs: n.countErrs})
})
}

func (n *loggingResourceManager) OpenConnection(dir network.Direction, usefd bool, remote ma.Multiaddr) (network.ConnManagementScope, error) {
connMgmtScope, err := n.delegate.OpenConnection(dir, usefd, remote)
n.countErrs(err)
return connMgmtScope, err
}

func (n *loggingResourceManager) OpenStream(p peer.ID, dir network.Direction) (network.StreamManagementScope, error) {
connMgmtScope, err := n.delegate.OpenStream(p, dir)
n.countErrs(err)
return connMgmtScope, err
}

func (n *loggingResourceManager) Close() error {
return n.delegate.Close()
}
@ -127,6 +136,7 @@ func (n *loggingResourceManager) ListServices() []string {

return rapi.ListServices()
}

func (n *loggingResourceManager) ListProtocols() []protocol.ID {
rapi, ok := n.delegate.(rcmgr.ResourceManagerState)
if !ok {
@ -135,6 +145,7 @@ func (n *loggingResourceManager) ListProtocols() []protocol.ID {

return rapi.ListProtocols()
}

func (n *loggingResourceManager) ListPeers() []peer.ID {
rapi, ok := n.delegate.(rcmgr.ResourceManagerState)
if !ok {
@ -158,54 +169,69 @@ func (s *loggingScope) ReserveMemory(size int, prio uint8) error {
s.countErrs(err)
return err
}

func (s *loggingScope) ReleaseMemory(size int) {
s.delegate.ReleaseMemory(size)
}

func (s *loggingScope) Stat() network.ScopeStat {
return s.delegate.Stat()
}

func (s *loggingScope) BeginSpan() (network.ResourceScopeSpan, error) {
return s.delegate.BeginSpan()
}

func (s *loggingScope) Done() {
s.delegate.(network.ResourceScopeSpan).Done()
}

func (s *loggingScope) Name() string {
return s.delegate.(network.ServiceScope).Name()
}

func (s *loggingScope) Protocol() protocol.ID {
return s.delegate.(network.ProtocolScope).Protocol()
}

func (s *loggingScope) Peer() peer.ID {
return s.delegate.(network.PeerScope).Peer()
}

func (s *loggingScope) PeerScope() network.PeerScope {
return s.delegate.(network.PeerScope)
}

func (s *loggingScope) SetPeer(p peer.ID) error {
err := s.delegate.(network.ConnManagementScope).SetPeer(p)
s.countErrs(err)
return err
}

func (s *loggingScope) ProtocolScope() network.ProtocolScope {
return s.delegate.(network.ProtocolScope)
}

func (s *loggingScope) SetProtocol(proto protocol.ID) error {
err := s.delegate.(network.StreamManagementScope).SetProtocol(proto)
s.countErrs(err)
return err
}

func (s *loggingScope) ServiceScope() network.ServiceScope {
return s.delegate.(network.ServiceScope)
}

func (s *loggingScope) SetService(srv string) error {
err := s.delegate.(network.StreamManagementScope).SetService(srv)
s.countErrs(err)
return err
}

func (s *loggingScope) Limit() rcmgr.Limit {
return s.delegate.(rcmgr.ResourceScopeLimiter).Limit()
}

func (s *loggingScope) SetLimit(limit rcmgr.Limit) {
s.delegate.(rcmgr.ResourceScopeLimiter).SetLimit(limit)
}
@ -232,7 +232,6 @@ func PubsubRouter(mctx helpers.MetricsCtx, lc fx.Lifecycle, in p2pPSRoutingIn) (
in.Validator,
namesys.WithRebroadcastInterval(time.Minute),
)

if err != nil {
return p2pRouterOut{}, nil, err
}
@ -21,7 +21,6 @@ func TopicDiscovery() interface{} {
baseDisc,
backoff.NewExponentialBackoff(minBackoff, maxBackoff, backoff.FullJitter, time.Second, 5.0, 0, rng),
)

if err != nil {
return nil, err
}
@ -18,7 +18,8 @@ func Transports(tptConfig config.Transports) interface{} {
return func(pnet struct {
fx.In
Fprint PNetFingerprint `optional:"true"`
}) (opts Libp2pOpts, err error) {
},
) (opts Libp2pOpts, err error) {
privateNetworkEnabled := pnet.Fprint != nil

if tptConfig.Network.TCP.WithDefault(true) {
@ -85,7 +85,7 @@ func createTempRepo() (string, error) {

/// ------ Spawning the node

// Creates an IPFS node and returns its coreAPI
// Creates an IPFS node and returns its coreAPI.
func createNode(ctx context.Context, repoPath string) (*core.IpfsNode, error) {
// Open the repo
repo, err := fsrepo.Open(repoPath)
@ -107,7 +107,7 @@ func createNode(ctx context.Context, repoPath string) (*core.IpfsNode, error) {

var loadPluginsOnce sync.Once

// Spawns a node to be used just for this run (i.e. creates a tmp repo)
// Spawns a node to be used just for this run (i.e. creates a tmp repo).
func spawnEphemeral(ctx context.Context) (icore.CoreAPI, *core.IpfsNode, error) {
var onceErr error
loadPluginsOnce.Do(func() {
@ -56,7 +56,7 @@ func writeFileOrFail(t *testing.T, size int, path string) []byte {

func writeFile(size int, path string) ([]byte, error) {
data := randBytes(size)
err := os.WriteFile(path, data, 0666)
err := os.WriteFile(path, data, 0o666)
return data, err
}
@ -156,7 +156,7 @@ func TestIpnsLocalLink(t *testing.T) {
}
}

// Test writing a file and reading it back
// Test writing a file and reading it back.
func TestIpnsBasicIO(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -187,7 +187,7 @@ func TestIpnsBasicIO(t *testing.T) {
}
}

// Test to make sure file changes persist over mounts of ipns
// Test to make sure file changes persist over mounts of ipns.
func TestFilePersistence(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -250,7 +250,7 @@ func TestMultipleDirs(t *testing.T) {
mnt.Close()
}

// Test to make sure the filesystem reports file sizes correctly
// Test to make sure the filesystem reports file sizes correctly.
func TestFileSizeReporting(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -271,7 +271,7 @@ func TestFileSizeReporting(t *testing.T) {
}
}

// Test to make sure you can't create multiple entries with the same name
// Test to make sure you can't create multiple entries with the same name.
func TestDoubleEntryFailure(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -280,12 +280,12 @@ func TestDoubleEntryFailure(t *testing.T) {
defer mnt.Close()

dname := mnt.Dir + "/local/thisisadir"
err := os.Mkdir(dname, 0777)
err := os.Mkdir(dname, 0o777)
if err != nil {
t.Fatal(err)
}

err = os.Mkdir(dname, 0777)
err = os.Mkdir(dname, 0o777)
if err == nil {
t.Fatal("Should have gotten error one creating new directory.")
}
@ -301,7 +301,7 @@ func TestAppendFile(t *testing.T) {
fname := mnt.Dir + "/local/file"
data := writeFileOrFail(t, 1300, fname)

fi, err := os.OpenFile(fname, os.O_RDWR|os.O_APPEND, 0666)
fi, err := os.OpenFile(fname, os.O_RDWR|os.O_APPEND, 0o666)
if err != nil {
t.Fatal(err)
}
@ -463,9 +463,8 @@ func TestFSThrash(t *testing.T) {
}
}

// Test writing a medium sized file one byte at a time
// Test writing a medium sized file one byte at a time.
func TestMultiWrite(t *testing.T) {

if testing.Short() {
t.SkipNow()
}
@ -149,7 +149,7 @@ func CreateRoot(ctx context.Context, ipfs iface.CoreAPI, keys map[string]iface.K
// Attr returns file attributes.
func (r *Root) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Root Attr")
a.Mode = os.ModeDir | 0111 // -rw+x
a.Mode = os.ModeDir | 0o111 // -rw+x
return nil
}
@ -212,7 +212,7 @@ func (r *Root) Forget() {
}

// ReadDirAll reads a particular directory. Will show locally available keys
// as well as a symlink to the peerID key
// as well as a symlink to the peerID key.
func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
log.Debug("Root ReadDirAll")
@ -231,7 +231,7 @@ func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
return listing, nil
}

// Directory is wrapper over an mfs directory to satisfy the fuse fs interface
// Directory is wrapper over an mfs directory to satisfy the fuse fs interface.
type Directory struct {
dir *mfs.Directory
}
@ -240,7 +240,7 @@ type FileNode struct {
fi *mfs.File
}

// File is wrapper over an mfs file to satisfy the fuse fs interface
// File is wrapper over an mfs file to satisfy the fuse fs interface.
type File struct {
fi mfs.FileDescriptor
}
@ -248,7 +248,7 @@ type File struct {
// Attr returns the attributes of a given node.
func (d *Directory) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Directory Attr")
a.Mode = os.ModeDir | 0555
a.Mode = os.ModeDir | 0o555
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
return nil
@ -262,7 +262,7 @@ func (fi *FileNode) Attr(ctx context.Context, a *fuse.Attr) error {
// In this case, the dag node in question may not be unixfs
return fmt.Errorf("fuse/ipns: failed to get file.Size(): %s", err)
}
a.Mode = os.FileMode(0666)
a.Mode = os.FileMode(0o666)
a.Size = uint64(size)
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
@ -289,7 +289,7 @@ func (d *Directory) Lookup(ctx context.Context, name string) (fs.Node, error) {
}
}

// ReadDirAll reads the link structure as directory entries
// ReadDirAll reads the link structure as directory entries.
func (d *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
listing, err := d.dir.List(ctx)
if err != nil {
@ -491,7 +491,7 @@ func (d *Directory) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
return nil
}

// Rename implements NodeRenamer
// Rename implements NodeRenamer.
func (d *Directory) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error {
cur, err := d.dir.Child(req.OldName)
if err != nil {
@ -531,7 +531,7 @@ func min(a, b int) int {
return b
}

// to check that out Node implements all the interfaces we want
// to check that out Node implements all the interfaces we want.
type ipnsRoot interface {
fs.Node
fs.HandleReadDirAller
@ -565,5 +565,7 @@ type ipnsFileNode interface {
fs.NodeOpener
}

var _ ipnsFileNode = (*FileNode)(nil)
var _ ipnsFile = (*File)(nil)
var (
_ ipnsFileNode = (*FileNode)(nil)
_ ipnsFile = (*File)(nil)
)
@ -17,7 +17,7 @@ type Link struct {

func (l *Link) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Link attr.")
a.Mode = os.ModeSymlink | 0555
a.Mode = os.ModeSymlink | 0o555
return nil
}
@ -16,7 +16,7 @@ import (

var ErrNotMounted = errors.New("not mounted")

// mount implements go-ipfs/fuse/mount
// mount implements go-ipfs/fuse/mount.
type mount struct {
mpoint string
filesys fs.FS
@ -34,7 +34,7 @@ func NewMount(p goprocess.Process, fsys fs.FS, mountpoint string, allowOther boo
var conn *fuse.Conn
var err error

var mountOpts = []fuse.MountOption{
mountOpts := []fuse.MountOption{
fuse.MaxReadahead(64 * 1024 * 1024),
fuse.AsyncRead(),
}
@ -16,7 +16,7 @@ var log = logging.Logger("mount")

var MountTimeout = time.Second * 5

// Mount represents a filesystem mount
// Mount represents a filesystem mount.
type Mount interface {
// MountPoint is the path at which this mount is mounted
MountPoint() string
@ -65,7 +65,7 @@ func ForceUnmount(m Mount) error {
}

// UnmountCmd creates an exec.Cmd that is GOOS-specific
// for unmount a FUSE mount
// for unmount a FUSE mount.
func UnmountCmd(point string) (*exec.Cmd, error) {
switch runtime.GOOS {
case "darwin":
@ -25,7 +25,7 @@ func init() {
// skip fuse checks.
const dontCheckOSXFUSEConfigKey = "DontCheckOSXFUSE"

// fuseVersionPkg is the go pkg url for fuse-version
// fuseVersionPkg is the go pkg url for fuse-version.
const fuseVersionPkg = "github.com/jbenet/go-fuse-version/fuse-version"

// errStrFuseRequired is returned when we're sure the user does not have fuse.
@ -32,7 +32,7 @@ func mkdir(t *testing.T, path string) {
}
}

// Test externally unmounting, then trying to unmount in code
// Test externally unmounting, then trying to unmount in code.
func TestExternalUnmount(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -19,14 +19,14 @@ import (

var log = logging.Logger("node")

// fuseNoDirectory used to check the returning fuse error
// fuseNoDirectory used to check the returning fuse error.
const fuseNoDirectory = "fusermount: failed to access mountpoint"

// fuseExitStatus1 used to check the returning fuse error
// fuseExitStatus1 used to check the returning fuse error.
const fuseExitStatus1 = "fusermount: exit status 1"

// platformFuseChecks can get overridden by arch-specific files
// to run fuse checks (like checking the OSXFUSE version)
// to run fuse checks (like checking the OSXFUSE version).
var platformFuseChecks = func(*core.IpfsNode) error {
return nil
}
@ -79,7 +79,7 @@ func setupIpfsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.M
return node, mnt
}

// Test writing an object and reading it back through fuse
// Test writing an object and reading it back through fuse.
func TestIpfsBasicRead(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -122,7 +122,7 @@ func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.ProtoNode)
return out
}

// Perform a large number of concurrent reads to stress the system
// Perform a large number of concurrent reads to stress the system.
func TestIpfsStressRead(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -194,8 +194,8 @@ func TestIpfsStressRead(t *testing.T) {
errs <- err
}

//nd.Context() is never closed which leads to
//hitting 8128 goroutine limit in go test -race mode
// nd.Context() is never closed which leads to
// hitting 8128 goroutine limit in go test -race mode
ctx, cancelFunc := context.WithCancel(context.Background())

read, err := api.Unixfs().Get(ctx, item)
@ -229,7 +229,7 @@ func TestIpfsStressRead(t *testing.T) {
}
}

// Test writing a file and reading it back
// Test writing a file and reading it back.
func TestIpfsBasicDirRead(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -280,7 +280,7 @@ func TestIpfsBasicDirRead(t *testing.T) {
}
}

// Test to make sure the filesystem reports file sizes correctly
// Test to make sure the filesystem reports file sizes correctly.
func TestFileSizeReporting(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -49,7 +49,7 @@ type Root struct {

// Attr returns file attributes.
func (*Root) Attr(ctx context.Context, a *fuse.Attr) error {
a.Mode = os.ModeDir | 0111 // -rw+x
a.Mode = os.ModeDir | 0o111 // -rw+x
return nil
}
@ -139,7 +139,7 @@ func (s *Node) loadData() error {
func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error {
log.Debug("Node attr")
if rawnd, ok := s.Nd.(*mdag.RawNode); ok {
a.Mode = 0444
a.Mode = 0o444
a.Size = uint64(len(rawnd.RawData()))
a.Blocks = 1
return nil
@ -152,18 +152,18 @@ func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error {
}
switch s.cached.Type() {
case ft.TDirectory, ft.THAMTShard:
a.Mode = os.ModeDir | 0555
a.Mode = os.ModeDir | 0o555
case ft.TFile:
size := s.cached.FileSize()
a.Mode = 0444
a.Mode = 0o444
a.Size = uint64(size)
a.Blocks = uint64(len(s.Nd.Links()))
case ft.TRaw:
a.Mode = 0444
a.Mode = 0o444
a.Size = uint64(len(s.cached.Data()))
a.Blocks = uint64(len(s.Nd.Links()))
case ft.TSymlink:
a.Mode = 0777 | os.ModeSymlink
a.Mode = 0o777 | os.ModeSymlink
a.Size = uint64(len(s.cached.Data()))
default:
return fmt.Errorf("invalid data type - %s", s.cached.Type())
@ -195,7 +195,7 @@ func (s *Node) Lookup(ctx context.Context, name string) (fs.Node, error) {
return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
}

// ReadDirAll reads the link structure as directory entries
// ReadDirAll reads the link structure as directory entries.
func (s *Node) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
log.Debug("Node ReadDir")
dir, err := uio.NewDirectoryFromNode(s.Ipfs.DAG, s.Nd)
@ -284,7 +284,7 @@ func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR
return nil // may be non-nil / not succeeded
}

// to check that out Node implements all the interfaces we want
// to check that out Node implements all the interfaces we want.
type roRoot interface {
fs.Node
fs.HandleReadDirAller
gc/gc.go
@ -191,7 +191,6 @@ func Descendants(ctx context.Context, getLinks dag.GetLinks, set *cid.Set, roots
err := dag.Walk(ctx, verifyGetLinks, wrapper.C, func(k cid.Cid) bool {
return set.Visit(toCidV1(k))
}, dag.Concurrent())

if err != nil {
err = verboseCidError(err)
return err
@ -10,7 +10,7 @@ import (
ma "github.com/multiformats/go-multiaddr"
)

// Listener listens for connections and proxies them to a target
// Listener listens for connections and proxies them to a target.
type Listener interface {
Protocol() protocol.ID
ListenAddress() ma.Multiaddr
@ -23,7 +23,7 @@ type Listener interface {
}

// Listeners manages a group of Listener implementations,
// checking for conflicts and optionally dispatching connections
// checking for conflicts and optionally dispatching connections.
type Listeners struct {
sync.RWMutex

@ -60,7 +60,7 @@ func newListenersP2P(host p2phost.Host) *Listeners {
return reg
}

// Register registers listenerInfo into this registry and starts it
// Register registers listenerInfo into this registry and starts it.
func (r *Listeners) Register(l Listener) error {
r.Lock()
defer r.Unlock()
@ -12,7 +12,7 @@ import (
manet "github.com/multiformats/go-multiaddr/net"
)

// localListener manet streams and proxies them to libp2p services
// localListener manet streams and proxies them to libp2p services.
type localListener struct {
ctx context.Context
@ -25,7 +25,7 @@ type localListener struct {
listener manet.Listener
}

// ForwardLocal creates new P2P stream to a remote listener
// ForwardLocal creates new P2P stream to a remote listener.
func (p2p *P2P) ForwardLocal(ctx context.Context, peer peer.ID, proto protocol.ID, bindAddr ma.Multiaddr) (Listener, error) {
listener := &localListener{
ctx: ctx,
@ -10,7 +10,7 @@ import (

var log = logging.Logger("p2p-mount")

// P2P structure holds information on currently running streams/Listeners
// P2P structure holds information on currently running streams/Listeners.
type P2P struct {
ListenersLocal *Listeners
ListenersP2P *Listeners
@ -21,7 +21,7 @@ type P2P struct {
peerstore pstore.Peerstore
}

// New creates new P2P struct
// New creates new P2P struct.
func New(identity peer.ID, peerHost p2phost.Host, peerstore pstore.Peerstore) *P2P {
return &P2P{
identity: identity,
@ -40,7 +40,7 @@ func New(identity peer.ID, peerHost p2phost.Host, peerstore pstore.Peerstore) *P
}

// CheckProtoExists checks whether a proto handler is registered to
// mux handler
// mux handler.
func (p2p *P2P) CheckProtoExists(proto protocol.ID) bool {
protos := p2p.peerHost.Mux().Protocols()
@ -12,7 +12,7 @@ import (

var maPrefix = "/" + ma.ProtocolWithCode(ma.P_IPFS).Name + "/"

// remoteListener accepts libp2p streams and proxies them to a manet host
// remoteListener accepts libp2p streams and proxies them to a manet host.
type remoteListener struct {
p2p *P2P
@ -27,7 +27,7 @@ type remoteListener struct {
reportRemote bool
}

// ForwardRemote creates new p2p listener
// ForwardRemote creates new p2p listener.
func (p2p *P2P) ForwardRemote(ctx context.Context, proto protocol.ID, addr ma.Multiaddr, reportRemote bool) (Listener, error) {
listener := &remoteListener{
p2p: p2p,
@ -30,12 +30,12 @@ type Stream struct {
Registry *StreamRegistry
}

// close stream endpoints and deregister it
// close stream endpoints and deregister it.
func (s *Stream) close() {
s.Registry.Close(s)
}

// reset closes stream endpoints and deregisters it
// reset closes stream endpoints and deregisters it.
func (s *Stream) reset() {
s.Registry.Reset(s)
}
@ -71,7 +71,7 @@ type StreamRegistry struct {
ifconnmgr.ConnManager
}

// Register registers a stream to the registry
// Register registers a stream to the registry.
func (r *StreamRegistry) Register(streamInfo *Stream) {
r.Lock()
defer r.Unlock()
@ -86,7 +86,7 @@ func (r *StreamRegistry) Register(streamInfo *Stream) {
streamInfo.startStreaming()
}

// Deregister deregisters stream from the registry
// Deregister deregisters stream from the registry.
func (r *StreamRegistry) Deregister(streamID uint64) {
r.Lock()
defer r.Unlock()
@ -105,14 +105,14 @@ func (r *StreamRegistry) Deregister(streamID uint64) {
delete(r.Streams, streamID)
}

// Close stream endpoints and deregister it
// Close stream endpoints and deregister it.
func (r *StreamRegistry) Close(s *Stream) {
_ = s.Local.Close()
_ = s.Remote.Close()
s.Registry.Deregister(s.id)
}

// Reset closes stream endpoints and deregisters it
// Reset closes stream endpoints and deregisters it.
func (r *StreamRegistry) Reset(s *Stream) {
_ = s.Local.Close()
_ = s.Remote.Reset()
@ -201,7 +201,7 @@ func (ps *PeeringService) Start() error {
return nil
}

// GetState get the State of the PeeringService
// GetState get the State of the PeeringService.
func (ps *PeeringService) GetState() State {
ps.mu.RLock()
defer ps.mu.RUnlock()
@ -306,6 +306,7 @@ func (nn *netNotifee) Connected(_ network.Network, c network.Conn) {
go handler.stopIfConnected()
}
}

func (nn *netNotifee) Disconnected(_ network.Network, c network.Conn) {
ps := (*PeeringService)(nn)
@ -5,7 +5,7 @@ import (
)

// PluginDatastore is an interface that can be implemented to add handlers for
// for different datastores
// for different datastores.
type PluginDatastore interface {
Plugin
@ -5,7 +5,7 @@ import (
)

// PluginIPLD is an interface that can be implemented to add handlers for
// for different IPLD codecs
// for different IPLD codecs.
type PluginIPLD interface {
Plugin
@ -93,7 +93,7 @@ type PluginLoader struct {
repo string
}

// NewPluginLoader creates new plugin loader
// NewPluginLoader creates new plugin loader.
func NewPluginLoader(repo string) (*PluginLoader, error) {
loader := &PluginLoader{plugins: make([]plugin.Plugin, 0, len(preloadPlugins)), repo: repo}
if repo != "" {
@ -226,7 +226,7 @@ func loadDynamicPlugins(pluginDir string) ([]plugin.Plugin, error) {
return nil
}

if info.Mode().Perm()&0111 == 0 {
if info.Mode().Perm()&0o111 == 0 {
// file is not executable let's not load it
// this is to prevent loading plugins from for example non-executable
// mounts, some /tmp mounts are marked as such for security
@ -245,7 +245,7 @@ func loadDynamicPlugins(pluginDir string) ([]plugin.Plugin, error) {
return plugins, err
}

// Initialize initializes all loaded plugins
// Initialize initializes all loaded plugins.
func (loader *PluginLoader) Initialize() error {
if err := loader.transition(loaderLoading, loaderInitializing); err != nil {
return err
@ -13,7 +13,7 @@ import (
badgerds "github.com/ipfs/go-ds-badger"
)

// Plugins is exported list of plugins that will be loaded
// Plugins is exported list of plugins that will be loaded.
var Plugins = []plugin.Plugin{
&badgerdsPlugin{},
}
@ -47,7 +47,7 @@ type datastoreConfig struct {
}

// BadgerdsDatastoreConfig returns a configuration stub for a badger datastore
// from the given parameters
// from the given parameters.
func (*badgerdsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) {
var c datastoreConfig
@ -113,7 +113,7 @@ func (c *datastoreConfig) Create(path string) (repo.Datastore, error) {
p = filepath.Join(path, p)
}

err := os.MkdirAll(p, 0755)
err := os.MkdirAll(p, 0o755)
if err != nil {
return nil, err
}
@ -8,7 +8,7 @@ import (
mc "github.com/multiformats/go-multicodec"
)

// Plugins is exported list of plugins that will be loaded
// Plugins is exported list of plugins that will be loaded.
var Plugins = []plugin.Plugin{
&dagjosePlugin{},
}
@ -11,7 +11,7 @@ import (
flatfs "github.com/ipfs/go-ds-flatfs"
)

// Plugins is exported list of plugins that will be loaded
// Plugins is exported list of plugins that will be loaded.
var Plugins = []plugin.Plugin{
&flatfsPlugin{},
}
@ -43,7 +43,7 @@ type datastoreConfig struct {
}

// BadgerdsDatastoreConfig returns a configuration stub for a badger datastore
// from the given parameters
// from the given parameters.
func (*flatfsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap {
return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) {
var c datastoreConfig
@ -13,7 +13,7 @@ import (
mc "github.com/multiformats/go-multicodec"
)

// Plugins is exported list of plugins that will be loaded
// Plugins is exported list of plugins that will be loaded.
var Plugins = []plugin.Plugin{
&gitPlugin{},
}
Some files were not shown because too many files have changed in this diff