package core

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"os"
	"syscall"
	"time"

	filestore "github.com/ipfs/go-ipfs/filestore"
	namesys "github.com/ipfs/go-ipfs/namesys"
	pin "github.com/ipfs/go-ipfs/pin"
	repo "github.com/ipfs/go-ipfs/repo"
	cidv0v1 "github.com/ipfs/go-ipfs/thirdparty/cidv0v1"
	"github.com/ipfs/go-ipfs/thirdparty/verifbs"

	resolver "gx/ipfs/QmNYPETsdAu2uQ1k9q9S1jYEGURaLHV6cbYRSVFVRftpF8/go-path/resolver"
	ci "gx/ipfs/QmNiJiXwWE3kRhZrC5ej3kSjWHm337pYfhjLGSCDNKJP2s/go-libp2p-crypto"
	pstore "gx/ipfs/QmPiemjiKBC9VA7vZF82m4x1oygtg2c2YVqag8PX7dN1BD/go-libp2p-peerstore"
	pstoremem "gx/ipfs/QmPiemjiKBC9VA7vZF82m4x1oygtg2c2YVqag8PX7dN1BD/go-libp2p-peerstore/pstoremem"
	uio "gx/ipfs/QmQXze9tG878pa4Euya4rrDpyTNX3kQe4dhCaBzBozGgpe/go-unixfs/io"
	bstore "gx/ipfs/QmS2aqUZLJp8kF1ihE5rvDGE5LvmKDPnx32w9Z1BW9xLV5/go-ipfs-blockstore"
	goprocessctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
	dag "gx/ipfs/QmTQdH4848iTVCJmKXYyRiK72HufWTLYQQ8iN3JaQ8K1Hq/go-merkledag"
	offroute "gx/ipfs/QmVZ6cQXHoTQja4oo9GhhHZi7dThi4x98mRKgGtKnTy37u/go-ipfs-routing/offline"
	ipns "gx/ipfs/QmWPFehHmySCdaGttQ48iwF7M6mBRrGE5GSPWKCuMWqJDR/go-ipns"
	peer "gx/ipfs/QmY5Grm8pJdiSSVsYxx4uNRgweY72EmYwuSDbRnbFok3iY/go-libp2p-peer"
	bserv "gx/ipfs/QmYPZzd9VqmJDwxUnThfeSbV1Y5o53aVPDijTB7j7rS9Ep/go-blockservice"
	offline "gx/ipfs/QmYZwey1thDTynSrvd6qQkX24UpTka6TFhQ2v569UpoqxD/go-ipfs-exchange-offline"
	libp2p "gx/ipfs/QmYxivS34F2M2n44WQQnRHGAKS8aoRUxwGpi9wk4Cdn4Jf/go-libp2p"
	p2phost "gx/ipfs/QmaoXrM4Z41PD48JY36YqQGKQpLGjyLA2cKcLsES7YddAq/go-libp2p-host"
	cfg "gx/ipfs/QmcRKBUqc2p3L1ZraoJjbXfs9E6xzvEuyK9iypb5RGwfsr/go-ipfs-config"
	metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface"
	ds "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore"
	retry "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore/retrystore"
	dsync "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore/sync"
	record "gx/ipfs/QmfARXVCzpwFXQdepAJZuqyNDgV9doEsMnVCo1ssmuSe1U/go-libp2p-record"
)

type BuildCfg struct {
	// If Online is set, the node will have networking enabled.
	Online bool

	// ExtraOpts is a map of extra options used to configure the IPFS node's creation.
	ExtraOpts map[string]bool

	// If Permanent is set, the node should run more expensive processes
	// that will improve performance in the long run.
	Permanent bool

	// DisableEncryptedConnections disables connection encryption *entirely*.
	// DO NOT SET THIS UNLESS YOU'RE TESTING.
	DisableEncryptedConnections bool

	// If NilRepo is set, a repo backed by a null datastore will be constructed.
	NilRepo bool

	Routing RoutingOption
	Host    HostOption
	Repo    repo.Repo
}
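
// onlineBuildCfgSketch is an illustrative sketch, not part of the original
// builder: one plausible way a caller might fill in BuildCfg for a
// long-running online node with the pubsub experiment enabled. Routing, Host
// and Repo are deliberately left nil so that fillDefaults supplies the DHT,
// the default host constructor and a default in-memory repo.
func onlineBuildCfgSketch() *BuildCfg {
	return &BuildCfg{
		Online:    true,
		Permanent: true,
		ExtraOpts: map[string]bool{"pubsub": true},
	}
}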

func (cfg *BuildCfg) getOpt(key string) bool {
	if cfg.ExtraOpts == nil {
		return false
	}

	return cfg.ExtraOpts[key]
}
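
// getOptSketch is an illustrative sketch, not part of the original builder:
// it shows how experimental toggles flow through ExtraOpts. Keys that were
// never set, and a nil ExtraOpts map, both read back as false.
func getOptSketch() bool {
	c := &BuildCfg{ExtraOpts: map[string]bool{"pubsub": true}}
	return c.getOpt("pubsub") && !c.getOpt("mplex") // true
}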

func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()

		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}
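
// fillDefaultsSketch is an illustrative sketch, not part of the original
// builder: after fillDefaults, a zero-value BuildCfg is backed by an
// in-memory mock repo, DHT routing and the default host option, so NewNode
// can proceed without any caller-supplied configuration.
func fillDefaultsSketch() (*BuildCfg, error) {
	c := &BuildCfg{}
	if err := c.fillDefaults(); err != nil {
		return nil, err
	}
	// c.Repo, c.Routing and c.Host are all non-nil at this point.
	return c, nil
}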

func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	pid, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = pid.Pretty()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}
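
// defaultRepoSketch is an illustrative sketch, not part of the original
// builder: it mirrors the wiring fillDefaults performs, wrapping a bare
// datastore for concurrent use before defaultRepo fills in a fresh RSA
// identity and default config around it.
func defaultRepoSketch() (repo.Repo, error) {
	return defaultRepo(dsync.MutexWrap(ds.NewMapDatastore()))
}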

// NewNode constructs and returns an IpfsNode using the given cfg.
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}

	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}

	ctx = metrics.CtxScope(ctx, "ipfs")

	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: pstoremem.NewPeerstore(),
	}

	n.RecordValidator = record.NamespacedValidator{
		"pk":   record.PublicKeyValidator{},
		"ipns": ipns.Validator{KeyBook: n.Peerstore},
	}

	if cfg.Online {
		n.mode = onlineMode
	}

	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}

	return n, nil
}
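
// newOfflineNodeSketch is an illustrative sketch, not part of the original
// builder: it builds a throwaway offline node backed by a null datastore,
// the kind of node typically used in tests. The caller is expected to call
// Close on the returned node when finished with it.
func newOfflineNodeSketch(ctx context.Context) (*IpfsNode, error) {
	return NewNode(ctx, &BuildCfg{NilRepo: true})
}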

func isTooManyFDError(err error) bool {
	perr, ok := err.(*os.PathError)
	if ok && perr.Err == syscall.EMFILE {
		return true
	}

	return false
}
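
// tooManyFDExample is an illustrative sketch, not part of the original
// builder: it shows the error shape the predicate above matches, an
// *os.PathError wrapping syscall.EMFILE, which is what a failed open
// returns when the process has exhausted its file descriptors.
func tooManyFDExample() bool {
	err := &os.PathError{Op: "open", Path: "/tmp/example", Err: syscall.EMFILE}
	return isTooManyFDError(err) // true
}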

func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local identity
	if err := n.loadID(); err != nil {
		return err
	}

	// load the private key (if present)
	if err := n.loadPrivateKey(); err != nil {
		return err
	}

	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	// hash security
	bs := bstore.NewBlockstore(rds)
	bs = &verifbs.VerifBS{Blockstore: bs}

	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	// TEMP: setting global sharding switch here
	uio.UseHAMTSharding = conf.Experimental.ShardingEnabled

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permanent {
		opts.HasBloomFilterSize = 0
	}

	if !cfg.NilRepo {
		bs, err = bstore.CachedBlockstore(ctx, bs, opts)
		if err != nil {
			return err
		}
	}

	bs = bstore.NewIdStore(bs)

	bs = cidv0v1.NewBlockstore(bs)

	n.BaseBlocks = bs
	n.GCLocker = bstore.NewGCLocker()
	n.Blockstore = bstore.NewGCBlockstore(bs, n.GCLocker)

	if conf.Experimental.FilestoreEnabled || conf.Experimental.UrlstoreEnabled {
		// hash security
		n.Filestore = filestore.NewFilestore(bs, n.Repo.FileManager())
		n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker)
		n.Blockstore = &verifbs.VerifBSGC{GCBlockstore: n.Blockstore}
	}

	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.HashOnRead(true)
	}

	hostOption := cfg.Host
	if cfg.DisableEncryptedConnections {
		innerHostOption := hostOption
		hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
			return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...)
		}
		log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
You will not be able to connect to any nodes configured to use encrypted connections`)
	}

	if cfg.Online {
		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, hostOption, do, cfg.getOpt("pubsub"), cfg.getOpt("ipnsps"), cfg.getOpt("mplex")); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
		n.Routing = offroute.NewOfflineRouter(n.Repo.Datastore(), n.RecordValidator)
		n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), 0)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
	}
	n.Resolver = resolver.NewBasicResolver(n.DAG)

	if cfg.Online {
		if err := n.startLateOnlineServices(ctx); err != nil {
			return err
		}
	}

	return n.loadFilesRoot()
}