mirror of https://github.com/ipfs/kubo.git synced 2025-08-02 17:22:42 +08:00
kubo/core/builder.go
Steven Allen 3eba14aa24 gx update
Updates:

* go-kad-dht: Query performance improvements, DHT client fixes, validates
  records on *local* put.
* go-libp2p-swarm/go-libp2p-transport: Timeout improvements.
* go-multiaddr-net: Exposes useful Conn methods (CloseWrite, CloseRead, etc.)
* go-log: fixes possible panic when enabling/disabling events.
* go-multiaddr: fixes possible panic when stringifying malformed multiaddrs,
  adds support for consuming /p2p/ multiaddrs.

fixes #5113
unblocks #4895

License: MIT
Signed-off-by: Steven Allen <steven@stebalien.com>
2018-06-26 17:11:33 -07:00

273 lines
7.1 KiB
Go

package core

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"os"
	"syscall"
	"time"

	bserv "github.com/ipfs/go-ipfs/blockservice"
	filestore "github.com/ipfs/go-ipfs/filestore"
	dag "github.com/ipfs/go-ipfs/merkledag"
	resolver "github.com/ipfs/go-ipfs/path/resolver"
	pin "github.com/ipfs/go-ipfs/pin"
	repo "github.com/ipfs/go-ipfs/repo"
	cfg "github.com/ipfs/go-ipfs/repo/config"
	"github.com/ipfs/go-ipfs/thirdparty/verifbs"
	uio "github.com/ipfs/go-ipfs/unixfs/io"

	offline "gx/ipfs/QmRCgkkCmf1nMrW2BLZZtjP3Xyw3GfZVYRLix9wrnW4NoR/go-ipfs-exchange-offline"
	metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
	goprocessctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
	record "gx/ipfs/QmVsp2KdPYE6M8ryzCk5KHLo3zprcY5hBDaYx6uPCFUdxA/go-libp2p-record"
	libp2p "gx/ipfs/QmZ86eLPtXkQ1Dfa992Q8NpXArUoWWh3y728JDcWvzRrvC/go-libp2p"
	pstore "gx/ipfs/QmZR2XWVVBCtbgBWnQhWk2xcQfaR3W8faQPriAiaaj7rsr/go-libp2p-peerstore"
	p2phost "gx/ipfs/Qmb8T6YBBsjYsVGfrihQLfCJveczZnneSBqBKkYEBWDjge/go-libp2p-host"
	peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
	bstore "gx/ipfs/QmdpuJBPBZ6sLPj9BQpn3Rpi38BT2cF1QMiUfyzNWeySW4/go-ipfs-blockstore"
	ipns "gx/ipfs/Qmdue1XShFNi3mpizGx9NR9hyNEj6U2wEW93yGhKqKCFGN/go-ipns"
	ci "gx/ipfs/Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5/go-libp2p-crypto"
	ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore"
	retry "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/retrystore"
	dsync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync"
)
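// BuildCfg collects the options used by NewNode to construct an IpfsNode.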
type BuildCfg struct {
	// If Online is set, the node will have networking enabled.
	Online bool

	// ExtraOpts is a map of extra options used to configure the ipfs node's creation.
	ExtraOpts map[string]bool

	// If Permanent is set, the node should run the more expensive processes
	// that improve performance in the long run.
	Permanent bool

	// DisableEncryptedConnections disables connection encryption *entirely*.
	// DO NOT SET THIS UNLESS YOU'RE TESTING.
	DisableEncryptedConnections bool

	// If NilRepo is set, a repo backed by a null datastore will be constructed.
	NilRepo bool

	Routing RoutingOption
	Host    HostOption
	Repo    repo.Repo
}
func (cfg *BuildCfg) getOpt(key string) bool {
	if cfg.ExtraOpts == nil {
		return false
	}
	return cfg.ExtraOpts[key]
}
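// fillDefaults fills in defaults for any unset fields: a mock repo backed by
// an in-memory (or, with NilRepo, a null) datastore, DHT routing, and the
// default host option.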
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}
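// defaultRepo builds a mock repo around the given datastore, with a freshly
// generated RSA identity and the default bootstrap addresses.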
func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	pid, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = pid.Pretty()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}

// NewNode constructs and returns an IpfsNode using the given cfg.
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}

	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}

	ctx = metrics.CtxScope(ctx, "ipfs")

	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: pstore.NewPeerstore(),
	}

	n.RecordValidator = record.NamespacedValidator{
		"pk":   record.PublicKeyValidator{},
		"ipns": ipns.Validator{KeyBook: n.Peerstore},
	}

	if cfg.Online {
		n.mode = onlineMode
	}

	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}

	return n, nil
}
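// isTooManyFDError reports whether err is an EMFILE ("too many open files")
// error; the retrying datastore in setupNode treats such errors as temporary.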
func isTooManyFDError(err error) bool {
	perr, ok := err.(*os.PathError)
	if ok && perr.Err == syscall.EMFILE {
		return true
	}

	return false
}
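// setupNode wires up the node's subsystems: peer ID, a retrying datastore,
// a hash-verifying and cached blockstore (plus the filestore when enabled),
// the exchange, blockservice, DAG service, pinner and resolver, and, when
// cfg.Online is set, the online services.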
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	// hash security
	bs := bstore.NewBlockstore(rds)
	bs = &verifbs.VerifBS{Blockstore: bs}

	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	// TEMP: setting global sharding switch here
	uio.UseHAMTSharding = conf.Experimental.ShardingEnabled

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permanent {
		opts.HasBloomFilterSize = 0
	}

	cbs, err := bstore.CachedBlockstore(ctx, bs, opts)
	if err != nil {
		return err
	}

	n.BaseBlocks = cbs
	n.GCLocker = bstore.NewGCLocker()
	n.Blockstore = bstore.NewGCBlockstore(cbs, n.GCLocker)

	if conf.Experimental.FilestoreEnabled {
		// hash security
		n.Filestore = filestore.NewFilestore(cbs, n.Repo.FileManager())
		n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker)
		n.Blockstore = &verifbs.VerifBSGC{GCBlockstore: n.Blockstore}
	}

	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.HashOnRead(true)
	}
	hostOption := cfg.Host
	if cfg.DisableEncryptedConnections {
		innerHostOption := hostOption
		hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
			return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...)
		}
		log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
You will not be able to connect to any nodes configured to use encrypted connections`)
	}

	if cfg.Online {
		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, hostOption, do, cfg.getOpt("pubsub"), cfg.getOpt("ipnsps"), cfg.getOpt("mplex")); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
	}
	n.Resolver = resolver.NewBasicResolver(n.DAG)

	if cfg.Online {
		if err := n.startLateOnlineServices(ctx); err != nil {
			return err
		}
	}

	return n.loadFilesRoot()
}