package core

import (
	"crypto/rand"
	"encoding/base64"
	"errors"

	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	key "github.com/ipfs/go-ipfs/blocks/key"
	bserv "github.com/ipfs/go-ipfs/blockservice"
	offline "github.com/ipfs/go-ipfs/exchange/offline"
	dag "github.com/ipfs/go-ipfs/merkledag"
	path "github.com/ipfs/go-ipfs/path"
	pin "github.com/ipfs/go-ipfs/pin"
	repo "github.com/ipfs/go-ipfs/repo"
	cfg "github.com/ipfs/go-ipfs/repo/config"

	ds "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore"
	dsync "gx/ipfs/QmZ6A6P6AMo8SR3jXAwzTuSU6B9R2Y4eqW2yW9VvfUayDN/go-datastore/sync"

	goprocessctx "gx/ipfs/QmQopLATEYMNg7dVqZRNDfeE2S1yKy8zrRh5xnYiuqeZBn/goprocess/context"
	ci "gx/ipfs/QmUEUu1CM8bxBJxc3ZLojAi8evhTr4byQogWstABet79oY/go-libp2p-crypto"
	pstore "gx/ipfs/QmXHUpFsnpCmanRnacqYkFoLoFfEq5yS2nUgGkAjJ1Nj9j/go-libp2p-peerstore"
	context "gx/ipfs/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt/go-net/context"
)

// BuildCfg carries the options used to construct an IpfsNode.
type BuildCfg struct {
	// If Online is set, the node will have networking enabled.
	Online bool

	// If NilRepo is set, a repo backed by a nil datastore will be constructed.
	NilRepo bool

	Routing RoutingOption
	Host    HostOption
	Repo    repo.Repo
}
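
// A minimal usage sketch (illustrative only; assumes a context.Context named
// ctx is in scope): constructing an offline, in-memory node and letting
// fillDefaults supply the repo, routing, and host options.
//
//	nd, err := NewNode(ctx, &BuildCfg{NilRepo: true})
//	if err != nil {
//		// handle the error
//	}
//	defer nd.Close()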

func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}
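
// defaultRepo builds an in-memory mock repo around dstore, generating a
// fresh RSA identity for it. Note: the 1024-bit key keeps test setup cheap,
// but is too small for a production identity.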
func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	// The peer ID is derived from the multihash of the public key.
	data, err := pub.Hash()
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = key.Key(data).B58String()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}
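
// NewNode constructs and returns an IpfsNode using the given cfg. A nil cfg
// is treated as an empty one, so all defaults apply.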
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}

	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}

	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: pstore.NewPeerstore(),
	}
	if cfg.Online {
		n.mode = onlineMode
	}

	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}

	return n, nil
}
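
// setupNode wires up the node's core services: blockstore, exchange,
// blockservice, DAG service, pinner, and path resolver. Online services are
// started only when cfg.Online is set; otherwise an offline exchange is used.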
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	var err error
	n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
	if err != nil {
		return err
	}

	if cfg.Online {
		rcfg, err := n.Repo.Config()
		if err != nil {
			return err
		}

		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// This is kinda sketchy and could cause data loss.
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

	return nil
}