kubo/core/builder.go (mirror of https://github.com/ipfs/kubo.git)
Commit ab0c668ab8 by Jeromy: fix panic caused by accessing config after repo closed
License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
Date: 2015-09-03 09:28:36 -07:00

package core

import (
"crypto/rand"
"encoding/base64"
"errors"
ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
dsync "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
goprocessctx "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context"
context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
key "github.com/ipfs/go-ipfs/blocks/key"
bserv "github.com/ipfs/go-ipfs/blockservice"
offline "github.com/ipfs/go-ipfs/exchange/offline"
dag "github.com/ipfs/go-ipfs/merkledag"
ci "github.com/ipfs/go-ipfs/p2p/crypto"
peer "github.com/ipfs/go-ipfs/p2p/peer"
path "github.com/ipfs/go-ipfs/path"
pin "github.com/ipfs/go-ipfs/pin"
repo "github.com/ipfs/go-ipfs/repo"
cfg "github.com/ipfs/go-ipfs/repo/config"
)
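
// BuildCfg describes the options used to construct an IpfsNode.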
type BuildCfg struct {
	// If Online is set, the node will have networking enabled
	Online bool

	// If NilRepo is set, a repo backed by a null datastore will be constructed
	NilRepo bool

	Routing RoutingOption
	Host    HostOption
	Repo    repo.Repo
}
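
// fillDefaults validates the config and fills in defaults for any unset
// fields: an in-memory mock repo, DHT routing, and the default host option.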
func (cfg *BuildCfg) fillDefaults() error {
	if cfg.Repo != nil && cfg.NilRepo {
		return errors.New("cannot set a repo and specify nilrepo at the same time")
	}

	if cfg.Repo == nil {
		var d ds.Datastore
		d = ds.NewMapDatastore()
		if cfg.NilRepo {
			d = ds.NewNullDatastore()
		}
		r, err := defaultRepo(dsync.MutexWrap(d))
		if err != nil {
			return err
		}
		cfg.Repo = r
	}

	if cfg.Routing == nil {
		cfg.Routing = DHTOption
	}

	if cfg.Host == nil {
		cfg.Host = DefaultHostOption
	}

	return nil
}
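
// defaultRepo builds an in-memory mock repo around the given datastore,
// generating a fresh RSA identity and default swarm and bootstrap addresses.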
func defaultRepo(dstore ds.ThreadSafeDatastore) (repo.Repo, error) {
	c := cfg.Config{}
	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
	if err != nil {
		return nil, err
	}

	data, err := pub.Hash()
	if err != nil {
		return nil, err
	}

	privkeyb, err := priv.Bytes()
	if err != nil {
		return nil, err
	}

	c.Bootstrap = cfg.DefaultBootstrapAddresses
	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
	c.Identity.PeerID = key.Key(data).B58String()
	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)

	return &repo.Mock{
		D: dstore,
		C: c,
	}, nil
}
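
// NewNode constructs and returns an IpfsNode using the given cfg. A nil cfg
// is treated as the default (offline, in-memory repo) configuration.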
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}

	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}

	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: peer.NewPeerstore(),
	}
	if cfg.Online {
		n.mode = onlineMode
	}

	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}

	return n, nil
}
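
// setupNode wires up the node's blockstore, exchange, blockservice, DAG,
// pinner and resolver, and starts the online services when cfg.Online is set.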
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	var err error
	n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
	if err != nil {
		return err
	}

	if cfg.Online {
		rcfg, err := n.Repo.Config()
		if err != nil {
			return err
		}

		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	return nil
}
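
For reference, below is a minimal sketch (not part of builder.go) of how this builder might be driven from calling code. It assumes the vendored context import path used above and the Identity and Close members exposed by IpfsNode in this version of go-ipfs; it constructs an offline node backed by the in-memory mock repo that fillDefaults supplies.

package main

import (
	"fmt"

	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	core "github.com/ipfs/go-ipfs/core"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Leaving Repo unset makes fillDefaults build an in-memory mock repo with
	// a freshly generated RSA identity; Online: false keeps networking off.
	node, err := core.NewNode(ctx, &core.BuildCfg{Online: false})
	if err != nil {
		panic(err)
	}
	defer node.Close()

	fmt.Println("peer ID:", node.Identity.Pretty())
}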