
refactor(routing) rename grandcentral to supernode

thanks @mappum

remove .go-ipfs
Brian Tiger Chow
2015-02-04 04:07:30 -08:00
parent 0918636651
commit 19eb35137b
7 changed files with 37 additions and 37 deletions

View File

@@ -12,7 +12,7 @@ import (
syncds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
core "github.com/jbenet/go-ipfs/core"
corehttp "github.com/jbenet/go-ipfs/core/corehttp"
"github.com/jbenet/go-ipfs/core/corerouting"
corerouting "github.com/jbenet/go-ipfs/core/corerouting"
config "github.com/jbenet/go-ipfs/repo/config"
fsrepo "github.com/jbenet/go-ipfs/repo/fsrepo"
s3datastore "github.com/jbenet/go-ipfs/thirdparty/s3-datastore"
@@ -68,7 +68,7 @@ func run() error {
node, err := core.NewIPFSNode(ctx,
core.OnlineWithOptions(
repo,
- corerouting.GrandCentralServer(enhanced),
+ corerouting.SupernodeServer(enhanced),
core.DefaultHostOption),
)
if err != nil {

View File

@@ -8,8 +8,8 @@ import (
core "github.com/jbenet/go-ipfs/core"
"github.com/jbenet/go-ipfs/p2p/peer"
routing "github.com/jbenet/go-ipfs/routing"
- grandcentral "github.com/jbenet/go-ipfs/routing/grandcentral"
- gcproxy "github.com/jbenet/go-ipfs/routing/grandcentral/proxy"
+ supernode "github.com/jbenet/go-ipfs/routing/supernode"
+ gcproxy "github.com/jbenet/go-ipfs/routing/supernode/proxy"
)
// NB: DHT option is included in the core to avoid 1) because it's a sane
@@ -17,16 +17,16 @@ import (
// the core if it's going to be the default)
var (
- errHostMissing = errors.New("grandcentral client requires a Host component")
- errIdentityMissing = errors.New("grandcentral server requires a peer ID identity")
- errPeerstoreMissing = errors.New("grandcentral server requires a peerstore")
- errServersMissing = errors.New("grandcentral client requires at least 1 server peer")
+ errHostMissing = errors.New("supernode routing client requires a Host component")
+ errIdentityMissing = errors.New("supernode routing server requires a peer ID identity")
+ errPeerstoreMissing = errors.New("supernode routing server requires a peerstore")
+ errServersMissing = errors.New("supernode routing client requires at least 1 server peer")
)
- // GrandCentralServer returns a configuration for a routing server that stores
+ // SupernodeServer returns a configuration for a routing server that stores
// routing records to the provided datastore. Only routing records are stored in
// the datastore.
- func GrandCentralServer(recordSource datastore.ThreadSafeDatastore) core.RoutingOption {
+ func SupernodeServer(recordSource datastore.ThreadSafeDatastore) core.RoutingOption {
return func(ctx context.Context, node *core.IpfsNode) (routing.IpfsRouting, error) {
if node.Peerstore == nil {
return nil, errPeerstoreMissing
@@ -37,7 +37,7 @@ func GrandCentralServer(recordSource datastore.ThreadSafeDatastore) core.Routing
if node.Identity == "" {
return nil, errIdentityMissing
}
- server, err := grandcentral.NewServer(recordSource, node.Peerstore, node.Identity)
+ server, err := supernode.NewServer(recordSource, node.Peerstore, node.Identity)
if err != nil {
return nil, err
}
@@ -45,13 +45,13 @@ func GrandCentralServer(recordSource datastore.ThreadSafeDatastore) core.Routing
Handler: server,
Local: node.Identity,
}
- node.PeerHost.SetStreamHandler(gcproxy.ProtocolGCR, proxy.HandleStream)
- return grandcentral.NewClient(proxy, node.PeerHost, node.Peerstore, node.Identity)
+ node.PeerHost.SetStreamHandler(gcproxy.ProtocolSNR, proxy.HandleStream)
+ return supernode.NewClient(proxy, node.PeerHost, node.Peerstore, node.Identity)
}
}
// TODO doc
- func GrandCentralClient(remotes ...peer.PeerInfo) core.RoutingOption {
+ func SupernodeClient(remotes ...peer.PeerInfo) core.RoutingOption {
return func(ctx context.Context, node *core.IpfsNode) (routing.IpfsRouting, error) {
if len(remotes) < 1 {
return nil, errServersMissing
@@ -78,7 +78,7 @@ func GrandCentralClient(remotes ...peer.PeerInfo) core.RoutingOption {
ids = append(ids, info.ID)
}
proxy := gcproxy.Standard(node.PeerHost, ids)
- node.PeerHost.SetStreamHandler(gcproxy.ProtocolGCR, proxy.HandleStream)
- return grandcentral.NewClient(proxy, node.PeerHost, node.Peerstore, node.Identity)
+ node.PeerHost.SetStreamHandler(gcproxy.ProtocolSNR, proxy.HandleStream)
+ return supernode.NewClient(proxy, node.PeerHost, node.Peerstore, node.Identity)
}
}
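
For symmetry with the server wiring in the first file, here is a minimal sketch of bringing a node up as a supernode routing client with the renamed option. It reuses only calls that appear in this diff (core.NewIPFSNode, core.OnlineWithOptions, core.DefaultHostOption, corerouting.SupernodeClient); ctx, repo and bootstrapPeers are assumed to be supplied by the caller and are not part of this commit.

// Sketch only, not code from this commit. ctx is a context.Context, repo an
// opened repo (e.g. via fsrepo), and bootstrapPeers a []peer.PeerInfo listing
// the supernode routing servers.
node, err := core.NewIPFSNode(ctx,
	core.OnlineWithOptions(
		repo,
		corerouting.SupernodeClient(bootstrapPeers...),
		core.DefaultHostOption),
)
if err != nil {
	return err
}
_ = node // routing requests from this node are proxied to the configured servers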

View File

@@ -1,4 +1,4 @@
- package grandcentral
+ package supernode
import (
"bytes"
@@ -10,13 +10,13 @@ import (
peer "github.com/jbenet/go-ipfs/p2p/peer"
routing "github.com/jbenet/go-ipfs/routing"
pb "github.com/jbenet/go-ipfs/routing/dht/pb"
proxy "github.com/jbenet/go-ipfs/routing/grandcentral/proxy"
proxy "github.com/jbenet/go-ipfs/routing/supernode/proxy"
eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog"
u "github.com/jbenet/go-ipfs/util"
errors "github.com/jbenet/go-ipfs/util/debugerror"
)
var log = eventlog.Logger("grandcentral")
var log = eventlog.Logger("supernode")
type Client struct {
peerhost host.Host
@@ -128,7 +128,7 @@ func makeRecord(ps peer.Peerstore, p peer.ID, k u.Key, v []byte) (*pb.Record, er
func (c *Client) Ping(ctx context.Context, id peer.ID) (time.Duration, error) {
defer log.EventBegin(ctx, "ping", id).Done()
- return time.Nanosecond, errors.New("grandcentral routing does not support the ping method")
+ return time.Nanosecond, errors.New("supernode routing does not support the ping method")
}
var _ routing.IpfsRouting = &Client{}

View File

@@ -13,9 +13,9 @@ import (
errors "github.com/jbenet/go-ipfs/util/debugerror"
)
- const ProtocolGCR = "/ipfs/grandcentral"
+ const ProtocolSNR = "/ipfs/supernoderouting"
- var log = eventlog.Logger("grandcentral/proxy")
+ var log = eventlog.Logger("supernode/proxy")
type Proxy interface {
HandleStream(inet.Stream)
@@ -34,7 +34,7 @@ func Standard(h host.Host, remotes []peer.ID) Proxy {
func (p *standard) HandleStream(s inet.Stream) {
// TODO(brian): Should clients be able to satisfy requests?
log.Error("grandcentral client received (dropped) a routing message from", s.Conn().RemotePeer())
log.Error("supernode client received (dropped) a routing message from", s.Conn().RemotePeer())
s.Close()
}
@@ -64,7 +64,7 @@ func (px *standard) sendMessage(ctx context.Context, m *dhtpb.Message, remote pe
if err = px.Host.Connect(ctx, peer.PeerInfo{ID: remote}); err != nil {
return err
}
- s, err := px.Host.NewStream(ProtocolGCR, remote)
+ s, err := px.Host.NewStream(ProtocolSNR, remote)
if err != nil {
return err
}
@@ -100,7 +100,7 @@ func (px *standard) sendRequest(ctx context.Context, m *dhtpb.Message, remote pe
e.SetError(err)
return nil, err
}
- s, err := px.Host.NewStream(ProtocolGCR, remote)
+ s, err := px.Host.NewStream(ProtocolSNR, remote)
if err != nil {
e.SetError(err)
return nil, err

View File

@@ -1,4 +1,4 @@
- package grandcentral
+ package supernode
import (
"fmt"
@@ -8,7 +8,7 @@ import (
datastore "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
peer "github.com/jbenet/go-ipfs/p2p/peer"
dhtpb "github.com/jbenet/go-ipfs/routing/dht/pb"
proxy "github.com/jbenet/go-ipfs/routing/grandcentral/proxy"
proxy "github.com/jbenet/go-ipfs/routing/supernode/proxy"
util "github.com/jbenet/go-ipfs/util"
errors "github.com/jbenet/go-ipfs/util/debugerror"
)
@@ -21,7 +21,7 @@ type Server struct {
*proxy.Loopback // so server can be injected into client
}
- // NewServer creates a new GrandCentral routing Server
+ // NewServer creates a new Supernode routing Server
func NewServer(ds datastore.ThreadSafeDatastore, ps peer.Peerstore, local peer.ID) (*Server, error) {
s := &Server{local, ds, ps, nil}
s.Loopback = &proxy.Loopback{
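
NewServer only needs a thread-safe record datastore, a peerstore, and the local peer ID, so it can also be constructed directly, outside the corerouting option. Below is a hedged sketch that assumes an in-memory map datastore wrapped by the go-datastore sync package (both packages are imported in the first file of this diff) satisfies datastore.ThreadSafeDatastore, and that node is an existing *core.IpfsNode; this is not code from the commit.

// Sketch only: a standalone supernode routing server backed by an in-memory
// record store (assumes syncds.MutexWrap yields a ThreadSafeDatastore).
recordSource := syncds.MutexWrap(datastore.NewMapDatastore())
server, err := supernode.NewServer(recordSource, node.Peerstore, node.Identity)
if err != nil {
	return err
}
// As in corerouting.SupernodeServer, the server would then be placed behind a
// proxy.Loopback and registered as the handler for gcproxy.ProtocolSNR streams.
_ = server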

View File

@@ -21,9 +21,9 @@ import (
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
- func TestGrandcentralBootstrappedAddCat(t *testing.T) {
- // create 8 grandcentral bootstrap nodes
- // create 2 grandcentral clients both bootstrapped to the bootstrap nodes
+ func TestSupernodeBootstrappedAddCat(t *testing.T) {
+ // create 8 supernode-routing bootstrap nodes
+ // create 2 supernode-routing clients both bootstrapped to the bootstrap nodes
// let the bootstrap nodes share a single datastore
// add a large file on one node then cat the file from the other
conf := testutil.LatencyConfig{
@@ -31,16 +31,16 @@ func TestGrandcentralBootstrappedAddCat(t *testing.T) {
RoutingLatency: 0,
BlockstoreLatency: 0,
}
- if err := RunGrandcentralBootstrappedAddCat(RandomBytes(100*unit.MB), conf); err != nil {
+ if err := RunSupernodeBootstrappedAddCat(RandomBytes(100*unit.MB), conf); err != nil {
t.Fatal(err)
}
}
- func RunGrandcentralBootstrappedAddCat(data []byte, conf testutil.LatencyConfig) error {
+ func RunSupernodeBootstrappedAddCat(data []byte, conf testutil.LatencyConfig) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- servers, clients, err := InitializeGrandCentralNetwork(ctx, 8, 2, conf)
+ servers, clients, err := InitializeSupernodeNetwork(ctx, 8, 2, conf)
if err != nil {
return err
}
@@ -73,7 +73,7 @@ func RunGrandcentralBootstrappedAddCat(data []byte, conf testutil.LatencyConfig)
return nil
}
- func InitializeGrandCentralNetwork(
+ func InitializeSupernodeNetwork(
ctx context.Context,
numServers, numClients int,
conf testutil.LatencyConfig) ([]*core.IpfsNode, []*core.IpfsNode, error) {
@@ -100,7 +100,7 @@ func InitializeGrandCentralNetwork(
for i := range iter.N(numServers) {
p := serverPeers[i]
bootstrap, err := core.NewIPFSNode(ctx, MocknetTestRepo(p, mn.Host(p), conf,
- corerouting.GrandCentralServer(routingDatastore)))
+ corerouting.SupernodeServer(routingDatastore)))
if err != nil {
return nil, nil, err
}
@@ -117,7 +117,7 @@ func InitializeGrandCentralNetwork(
for i := range iter.N(numClients) {
p := clientPeers[i]
n, err := core.NewIPFSNode(ctx, MocknetTestRepo(p, mn.Host(p), conf,
- corerouting.GrandCentralClient(bootstrapInfos...)))
+ corerouting.SupernodeClient(bootstrapInfos...)))
if err != nil {
return nil, nil, err
}
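
The bootstrapInfos passed to corerouting.SupernodeClient above are assembled from the server nodes in code elided by this hunk. Purely to illustrate the shape of that data, a hedged sketch follows; the field and method names (peer.PeerInfo's ID and Addrs, host.Host's Addrs) are assumptions drawn from the packages used elsewhere in this diff, not lines copied from the test.

// Sketch only: collect dialable PeerInfo records for the routing servers.
// s ranges over the server IpfsNodes created above (variable name assumed).
var bootstrapInfos []peer.PeerInfo
for _, s := range servers {
	bootstrapInfos = append(bootstrapInfos, peer.PeerInfo{
		ID:    s.Identity,
		Addrs: s.PeerHost.Addrs(),
	})
}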