Mirror of https://github.com/ipfs/kubo.git (synced 2025-09-10 14:34:24 +08:00)
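Every hunk in this diff serves one refactor: `peer.Peer` changes from a concrete struct (`*peer.Peer` with exported `ID`, `PrivKey`, `PubKey`, and `Addresses` fields) to an interface reached through accessor methods and constructors. As a reading aid, the sketch below reconstructs the interface shape implied by the call sites in the hunks that follow; it is inferred from usage alone (the `GetLatency` return type in particular is a guess, and the import paths are assumed), not copied from the peer package.

package peer

import (
    "time"

    ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
    ci "github.com/jbenet/go-ipfs/crypto"
    u "github.com/jbenet/go-ipfs/util"
)

// ID stays byte-backed: the hunks below compare IDs with bytes.Equal
// and render them with Pretty().
type ID []byte

// Peer is now an interface; every *peer.Peer -> peer.Peer change below
// swaps a direct field read for one of these accessors.
type Peer interface {
    ID() ID                    // was the ID field
    Key() u.Key                // used as a map key by peerstores and ledgers
    PrivKey() ci.PrivKey       // was the PrivKey field
    PubKey() ci.PubKey         // was the PubKey field
    Addresses() []ma.Multiaddr // was the Addresses field
    AddAddress(a ma.Multiaddr)
    NetAddress(network string) ma.Multiaddr // retained; see the Dial hunk below
    GetLatency() time.Duration              // return type assumed
}

// Constructors used throughout the diff (signatures inferred from call sites):
//   peer.WithID(id ID) Peer
//   peer.WithIDString(s string) Peer
//   peer.WithKeyPair(sk ci.PrivKey, pk ci.PubKey) (Peer, error)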
@@ -35,7 +35,7 @@ func Publish(n *core.IpfsNode, args []string, opts map[string]interface{}, out i
     }

     // later, n.Keychain.Get(name).PrivKey
-    k := n.Identity.PrivKey
+    k := n.Identity.PrivKey()

     pub := nsys.NewRoutingPublisher(n.Routing)
     err := pub.Publish(k, ref)
@@ -19,7 +19,7 @@ func Resolve(n *core.IpfsNode, args []string, opts map[string]interface{}, out i
         if n.Identity == nil {
             return errors.New("Identity not loaded!")
         }
-        name = n.Identity.ID.String()
+        name = n.Identity.ID().String()

     default:
         return fmt.Errorf("Publish expects 1 or 2 args; got %d.", len(args))
@@ -36,7 +36,7 @@ type IpfsNode struct {
     Config *config.Config

     // the local node's identity
-    Identity *peer.Peer
+    Identity peer.Peer

     // storage for other Peer instances
     Peerstore peer.Peerstore
@@ -177,7 +177,7 @@ func NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) {
     }, nil
 }

-func initIdentity(cfg *config.Config, peers peer.Peerstore, online bool) (*peer.Peer, error) {
+func initIdentity(cfg *config.Config, peers peer.Peerstore, online bool) (peer.Peer, error) {
     if cfg.Identity.PeerID == "" {
         return nil, errors.New("Identity was not set in config (was ipfs init run?)")
     }
core/mock.go (14 lines changed)
@@ -8,7 +8,7 @@ import (
     mdag "github.com/jbenet/go-ipfs/merkledag"
     nsys "github.com/jbenet/go-ipfs/namesys"
     path "github.com/jbenet/go-ipfs/path"
-    "github.com/jbenet/go-ipfs/peer"
+    peer "github.com/jbenet/go-ipfs/peer"
     mdht "github.com/jbenet/go-ipfs/routing/mock"
 )

@@ -16,20 +16,18 @@ import (
 func NewMockNode() (*IpfsNode, error) {
     nd := new(IpfsNode)

-    //Generate Identity
-    nd.Peerstore = peer.NewPeerstore()
-    var err error
-    nd.Identity, err = nd.Peerstore.Get(peer.ID("TESTING"))
+    // Generate Identity
+    sk, pk, err := ci.GenerateKeyPair(ci.RSA, 1024)
     if err != nil {
         return nil, err
     }

-    pk, sk, err := ci.GenerateKeyPair(ci.RSA, 1024)
+    nd.Identity, err = peer.WithKeyPair(sk, pk)
     if err != nil {
         return nil, err
     }
-    nd.Identity.PrivKey = pk
-    nd.Identity.PubKey = sk
+    nd.Peerstore = peer.NewPeerstore()
+    nd.Peerstore.Put(nd.Identity)

     // Temp Datastore
     dstore := ds.NewMapDatastore()
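The core/mock.go hunk above is the template for the test-file changes in the rest of the diff: struct literals become constructor calls, and field reads become method calls. A minimal before/after sketch follows; each half compiles only against its own version of the peer package, so the two cannot build side by side, and the function names are hypothetical, though the peer calls mirror lines that appear verbatim in the hunks above and below.

// Before: Peer was a concrete struct; tests built one by hand
// and callers read its fields directly.
func describePeerOld() string {
    p := &peer.Peer{ID: peer.ID("X")}
    return p.ID.Pretty()
}

// After: Peer is an interface obtained from a constructor;
// callers go through accessor methods.
func describePeerNew() string {
    p := peer.WithIDString("X")
    return p.ID().Pretty()
}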
@@ -53,7 +53,7 @@ func (s *SecurePipe) handshake() error {
     }

     log.Debug("handshake: %s <--> %s", s.local, s.remote)
-    myPubKey, err := s.local.PubKey.Bytes()
+    myPubKey, err := s.local.PubKey().Bytes()
     if err != nil {
         return err
     }
@@ -132,7 +132,7 @@ func (s *SecurePipe) handshake() error {
     exPacket := new(Exchange)

     exPacket.Epubkey = epubkey
-    exPacket.Signature, err = s.local.PrivKey.Sign(handshake.Bytes())
+    exPacket.Signature, err = s.local.PrivKey().Sign(handshake.Bytes())
     if err != nil {
         return err
     }
@@ -167,7 +167,7 @@ func (s *SecurePipe) handshake() error {
     theirHandshake.Write(exchangeResp.GetEpubkey())

     // u.POut("Remote Peer Identified as %s\n", s.remote)
-    ok, err := s.remote.PubKey.Verify(theirHandshake.Bytes(), exchangeResp.GetSignature())
+    ok, err := s.remote.PubKey().Verify(theirHandshake.Bytes(), exchangeResp.GetSignature())
     if err != nil {
         return err
     }
@@ -340,7 +340,7 @@ func selectBest(myPrefs, theirPrefs string) (string, error) {
 // getOrConstructPeer attempts to fetch a peer from a peerstore.
 // if succeeds, verify ID and PubKey match.
 // else, construct it.
-func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (*peer.Peer, error) {
+func getOrConstructPeer(peers peer.Peerstore, rpk ci.PubKey) (peer.Peer, error) {

     rid, err := peer.IDFromPubKey(rpk)
     if err != nil {
@@ -18,8 +18,8 @@ type SecurePipe struct {
     Duplex
     insecure Duplex

-    local  *peer.Peer
-    remote *peer.Peer
+    local  peer.Peer
+    remote peer.Peer
     peers  peer.Peerstore

     params params
@@ -33,7 +33,7 @@ type params struct {
 }

 // NewSecurePipe constructs a pipe with channels of a given buffer size.
-func NewSecurePipe(ctx context.Context, bufsize int, local *peer.Peer,
+func NewSecurePipe(ctx context.Context, bufsize int, local peer.Peer,
     peers peer.Peerstore, insecure Duplex) (*SecurePipe, error) {

     ctx, cancel := context.WithCancel(ctx)
@@ -60,12 +60,12 @@ func NewSecurePipe(ctx context.Context, bufsize int, local *peer.Peer,
 }

 // LocalPeer retrieves the local peer.
-func (s *SecurePipe) LocalPeer() *peer.Peer {
+func (s *SecurePipe) LocalPeer() peer.Peer {
     return s.local
 }

 // RemotePeer retrieves the local peer.
-func (s *SecurePipe) RemotePeer() *peer.Peer {
+func (s *SecurePipe) RemotePeer() peer.Peer {
     return s.remote
 }

@@ -26,14 +26,14 @@ const ResponseTimeout = time.Second * 10
 type Diagnostics struct {
     network net.Network
     sender  net.Sender
-    self    *peer.Peer
+    self    peer.Peer

     diagLock sync.Mutex
     diagMap  map[string]time.Time
     birth    time.Time
 }

-func NewDiagnostics(self *peer.Peer, inet net.Network, sender net.Sender) *Diagnostics {
+func NewDiagnostics(self peer.Peer, inet net.Network, sender net.Sender) *Diagnostics {
     return &Diagnostics{
         network: inet,
         sender:  sender,
@@ -67,20 +67,21 @@ func (di *DiagInfo) Marshal() []byte {
     return b
 }

-func (d *Diagnostics) getPeers() []*peer.Peer {
+func (d *Diagnostics) getPeers() []peer.Peer {
     return d.network.GetPeerList()
 }

 func (d *Diagnostics) getDiagInfo() *DiagInfo {
     di := new(DiagInfo)
     di.CodeVersion = "github.com/jbenet/go-ipfs"
-    di.ID = d.self.ID.Pretty()
+    di.ID = d.self.ID().Pretty()
     di.LifeSpan = time.Since(d.birth)
     di.Keys = nil // Currently no way to query datastore
     di.BwIn, di.BwOut = d.network.GetBandwidthTotals()

     for _, p := range d.getPeers() {
-        di.Connections = append(di.Connections, connDiagInfo{p.GetLatency(), p.ID.Pretty()})
+        d := connDiagInfo{p.GetLatency(), p.ID().Pretty()}
+        di.Connections = append(di.Connections, d)
     }
     return di
 }
@@ -116,7 +117,7 @@ func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error)
     for _, p := range peers {
         log.Debug("Sending getDiagnostic to: %s", p)
         sends++
-        go func(p *peer.Peer) {
+        go func(p peer.Peer) {
             data, err := d.getDiagnosticFromPeer(ctx, p, pmes)
             if err != nil {
                 log.Error("GetDiagnostic error: %v", err)
@@ -155,7 +156,7 @@ func AppendDiagnostics(data []byte, cur []*DiagInfo) []*DiagInfo {
 }

 // TODO: this method no longer needed.
-func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p *peer.Peer, mes *Message) ([]byte, error) {
+func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.Peer, mes *Message) ([]byte, error) {
     rpmes, err := d.sendRequest(ctx, p, mes)
     if err != nil {
         return nil, err
@@ -169,7 +170,7 @@ func newMessage(diagID string) *Message {
     return pmes
 }

-func (d *Diagnostics) sendRequest(ctx context.Context, p *peer.Peer, pmes *Message) (*Message, error) {
+func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *Message) (*Message, error) {

     mes, err := msg.FromObject(p, pmes)
     if err != nil {
@@ -197,7 +198,7 @@ func (d *Diagnostics) sendRequest(ctx context.Context, p *peer.Peer, pmes *Messa
     return rpmes, nil
 }

-func (d *Diagnostics) handleDiagnostic(p *peer.Peer, pmes *Message) (*Message, error) {
+func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *Message) (*Message, error) {
     log.Debug("HandleDiagnostic from %s for id = %s", p, pmes.GetDiagID())
     resp := newMessage(pmes.GetDiagID())
     d.diagLock.Lock()
@@ -220,7 +221,7 @@ func (d *Diagnostics) handleDiagnostic(p *peer.Peer, pmes *Message) (*Message, e
     for _, p := range d.getPeers() {
         log.Debug("Sending diagnostic request to peer: %s", p)
         sendcount++
-        go func(p *peer.Peer) {
+        go func(p peer.Peer) {
             out, err := d.getDiagnosticFromPeer(ctx, p, pmes)
             if err != nil {
                 log.Error("getDiagnostic error: %v", err)
@@ -267,7 +268,7 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, mes msg.NetMessage) msg

     // Print out diagnostic
     log.Info("[peer: %s] Got message from [%s]\n",
-        d.self.ID.Pretty(), mPeer.ID.Pretty())
+        d.self.ID().Pretty(), mPeer.ID().Pretty())

     // dispatch handler.
     rpmes, err := d.handleDiagnostic(mPeer, pmes)
@@ -20,7 +20,7 @@ var log = u.Logger("bitswap")

 // NetMessageSession initializes a BitSwap session that communicates over the
 // provided NetMessage service
-func NetMessageSession(parent context.Context, p *peer.Peer,
+func NetMessageSession(parent context.Context, p peer.Peer,
     net inet.Network, srv inet.Service, directory bsnet.Routing,
     d ds.Datastore, nice bool) exchange.Interface {

@@ -83,7 +83,7 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error)
     message.AppendWanted(k)
     for peerToQuery := range peersToQuery {
         log.Debug("bitswap got peersToQuery: %s", peerToQuery)
-        go func(p *peer.Peer) {
+        go func(p peer.Peer) {

             log.Debug("bitswap dialing peer: %s", p)
             err := bs.sender.DialPeer(p)
@@ -131,8 +131,8 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk blocks.Block) error {
 }

 // TODO(brian): handle errors
-func (bs *bitswap) ReceiveMessage(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) (
-    *peer.Peer, bsmsg.BitSwapMessage) {
+func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) (
+    peer.Peer, bsmsg.BitSwapMessage) {
     log.Debug("ReceiveMessage from %v", p.Key())

     if p == nil {
@@ -181,7 +181,7 @@ func (bs *bitswap) ReceiveError(err error) {

 // send strives to ensure that accounting is always performed when a message is
 // sent
-func (bs *bitswap) send(ctx context.Context, p *peer.Peer, m bsmsg.BitSwapMessage) {
+func (bs *bitswap) send(ctx context.Context, p peer.Peer, m bsmsg.BitSwapMessage) {
     bs.sender.SendMessage(ctx, p, m)
     go bs.strategy.MessageSent(p, m)
 }
@@ -44,7 +44,7 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) {
     g := NewSessionGenerator(net, rs)

     block := blocks.NewBlock([]byte("block"))
-    rs.Announce(&peer.Peer{}, block.Key()) // but not on network
+    rs.Announce(peer.WithIDString("testing"), block.Key()) // but not on network

     solo := g.Next()

@@ -263,7 +263,7 @@ func (g *SessionGenerator) Instances(n int) []instance {
 }

 type instance struct {
-    peer       *peer.Peer
+    peer       peer.Peer
     exchange   exchange.Interface
     blockstore bstore.Blockstore
 }
@@ -274,7 +274,7 @@ type instance struct {
 // sessions. To safeguard, use the SessionGenerator to generate sessions. It's
 // just a much better idea.
 func session(net tn.Network, rs mock.RoutingServer, id peer.ID) instance {
-    p := &peer.Peer{ID: id}
+    p := peer.WithID(id)

     adapter := net.Adapter(p)
     htc := rs.Client(p)
@@ -19,7 +19,7 @@ type BitSwapMessage interface {

 type Exportable interface {
     ToProto() *PBMessage
-    ToNet(p *peer.Peer) (nm.NetMessage, error)
+    ToNet(p peer.Peer) (nm.NetMessage, error)
 }

 // message wraps a proto message for convenience
@@ -82,6 +82,6 @@ func (m *message) ToProto() *PBMessage {
     return pb
 }

-func (m *message) ToNet(p *peer.Peer) (nm.NetMessage, error) {
+func (m *message) ToNet(p peer.Peer) (nm.NetMessage, error) {
     return nm.FromObject(p, m.ToProto())
 }
@@ -88,7 +88,7 @@ func TestCopyProtoByValue(t *testing.T) {

 func TestToNetMethodSetsPeer(t *testing.T) {
     m := New()
-    p := &peer.Peer{ID: []byte("X")}
+    p := peer.WithIDString("X")
     netmsg, err := m.ToNet(p)
     if err != nil {
         t.Fatal(err)
@@ -106,7 +106,8 @@ func TestToNetFromNetPreservesWantList(t *testing.T) {
     original.AppendWanted(u.Key("T"))
     original.AppendWanted(u.Key("F"))

-    netmsg, err := original.ToNet(&peer.Peer{ID: []byte("X")})
+    p := peer.WithIDString("X")
+    netmsg, err := original.ToNet(p)
     if err != nil {
         t.Fatal(err)
     }
@@ -136,7 +137,7 @@ func TestToAndFromNetMessage(t *testing.T) {
     original.AppendBlock(*blocks.NewBlock([]byte("F")))
     original.AppendBlock(*blocks.NewBlock([]byte("M")))

-    p := &peer.Peer{ID: []byte("X")}
+    p := peer.WithIDString("X")
     netmsg, err := original.ToNet(p)
     if err != nil {
         t.Fatal(err)
@@ -12,18 +12,18 @@ import (
 type Adapter interface {

     // DialPeer ensures there is a connection to peer.
-    DialPeer(*peer.Peer) error
+    DialPeer(peer.Peer) error

     // SendMessage sends a BitSwap message to a peer.
     SendMessage(
         context.Context,
-        *peer.Peer,
+        peer.Peer,
         bsmsg.BitSwapMessage) error

     // SendRequest sends a BitSwap message to a peer and waits for a response.
     SendRequest(
         context.Context,
-        *peer.Peer,
+        peer.Peer,
         bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error)

     // SetDelegate registers the Reciver to handle messages received from the
@@ -33,8 +33,8 @@ type Adapter interface {

 type Receiver interface {
     ReceiveMessage(
-        ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) (
-        destination *peer.Peer, outgoing bsmsg.BitSwapMessage)
+        ctx context.Context, sender peer.Peer, incoming bsmsg.BitSwapMessage) (
+        destination peer.Peer, outgoing bsmsg.BitSwapMessage)

     ReceiveError(error)
 }
@@ -42,7 +42,7 @@ type Receiver interface {
 // TODO rename -> Router?
 type Routing interface {
     // FindProvidersAsync returns a channel of providers for the given key
-    FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer
+    FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer

     // Provide provides the key to the network
     Provide(context.Context, u.Key) error
@@ -60,13 +60,13 @@ func (adapter *impl) HandleMessage(
     return outgoing
 }

-func (adapter *impl) DialPeer(p *peer.Peer) error {
+func (adapter *impl) DialPeer(p peer.Peer) error {
     return adapter.net.DialPeer(p)
 }

 func (adapter *impl) SendMessage(
     ctx context.Context,
-    p *peer.Peer,
+    p peer.Peer,
     outgoing bsmsg.BitSwapMessage) error {

     nmsg, err := outgoing.ToNet(p)
@@ -78,7 +78,7 @@ func (adapter *impl) SendMessage(

 func (adapter *impl) SendRequest(
     ctx context.Context,
-    p *peer.Peer,
+    p peer.Peer,
     outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) {

     outgoingMsg, err := outgoing.ToNet(p)
@@ -8,25 +8,25 @@ import (

 type Strategy interface {
     // Returns a slice of Peers with whom the local node has active sessions
-    Peers() []*peer.Peer
+    Peers() []peer.Peer

     // BlockIsWantedByPeer returns true if peer wants the block given by this
     // key
-    BlockIsWantedByPeer(u.Key, *peer.Peer) bool
+    BlockIsWantedByPeer(u.Key, peer.Peer) bool

     // ShouldSendTo(Peer) decides whether to send data to this Peer
-    ShouldSendBlockToPeer(u.Key, *peer.Peer) bool
+    ShouldSendBlockToPeer(u.Key, peer.Peer) bool

     // Seed initializes the decider to a deterministic state
     Seed(int64)

     // MessageReceived records receipt of message for accounting purposes
-    MessageReceived(*peer.Peer, bsmsg.BitSwapMessage) error
+    MessageReceived(peer.Peer, bsmsg.BitSwapMessage) error

     // MessageSent records sending of message for accounting purposes
-    MessageSent(*peer.Peer, bsmsg.BitSwapMessage) error
+    MessageSent(peer.Peer, bsmsg.BitSwapMessage) error

-    NumBytesSentTo(*peer.Peer) uint64
+    NumBytesSentTo(peer.Peer) uint64

-    NumBytesReceivedFrom(*peer.Peer) uint64
+    NumBytesReceivedFrom(peer.Peer) uint64
 }
@@ -12,7 +12,7 @@ import (
 // access/lookups.
 type keySet map[u.Key]struct{}

-func newLedger(p *peer.Peer, strategy strategyFunc) *ledger {
+func newLedger(p peer.Peer, strategy strategyFunc) *ledger {
     return &ledger{
         wantList: keySet{},
         Strategy: strategy,
@@ -25,7 +25,7 @@ type ledger struct {
     lock sync.RWMutex

     // Partner is the remote Peer.
-    Partner *peer.Peer
+    Partner peer.Peer

     // Accounting tracks bytes sent and recieved.
     Accounting debtRatio
@@ -37,20 +37,20 @@ type ledgerMap map[peerKey]*ledger
 type peerKey u.Key

 // Peers returns a list of peers
-func (s *strategist) Peers() []*peer.Peer {
-    response := make([]*peer.Peer, 0)
+func (s *strategist) Peers() []peer.Peer {
+    response := make([]peer.Peer, 0)
     for _, ledger := range s.ledgerMap {
         response = append(response, ledger.Partner)
     }
     return response
 }

-func (s *strategist) BlockIsWantedByPeer(k u.Key, p *peer.Peer) bool {
+func (s *strategist) BlockIsWantedByPeer(k u.Key, p peer.Peer) bool {
     ledger := s.ledger(p)
     return ledger.WantListContains(k)
 }

-func (s *strategist) ShouldSendBlockToPeer(k u.Key, p *peer.Peer) bool {
+func (s *strategist) ShouldSendBlockToPeer(k u.Key, p peer.Peer) bool {
     ledger := s.ledger(p)
     return ledger.ShouldSend()
 }
@@ -59,7 +59,7 @@ func (s *strategist) Seed(int64) {
     // TODO
 }

-func (s *strategist) MessageReceived(p *peer.Peer, m bsmsg.BitSwapMessage) error {
+func (s *strategist) MessageReceived(p peer.Peer, m bsmsg.BitSwapMessage) error {
     // TODO find a more elegant way to handle this check
     if p == nil {
         return errors.New("Strategy received nil peer")
@@ -84,7 +84,7 @@ func (s *strategist) MessageReceived(p *peer.Peer, m bsmsg.BitSwapMessage) error
 // inconsistent. Would need to ensure that Sends and acknowledgement of the
 // send happen atomically

-func (s *strategist) MessageSent(p *peer.Peer, m bsmsg.BitSwapMessage) error {
+func (s *strategist) MessageSent(p peer.Peer, m bsmsg.BitSwapMessage) error {
     l := s.ledger(p)
     for _, block := range m.Blocks() {
         l.SentBytes(len(block.Data))
@@ -95,16 +95,16 @@ func (s *strategist) MessageSent(p *peer.Peer, m bsmsg.BitSwapMessage) error {
     return nil
 }

-func (s *strategist) NumBytesSentTo(p *peer.Peer) uint64 {
+func (s *strategist) NumBytesSentTo(p peer.Peer) uint64 {
     return s.ledger(p).Accounting.BytesSent
 }

-func (s *strategist) NumBytesReceivedFrom(p *peer.Peer) uint64 {
+func (s *strategist) NumBytesReceivedFrom(p peer.Peer) uint64 {
     return s.ledger(p).Accounting.BytesRecv
 }

 // ledger lazily instantiates a ledger
-func (s *strategist) ledger(p *peer.Peer) *ledger {
+func (s *strategist) ledger(p peer.Peer) *ledger {
     l, ok := s.ledgerMap[peerKey(p.Key())]
     if !ok {
         l = newLedger(p, s.strategyFunc)
@@ -10,13 +10,13 @@ import (
 )

 type peerAndStrategist struct {
-    *peer.Peer
+    peer.Peer
     Strategy
 }

 func newPeerAndStrategist(idStr string) peerAndStrategist {
     return peerAndStrategist{
-        Peer:     &peer.Peer{ID: peer.ID(idStr)},
+        Peer:     peer.WithIDString(idStr),
         Strategy: New(true),
     }
 }
@@ -93,7 +93,7 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {
     }
 }

-func peerIsPartner(p *peer.Peer, s Strategy) bool {
+func peerIsPartner(p peer.Peer, s Strategy) bool {
     for _, partner := range s.Peers() {
         if partner.Key() == p.Key() {
             return true
@@ -13,20 +13,20 @@ import (
 )

 type Network interface {
-    Adapter(*peer.Peer) bsnet.Adapter
+    Adapter(peer.Peer) bsnet.Adapter

-    HasPeer(*peer.Peer) bool
+    HasPeer(peer.Peer) bool

     SendMessage(
         ctx context.Context,
-        from *peer.Peer,
-        to *peer.Peer,
+        from peer.Peer,
+        to peer.Peer,
         message bsmsg.BitSwapMessage) error

     SendRequest(
         ctx context.Context,
-        from *peer.Peer,
-        to *peer.Peer,
+        from peer.Peer,
+        to peer.Peer,
         message bsmsg.BitSwapMessage) (
         incoming bsmsg.BitSwapMessage, err error)
 }
@@ -43,7 +43,7 @@ type network struct {
     clients map[util.Key]bsnet.Receiver
 }

-func (n *network) Adapter(p *peer.Peer) bsnet.Adapter {
+func (n *network) Adapter(p peer.Peer) bsnet.Adapter {
     client := &networkClient{
         local:   p,
         network: n,
@@ -52,7 +52,7 @@ func (n *network) Adapter(p *peer.Peer) bsnet.Adapter {
     return client
 }

-func (n *network) HasPeer(p *peer.Peer) bool {
+func (n *network) HasPeer(p peer.Peer) bool {
     _, found := n.clients[p.Key()]
     return found
 }
@@ -61,8 +61,8 @@ func (n *network) HasPeer(p *peer.Peer) bool {
 // TODO what does the network layer do with errors received from services?
 func (n *network) SendMessage(
     ctx context.Context,
-    from *peer.Peer,
-    to *peer.Peer,
+    from peer.Peer,
+    to peer.Peer,
     message bsmsg.BitSwapMessage) error {

     receiver, ok := n.clients[to.Key()]
@@ -79,7 +79,7 @@ func (n *network) SendMessage(
 }

 func (n *network) deliver(
-    r bsnet.Receiver, from *peer.Peer, message bsmsg.BitSwapMessage) error {
+    r bsnet.Receiver, from peer.Peer, message bsmsg.BitSwapMessage) error {
     if message == nil || from == nil {
         return errors.New("Invalid input")
     }
@@ -107,8 +107,8 @@ var NoResponse = errors.New("No response received from the receiver")
 // TODO
 func (n *network) SendRequest(
     ctx context.Context,
-    from *peer.Peer,
-    to *peer.Peer,
+    from peer.Peer,
+    to peer.Peer,
     message bsmsg.BitSwapMessage) (
     incoming bsmsg.BitSwapMessage, err error) {

@@ -130,7 +130,7 @@ func (n *network) SendRequest(
     }

     // TODO test when receiver doesn't immediately respond to the initiator of the request
-    if !bytes.Equal(nextPeer.ID, from.ID) {
+    if !bytes.Equal(nextPeer.ID(), from.ID()) {
         go func() {
             nextReceiver, ok := n.clients[nextPeer.Key()]
             if !ok {
@@ -144,26 +144,26 @@ func (n *network) SendRequest(
 }

 type networkClient struct {
-    local *peer.Peer
+    local peer.Peer
     bsnet.Receiver
     network Network
 }

 func (nc *networkClient) SendMessage(
     ctx context.Context,
-    to *peer.Peer,
+    to peer.Peer,
     message bsmsg.BitSwapMessage) error {
     return nc.network.SendMessage(ctx, nc.local, to, message)
 }

 func (nc *networkClient) SendRequest(
     ctx context.Context,
-    to *peer.Peer,
+    to peer.Peer,
     message bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) {
     return nc.network.SendRequest(ctx, nc.local, to, message)
 }

-func (nc *networkClient) DialPeer(p *peer.Peer) error {
+func (nc *networkClient) DialPeer(p peer.Peer) error {
     // no need to do anything because dialing isn't a thing in this test net.
     if !nc.network.HasPeer(p) {
         return fmt.Errorf("Peer not in network: %s", p)
@@ -18,15 +18,15 @@ func TestSendRequestToCooperativePeer(t *testing.T) {

     t.Log("Get two network adapters")

-    initiator := net.Adapter(&peer.Peer{ID: []byte("initiator")})
-    recipient := net.Adapter(&peer.Peer{ID: idOfRecipient})
+    initiator := net.Adapter(peer.WithIDString("initiator"))
+    recipient := net.Adapter(peer.WithID(idOfRecipient))

     expectedStr := "response from recipient"
     recipient.SetDelegate(lambda(func(
         ctx context.Context,
-        from *peer.Peer,
+        from peer.Peer,
         incoming bsmsg.BitSwapMessage) (
-        *peer.Peer, bsmsg.BitSwapMessage) {
+        peer.Peer, bsmsg.BitSwapMessage) {

         t.Log("Recipient received a message from the network")

@@ -43,7 +43,7 @@ func TestSendRequestToCooperativePeer(t *testing.T) {
     message := bsmsg.New()
     message.AppendBlock(*blocks.NewBlock([]byte("data")))
     response, err := initiator.SendRequest(
-        context.Background(), &peer.Peer{ID: idOfRecipient}, message)
+        context.Background(), peer.WithID(idOfRecipient), message)
     if err != nil {
         t.Fatal(err)
     }
@@ -61,8 +61,8 @@ func TestSendRequestToCooperativePeer(t *testing.T) {
 func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
     net := VirtualNetwork()
     idOfResponder := []byte("responder")
-    waiter := net.Adapter(&peer.Peer{ID: []byte("waiter")})
-    responder := net.Adapter(&peer.Peer{ID: idOfResponder})
+    waiter := net.Adapter(peer.WithIDString("waiter"))
+    responder := net.Adapter(peer.WithID(idOfResponder))

     var wg sync.WaitGroup

@@ -72,9 +72,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {

     responder.SetDelegate(lambda(func(
         ctx context.Context,
-        fromWaiter *peer.Peer,
+        fromWaiter peer.Peer,
         msgFromWaiter bsmsg.BitSwapMessage) (
-        *peer.Peer, bsmsg.BitSwapMessage) {
+        peer.Peer, bsmsg.BitSwapMessage) {

         msgToWaiter := bsmsg.New()
         msgToWaiter.AppendBlock(*blocks.NewBlock([]byte(expectedStr)))
@@ -84,9 +84,9 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {

     waiter.SetDelegate(lambda(func(
         ctx context.Context,
-        fromResponder *peer.Peer,
+        fromResponder peer.Peer,
         msgFromResponder bsmsg.BitSwapMessage) (
-        *peer.Peer, bsmsg.BitSwapMessage) {
+        peer.Peer, bsmsg.BitSwapMessage) {

         // TODO assert that this came from the correct peer and that the message contents are as expected
         ok := false
@@ -107,7 +107,7 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
     messageSentAsync := bsmsg.New()
     messageSentAsync.AppendBlock(*blocks.NewBlock([]byte("data")))
     errSending := waiter.SendMessage(
-        context.Background(), &peer.Peer{ID: idOfResponder}, messageSentAsync)
+        context.Background(), peer.WithID(idOfResponder), messageSentAsync)
     if errSending != nil {
         t.Fatal(errSending)
     }
@@ -115,8 +115,8 @@ func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
     wg.Wait() // until waiter delegate function is executed
 }

-type receiverFunc func(ctx context.Context, p *peer.Peer,
-    incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage)
+type receiverFunc func(ctx context.Context, p peer.Peer,
+    incoming bsmsg.BitSwapMessage) (peer.Peer, bsmsg.BitSwapMessage)

 // lambda returns a Receiver instance given a receiver function
 func lambda(f receiverFunc) bsnet.Receiver {
@@ -126,13 +126,13 @@ func lambda(f receiverFunc) bsnet.Receiver {
 }

 type lambdaImpl struct {
-    f func(ctx context.Context, p *peer.Peer, incoming bsmsg.BitSwapMessage) (
-        *peer.Peer, bsmsg.BitSwapMessage)
+    f func(ctx context.Context, p peer.Peer, incoming bsmsg.BitSwapMessage) (
+        peer.Peer, bsmsg.BitSwapMessage)
 }

 func (lam *lambdaImpl) ReceiveMessage(ctx context.Context,
-    p *peer.Peer, incoming bsmsg.BitSwapMessage) (
-    *peer.Peer, bsmsg.BitSwapMessage) {
+    p peer.Peer, incoming bsmsg.BitSwapMessage) (
+    peer.Peer, bsmsg.BitSwapMessage) {
     return lam.f(ctx, p, incoming)
 }

@@ -209,7 +209,7 @@ func TestFastRepublish(t *testing.T) {

     node, mnt := setupIpnsTest(t, nil)

-    h, err := node.Identity.PrivKey.GetPublic().Hash()
+    h, err := node.Identity.PrivKey().GetPublic().Hash()
     if err != nil {
         t.Fatal(err)
     }
@@ -35,7 +35,7 @@ type FileSystem struct {

 // NewFileSystem constructs new fs using given core.IpfsNode instance.
 func NewIpns(ipfs *core.IpfsNode, ipfspath string) (*FileSystem, error) {
-    root, err := CreateRoot(ipfs, []ci.PrivKey{ipfs.Identity.PrivKey}, ipfspath)
+    root, err := CreateRoot(ipfs, []ci.PrivKey{ipfs.Identity.PrivKey()}, ipfspath)
     if err != nil {
         return nil, err
     }
@@ -11,9 +11,7 @@ import (
 )

 func TestRoutingResolve(t *testing.T) {
-    local := &peer.Peer{
-        ID: []byte("testID"),
-    }
+    local := peer.WithIDString("testID")
     lds := ds.NewMapDatastore()
     d := mock.NewMockRouter(local, lds)

@@ -42,8 +42,8 @@ func newMsgioPipe(size int) *msgioPipe {

 // singleConn represents a single connection to another Peer (IPFS Node).
 type singleConn struct {
-    local  *peer.Peer
-    remote *peer.Peer
+    local  peer.Peer
+    remote peer.Peer
     maconn manet.Conn
     msgio  *msgioPipe

@@ -51,7 +51,7 @@ type singleConn struct {
 }

 // newConn constructs a new connection
-func newSingleConn(ctx context.Context, local, remote *peer.Peer,
+func newSingleConn(ctx context.Context, local, remote peer.Peer,
     maconn manet.Conn) (Conn, error) {

     conn := &singleConn{
@@ -117,12 +117,12 @@ func (c *singleConn) RemoteMultiaddr() ma.Multiaddr {
 }

 // LocalPeer is the Peer on this side
-func (c *singleConn) LocalPeer() *peer.Peer {
+func (c *singleConn) LocalPeer() peer.Peer {
     return c.local
 }

 // RemotePeer is the Peer on the remote side
-func (c *singleConn) RemotePeer() *peer.Peer {
+func (c *singleConn) RemotePeer() peer.Peer {
     return c.remote
 }

@@ -12,7 +12,7 @@ import (

 // Dial connects to a particular peer, over a given network
 // Example: d.Dial(ctx, "udp", peer)
-func (d *Dialer) Dial(ctx context.Context, network string, remote *peer.Peer) (Conn, error) {
+func (d *Dialer) Dial(ctx context.Context, network string, remote peer.Peer) (Conn, error) {
     laddr := d.LocalPeer.NetAddress(network)
     if laddr == nil {
         return nil, fmt.Errorf("No local address for network %s", network)
@@ -10,7 +10,7 @@ import (
     ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
 )

-func setupPeer(addr string) (*peer.Peer, error) {
+func setupPeer(addr string) (peer.Peer, error) {
     tcp, err := ma.NewMultiaddr(addr)
     if err != nil {
         return nil, err
@@ -21,14 +21,10 @@ func setupPeer(addr string) (*peer.Peer, error) {
         return nil, err
     }

-    id, err := peer.IDFromPubKey(pk)
+    p, err := peer.WithKeyPair(sk, pk)
     if err != nil {
         return nil, err
     }
-
-    p := &peer.Peer{ID: id}
-    p.PrivKey = sk
-    p.PubKey = pk
     p.AddAddress(tcp)
     return p, nil
 }
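The setupPeer hunk above shows what the new constructor buys: `peer.WithKeyPair` folds the old three-step construction (derive the ID from the public key, build the struct, assign the key fields) into one call. A sketch of what it plausibly does internally, reusing the `IDFromPubKey` helper that still appears elsewhere in this diff; `peerImpl` and its fields are hypothetical stand-ins, not the package's actual code:

func WithKeyPair(sk ci.PrivKey, pk ci.PubKey) (Peer, error) {
    id, err := IDFromPubKey(pk) // same helper the old call site used
    if err != nil {
        return nil, err
    }
    // peerImpl is a stand-in for whatever unexported type backs the interface.
    return &peerImpl{id: id, privKey: sk, pubKey: pk}, nil
}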
@@ -23,13 +23,13 @@ type Conn interface {
     LocalMultiaddr() ma.Multiaddr

     // LocalPeer is the Peer on this side
-    LocalPeer() *peer.Peer
+    LocalPeer() peer.Peer

     // RemoteMultiaddr is the Multiaddr on the remote side
     RemoteMultiaddr() ma.Multiaddr

     // RemotePeer is the Peer on the remote side
-    RemotePeer() *peer.Peer
+    RemotePeer() peer.Peer

     // In returns a readable message channel
     In() <-chan []byte
@@ -47,7 +47,7 @@ type Conn interface {
 type Dialer struct {

     // LocalPeer is the identity of the local Peer.
-    LocalPeer *peer.Peer
+    LocalPeer peer.Peer

     // Peerstore is the set of peers we know about locally. The Dialer needs it
     // because when an incoming connection is identified, we should reuse the
@@ -65,7 +65,7 @@ type Listener interface {
     Multiaddr() ma.Multiaddr

     // LocalPeer is the identity of the local Peer.
-    LocalPeer() *peer.Peer
+    LocalPeer() peer.Peer

     // Peerstore is the set of peers we know about locally. The Listener needs it
     // because when an incoming connection is identified, we should reuse the
@@ -23,7 +23,7 @@ type listener struct {
     maddr ma.Multiaddr

     // LocalPeer is the identity of the local Peer.
-    local *peer.Peer
+    local peer.Peer

     // Peerstore is the set of peers we know about locally
     peers peer.Peerstore
@@ -105,7 +105,7 @@ func (l *listener) Multiaddr() ma.Multiaddr {
 }

 // LocalPeer is the identity of the local Peer.
-func (l *listener) LocalPeer() *peer.Peer {
+func (l *listener) LocalPeer() peer.Peer {
     return l.local
 }

@@ -117,7 +117,7 @@ func (l *listener) Peerstore() peer.Peerstore {
 }

 // Listen listens on the particular multiaddr, with given peer and peerstore.
-func Listen(ctx context.Context, addr ma.Multiaddr, local *peer.Peer, peers peer.Peerstore) (Listener, error) {
+func Listen(ctx context.Context, addr ma.Multiaddr, local peer.Peer, peers peer.Peerstore) (Listener, error) {

     ml, err := manet.Listen(addr)
     if err != nil {
@@ -27,8 +27,8 @@ type MultiConn struct {
     // this string is: /addr1/peer1/addr2/peer2 (peers ordered lexicographically)
     conns map[string]Conn

-    local  *peer.Peer
-    remote *peer.Peer
+    local  peer.Peer
+    remote peer.Peer

     // fan-in/fan-out
     duplex Duplex
@@ -39,7 +39,7 @@ type MultiConn struct {
 }

 // NewMultiConn constructs a new connection
-func NewMultiConn(ctx context.Context, local, remote *peer.Peer, conns []Conn) (*MultiConn, error) {
+func NewMultiConn(ctx context.Context, local, remote peer.Peer, conns []Conn) (*MultiConn, error) {

     c := &MultiConn{
         local:  local,
@@ -72,6 +72,10 @@ func (c *MultiConn) Add(conns ...Conn) {
             log.Error("%s", c2)
             c.Unlock() // ok to unlock (to log). panicing.
             log.Error("%s", c)
+            log.Error("c.LocalPeer: %s %#v", c.LocalPeer(), c.LocalPeer())
+            log.Error("c2.LocalPeer: %s %#v", c2.LocalPeer(), c2.LocalPeer())
+            log.Error("c.RemotePeer: %s %#v", c.RemotePeer(), c.RemotePeer())
+            log.Error("c2.RemotePeer: %s %#v", c2.RemotePeer(), c2.RemotePeer())
             c.Lock() // gotta relock to avoid lock panic from deferring.
             panic("connection addresses mismatch")
         }
@@ -269,12 +273,12 @@ func (c *MultiConn) RemoteMultiaddr() ma.Multiaddr {
 }

 // LocalPeer is the Peer on this side
-func (c *MultiConn) LocalPeer() *peer.Peer {
+func (c *MultiConn) LocalPeer() peer.Peer {
     return c.local
 }

 // RemotePeer is the Peer on the remote side
-func (c *MultiConn) RemotePeer() *peer.Peer {
+func (c *MultiConn) RemotePeer() peer.Peer {
     return c.remote
 }

@@ -97,7 +97,7 @@ func setupMultiConns(t *testing.T, ctx context.Context) (a, b *MultiConn) {
     p2ps := peer.NewPeerstore()

     // listeners
-    listen := func(addr ma.Multiaddr, p *peer.Peer, ps peer.Peerstore) Listener {
+    listen := func(addr ma.Multiaddr, p peer.Peer, ps peer.Peerstore) Listener {
         l, err := Listen(ctx, addr, p, ps)
         if err != nil {
             t.Fatal(err)
@@ -106,14 +106,14 @@ func setupMultiConns(t *testing.T, ctx context.Context) (a, b *MultiConn) {
     }

     log.Info("Setting up listeners")
-    p1l := listen(p1.Addresses[0], p1, p1ps)
-    p2l := listen(p2.Addresses[0], p2, p2ps)
+    p1l := listen(p1.Addresses()[0], p1, p1ps)
+    p2l := listen(p2.Addresses()[0], p2, p2ps)

     // dialers
     p1d := &Dialer{Peerstore: p1ps, LocalPeer: p1}
     p2d := &Dialer{Peerstore: p2ps, LocalPeer: p2}

-    dial := func(d *Dialer, dst *peer.Peer) <-chan Conn {
+    dial := func(d *Dialer, dst peer.Peer) <-chan Conn {
         cc := make(chan Conn)
         go func() {
             c, err := d.Dial(ctx, "tcp", dst)
@@ -79,6 +79,8 @@ func (c *secureConn) secureHandshake(peers peer.Peerstore) error {
         // perhaps return an error. TBD.

         log.Error("secureConn peer mismatch. %v != %v", insecureSC.remote, c.secure.RemotePeer())
+        log.Error("insecureSC.remote: %s %#v", insecureSC.remote, insecureSC.remote)
+        log.Error("c.secure.LocalPeer: %s %#v", c.secure.RemotePeer(), c.secure.RemotePeer())
         panic("secureConn peer mismatch. consructed incorrectly?")
     }

@@ -114,12 +116,12 @@ func (c *secureConn) RemoteMultiaddr() ma.Multiaddr {
 }

 // LocalPeer is the Peer on this side
-func (c *secureConn) LocalPeer() *peer.Peer {
+func (c *secureConn) LocalPeer() peer.Peer {
     return c.insecure.LocalPeer()
 }

 // RemotePeer is the Peer on the remote side
-func (c *secureConn) RemotePeer() *peer.Peer {
+func (c *secureConn) RemotePeer() peer.Peer {
     return c.insecure.RemotePeer()
 }

@@ -15,19 +15,19 @@ type Network interface {
     // TODO: for now, only listen on addrs in local peer when initializing.

     // DialPeer attempts to establish a connection to a given peer
-    DialPeer(*peer.Peer) error
+    DialPeer(peer.Peer) error

     // ClosePeer connection to peer
-    ClosePeer(*peer.Peer) error
+    ClosePeer(peer.Peer) error

     // IsConnected returns whether a connection to given peer exists.
-    IsConnected(*peer.Peer) (bool, error)
+    IsConnected(peer.Peer) (bool, error)

     // GetProtocols returns the protocols registered in the network.
     GetProtocols() *mux.ProtocolMap

     // GetPeerList returns the list of peers currently connected in this network.
-    GetPeerList() []*peer.Peer
+    GetPeerList() []peer.Peer

     // GetBandwidthTotals returns the total number of bytes passed through
     // the network since it was instantiated
@ -8,12 +8,12 @@ import (
|
|||||||
|
|
||||||
// NetMessage is the interface for the message
|
// NetMessage is the interface for the message
|
||||||
type NetMessage interface {
|
type NetMessage interface {
|
||||||
Peer() *peer.Peer
|
Peer() peer.Peer
|
||||||
Data() []byte
|
Data() []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
// New is the interface for constructing a new message.
|
// New is the interface for constructing a new message.
|
||||||
func New(p *peer.Peer, data []byte) NetMessage {
|
func New(p peer.Peer, data []byte) NetMessage {
|
||||||
return &message{peer: p, data: data}
|
return &message{peer: p, data: data}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -21,13 +21,13 @@ func New(p *peer.Peer, data []byte) NetMessage {
|
|||||||
// particular Peer.
|
// particular Peer.
|
||||||
type message struct {
|
type message struct {
|
||||||
// To or from, depending on direction.
|
// To or from, depending on direction.
|
||||||
peer *peer.Peer
|
peer peer.Peer
|
||||||
|
|
||||||
// Opaque data
|
// Opaque data
|
||||||
data []byte
|
data []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *message) Peer() *peer.Peer {
|
func (m *message) Peer() peer.Peer {
|
||||||
return m.peer
|
return m.peer
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -36,7 +36,7 @@ func (m *message) Data() []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FromObject creates a message from a protobuf-marshallable message.
|
// FromObject creates a message from a protobuf-marshallable message.
|
||||||
func FromObject(p *peer.Peer, data proto.Message) (NetMessage, error) {
|
func FromObject(p peer.Peer, data proto.Message) (NetMessage, error) {
|
||||||
bytes, err := proto.Marshal(data)
|
bytes, err := proto.Marshal(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
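Since Peer() is now an accessor on the NetMessage interface, replying to a message composes directly with msg.New — the same pattern the ReverseHandler test later in this diff uses. A short sketch (echoMessage is illustrative, not part of the diff):

```go
// echoMessage rebuilds a NetMessage addressed back to its sender.
// Illustrative only; msg.New and m.Peer() are as defined in the hunks above.
func echoMessage(m msg.NetMessage) msg.NetMessage {
	return msg.New(m.Peer(), m.Data())
}
```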
@ -21,14 +21,14 @@ func (t *TestProtocol) GetPipe() *msg.Pipe {
 	return t.Pipe
 }
 
-func newPeer(t *testing.T, id string) *peer.Peer {
+func newPeer(t *testing.T, id string) peer.Peer {
 	mh, err := mh.FromHexString(id)
 	if err != nil {
 		t.Error(err)
 		return nil
 	}
 
-	return &peer.Peer{ID: peer.ID(mh)}
+	return peer.WithID(peer.ID(mh))
 }
 
 func testMsg(t *testing.T, m msg.NetMessage, data []byte) {
net/net.go (16 changes)
@ -15,7 +15,7 @@ import (
 type IpfsNetwork struct {
 
 	// local peer
-	local *peer.Peer
+	local peer.Peer
 
 	// protocol multiplexing
 	muxer *mux.Muxer
@ -29,7 +29,7 @@ type IpfsNetwork struct {
 }
 
 // NewIpfsNetwork is the structure that implements the network interface
-func NewIpfsNetwork(ctx context.Context, local *peer.Peer,
+func NewIpfsNetwork(ctx context.Context, local peer.Peer,
 	peers peer.Peerstore, pmap *mux.ProtocolMap) (*IpfsNetwork, error) {
 
 	ctx, cancel := context.WithCancel(ctx)
@ -63,19 +63,19 @@ func NewIpfsNetwork(ctx context.Context, local *peer.Peer,
 // func (n *IpfsNetwork) Listen(*ma.Muliaddr) error {}
 
 // DialPeer attempts to establish a connection to a given peer
-func (n *IpfsNetwork) DialPeer(p *peer.Peer) error {
+func (n *IpfsNetwork) DialPeer(p peer.Peer) error {
 	_, err := n.swarm.Dial(p)
 	return err
 }
 
 // ClosePeer connection to peer
-func (n *IpfsNetwork) ClosePeer(p *peer.Peer) error {
+func (n *IpfsNetwork) ClosePeer(p peer.Peer) error {
 	return n.swarm.CloseConnection(p)
 }
 
 // IsConnected returns whether a connection to given peer exists.
-func (n *IpfsNetwork) IsConnected(p *peer.Peer) (bool, error) {
-	return n.swarm.GetConnection(p.ID) != nil, nil
+func (n *IpfsNetwork) IsConnected(p peer.Peer) (bool, error) {
+	return n.swarm.GetConnection(p.ID()) != nil, nil
 }
 
 // GetProtocols returns the protocols registered in the network.
@ -108,10 +108,12 @@ func (n *IpfsNetwork) Close() error {
 	return nil
 }
 
-func (n *IpfsNetwork) GetPeerList() []*peer.Peer {
+// GetPeerList returns the networks list of connected peers
+func (n *IpfsNetwork) GetPeerList() []peer.Peer {
 	return n.swarm.GetPeerList()
 }
 
+// GetBandwidthTotals returns the total amount of bandwidth transferred
 func (n *IpfsNetwork) GetBandwidthTotals() (in uint64, out uint64) {
 	return n.muxer.GetBandwidthTotals()
 }
@ -133,7 +133,7 @@ func (s *service) SendMessage(ctx context.Context, m msg.NetMessage) error {
 func (s *service) SendRequest(ctx context.Context, m msg.NetMessage) (msg.NetMessage, error) {
 
 	// create a request
-	r, err := NewRequest(m.Peer().ID)
+	r, err := NewRequest(m.Peer().ID())
 	if err != nil {
 		return nil, err
 	}
@ -227,7 +227,7 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
 		log.Error("RequestID should identify a response here.")
 	}
 
-	key := RequestKey(m.Peer().ID, RequestID(rid))
+	key := RequestKey(m.Peer().ID(), RequestID(rid))
 	s.RequestsLock.RLock()
 	r, found := s.Requests[key]
 	s.RequestsLock.RUnlock()
@ -25,14 +25,14 @@ func (t *ReverseHandler) HandleMessage(ctx context.Context, m msg.NetMessage) ms
 	return msg.New(m.Peer(), d)
 }
 
-func newPeer(t *testing.T, id string) *peer.Peer {
+func newPeer(t *testing.T, id string) peer.Peer {
 	mh, err := mh.FromHexString(id)
 	if err != nil {
 		t.Error(err)
 		return nil
 	}
 
-	return &peer.Peer{ID: peer.ID(mh)}
+	return peer.WithID(peer.ID(mh))
 }
 
 func TestServiceHandler(t *testing.T) {
@ -14,11 +14,11 @@ import (
 func (s *Swarm) listen() error {
 	hasErr := false
 	retErr := &ListenErr{
-		Errors: make([]error, len(s.local.Addresses)),
+		Errors: make([]error, len(s.local.Addresses())),
 	}
 
 	// listen on every address
-	for i, addr := range s.local.Addresses {
+	for i, addr := range s.local.Addresses() {
 		err := s.connListen(addr)
 		if err != nil {
 			hasErr = true
@ -24,10 +24,10 @@ func TestSimultOpen(t *testing.T) {
 	// connect everyone
 	{
 		var wg sync.WaitGroup
-		connect := func(s *Swarm, dst *peer.Peer) {
+		connect := func(s *Swarm, dst peer.Peer) {
 			// copy for other peer
-			cp := &peer.Peer{ID: dst.ID}
-			cp.AddAddress(dst.Addresses[0])
+			cp := peer.WithID(dst.ID())
+			cp.AddAddress(dst.Addresses()[0])
 
 			if _, err := s.Dial(cp); err != nil {
 				t.Fatal("error swarm dialing to peer", err)
@ -45,7 +45,7 @@ func (e *ListenErr) Error() string {
 type Swarm struct {
 
 	// local is the peer this swarm represents
-	local *peer.Peer
+	local peer.Peer
 
 	// peers is a collection of peers for swarm to use
 	peers peer.Peerstore
@ -69,7 +69,7 @@ type Swarm struct {
 }
 
 // NewSwarm constructs a Swarm, with a Chan.
-func NewSwarm(ctx context.Context, local *peer.Peer, ps peer.Peerstore) (*Swarm, error) {
+func NewSwarm(ctx context.Context, local peer.Peer, ps peer.Peerstore) (*Swarm, error) {
 	s := &Swarm{
 		Pipe:  msg.NewPipe(10),
 		conns: conn.MultiConnMap{},
@ -104,13 +104,13 @@ func (s *Swarm) close() error {
 // etc. to achive connection.
 //
 // For now, Dial uses only TCP. This will be extended.
-func (s *Swarm) Dial(peer *peer.Peer) (conn.Conn, error) {
-	if peer.ID.Equal(s.local.ID) {
+func (s *Swarm) Dial(peer peer.Peer) (conn.Conn, error) {
+	if peer.ID().Equal(s.local.ID()) {
 		return nil, errors.New("Attempted connection to self!")
 	}
 
 	// check if we already have an open connection first
-	c := s.GetConnection(peer.ID)
+	c := s.GetConnection(peer.ID())
 	if c != nil {
 		return c, nil
 	}
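Identity comparison now goes through the ID() accessor on both sides. A usage sketch from inside the swarm package (greet is a hypothetical helper, not part of the diff), leaning on the guarantees Dial documents above — self-dial is rejected and open connections are reused:

```go
// greet dials a peer unless it is ourselves. Illustrative only.
func greet(s *Swarm, p peer.Peer) error {
	if p.ID().Equal(s.local.ID()) {
		return errors.New("refusing to dial self")
	}
	_, err := s.Dial(p) // reuses an existing connection when one is open
	return err
}
```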
@ -167,14 +167,14 @@ func (s *Swarm) Connections() []conn.Conn {
 }
 
 // CloseConnection removes a given peer from swarm + closes the connection
-func (s *Swarm) CloseConnection(p *peer.Peer) error {
-	c := s.GetConnection(p.ID)
+func (s *Swarm) CloseConnection(p peer.Peer) error {
+	c := s.GetConnection(p.ID())
 	if c == nil {
 		return u.ErrNotFound
 	}
 
 	s.connsLock.Lock()
-	delete(s.conns, u.Key(p.ID))
+	delete(s.conns, u.Key(p.ID()))
 	s.connsLock.Unlock()
 
 	return c.Close()
@ -190,8 +190,8 @@ func (s *Swarm) GetErrChan() chan error {
 }
 
 // GetPeerList returns a copy of the set of peers swarm is connected to.
-func (s *Swarm) GetPeerList() []*peer.Peer {
-	var out []*peer.Peer
+func (s *Swarm) GetPeerList() []peer.Peer {
+	var out []peer.Peer
 	s.connsLock.RLock()
 	for _, p := range s.conns {
 		out = append(out, p.RemotePeer())
@ -32,7 +32,7 @@ func pong(ctx context.Context, swarm *Swarm) {
 	}
 }
 
-func setupPeer(t *testing.T, addr string) *peer.Peer {
+func setupPeer(t *testing.T, addr string) peer.Peer {
 	tcp, err := ma.NewMultiaddr(addr)
 	if err != nil {
 		t.Fatal(err)
@ -43,19 +43,15 @@ func setupPeer(t *testing.T, addr string) *peer.Peer {
 		t.Fatal(err)
 	}
 
-	id, err := peer.IDFromPubKey(pk)
+	p, err := peer.WithKeyPair(sk, pk)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	p := &peer.Peer{ID: id}
-	p.PrivKey = sk
-	p.PubKey = pk
 	p.AddAddress(tcp)
 	return p
 }
 
-func makeSwarms(ctx context.Context, t *testing.T, addrs []string) ([]*Swarm, []*peer.Peer) {
+func makeSwarms(ctx context.Context, t *testing.T, addrs []string) ([]*Swarm, []peer.Peer) {
 	swarms := []*Swarm{}
 
 	for _, addr := range addrs {
@ -68,7 +64,7 @@ func makeSwarms(ctx context.Context, t *testing.T, addrs []string) ([]*Swarm, []
 		swarms = append(swarms, swarm)
 	}
 
-	peers := make([]*peer.Peer, len(swarms))
+	peers := make([]peer.Peer, len(swarms))
 	for i, s := range swarms {
 		peers[i] = s.local
 	}
@ -85,14 +81,14 @@ func SubtestSwarm(t *testing.T, addrs []string, MsgNum int) {
 	// connect everyone
 	{
 		var wg sync.WaitGroup
-		connect := func(s *Swarm, dst *peer.Peer) {
+		connect := func(s *Swarm, dst peer.Peer) {
 			// copy for other peer
 
-			cp, err := s.peers.Get(dst.ID)
+			cp, err := s.peers.Get(dst.ID())
 			if err != nil {
-				cp = &peer.Peer{ID: dst.ID}
+				t.Fatal(err)
 			}
-			cp.AddAddress(dst.Addresses[0])
+			cp.AddAddress(dst.Addresses()[0])
 
 			log.Info("SWARM TEST: %s dialing %s", s.local, dst)
 			if _, err := s.Dial(cp); err != nil {
peer/peer.go (148 changes)
@ -49,17 +49,48 @@ func IDFromPubKey(pk ic.PubKey) (ID, error) {
 	return ID(hash), nil
 }
 
-// Map maps Key (string) : *Peer (slices are not comparable).
-type Map map[u.Key]*Peer
+// Map maps Key (string) : *peer (slices are not comparable).
+type Map map[u.Key]Peer
 
 // Peer represents the identity information of an IPFS Node, including
 // ID, and relevant Addresses.
-type Peer struct {
-	ID        ID
-	Addresses []ma.Multiaddr
+type Peer interface {
+	// ID returns the peer's ID
+	ID() ID
 
-	PrivKey ic.PrivKey
-	PubKey  ic.PubKey
+	// Key returns the ID as a Key (string) for maps.
+	Key() u.Key
+
+	// Addresses returns the peer's multiaddrs
+	Addresses() []ma.Multiaddr
+
+	// AddAddress adds the given Multiaddr address to Peer's addresses.
+	AddAddress(a ma.Multiaddr)
+
+	// NetAddress returns the first Multiaddr found for a given network.
+	NetAddress(n string) ma.Multiaddr
+
+	// Priv/PubKey returns the peer's Private Key
+	PrivKey() ic.PrivKey
+	PubKey() ic.PubKey
+
+	// LoadAndVerifyKeyPair unmarshalls, loads a private/public key pair.
+	// Error if (a) unmarshalling fails, or (b) pubkey does not match id.
+	LoadAndVerifyKeyPair(marshalled []byte) error
+	VerifyAndSetPrivKey(sk ic.PrivKey) error
+	VerifyAndSetPubKey(pk ic.PubKey) error
+
+	// Get/SetLatency manipulate the current latency measurement.
+	GetLatency() (out time.Duration)
+	SetLatency(laten time.Duration)
+}
+
+type peer struct {
+	id        ID
+	addresses []ma.Multiaddr
+
+	privKey ic.PrivKey
+	pubKey  ic.PubKey
 
 	latency time.Duration
 
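Turning Peer from a struct into an interface hides the mutable fields behind accessors, so every read goes through the concrete type's lock. A consumer-side sketch (firstAddr is illustrative, not part of the diff):

```go
// firstAddr returns a peer's first multiaddr, or nil when it has none.
// Safe for concurrent use: Addresses() is an accessor, not a raw field.
func firstAddr(p peer.Peer) ma.Multiaddr {
	addrs := p.Addresses()
	if len(addrs) == 0 {
		return nil
	}
	return addrs[0]
}
```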
@ -67,34 +98,56 @@ type Peer struct {
 }
 
 // String prints out the peer.
-func (p *Peer) String() string {
-	return "[Peer " + p.ID.String()[:12] + "]"
+func (p *peer) String() string {
+	return "[Peer " + p.id.String()[:12] + "]"
 }
 
 // Key returns the ID as a Key (string) for maps.
-func (p *Peer) Key() u.Key {
-	return u.Key(p.ID)
+func (p *peer) Key() u.Key {
+	return u.Key(p.id)
+}
+
+// ID returns the peer's ID
+func (p *peer) ID() ID {
+	return p.id
+}
+
+// PrivKey returns the peer's Private Key
+func (p *peer) PrivKey() ic.PrivKey {
+	return p.privKey
+}
+
+// PubKey returns the peer's Private Key
+func (p *peer) PubKey() ic.PubKey {
+	return p.pubKey
+}
+
+// Addresses returns the peer's multiaddrs
+func (p *peer) Addresses() []ma.Multiaddr {
+	cp := make([]ma.Multiaddr, len(p.addresses))
+	copy(cp, p.addresses)
+	return cp
 }
 
 // AddAddress adds the given Multiaddr address to Peer's addresses.
-func (p *Peer) AddAddress(a ma.Multiaddr) {
+func (p *peer) AddAddress(a ma.Multiaddr) {
 	p.Lock()
 	defer p.Unlock()
 
-	for _, addr := range p.Addresses {
+	for _, addr := range p.addresses {
 		if addr.Equal(a) {
 			return
 		}
 	}
-	p.Addresses = append(p.Addresses, a)
+	p.addresses = append(p.addresses, a)
 }
 
 // NetAddress returns the first Multiaddr found for a given network.
-func (p *Peer) NetAddress(n string) ma.Multiaddr {
+func (p *peer) NetAddress(n string) ma.Multiaddr {
 	p.RLock()
 	defer p.RUnlock()
 
-	for _, a := range p.Addresses {
+	for _, a := range p.addresses {
 		for _, p := range a.Protocols() {
 			if p.Name == n {
 				return a
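Note that Addresses() above hands back a copy of the slice rather than the slice itself, so callers can index or iterate it without racing a concurrent AddAddress — which is what lets call sites such as peerToPBPeer later in this diff snapshot the addresses once. A sketch of the pattern (printAddrs is illustrative, assuming the fmt package):

```go
// Snapshot once; the copy cannot be mutated under us by another goroutine.
func printAddrs(p peer.Peer) {
	for _, a := range p.Addresses() {
		fmt.Println(a.String())
	}
}
```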
@ -105,7 +158,7 @@ func (p *Peer) NetAddress(n string) ma.Multiaddr {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetLatency retrieves the current latency measurement.
|
// GetLatency retrieves the current latency measurement.
|
||||||
func (p *Peer) GetLatency() (out time.Duration) {
|
func (p *peer) GetLatency() (out time.Duration) {
|
||||||
p.RLock()
|
p.RLock()
|
||||||
out = p.latency
|
out = p.latency
|
||||||
p.RUnlock()
|
p.RUnlock()
|
||||||
@ -116,7 +169,7 @@ func (p *Peer) GetLatency() (out time.Duration) {
|
|||||||
// TODO: Instead of just keeping a single number,
|
// TODO: Instead of just keeping a single number,
|
||||||
// keep a running average over the last hour or so
|
// keep a running average over the last hour or so
|
||||||
// Yep, should be EWMA or something. (-jbenet)
|
// Yep, should be EWMA or something. (-jbenet)
|
||||||
func (p *Peer) SetLatency(laten time.Duration) {
|
func (p *peer) SetLatency(laten time.Duration) {
|
||||||
p.Lock()
|
p.Lock()
|
||||||
if p.latency == 0 {
|
if p.latency == 0 {
|
||||||
p.latency = laten
|
p.latency = laten
|
||||||
@ -128,61 +181,98 @@ func (p *Peer) SetLatency(laten time.Duration) {
 
 // LoadAndVerifyKeyPair unmarshalls, loads a private/public key pair.
 // Error if (a) unmarshalling fails, or (b) pubkey does not match id.
-func (p *Peer) LoadAndVerifyKeyPair(marshalled []byte) error {
+func (p *peer) LoadAndVerifyKeyPair(marshalled []byte) error {
 
 	sk, err := ic.UnmarshalPrivateKey(marshalled)
 	if err != nil {
 		return fmt.Errorf("Failed to unmarshal private key: %v", err)
 	}
 
+	return p.VerifyAndSetPrivKey(sk)
+}
+
+// VerifyAndSetPrivKey sets private key, given its pubkey matches the peer.ID
+func (p *peer) VerifyAndSetPrivKey(sk ic.PrivKey) error {
+
 	// construct and assign pubkey. ensure it matches this peer
 	if err := p.VerifyAndSetPubKey(sk.GetPublic()); err != nil {
 		return err
 	}
 
 	// if we didn't have the priavte key, assign it
-	if p.PrivKey == nil {
-		p.PrivKey = sk
+	if p.privKey == nil {
+		p.privKey = sk
 		return nil
 	}
 
 	// if we already had the keys, check they're equal.
-	if p.PrivKey.Equals(sk) {
+	if p.privKey.Equals(sk) {
 		return nil // as expected. keep the old objects.
 	}
 
 	// keys not equal. invariant violated. this warrants a panic.
 	// these keys should be _the same_ because peer.ID = H(pk)
 	// this mismatch should never happen.
-	log.Error("%s had PrivKey: %v -- got %v", p, p.PrivKey, sk)
+	log.Error("%s had PrivKey: %v -- got %v", p, p.privKey, sk)
 	panic("invariant violated: unexpected key mismatch")
 }
 
 // VerifyAndSetPubKey sets public key, given it matches the peer.ID
-func (p *Peer) VerifyAndSetPubKey(pk ic.PubKey) error {
+func (p *peer) VerifyAndSetPubKey(pk ic.PubKey) error {
 	pkid, err := IDFromPubKey(pk)
 	if err != nil {
 		return fmt.Errorf("Failed to hash public key: %v", err)
 	}
 
-	if !p.ID.Equal(pkid) {
+	if !p.id.Equal(pkid) {
 		return fmt.Errorf("Public key does not match peer.ID.")
 	}
 
 	// if we didn't have the keys, assign them.
-	if p.PubKey == nil {
-		p.PubKey = pk
+	if p.pubKey == nil {
+		p.pubKey = pk
 		return nil
 	}
 
 	// if we already had the pubkey, check they're equal.
-	if p.PubKey.Equals(pk) {
+	if p.pubKey.Equals(pk) {
 		return nil // as expected. keep the old objects.
 	}
 
 	// keys not equal. invariant violated. this warrants a panic.
 	// these keys should be _the same_ because peer.ID = H(pk)
 	// this mismatch should never happen.
-	log.Error("%s had PubKey: %v -- got %v", p, p.PubKey, pk)
+	log.Error("%s had PubKey: %v -- got %v", p, p.pubKey, pk)
 	panic("invariant violated: unexpected key mismatch")
 }
+
+// WithKeyPair returns a Peer object with given keys.
+func WithKeyPair(sk ic.PrivKey, pk ic.PubKey) (Peer, error) {
+	if sk == nil && pk == nil {
+		return nil, fmt.Errorf("PeerWithKeyPair nil keys")
+	}
+
+	pk2 := sk.GetPublic()
+	if pk == nil {
+		pk = pk2
+	} else if !pk.Equals(pk2) {
+		return nil, fmt.Errorf("key mismatch. pubkey is not privkey's pubkey")
+	}
+
+	pkid, err := IDFromPubKey(pk)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to hash public key: %v", err)
+	}
+
+	return &peer{id: pkid, pubKey: pk, privKey: sk}, nil
+}
+
+// WithID constructs a peer with given ID.
+func WithID(id ID) Peer {
+	return &peer{id: id}
+}
+
+// WithIDString constructs a peer with given ID (string).
+func WithIDString(id string) Peer {
+	return WithID(ID(id))
+}
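These constructors replace the hand-assembled Peer structs throughout the tests below. A sketch of the three construction paths from a caller's perspective (buildPeers is illustrative; error handling mirrors how the tests panic):

```go
// buildPeers shows the three Peer constructors side by side. Illustrative only.
func buildPeers() {
	// WithKeyPair derives the ID as H(pk) and verifies the pair.
	sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
	if err != nil {
		panic(err)
	}
	p1, err := peer.WithKeyPair(sk, pk)
	if err != nil {
		panic(err)
	}

	// Key-less peers, e.g. for tests that only need an identity.
	p2 := peer.WithID(peer.ID("some-id"))
	p3 := peer.WithIDString("some-id")
	_, _, _ = p1, p2, p3
}
```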
@ -27,12 +27,12 @@ func TestNetAddress(t *testing.T) {
 		return
 	}
 
-	p := Peer{ID: ID(mh)}
+	p := WithID(ID(mh))
 	p.AddAddress(tcp)
 	p.AddAddress(udp)
 	p.AddAddress(tcp)
 
-	if len(p.Addresses) == 3 {
+	if len(p.Addresses()) == 3 {
 		t.Error("added same address twice")
 	}
 
@ -11,8 +11,8 @@ import (
 
 // Peerstore provides a threadsafe collection for peers.
 type Peerstore interface {
-	Get(ID) (*Peer, error)
-	Put(*Peer) error
+	Get(ID) (Peer, error)
+	Put(Peer) error
 	Delete(ID) error
 	All() (*Map, error)
 }
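A round-trip sketch of the interface-typed store, grounded in the Get/Put semantics in the hunks that follow — Get constructs and stores a bare peer when the ID is not yet present, rather than failing (storeAndFetch is illustrative):

```go
// storeAndFetch puts a peer and reads it back. Illustrative only.
func storeAndFetch(p peer.Peer) (peer.Peer, error) {
	ps := peer.NewPeerstore()
	if err := ps.Put(p); err != nil {
		return nil, err
	}
	return ps.Get(p.ID())
}
```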
@ -29,9 +29,13 @@ func NewPeerstore() Peerstore {
 	}
 }
 
-func (p *peerstore) Get(i ID) (*Peer, error) {
-	p.RLock()
-	defer p.RUnlock()
+func (p *peerstore) Get(i ID) (Peer, error) {
+	p.Lock()
+	defer p.Unlock()
+
+	if i == nil {
+		panic("wat")
+	}
 
 	k := u.Key(i).DsKey()
 	val, err := p.peers.Get(k)
@ -43,7 +47,7 @@ func (p *peerstore) Get(i ID) (*Peer, error) {
 
 	// not found, construct it ourselves, add it to datastore, and return.
 	case ds.ErrNotFound:
-		peer := &Peer{ID: i}
+		peer := &peer{id: i}
 		if err := p.peers.Put(k, peer); err != nil {
 			return nil, err
 		}
@ -51,7 +55,7 @@ func (p *peerstore) Get(i ID) (*Peer, error) {
 
 	// no error, got it back fine
 	case nil:
-		peer, ok := val.(*Peer)
+		peer, ok := val.(*peer)
 		if !ok {
 			return nil, errors.New("stored value was not a Peer")
 		}
@ -59,11 +63,11 @@ func (p *peerstore) Get(i ID) (*Peer, error) {
 	}
 }
 
-func (p *peerstore) Put(peer *Peer) error {
+func (p *peerstore) Put(peer Peer) error {
 	p.Lock()
 	defer p.Unlock()
 
-	k := u.Key(peer.ID).DsKey()
+	k := peer.Key().DsKey()
 	return p.peers.Put(k, peer)
 }
 
@ -91,9 +95,9 @@ func (p *peerstore) All() (*Map, error) {
 			continue
 		}
 
-		pval, ok := val.(*Peer)
+		pval, ok := val.(*peer)
 		if ok {
-			(*ps)[u.Key(pval.ID)] = pval
+			(*ps)[pval.Key()] = pval
 		}
 	}
 	return ps, nil
@ -7,13 +7,13 @@ import (
 	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
 )
 
-func setupPeer(id string, addr string) (*Peer, error) {
+func setupPeer(id string, addr string) (Peer, error) {
 	tcp, err := ma.NewMultiaddr(addr)
 	if err != nil {
 		return nil, err
 	}
 
-	p := &Peer{ID: ID(id)}
+	p := WithIDString(id)
 	p.AddAddress(tcp)
 	return p, nil
 }
@ -13,7 +13,7 @@ import (
 // peerMetric tracks a peer and its distance to something else.
 type peerMetric struct {
 	// the peer
-	peer *peer.Peer
+	peer peer.Peer
 
 	// big.Int for XOR metric
 	metric *big.Int
@ -64,11 +64,11 @@ func (pq *distancePQ) Len() int {
 	return len(pq.heap)
 }
 
-func (pq *distancePQ) Enqueue(p *peer.Peer) {
+func (pq *distancePQ) Enqueue(p peer.Peer) {
 	pq.Lock()
 	defer pq.Unlock()
 
-	distance := ks.XORKeySpace.Key(p.ID).Distance(pq.from)
+	distance := ks.XORKeySpace.Key(p.ID()).Distance(pq.from)
 
 	heap.Push(&pq.heap, &peerMetric{
 		peer: p,
@ -76,7 +76,7 @@ func (pq *distancePQ) Enqueue(p *peer.Peer) {
 	})
 }
 
-func (pq *distancePQ) Dequeue() *peer.Peer {
+func (pq *distancePQ) Dequeue() peer.Peer {
 	pq.Lock()
 	defer pq.Unlock()
 
@ -11,8 +11,8 @@ type PeerQueue interface {
 	Len() int
 
 	// Enqueue adds this node to the queue.
-	Enqueue(*peer.Peer)
+	Enqueue(peer.Peer)
 
 	// Dequeue retrieves the highest (smallest int) priority node
-	Dequeue() *peer.Peer
+	Dequeue() peer.Peer
 }
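A sketch of driving any PeerQueue implementation, such as the distancePQ above, with value-typed peers (drain is illustrative, not part of the diff):

```go
// drain empties a PeerQueue in priority order. Illustrative only.
func drain(q PeerQueue) []peer.Peer {
	var out []peer.Peer
	for q.Len() > 0 {
		out = append(out, q.Dequeue())
	}
	return out
}
```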
@ -12,8 +12,8 @@ import (
 	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
 )
 
-func newPeer(id string) *peer.Peer {
-	return &peer.Peer{ID: peer.ID(id)}
+func newPeer(id string) peer.Peer {
+	return peer.WithIDString(id)
 }
 
 func TestQueue(t *testing.T) {
@ -66,10 +66,10 @@ func TestQueue(t *testing.T) {
 
 }
 
-func newPeerTime(t time.Time) *peer.Peer {
+func newPeerTime(t time.Time) peer.Peer {
 	s := fmt.Sprintf("hmmm time: %v", t)
 	h := u.Hash([]byte(s))
-	return &peer.Peer{ID: peer.ID(h)}
+	return peer.WithID(peer.ID(h))
 }
 
 func TestSyncQueue(t *testing.T) {
@ -9,8 +9,8 @@ import (
 // ChanQueue makes any PeerQueue synchronizable through channels.
 type ChanQueue struct {
 	Queue   PeerQueue
-	EnqChan chan<- *peer.Peer
-	DeqChan <-chan *peer.Peer
+	EnqChan chan<- peer.Peer
+	DeqChan <-chan peer.Peer
 }
 
 // NewChanQueue creates a ChanQueue by wrapping pq.
@ -23,8 +23,8 @@ func NewChanQueue(ctx context.Context, pq PeerQueue) *ChanQueue {
 func (cq *ChanQueue) process(ctx context.Context) {
 
 	// construct the channels here to be able to use them bidirectionally
-	enqChan := make(chan *peer.Peer, 10)
-	deqChan := make(chan *peer.Peer, 10)
+	enqChan := make(chan peer.Peer, 10)
+	deqChan := make(chan peer.Peer, 10)
 
 	cq.EnqChan = enqChan
 	cq.DeqChan = deqChan
@ -32,8 +32,8 @@ func (cq *ChanQueue) process(ctx context.Context) {
 	go func() {
 		defer close(deqChan)
 
-		var next *peer.Peer
-		var item *peer.Peer
+		var next peer.Peer
+		var item peer.Peer
 		var more bool
 
 		for {
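With the channels retyped to chan peer.Peer, feeding and draining a ChanQueue looks like this (sketch only; assumes q is any PeerQueue and ctx a context.Context, per NewChanQueue above):

```go
cq := NewChanQueue(ctx, q)
cq.EnqChan <- peer.WithIDString("abc")
select {
case p := <-cq.DeqChan:
	_ = p // a peer.Peer value, no pointer indirection
case <-ctx.Done():
}
```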
@ -17,20 +17,21 @@ func newMessage(typ Message_MessageType, key string, level int) *Message {
 	return m
 }
 
-func peerToPBPeer(p *peer.Peer) *Message_Peer {
+func peerToPBPeer(p peer.Peer) *Message_Peer {
 	pbp := new(Message_Peer)
-	if len(p.Addresses) == 0 || p.Addresses[0] == nil {
+	addrs := p.Addresses()
+	if len(addrs) == 0 || addrs[0] == nil {
 		pbp.Addr = proto.String("")
 	} else {
-		addr := p.Addresses[0].String()
+		addr := addrs[0].String()
 		pbp.Addr = &addr
 	}
-	pid := string(p.ID)
+	pid := string(p.ID())
 	pbp.Id = &pid
 	return pbp
 }
 
-func peersToPBPeers(peers []*peer.Peer) []*Message_Peer {
+func peersToPBPeers(peers []peer.Peer) []*Message_Peer {
 	pbpeers := make([]*Message_Peer, len(peers))
 	for i, p := range peers {
 		pbpeers[i] = peerToPBPeer(p)
@ -39,7 +39,7 @@ type IpfsDHT struct {
 	sender inet.Sender
 
 	// Local peer (yourself)
-	self *peer.Peer
+	self peer.Peer
 
 	// Other peers
 	peerstore peer.Peerstore
@ -60,7 +60,7 @@ type IpfsDHT struct {
 }
 
 // NewDHT creates a new DHT object with the given peer as the 'local' host
-func NewDHT(ctx context.Context, p *peer.Peer, ps peer.Peerstore, net inet.Network, sender inet.Sender, dstore ds.Datastore) *IpfsDHT {
+func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, net inet.Network, sender inet.Sender, dstore ds.Datastore) *IpfsDHT {
 	dht := new(IpfsDHT)
 	dht.network = net
 	dht.sender = sender
@ -69,12 +69,12 @@ func NewDHT(ctx context.Context, p *peer.Peer, ps peer.Peerstore, net inet.Netwo
 	dht.peerstore = ps
 	dht.ctx = ctx
 
-	dht.providers = NewProviderManager(p.ID)
+	dht.providers = NewProviderManager(p.ID())
 
 	dht.routingTables = make([]*kb.RoutingTable, 3)
-	dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*1000)
-	dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*1000)
-	dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Hour)
+	dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Millisecond*1000)
+	dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Millisecond*1000)
+	dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Hour)
 	dht.birth = time.Now()
 
 	if doPinging {
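All three routing tables are now keyed off the ID() accessor. Once constructed, the DHT is exercised through the same value-typed API; for example (sketch only; dht is the *IpfsDHT returned by NewDHT above and p a peer.Peer):

```go
dht.Update(p)                         // refresh p's last-seen in every table
found, table := dht.FindLocal(p.ID()) // found is nil when the peer is unknown
if found != nil {
	_ = table // the routing table the peer was found in
}
```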
@ -84,7 +84,7 @@ func NewDHT(ctx context.Context, p *peer.Peer, ps peer.Peerstore, net inet.Netwo
 }
 
 // Connect to a new peer at the given address, ping and add to the routing table
-func (dht *IpfsDHT) Connect(ctx context.Context, npeer *peer.Peer) (*peer.Peer, error) {
+func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) (peer.Peer, error) {
 	log.Debug("Connect to new peer: %s", npeer)
 
 	// TODO(jbenet,whyrusleeping)
@ -175,7 +175,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 
 // sendRequest sends out a request using dht.sender, but also makes sure to
 // measure the RTT for latency measurements.
-func (dht *IpfsDHT) sendRequest(ctx context.Context, p *peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *Message) (*Message, error) {
 
 	mes, err := msg.FromObject(p, pmes)
 	if err != nil {
@ -208,7 +208,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p *peer.Peer, pmes *Message
 }
 
 // putValueToNetwork stores the given key/value pair at the peer 'p'
-func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p *peer.Peer,
+func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p peer.Peer,
 	key string, value []byte) error {
 
 	pmes := newMessage(Message_PUT_VALUE, string(key), 0)
@ -224,12 +224,12 @@ func (dht *IpfsDHT) putValueToNetwork(ctx context.Context, p *peer.Peer,
 	return nil
 }
 
-func (dht *IpfsDHT) putProvider(ctx context.Context, p *peer.Peer, key string) error {
+func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.Peer, key string) error {
 
 	pmes := newMessage(Message_ADD_PROVIDER, string(key), 0)
 
 	// add self as the provider
-	pmes.ProviderPeers = peersToPBPeers([]*peer.Peer{dht.self})
+	pmes.ProviderPeers = peersToPBPeers([]peer.Peer{dht.self})
 
 	rpmes, err := dht.sendRequest(ctx, p, pmes)
 	if err != nil {
@ -244,8 +244,8 @@ func (dht *IpfsDHT) putProvider(ctx context.Context, p *peer.Peer, key string) e
 	return nil
 }
 
-func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer,
-	key u.Key, level int) ([]byte, []*peer.Peer, error) {
+func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
+	key u.Key, level int) ([]byte, []peer.Peer, error) {
 
 	pmes, err := dht.getValueSingle(ctx, p, key, level)
 	if err != nil {
@ -270,7 +270,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer,
 	}
 
 	// Perhaps we were given closer peers
-	var peers []*peer.Peer
+	var peers []peer.Peer
 	for _, pb := range pmes.GetCloserPeers() {
 		pr, err := dht.addPeer(pb)
 		if err != nil {
@ -289,8 +289,8 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer,
 	return nil, nil, u.ErrNotFound
 }
 
-func (dht *IpfsDHT) addPeer(pb *Message_Peer) (*peer.Peer, error) {
-	if peer.ID(pb.GetId()).Equal(dht.self.ID) {
+func (dht *IpfsDHT) addPeer(pb *Message_Peer) (peer.Peer, error) {
+	if peer.ID(pb.GetId()).Equal(dht.self.ID()) {
 		return nil, errors.New("cannot add self as peer")
 	}
 
@ -310,7 +310,7 @@ func (dht *IpfsDHT) addPeer(pb *Message_Peer) (*peer.Peer, error) {
 }
 
 // getValueSingle simply performs the get value RPC with the given parameters
-func (dht *IpfsDHT) getValueSingle(ctx context.Context, p *peer.Peer,
+func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.Peer,
 	key u.Key, level int) (*Message, error) {
 
 	pmes := newMessage(Message_GET_VALUE, string(key), level)
@ -369,7 +369,7 @@ func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
 
 // Update signals to all routingTables to Update their last-seen status
 // on the given peer.
-func (dht *IpfsDHT) Update(p *peer.Peer) {
+func (dht *IpfsDHT) Update(p peer.Peer) {
 	log.Debug("updating peer: %s latency = %f\n", p, p.GetLatency().Seconds())
 	removedCount := 0
 	for _, route := range dht.routingTables {
@ -390,7 +390,7 @@ func (dht *IpfsDHT) Update(p *peer.Peer) {
 }
 
 // FindLocal looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in.
-func (dht *IpfsDHT) FindLocal(id peer.ID) (*peer.Peer, *kb.RoutingTable) {
+func (dht *IpfsDHT) FindLocal(id peer.ID) (peer.Peer, *kb.RoutingTable) {
 	for _, table := range dht.routingTables {
 		p := table.Find(id)
 		if p != nil {
@ -400,7 +400,7 @@ func (dht *IpfsDHT) FindLocal(id peer.ID) (*peer.Peer, *kb.RoutingTable) {
 	return nil, nil
 }
 
-func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p *peer.Peer, id peer.ID, level int) (*Message, error) {
+func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.Peer, id peer.ID, level int) (*Message, error) {
 	pmes := newMessage(Message_FIND_NODE, string(id), level)
 	return dht.sendRequest(ctx, p, pmes)
 }
@ -411,14 +411,14 @@ func (dht *IpfsDHT) printTables() {
 	}
 }
 
-func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p *peer.Peer, key u.Key, level int) (*Message, error) {
+func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.Peer, key u.Key, level int) (*Message, error) {
 	pmes := newMessage(Message_GET_PROVIDERS, string(key), level)
 	return dht.sendRequest(ctx, p, pmes)
 }
 
 // TODO: Could be done async
-func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []*peer.Peer {
-	var provArr []*peer.Peer
+func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []peer.Peer {
+	var provArr []peer.Peer
 	for _, prov := range peers {
 		p, err := dht.peerFromInfo(prov)
 		if err != nil {
@ -429,7 +429,7 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []*peer.Peer
 		log.Debug("%s adding provider: %s for %s", dht.self, p, key)
 
 		// Dont add outselves to the list
-		if p.ID.Equal(dht.self.ID) {
+		if p.ID().Equal(dht.self.ID()) {
 			continue
 		}
 
@ -441,7 +441,7 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []*peer.Peer
 }
 
 // nearestPeersToQuery returns the routing tables closest peers.
-func (dht *IpfsDHT) nearestPeersToQuery(pmes *Message, count int) []*peer.Peer {
+func (dht *IpfsDHT) nearestPeersToQuery(pmes *Message, count int) []peer.Peer {
 	level := pmes.GetClusterLevel()
 	cluster := dht.routingTables[level]
 
@ -451,7 +451,7 @@ func (dht *IpfsDHT) nearestPeersToQuery(pmes *Message, count int) []*peer.Peer {
 }
 
 // betterPeerToQuery returns nearestPeersToQuery, but iff closer than self.
-func (dht *IpfsDHT) betterPeersToQuery(pmes *Message, count int) []*peer.Peer {
+func (dht *IpfsDHT) betterPeersToQuery(pmes *Message, count int) []peer.Peer {
 	closer := dht.nearestPeersToQuery(pmes, count)
 
 	// no node? nil
@ -461,17 +461,17 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *Message, count int) []*peer.Peer {
 
 	// == to self? thats bad
 	for _, p := range closer {
-		if p.ID.Equal(dht.self.ID) {
+		if p.ID().Equal(dht.self.ID()) {
 			log.Error("Attempted to return self! this shouldnt happen...")
 			return nil
 		}
 	}
 
-	var filtered []*peer.Peer
+	var filtered []peer.Peer
 	for _, p := range closer {
 		// must all be closer than self
 		key := u.Key(pmes.GetKey())
-		if !kb.Closer(dht.self.ID, p.ID, key) {
+		if !kb.Closer(dht.self.ID(), p.ID(), key) {
 			filtered = append(filtered, p)
 		}
 	}
@ -480,7 +480,7 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *Message, count int) []*peer.Peer {
 	return filtered
 }
 
-func (dht *IpfsDHT) getPeer(id peer.ID) (*peer.Peer, error) {
+func (dht *IpfsDHT) getPeer(id peer.ID) (peer.Peer, error) {
 	p, err := dht.peerstore.Get(id)
 	if err != nil {
 		err = fmt.Errorf("Failed to get peer from peerstore: %s", err)
@ -490,12 +490,12 @@ func (dht *IpfsDHT) getPeer(id peer.ID) (*peer.Peer, error) {
 	return p, nil
 }
 
-func (dht *IpfsDHT) peerFromInfo(pbp *Message_Peer) (*peer.Peer, error) {
+func (dht *IpfsDHT) peerFromInfo(pbp *Message_Peer) (peer.Peer, error) {
 
 	id := peer.ID(pbp.GetId())
 
 	// continue if it's ourselves
-	if id.Equal(dht.self.ID) {
+	if id.Equal(dht.self.ID()) {
 		return nil, errors.New("found self")
 	}
 
@ -512,7 +512,7 @@ func (dht *IpfsDHT) peerFromInfo(pbp *Message_Peer) (*peer.Peer, error) {
 	return p, nil
 }
 
-func (dht *IpfsDHT) ensureConnectedToPeer(pbp *Message_Peer) (*peer.Peer, error) {
+func (dht *IpfsDHT) ensureConnectedToPeer(pbp *Message_Peer) (peer.Peer, error) {
 	p, err := dht.peerFromInfo(pbp)
 	if err != nil {
 		return nil, err
@ -20,7 +20,7 @@ import (
 	"time"
 )
 
-func setupDHT(ctx context.Context, t *testing.T, p *peer.Peer) *IpfsDHT {
+func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT {
 	peerstore := peer.NewPeerstore()
 
 	dhts := netservice.NewService(nil) // nil handler for now, need to patch it
@ -40,7 +40,7 @@ func setupDHT(ctx context.Context, t *testing.T, p *peer.Peer) *IpfsDHT {
 	return d
 }
 
-func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []*peer.Peer, []*IpfsDHT) {
+func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer.Peer, []*IpfsDHT) {
 	var addrs []ma.Multiaddr
 	for i := 0; i < n; i++ {
 		a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
@ -50,7 +50,7 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []*pee
 		addrs = append(addrs, a)
 	}
 
-	var peers []*peer.Peer
+	var peers []peer.Peer
 	for i := 0; i < n; i++ {
 		p := makePeer(addrs[i])
 		peers = append(peers, p)
@ -64,21 +64,16 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []*pee
 	return addrs, peers, dhts
 }
 
-func makePeer(addr ma.Multiaddr) *peer.Peer {
-	p := new(peer.Peer)
-	p.AddAddress(addr)
+func makePeer(addr ma.Multiaddr) peer.Peer {
 	sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512)
 	if err != nil {
 		panic(err)
 	}
-	p.PrivKey = sk
-	p.PubKey = pk
-	id, err := peer.IDFromPubKey(pk)
+	p, err := peer.WithKeyPair(sk, pk)
 	if err != nil {
 		panic(err)
 	}
-	p.ID = id
+	p.AddAddress(addr)
 	return p
 }
@ -289,7 +284,7 @@ func TestProvidesAsync(t *testing.T) {
 	provs := dhts[0].FindProvidersAsync(ctxT, u.Key("hello"), 5)
 	select {
 	case p := <-provs:
-		if !p.ID.Equal(dhts[3].self.ID) {
+		if !p.ID().Equal(dhts[3].self.ID()) {
 			t.Fatalf("got a provider, but not the right one. %s", p)
 		}
 	case <-ctxT.Done():
@ -379,7 +374,7 @@ func TestFindPeer(t *testing.T) {
 	}
 
 	ctxT, _ := context.WithTimeout(ctx, time.Second)
-	p, err := dhts[0].FindPeer(ctxT, peers[2].ID)
+	p, err := dhts[0].FindPeer(ctxT, peers[2].ID())
 	if err != nil {
 		t.Fatal(err)
 	}
@ -388,7 +383,7 @@ func TestFindPeer(t *testing.T) {
 		t.Fatal("Failed to find peer.")
 	}
 
-	if !p.ID.Equal(peers[2].ID) {
+	if !p.ID().Equal(peers[2].ID()) {
 		t.Fatal("Didnt find expected peer.")
 	}
 }
@ -32,12 +32,13 @@ func (di *diagInfo) Marshal() []byte {
 func (dht *IpfsDHT) getDiagInfo() *diagInfo {
 	di := new(diagInfo)
 	di.CodeVersion = "github.com/jbenet/go-ipfs"
-	di.ID = dht.self.ID
+	di.ID = dht.self.ID()
 	di.LifeSpan = time.Since(dht.birth)
 	di.Keys = nil // Currently no way to query datastore
 
 	for _, p := range dht.routingTables[0].ListPeers() {
-		di.Connections = append(di.Connections, connDiagInfo{p.GetLatency(), p.ID})
+		d := connDiagInfo{p.GetLatency(), p.ID()}
+		di.Connections = append(di.Connections, d)
 	}
 	return di
 }
@ -9,7 +9,6 @@ import (
 	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
 
 	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go"
-	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
 	msg "github.com/jbenet/go-ipfs/net/message"
 	mux "github.com/jbenet/go-ipfs/net/mux"
 	peer "github.com/jbenet/go-ipfs/peer"
@ -66,17 +65,17 @@ type fauxNet struct {
 }
 
 // DialPeer attempts to establish a connection to a given peer
-func (f *fauxNet) DialPeer(*peer.Peer) error {
+func (f *fauxNet) DialPeer(peer.Peer) error {
 	return nil
 }
 
 // ClosePeer connection to peer
-func (f *fauxNet) ClosePeer(*peer.Peer) error {
+func (f *fauxNet) ClosePeer(peer.Peer) error {
 	return nil
 }
 
 // IsConnected returns whether a connection to given peer exists.
-func (f *fauxNet) IsConnected(*peer.Peer) (bool, error) {
+func (f *fauxNet) IsConnected(peer.Peer) (bool, error) {
 	return true, nil
 }
 
@ -88,7 +87,7 @@ func (f *fauxNet) SendMessage(msg.NetMessage) error {
 	return nil
 }
 
-func (f *fauxNet) GetPeerList() []*peer.Peer {
+func (f *fauxNet) GetPeerList() []peer.Peer {
 	return nil
 }
 
@ -107,11 +106,10 @@ func TestGetFailures(t *testing.T) {
|
|||||||
fs := &fauxSender{}
|
fs := &fauxSender{}
|
||||||
|
|
||||||
peerstore := peer.NewPeerstore()
|
peerstore := peer.NewPeerstore()
|
||||||
local := new(peer.Peer)
|
local := peer.WithIDString("test_peer")
|
||||||
local.ID = peer.ID("test_peer")
|
|
||||||
|
|
||||||
d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())
|
d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())
|
||||||
other := &peer.Peer{ID: peer.ID("other_peer")}
|
other := peer.WithIDString("other_peer")
|
||||||
d.Update(other)
|
d.Update(other)
|
||||||
|
|
||||||
// This one should time out
|
// This one should time out
|
||||||
@ -189,11 +187,10 @@ func TestGetFailures(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Maybe put these in some sort of "ipfs_testutil" package
|
// TODO: Maybe put these in some sort of "ipfs_testutil" package
|
||||||
func _randPeer() *peer.Peer {
|
func _randPeer() peer.Peer {
|
||||||
p := new(peer.Peer)
|
id := make(peer.ID, 16)
|
||||||
p.ID = make(peer.ID, 16)
|
crand.Read(id)
|
||||||
p.Addresses = []ma.Multiaddr{nil}
|
p := peer.WithID(id)
|
||||||
crand.Read(p.ID)
|
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
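The struct-literal construction (&peer.Peer{...}, new(peer.Peer)) disappears in favor of constructors, both of which appear verbatim in these hunks: peer.WithIDString for a fixed ID and peer.WithID for a generated one. A short sketch of the two inside a test, assuming crand is crypto/rand as in the imports above:

local := peer.WithIDString("test_peer") // fixed, human-readable ID

id := make(peer.ID, 16) // random 16-byte ID, as in _randPeer
crand.Read(id)          // the tests above ignore the error as well
p := peer.WithID(id)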
@@ -204,13 +201,13 @@ func TestNotFound(t *testing.T) {
 	fn := &fauxNet{}
 	fs := &fauxSender{}
 
-	local := new(peer.Peer)
-	local.ID = peer.ID("test_peer")
+	local := peer.WithIDString("test_peer")
 	peerstore := peer.NewPeerstore()
+	peerstore.Put(local)
 
 	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())
 
-	var ps []*peer.Peer
+	var ps []peer.Peer
 	for i := 0; i < 5; i++ {
 		ps = append(ps, _randPeer())
 		d.Update(ps[i])
@@ -228,7 +225,7 @@ func TestNotFound(t *testing.T) {
 		case Message_GET_VALUE:
 			resp := &Message{Type: pmes.Type}
 
-			peers := []*peer.Peer{}
+			peers := []peer.Peer{}
 			for i := 0; i < 7; i++ {
 				peers = append(peers, _randPeer())
 			}
@@ -270,13 +267,13 @@ func TestLessThanKResponses(t *testing.T) {
 	u.Debug = false
 	fn := &fauxNet{}
 	fs := &fauxSender{}
+	local := peer.WithIDString("test_peer")
 	peerstore := peer.NewPeerstore()
-	local := new(peer.Peer)
-	local.ID = peer.ID("test_peer")
+	peerstore.Put(local)
 
 	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())
 
-	var ps []*peer.Peer
+	var ps []peer.Peer
 	for i := 0; i < 5; i++ {
 		ps = append(ps, _randPeer())
 		d.Update(ps[i])
@@ -295,7 +292,7 @@ func TestLessThanKResponses(t *testing.T) {
 		case Message_GET_VALUE:
 			resp := &Message{
 				Type: pmes.Type,
-				CloserPeers: peersToPBPeers([]*peer.Peer{other}),
+				CloserPeers: peersToPBPeers([]peer.Peer{other}),
 			}
 
 			mes, err := msg.FromObject(mes.Peer(), resp)
@@ -14,7 +14,7 @@ import (
 var CloserPeerCount = 4
 
 // dhthandler specifies the signature of functions that handle DHT messages.
-type dhtHandler func(*peer.Peer, *Message) (*Message, error)
+type dhtHandler func(peer.Peer, *Message) (*Message, error)
 
 func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler {
 	switch t {
@@ -35,7 +35,7 @@ func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler {
 	}
 }
 
-func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error) {
 	log.Debug("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())
 
 	// setup response
@@ -93,7 +93,7 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error
 }
 
 // Store a value in this peer local storage
-func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) handlePutValue(p peer.Peer, pmes *Message) (*Message, error) {
 	dht.dslock.Lock()
 	defer dht.dslock.Unlock()
 	dskey := u.Key(pmes.GetKey()).DsKey()
@@ -102,18 +102,18 @@ func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *Message) (*Message, error
 	return pmes, err
 }
 
-func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) handlePing(p peer.Peer, pmes *Message) (*Message, error) {
 	log.Debug("%s Responding to ping from %s!\n", dht.self, p)
 	return pmes, nil
 }
 
-func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *Message) (*Message, error) {
 	resp := newMessage(pmes.GetType(), "", pmes.GetClusterLevel())
-	var closest []*peer.Peer
+	var closest []peer.Peer
 
 	// if looking for self... special case where we send it on CloserPeers.
-	if peer.ID(pmes.GetKey()).Equal(dht.self.ID) {
-		closest = []*peer.Peer{dht.self}
+	if peer.ID(pmes.GetKey()).Equal(dht.self.ID()) {
+		closest = []peer.Peer{dht.self}
 	} else {
 		closest = dht.betterPeersToQuery(pmes, CloserPeerCount)
 	}
@@ -123,9 +123,9 @@ func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *Message) (*Message, error
 		return resp, nil
 	}
 
-	var withAddresses []*peer.Peer
+	var withAddresses []peer.Peer
 	for _, p := range closest {
-		if len(p.Addresses) > 0 {
+		if len(p.Addresses()) > 0 {
 			withAddresses = append(withAddresses, p)
 		}
 	}
@@ -137,7 +137,7 @@ func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *Message) (*Message, error
 	return resp, nil
 }
 
-func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) handleGetProviders(p peer.Peer, pmes *Message) (*Message, error) {
 	resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
 
 	// check if we have this value, to add ourselves as provider.
@@ -171,10 +171,10 @@ func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *Message, e
 
 type providerInfo struct {
 	Creation time.Time
-	Value    *peer.Peer
+	Value    peer.Peer
 }
 
-func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *Message) (*Message, error) {
+func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *Message) (*Message, error) {
 	key := u.Key(pmes.GetKey())
 
 	log.Debug("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))
@@ -182,7 +182,7 @@ func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *Message) (*Message, er
 	// add provider should use the address given in the message
 	for _, pb := range pmes.GetProviderPeers() {
 		pid := peer.ID(pb.GetId())
-		if pid.Equal(p.ID) {
+		if pid.Equal(p.ID()) {
 
 			addr, err := pb.Address()
 			if err != nil {
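All message handlers now share the dhtHandler signature, so handlerForMsgType can hand back a uniform function value. A hedged sketch of that dispatch seam; whether handlerForMsgType returns nil for unknown types is an assumption, the dispatch method itself is hypothetical, and the errors import is assumed:

func (dht *IpfsDHT) dispatch(p peer.Peer, pmes *Message) (*Message, error) {
	handler := dht.handlerForMsgType(pmes.GetType()) // e.g. Message_GET_VALUE
	if handler == nil {
		return nil, errors.New("no handler for message type")
	}
	// handler is a dhtHandler: func(peer.Peer, *Message) (*Message, error)
	return handler(p, pmes)
}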
@@ -20,12 +20,12 @@ type ProviderManager struct {
 
 type addProv struct {
 	k   u.Key
-	val *peer.Peer
+	val peer.Peer
 }
 
 type getProv struct {
 	k    u.Key
-	resp chan []*peer.Peer
+	resp chan []peer.Peer
 }
 
 func NewProviderManager(local peer.ID) *ProviderManager {
@@ -45,7 +45,7 @@ func (pm *ProviderManager) run() {
 	for {
 		select {
 		case np := <-pm.newprovs:
-			if np.val.ID.Equal(pm.lpeer) {
+			if np.val.ID().Equal(pm.lpeer) {
 				pm.local[np.k] = struct{}{}
 			}
 			pi := new(providerInfo)
@@ -54,7 +54,7 @@ func (pm *ProviderManager) run() {
 			arr := pm.providers[np.k]
 			pm.providers[np.k] = append(arr, pi)
 		case gp := <-pm.getprovs:
-			var parr []*peer.Peer
+			var parr []peer.Peer
 			provs := pm.providers[gp.k]
 			for _, p := range provs {
 				parr = append(parr, p.Value)
@@ -82,17 +82,17 @@ func (pm *ProviderManager) run() {
 	}
 }
 
-func (pm *ProviderManager) AddProvider(k u.Key, val *peer.Peer) {
+func (pm *ProviderManager) AddProvider(k u.Key, val peer.Peer) {
 	pm.newprovs <- &addProv{
 		k:   k,
 		val: val,
 	}
 }
 
-func (pm *ProviderManager) GetProviders(k u.Key) []*peer.Peer {
+func (pm *ProviderManager) GetProviders(k u.Key) []peer.Peer {
 	gp := new(getProv)
 	gp.k = k
-	gp.resp = make(chan []*peer.Peer)
+	gp.resp = make(chan []peer.Peer)
 	pm.getprovs <- gp
 	return <-gp.resp
 }
@@ -11,7 +11,7 @@ func TestProviderManager(t *testing.T) {
 	mid := peer.ID("testing")
 	p := NewProviderManager(mid)
 	a := u.Key("test")
-	p.AddProvider(a, &peer.Peer{})
+	p.AddProvider(a, peer.WithIDString("testingprovider"))
 	resp := p.GetProviders(a)
 	if len(resp) != 1 {
 		t.Fatal("Could not retrieve provider.")
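The updated test doubles as a usage example for the interface-typed ProviderManager. A slightly expanded sketch, inside the dht package; the note about run() marking the key as locally provided follows from the lpeer comparison in the hunks above:

mid := peer.ID("testing")
pm := NewProviderManager(mid)

k := u.Key("test")
pm.AddProvider(k, peer.WithIDString("testingprovider"))
pm.AddProvider(k, peer.WithID(mid)) // ID matches pm.lpeer, so run() also records it in pm.local

for _, prov := range pm.GetProviders(k) {
	fmt.Println(prov.ID().Pretty())
}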
@@ -26,10 +26,10 @@ type dhtQuery struct {
 }
 
 type dhtQueryResult struct {
 	value         []byte      // GetValue
-	peer          *peer.Peer   // FindPeer
-	providerPeers []*peer.Peer // GetProviders
-	closerPeers   []*peer.Peer // *
+	peer          peer.Peer   // FindPeer
+	providerPeers []peer.Peer // GetProviders
+	closerPeers   []peer.Peer // *
 	success       bool
 }
@@ -47,10 +47,10 @@ func newQuery(k u.Key, f queryFunc) *dhtQuery {
 // - the value
 // - a list of peers potentially better able to serve the query
 // - an error
-type queryFunc func(context.Context, *peer.Peer) (*dhtQueryResult, error)
+type queryFunc func(context.Context, peer.Peer) (*dhtQueryResult, error)
 
 // Run runs the query at hand. pass in a list of peers to use first.
-func (q *dhtQuery) Run(ctx context.Context, peers []*peer.Peer) (*dhtQueryResult, error) {
+func (q *dhtQuery) Run(ctx context.Context, peers []peer.Peer) (*dhtQueryResult, error) {
 	runner := newQueryRunner(ctx, q)
 	return runner.Run(peers)
 }
@@ -100,7 +100,7 @@ func newQueryRunner(ctx context.Context, q *dhtQuery) *dhtQueryRunner {
 	}
 }
 
-func (r *dhtQueryRunner) Run(peers []*peer.Peer) (*dhtQueryResult, error) {
+func (r *dhtQueryRunner) Run(peers []peer.Peer) (*dhtQueryResult, error) {
 	log.Debug("Run query with %d peers.", len(peers))
 	if len(peers) == 0 {
 		log.Warning("Running query with no peers!")
@@ -148,7 +148,7 @@ func (r *dhtQueryRunner) Run(peers []*peer.Peer) (*dhtQueryResult, error) {
 	return nil, err
 }
 
-func (r *dhtQueryRunner) addPeerToQuery(next *peer.Peer, benchmark *peer.Peer) {
+func (r *dhtQueryRunner) addPeerToQuery(next peer.Peer, benchmark peer.Peer) {
 	if next == nil {
 		// wtf why are peers nil?!?
 		log.Error("Query getting nil peers!!!\n")
@@ -156,7 +156,7 @@ func (r *dhtQueryRunner) addPeerToQuery(next *peer.Peer, benchmark *peer.Peer) {
 	}
 
 	// if new peer further away than whom we got it from, bother (loops)
-	if benchmark != nil && kb.Closer(benchmark.ID, next.ID, r.query.key) {
+	if benchmark != nil && kb.Closer(benchmark.ID(), next.ID(), r.query.key) {
 		return
 	}
 
@@ -200,7 +200,7 @@ func (r *dhtQueryRunner) spawnWorkers() {
 	}
 }
 
-func (r *dhtQueryRunner) queryPeer(p *peer.Peer) {
+func (r *dhtQueryRunner) queryPeer(p peer.Peer) {
 	log.Debug("spawned worker for: %v\n", p)
 
 	// make sure we rate limit concurrency.
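queryFunc is the closure type every lookup is built from; the routing hunks below construct one inline and feed the seed peers to Run. A condensed sketch of that wiring, mirroring PutValue (putValueToNetwork and the dhtQueryResult literal are both taken from hunks in this diff):

query := newQuery(key, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) {
	// push the value to one peer; reporting success ends the query
	if err := dht.putValueToNetwork(ctx, p, string(key), value); err != nil {
		return nil, err
	}
	return &dhtQueryResult{success: true}, nil
})

_, err := query.Run(ctx, peers) // peers is now []peer.Peer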
@@ -23,13 +23,13 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
 		return err
 	}
 
-	var peers []*peer.Peer
+	var peers []peer.Peer
 	for _, route := range dht.routingTables {
 		npeers := route.NearestPeers(kb.ConvertKey(key), KValue)
 		peers = append(peers, npeers...)
 	}
 
-	query := newQuery(key, func(ctx context.Context, p *peer.Peer) (*dhtQueryResult, error) {
+	query := newQuery(key, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) {
 		log.Debug("%s PutValue qry part %v", dht.self, p)
 		err := dht.putValueToNetwork(ctx, p, string(key), value)
 		if err != nil {
@@ -65,7 +65,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
 	}
 
 	// setup the Query
-	query := newQuery(key, func(ctx context.Context, p *peer.Peer) (*dhtQueryResult, error) {
+	query := newQuery(key, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) {
 
 		val, peers, err := dht.getValueOrPeers(ctx, p, key, routeLevel)
 		if err != nil {
@@ -117,8 +117,8 @@ func (dht *IpfsDHT) Provide(ctx context.Context, key u.Key) error {
 	return nil
 }
 
-func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int) <-chan *peer.Peer {
-	peerOut := make(chan *peer.Peer, count)
+func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int) <-chan peer.Peer {
+	peerOut := make(chan peer.Peer, count)
 	go func() {
 		ps := newPeerSet()
 		provs := dht.providers.GetProviders(key)
@@ -136,7 +136,7 @@ func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int
 		peers := dht.routingTables[0].NearestPeers(kb.ConvertKey(key), AlphaValue)
 		for _, pp := range peers {
 			wg.Add(1)
-			go func(p *peer.Peer) {
+			go func(p peer.Peer) {
 				defer wg.Done()
 				pmes, err := dht.findProvidersSingle(ctx, p, key, 0)
 				if err != nil {
@@ -153,7 +153,7 @@ func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int
 }
 
 //TODO: this function could also be done asynchronously
-func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*Message_Peer, ps *peerSet, count int, out chan *peer.Peer) {
+func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*Message_Peer, ps *peerSet, count int, out chan peer.Peer) {
 	for _, pbp := range peers {
 
 		// construct new peer
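FindProvidersAsync now yields peer.Peer values over the channel. A hedged consumption sketch; the key and count are illustrative, and ranging to completion assumes the implementation closes the channel, as the mock router later in this diff does:

for p := range dht.FindProvidersAsync(ctx, u.Key("hello"), 10) {
	fmt.Printf("provider: %s\n", p.ID().Pretty())
}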
@ -173,7 +173,7 @@ func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*Message_Peer, ps *peerSet
|
|||||||
|
|
||||||
// Find specific Peer
|
// Find specific Peer
|
||||||
// FindPeer searches for a peer with given ID.
|
// FindPeer searches for a peer with given ID.
|
||||||
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (*peer.Peer, error) {
|
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error) {
|
||||||
|
|
||||||
// Check if were already connected to them
|
// Check if were already connected to them
|
||||||
p, _ := dht.FindLocal(id)
|
p, _ := dht.FindLocal(id)
|
||||||
@ -186,7 +186,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (*peer.Peer, error
|
|||||||
if p == nil {
|
if p == nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
if p.ID.Equal(id) {
|
if p.ID().Equal(id) {
|
||||||
return p, nil
|
return p, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -205,7 +205,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (*peer.Peer, error
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if nxtPeer.ID.Equal(id) {
|
if nxtPeer.ID().Equal(id) {
|
||||||
return nxtPeer, nil
|
return nxtPeer, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -214,7 +214,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (*peer.Peer, error
|
|||||||
return nil, u.ErrNotFound
|
return nil, u.ErrNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (*peer.Peer, error) {
|
func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (peer.Peer, error) {
|
||||||
|
|
||||||
// Check if were already connected to them
|
// Check if were already connected to them
|
||||||
p, _ := dht.FindLocal(id)
|
p, _ := dht.FindLocal(id)
|
||||||
@ -230,7 +230,7 @@ func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (*peer.Pee
|
|||||||
}
|
}
|
||||||
|
|
||||||
// setup query function
|
// setup query function
|
||||||
query := newQuery(u.Key(id), func(ctx context.Context, p *peer.Peer) (*dhtQueryResult, error) {
|
query := newQuery(u.Key(id), func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) {
|
||||||
pmes, err := dht.findPeerSingle(ctx, p, id, routeLevel)
|
pmes, err := dht.findPeerSingle(ctx, p, id, routeLevel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("%s getPeer error: %v", dht.self, err)
|
log.Error("%s getPeer error: %v", dht.self, err)
|
||||||
@ -242,7 +242,7 @@ func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (*peer.Pee
|
|||||||
routeLevel++
|
routeLevel++
|
||||||
}
|
}
|
||||||
|
|
||||||
nxtprs := make([]*peer.Peer, len(plist))
|
nxtprs := make([]peer.Peer, len(plist))
|
||||||
for i, fp := range plist {
|
for i, fp := range plist {
|
||||||
nxtp, err := dht.peerFromInfo(fp)
|
nxtp, err := dht.peerFromInfo(fp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -250,7 +250,7 @@ func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (*peer.Pee
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if nxtp.ID.Equal(id) {
|
if nxtp.ID().Equal(id) {
|
||||||
return &dhtQueryResult{peer: nxtp, success: true}, nil
|
return &dhtQueryResult{peer: nxtp, success: true}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -272,7 +272,7 @@ func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (*peer.Pee
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Ping a peer, log the time it took
|
// Ping a peer, log the time it took
|
||||||
func (dht *IpfsDHT) Ping(ctx context.Context, p *peer.Peer) error {
|
func (dht *IpfsDHT) Ping(ctx context.Context, p peer.Peer) error {
|
||||||
// Thoughts: maybe this should accept an ID and do a peer lookup?
|
// Thoughts: maybe this should accept an ID and do a peer lookup?
|
||||||
log.Info("ping %s start", p)
|
log.Info("ping %s start", p)
|
||||||
|
|
||||||
|
@ -52,15 +52,15 @@ func newPeerSet() *peerSet {
|
|||||||
return ps
|
return ps
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps *peerSet) Add(p *peer.Peer) {
|
func (ps *peerSet) Add(p peer.Peer) {
|
||||||
ps.lk.Lock()
|
ps.lk.Lock()
|
||||||
ps.ps[string(p.ID)] = true
|
ps.ps[string(p.ID())] = true
|
||||||
ps.lk.Unlock()
|
ps.lk.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps *peerSet) Contains(p *peer.Peer) bool {
|
func (ps *peerSet) Contains(p peer.Peer) bool {
|
||||||
ps.lk.RLock()
|
ps.lk.RLock()
|
||||||
_, ok := ps.ps[string(p.ID)]
|
_, ok := ps.ps[string(p.ID())]
|
||||||
ps.lk.RUnlock()
|
ps.lk.RUnlock()
|
||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
@ -71,12 +71,12 @@ func (ps *peerSet) Size() int {
|
|||||||
return len(ps.ps)
|
return len(ps.ps)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ps *peerSet) AddIfSmallerThan(p *peer.Peer, maxsize int) bool {
|
func (ps *peerSet) AddIfSmallerThan(p peer.Peer, maxsize int) bool {
|
||||||
var success bool
|
var success bool
|
||||||
ps.lk.Lock()
|
ps.lk.Lock()
|
||||||
if _, ok := ps.ps[string(p.ID)]; !ok && len(ps.ps) < maxsize {
|
if _, ok := ps.ps[string(p.ID())]; !ok && len(ps.ps) < maxsize {
|
||||||
success = true
|
success = true
|
||||||
ps.ps[string(p.ID)] = true
|
ps.ps[string(p.ID())] = true
|
||||||
}
|
}
|
||||||
ps.lk.Unlock()
|
ps.lk.Unlock()
|
||||||
return success
|
return success
|
||||||
|
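peerSet keys its map by string(p.ID()), so two distinct peer.Peer values with the same ID collapse into one entry; AddIfSmallerThan relies on exactly that. A small illustration inside the dht package, using the constructors this diff introduces:

ps := newPeerSet()
a := peer.WithIDString("same-id")
b := peer.WithIDString("same-id") // different value, identical ID

ps.Add(a)
fmt.Println(ps.Contains(b)) // true: membership is by ID, not identity
fmt.Println(ps.Size())      // 1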
@@ -23,7 +23,7 @@ func (b *Bucket) find(id peer.ID) *list.Element {
 	b.lk.RLock()
 	defer b.lk.RUnlock()
 	for e := b.list.Front(); e != nil; e = e.Next() {
-		if e.Value.(*peer.Peer).ID.Equal(id) {
+		if e.Value.(peer.Peer).ID().Equal(id) {
 			return e
 		}
 	}
@@ -36,18 +36,18 @@ func (b *Bucket) moveToFront(e *list.Element) {
 	b.lk.Unlock()
 }
 
-func (b *Bucket) pushFront(p *peer.Peer) {
+func (b *Bucket) pushFront(p peer.Peer) {
 	b.lk.Lock()
 	b.list.PushFront(p)
 	b.lk.Unlock()
 }
 
-func (b *Bucket) popBack() *peer.Peer {
+func (b *Bucket) popBack() peer.Peer {
 	b.lk.Lock()
 	defer b.lk.Unlock()
 	last := b.list.Back()
 	b.list.Remove(last)
-	return last.Value.(*peer.Peer)
+	return last.Value.(peer.Peer)
 }
 
 func (b *Bucket) len() int {
@@ -68,7 +68,7 @@ func (b *Bucket) Split(cpl int, target ID) *Bucket {
 	newbuck.list = out
 	e := b.list.Front()
 	for e != nil {
-		peerID := ConvertPeerID(e.Value.(*peer.Peer).ID)
+		peerID := ConvertPeerID(e.Value.(peer.Peer).ID())
 		peerCPL := commonPrefixLen(peerID, target)
 		if peerCPL > cpl {
 			cur := e
@@ -42,10 +42,10 @@ func NewRoutingTable(bucketsize int, localID ID, latency time.Duration) *Routing
 
 // Update adds or moves the given peer to the front of its respective bucket
 // If a peer gets removed from a bucket, it is returned
-func (rt *RoutingTable) Update(p *peer.Peer) *peer.Peer {
+func (rt *RoutingTable) Update(p peer.Peer) peer.Peer {
 	rt.tabLock.Lock()
 	defer rt.tabLock.Unlock()
-	peerID := ConvertPeerID(p.ID)
+	peerID := ConvertPeerID(p.ID())
 	cpl := commonPrefixLen(peerID, rt.local)
 
 	bucketID := cpl
@@ -54,7 +54,7 @@ func (rt *RoutingTable) Update(p *peer.Peer) *peer.Peer {
 	}
 
 	bucket := rt.Buckets[bucketID]
-	e := bucket.find(p.ID)
+	e := bucket.find(p.ID())
 	if e == nil {
 		// New peer, add to bucket
 		if p.GetLatency() > rt.maxLatency {
@@ -93,7 +93,7 @@ func (rt *RoutingTable) Update(p *peer.Peer) *peer.Peer {
 
 // A helper struct to sort peers by their distance to the local node
 type peerDistance struct {
-	p        *peer.Peer
+	p        peer.Peer
 	distance ID
 }
 
@@ -110,8 +110,8 @@ func (p peerSorterArr) Less(a, b int) bool {
 
 func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) peerSorterArr {
 	for e := peerList.Front(); e != nil; e = e.Next() {
-		p := e.Value.(*peer.Peer)
-		pID := ConvertPeerID(p.ID)
+		p := e.Value.(peer.Peer)
+		pID := ConvertPeerID(p.ID())
 		pd := peerDistance{
 			p:        p,
 			distance: xor(target, pID),
@@ -126,16 +126,16 @@ func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) pe
 }
 
 // Find a specific peer by ID or return nil
-func (rt *RoutingTable) Find(id peer.ID) *peer.Peer {
+func (rt *RoutingTable) Find(id peer.ID) peer.Peer {
 	srch := rt.NearestPeers(ConvertPeerID(id), 1)
-	if len(srch) == 0 || !srch[0].ID.Equal(id) {
+	if len(srch) == 0 || !srch[0].ID().Equal(id) {
 		return nil
 	}
 	return srch[0]
 }
 
 // NearestPeer returns a single peer that is nearest to the given ID
-func (rt *RoutingTable) NearestPeer(id ID) *peer.Peer {
+func (rt *RoutingTable) NearestPeer(id ID) peer.Peer {
 	peers := rt.NearestPeers(id, 1)
 	if len(peers) > 0 {
 		return peers[0]
@@ -146,7 +146,7 @@ func (rt *RoutingTable) NearestPeer(id ID) *peer.Peer {
 }
 
 // NearestPeers returns a list of the 'count' closest peers to the given ID
-func (rt *RoutingTable) NearestPeers(id ID, count int) []*peer.Peer {
+func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.Peer {
 	rt.tabLock.RLock()
 	defer rt.tabLock.RUnlock()
 	cpl := commonPrefixLen(id, rt.local)
@@ -178,7 +178,7 @@ func (rt *RoutingTable) NearestPeers(id ID, count int) []*peer.Peer {
 	// Sort by distance to local peer
 	sort.Sort(peerArr)
 
-	var out []*peer.Peer
+	var out []peer.Peer
 	for i := 0; i < count && i < peerArr.Len(); i++ {
 		out = append(out, peerArr[i].p)
 	}
@@ -197,11 +197,11 @@ func (rt *RoutingTable) Size() int {
 
 // ListPeers takes a RoutingTable and returns a list of all peers from all buckets in the table.
 // NOTE: This is potentially unsafe... use at your own risk
-func (rt *RoutingTable) ListPeers() []*peer.Peer {
-	var peers []*peer.Peer
+func (rt *RoutingTable) ListPeers() []peer.Peer {
+	var peers []peer.Peer
 	for _, buck := range rt.Buckets {
 		for e := buck.getIter(); e != nil; e = e.Next() {
-			peers = append(peers, e.Value.(*peer.Peer))
+			peers = append(peers, e.Value.(peer.Peer))
 		}
 	}
 	return peers
@@ -213,6 +213,6 @@ func (rt *RoutingTable) Print() {
 	rt.tabLock.RLock()
 	peers := rt.ListPeers()
 	for i, p := range peers {
-		fmt.Printf("%d) %s %s\n", i, p.ID.Pretty(), p.GetLatency().String())
+		fmt.Printf("%d) %s %s\n", i, p.ID().Pretty(), p.GetLatency().String())
 	}
 }
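Taken together, the kbucket hunks define the interface-typed routing-table API. A hedged usage sketch; constructor and method signatures are all taken from the hunks above, while the concrete arguments are illustrative:

local := peer.WithIDString("local")
rt := NewRoutingTable(20, ConvertPeerID(local.ID()), time.Hour)

remote := peer.WithIDString("remote")
rt.Update(remote) // inserts or refreshes remote in its bucket

for _, p := range rt.NearestPeers(ConvertPeerID(remote.ID()), 4) {
	fmt.Println(p.ID().Pretty(), p.GetLatency())
}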
@@ -10,11 +10,10 @@ import (
 	peer "github.com/jbenet/go-ipfs/peer"
 )
 
-func _randPeer() *peer.Peer {
-	p := new(peer.Peer)
-	p.ID = make(peer.ID, 16)
-	crand.Read(p.ID)
-	return p
+func _randPeer() peer.Peer {
+	id := make(peer.ID, 16)
+	crand.Read(id)
+	return peer.WithID(id)
 }
 
 func _randID() ID {
@@ -29,25 +28,25 @@ func _randID() ID {
 func TestBucket(t *testing.T) {
 	b := newBucket()
 
-	peers := make([]*peer.Peer, 100)
+	peers := make([]peer.Peer, 100)
 	for i := 0; i < 100; i++ {
 		peers[i] = _randPeer()
 		b.pushFront(peers[i])
 	}
 
 	local := _randPeer()
-	localID := ConvertPeerID(local.ID)
+	localID := ConvertPeerID(local.ID())
 
 	i := rand.Intn(len(peers))
-	e := b.find(peers[i].ID)
+	e := b.find(peers[i].ID())
 	if e == nil {
 		t.Errorf("Failed to find peer: %v", peers[i])
 	}
 
-	spl := b.Split(0, ConvertPeerID(local.ID))
+	spl := b.Split(0, ConvertPeerID(local.ID()))
 	llist := b.list
 	for e := llist.Front(); e != nil; e = e.Next() {
-		p := ConvertPeerID(e.Value.(*peer.Peer).ID)
+		p := ConvertPeerID(e.Value.(peer.Peer).ID())
 		cpl := commonPrefixLen(p, localID)
 		if cpl > 0 {
 			t.Fatalf("Split failed. found id with cpl > 0 in 0 bucket")
@@ -56,7 +55,7 @@ func TestBucket(t *testing.T) {
 
 	rlist := spl.list
 	for e := rlist.Front(); e != nil; e = e.Next() {
-		p := ConvertPeerID(e.Value.(*peer.Peer).ID)
+		p := ConvertPeerID(e.Value.(peer.Peer).ID())
 		cpl := commonPrefixLen(p, localID)
 		if cpl == 0 {
 			t.Fatalf("Split failed. found id with cpl == 0 in non 0 bucket")
@@ -67,9 +66,9 @@ func TestBucket(t *testing.T) {
 // Right now, this just makes sure that it doesnt hang or crash
 func TestTableUpdate(t *testing.T) {
 	local := _randPeer()
-	rt := NewRoutingTable(10, ConvertPeerID(local.ID), time.Hour)
+	rt := NewRoutingTable(10, ConvertPeerID(local.ID()), time.Hour)
 
-	peers := make([]*peer.Peer, 100)
+	peers := make([]peer.Peer, 100)
 	for i := 0; i < 100; i++ {
 		peers[i] = _randPeer()
 	}
@@ -93,33 +92,33 @@ func TestTableUpdate(t *testing.T) {
 
 func TestTableFind(t *testing.T) {
 	local := _randPeer()
-	rt := NewRoutingTable(10, ConvertPeerID(local.ID), time.Hour)
+	rt := NewRoutingTable(10, ConvertPeerID(local.ID()), time.Hour)
 
-	peers := make([]*peer.Peer, 100)
+	peers := make([]peer.Peer, 100)
 	for i := 0; i < 5; i++ {
 		peers[i] = _randPeer()
 		rt.Update(peers[i])
 	}
 
 	t.Logf("Searching for peer: '%s'", peers[2])
-	found := rt.NearestPeer(ConvertPeerID(peers[2].ID))
-	if !found.ID.Equal(peers[2].ID) {
+	found := rt.NearestPeer(ConvertPeerID(peers[2].ID()))
+	if !found.ID().Equal(peers[2].ID()) {
 		t.Fatalf("Failed to lookup known node...")
 	}
 }
 
 func TestTableFindMultiple(t *testing.T) {
 	local := _randPeer()
-	rt := NewRoutingTable(20, ConvertPeerID(local.ID), time.Hour)
+	rt := NewRoutingTable(20, ConvertPeerID(local.ID()), time.Hour)
 
-	peers := make([]*peer.Peer, 100)
+	peers := make([]peer.Peer, 100)
 	for i := 0; i < 18; i++ {
 		peers[i] = _randPeer()
 		rt.Update(peers[i])
 	}
 
 	t.Logf("Searching for peer: '%s'", peers[2])
-	found := rt.NearestPeers(ConvertPeerID(peers[2].ID), 15)
+	found := rt.NearestPeers(ConvertPeerID(peers[2].ID()), 15)
 	if len(found) != 15 {
 		t.Fatalf("Got back different number of peers than we expected.")
 	}
@@ -131,7 +130,7 @@ func TestTableFindMultiple(t *testing.T) {
 func TestTableMultithreaded(t *testing.T) {
 	local := peer.ID("localPeer")
 	tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour)
-	var peers []*peer.Peer
+	var peers []peer.Peer
 	for i := 0; i < 500; i++ {
 		peers = append(peers, _randPeer())
 	}
@@ -156,7 +155,7 @@ func TestTableMultithreaded(t *testing.T) {
 	go func() {
 		for i := 0; i < 1000; i++ {
 			n := rand.Intn(len(peers))
-			tab.Find(peers[n].ID)
+			tab.Find(peers[n].ID())
 		}
 		done <- struct{}{}
 	}()
@@ -170,7 +169,7 @@ func BenchmarkUpdates(b *testing.B) {
 	local := ConvertKey("localKey")
 	tab := NewRoutingTable(20, local, time.Hour)
 
-	var peers []*peer.Peer
+	var peers []peer.Peer
 	for i := 0; i < b.N; i++ {
 		peers = append(peers, _randPeer())
 	}
@@ -186,7 +185,7 @@ func BenchmarkFinds(b *testing.B) {
 	local := ConvertKey("localKey")
 	tab := NewRoutingTable(20, local, time.Hour)
 
-	var peers []*peer.Peer
+	var peers []peer.Peer
 	for i := 0; i < b.N; i++ {
 		peers = append(peers, _randPeer())
 		tab.Update(peers[i])
@@ -194,6 +193,6 @@ func BenchmarkFinds(b *testing.B) {
 
 	b.StartTimer()
 	for i := 0; i < b.N; i++ {
-		tab.Find(peers[i].ID)
+		tab.Find(peers[i].ID())
 	}
 }
@@ -17,10 +17,10 @@ var _ routing.IpfsRouting = &MockRouter{}
 type MockRouter struct {
 	datastore ds.Datastore
 	hashTable RoutingServer
-	peer      *peer.Peer
+	peer      peer.Peer
 }
 
-func NewMockRouter(local *peer.Peer, dstore ds.Datastore) routing.IpfsRouting {
+func NewMockRouter(local peer.Peer, dstore ds.Datastore) routing.IpfsRouting {
 	return &MockRouter{
 		datastore: dstore,
 		peer:      local,
@@ -50,16 +50,16 @@ func (mr *MockRouter) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
 	return data, nil
 }
 
-func (mr *MockRouter) FindProviders(ctx context.Context, key u.Key) ([]*peer.Peer, error) {
+func (mr *MockRouter) FindProviders(ctx context.Context, key u.Key) ([]peer.Peer, error) {
 	return nil, nil
 }
 
-func (mr *MockRouter) FindPeer(ctx context.Context, pid peer.ID) (*peer.Peer, error) {
+func (mr *MockRouter) FindPeer(ctx context.Context, pid peer.ID) (peer.Peer, error) {
 	return nil, nil
 }
 
-func (mr *MockRouter) FindProvidersAsync(ctx context.Context, k u.Key, max int) <-chan *peer.Peer {
-	out := make(chan *peer.Peer)
+func (mr *MockRouter) FindProvidersAsync(ctx context.Context, k u.Key, max int) <-chan peer.Peer {
+	out := make(chan peer.Peer)
 	go func() {
 		defer close(out)
 		for i, p := range mr.hashTable.Providers(k) {
@@ -81,11 +81,11 @@ func (mr *MockRouter) Provide(_ context.Context, key u.Key) error {
 }
 
 type RoutingServer interface {
-	Announce(*peer.Peer, u.Key) error
+	Announce(peer.Peer, u.Key) error
 
-	Providers(u.Key) []*peer.Peer
+	Providers(u.Key) []peer.Peer
 
-	Client(p *peer.Peer) routing.IpfsRouting
+	Client(p peer.Peer) routing.IpfsRouting
 }
 
 func VirtualRoutingServer() RoutingServer {
@@ -99,7 +99,7 @@ type hashTable struct {
 	providers map[u.Key]peer.Map
 }
 
-func (rs *hashTable) Announce(p *peer.Peer, k u.Key) error {
+func (rs *hashTable) Announce(p peer.Peer, k u.Key) error {
 	rs.lock.Lock()
 	defer rs.lock.Unlock()
 
@@ -111,10 +111,10 @@ func (rs *hashTable) Announce(p *peer.Peer, k u.Key) error {
 	return nil
 }
 
-func (rs *hashTable) Providers(k u.Key) []*peer.Peer {
+func (rs *hashTable) Providers(k u.Key) []peer.Peer {
 	rs.lock.RLock()
 	defer rs.lock.RUnlock()
-	ret := make([]*peer.Peer, 0)
+	ret := make([]peer.Peer, 0)
 	peerset, ok := rs.providers[k]
 	if !ok {
 		return ret
@@ -131,7 +131,7 @@ func (rs *hashTable) Providers(k u.Key) []*peer.Peer {
 	return ret
 }
 
-func (rs *hashTable) Client(p *peer.Peer) routing.IpfsRouting {
+func (rs *hashTable) Client(p peer.Peer) routing.IpfsRouting {
 	return &MockRouter{
 		peer:      p,
 		hashTable: rs,
@@ -20,9 +20,7 @@ func TestKeyNotFound(t *testing.T) {
 
 func TestSetAndGet(t *testing.T) {
 	pid := peer.ID([]byte("the peer id"))
-	p := &peer.Peer{
-		ID: pid,
-	}
+	p := peer.WithID(pid)
 	k := u.Key("42")
 	rs := VirtualRoutingServer()
 	err := rs.Announce(p, k)
@@ -34,7 +32,7 @@ func TestSetAndGet(t *testing.T) {
 		t.Fatal("should be one")
 	}
 	for _, elem := range providers {
-		if bytes.Equal(elem.ID, pid) {
+		if bytes.Equal(elem.ID(), pid) {
 			return
 		}
 	}
@@ -42,7 +40,7 @@ func TestSetAndGet(t *testing.T) {
 }
 
 func TestClientFindProviders(t *testing.T) {
-	peer := &peer.Peer{ID: []byte("42")}
+	peer := peer.WithIDString("42")
 	rs := VirtualRoutingServer()
 	client := rs.Client(peer)
 
@@ -57,7 +55,7 @@ func TestClientFindProviders(t *testing.T) {
 
 	isInHT := false
 	for _, p := range providersFromHashTable {
-		if bytes.Equal(p.ID, peer.ID) {
+		if bytes.Equal(p.ID(), peer.ID()) {
 			isInHT = true
 		}
 	}
@@ -67,7 +65,7 @@ func TestClientFindProviders(t *testing.T) {
 	providersFromClient := client.FindProvidersAsync(context.Background(), u.Key("hello"), max)
 	isInClient := false
 	for p := range providersFromClient {
-		if bytes.Equal(p.ID, peer.ID) {
+		if bytes.Equal(p.ID(), peer.ID()) {
 			isInClient = true
 		}
 	}
@@ -81,9 +79,7 @@ func TestClientOverMax(t *testing.T) {
 	k := u.Key("hello")
 	numProvidersForHelloKey := 100
 	for i := 0; i < numProvidersForHelloKey; i++ {
-		peer := &peer.Peer{
-			ID: []byte(string(i)),
-		}
+		peer := peer.WithIDString(string(i))
 		err := rs.Announce(peer, k)
 		if err != nil {
 			t.Fatal(err)
@@ -96,7 +92,7 @@ func TestClientOverMax(t *testing.T) {
 	}
 
 	max := 10
-	peer := &peer.Peer{ID: []byte("TODO")}
+	peer := peer.WithIDString("TODO")
 	client := rs.Client(peer)
 
 	providersFromClient := client.FindProvidersAsync(context.Background(), k, max)
@@ -118,9 +114,7 @@ func TestCanceledContext(t *testing.T) {
 	i := 0
 	go func() { // infinite stream
 		for {
-			peer := &peer.Peer{
-				ID: []byte(string(i)),
-			}
+			peer := peer.WithIDString(string(i))
 			err := rs.Announce(peer, k)
 			if err != nil {
 				t.Fatal(err)
@@ -129,7 +123,7 @@ func TestCanceledContext(t *testing.T) {
 		}
 	}()
 
-	local := &peer.Peer{ID: []byte("peer id doesn't matter")}
+	local := peer.WithIDString("peer id doesn't matter")
 	client := rs.Client(local)
 
 	t.Log("warning: max is finite so this test is non-deterministic")
|
@ -10,7 +10,7 @@ import (
|
|||||||
// IpfsRouting is the routing module interface
|
// IpfsRouting is the routing module interface
|
||||||
// It is implemented by things like DHTs, etc.
|
// It is implemented by things like DHTs, etc.
|
||||||
type IpfsRouting interface {
|
type IpfsRouting interface {
|
||||||
FindProvidersAsync(context.Context, u.Key, int) <-chan *peer.Peer
|
FindProvidersAsync(context.Context, u.Key, int) <-chan peer.Peer
|
||||||
|
|
||||||
// Basic Put/Get
|
// Basic Put/Get
|
||||||
|
|
||||||
@ -28,5 +28,5 @@ type IpfsRouting interface {
|
|||||||
|
|
||||||
// Find specific Peer
|
// Find specific Peer
|
||||||
// FindPeer searches for a peer with given ID.
|
// FindPeer searches for a peer with given ID.
|
||||||
FindPeer(context.Context, peer.ID) (*peer.Peer, error)
|
FindPeer(context.Context, peer.ID) (peer.Peer, error)
|
||||||
}
|
}
|
||||||
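The mock package already pins this contract at compile time with var _ routing.IpfsRouting = &MockRouter{}; any other implementation can assert the same way. A sketch for the DHT, assuming the dht import path follows the repo layout used elsewhere in this diff:

import (
	routing "github.com/jbenet/go-ipfs/routing"
	dht "github.com/jbenet/go-ipfs/routing/dht"
)

// Fails to compile if *dht.IpfsDHT stops satisfying the interface.
var _ routing.IpfsRouting = (*dht.IpfsDHT)(nil)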