Mirror of https://github.com/ipfs/kubo.git (synced 2025-09-10 14:34:24 +08:00)

Merge pull request #728 from jbenet/addrsplosion
AddrSplosion III - Revenge of the granular TTL
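The pull request replaces the peerstore's flat AddressBook with a peer.AddrManager that attaches a time-to-live (TTL) to every stored address. A minimal sketch of how callers use the new API, based on the peer package code added later in this diff (the peer ID string is reused from the tests below):

    package main

    import (
        "fmt"
        "time"

        ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
        peer "github.com/jbenet/go-ipfs/p2p/peer"
    )

    func main() {
        var mgr peer.AddrManager // the zero value is ready to use

        pid, _ := peer.IDB58Decode("QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
        addr, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/4001")

        // Bootstrap-style addresses are kept essentially forever...
        mgr.AddAddr(pid, addr, peer.PermanentAddrTTL)
        // ...and AddAddr/AddAddrs only ever extend a TTL, so a later,
        // shorter TempAddrTTL for the same address is a no-op.
        mgr.AddAddr(pid, addr, peer.TempAddrTTL)

        // Addrs returns only addresses whose TTL has not yet expired.
        fmt.Println(mgr.Addrs(pid))

        // SetAddrs overrides the TTL unconditionally (a ttl of 0 deletes).
        mgr.SetAddrs(pid, []ma.Multiaddr{addr}, 10*time.Minute)
    }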
@@ -190,7 +190,7 @@ func bootstrapConnect(ctx context.Context,
            defer log.EventBegin(ctx, "bootstrapDial", route.LocalPeer(), p.ID).Done()
            log.Debugf("%s bootstrapping to %s", route.LocalPeer(), p.ID)

-           ps.AddAddresses(p.ID, p.Addrs)
+           ps.AddAddrs(p.ID, p.Addrs, peer.PermanentAddrTTL)
            err := route.Connect(ctx, p.ID)
            if err != nil {
                log.Event(ctx, "bootstrapDialFailed", p.ID)
@@ -151,18 +151,18 @@ func printPeer(ps peer.Peerstore, p peer.ID) (interface{}, error) {
        info.PublicKey = base64.StdEncoding.EncodeToString(pkb)
    }

-   for _, a := range ps.Addresses(p) {
+   for _, a := range ps.Addrs(p) {
        info.Addresses = append(info.Addresses, a.String())
    }

    if v, err := ps.Get(p, "ProtocolVersion"); err == nil {
        if vs, ok := v.(string); ok {
-           info.AgentVersion = vs
+           info.ProtocolVersion = vs
        }
    }
    if v, err := ps.Get(p, "AgentVersion"); err == nil {
        if vs, ok := v.(string); ok {
-           info.ProtocolVersion = vs
+           info.AgentVersion = vs
        }
    }
@@ -95,7 +95,7 @@ trip latency information.
        }

        if addr != nil {
-           n.Peerstore.AddAddress(peerID, addr)
+           n.Peerstore.AddAddr(peerID, addr, peer.TempAddrTTL) // temporary
        }

        // Set up number of pings
@@ -120,7 +120,7 @@ func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int)
    go func() {
        defer close(outChan)

-       if len(n.Peerstore.Addresses(pid)) == 0 {
+       if len(n.Peerstore.Addrs(pid)) == 0 {
            // Make sure we can find the node in question
            outChan <- &PingResult{
                Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
@@ -132,7 +132,7 @@ func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int)
                outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
                return
            }
-           n.Peerstore.AddPeerInfo(p)
+           n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
        }

        outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}
@@ -18,11 +18,16 @@ type stringList struct {
    Strings []string
 }

+type addrMap struct {
+   Addrs map[string][]string
+}
+
 var SwarmCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "swarm inspection tool",
        Synopsis: `
 ipfs swarm peers                - List peers with open connections
+ipfs swarm addrs                - List known addresses. Useful to debug.
 ipfs swarm connect <address>    - Open connection to a given address
 ipfs swarm disconnect <address> - Close connection to a given address
 `,
@@ -34,6 +39,7 @@ ipfs peers in the internet.
    },
    Subcommands: map[string]*cmds.Command{
        "peers":      swarmPeersCmd,
+       "addrs":      swarmAddrsCmd,
        "connect":    swarmConnectCmd,
        "disconnect": swarmDisconnectCmd,
    },
@@ -77,6 +83,66 @@ ipfs swarm peers lists the set of peers this node is connected to.
    Type: stringList{},
 }

+var swarmAddrsCmd = &cmds.Command{
+   Helptext: cmds.HelpText{
+       Tagline: "List known addresses. Useful to debug.",
+       ShortDescription: `
+ipfs swarm addrs lists all addresses this node is aware of.
+`,
+   },
+   Run: func(req cmds.Request, res cmds.Response) {
+
+       n, err := req.Context().GetNode()
+       if err != nil {
+           res.SetError(err, cmds.ErrNormal)
+           return
+       }
+
+       if n.PeerHost == nil {
+           res.SetError(errNotOnline, cmds.ErrClient)
+           return
+       }
+
+       addrs := make(map[string][]string)
+       ps := n.PeerHost.Network().Peerstore()
+       for _, p := range ps.Peers() {
+           s := p.Pretty()
+           for _, a := range ps.Addrs(p) {
+               addrs[s] = append(addrs[s], a.String())
+           }
+           sort.Sort(sort.StringSlice(addrs[s]))
+       }
+
+       res.SetOutput(&addrMap{Addrs: addrs})
+   },
+   Marshalers: cmds.MarshalerMap{
+       cmds.Text: func(res cmds.Response) (io.Reader, error) {
+           m, ok := res.Output().(*addrMap)
+           if !ok {
+               return nil, errors.New("failed to cast map[string]string")
+           }
+
+           // sort the ids first
+           ids := make([]string, 0, len(m.Addrs))
+           for p := range m.Addrs {
+               ids = append(ids, p)
+           }
+           sort.Sort(sort.StringSlice(ids))
+
+           var buf bytes.Buffer
+           for _, p := range ids {
+               paddrs := m.Addrs[p]
+               buf.WriteString(fmt.Sprintf("%s (%d)\n", p, len(paddrs)))
+               for _, addr := range paddrs {
+                   buf.WriteString("\t" + addr + "\n")
+               }
+           }
+           return &buf, nil
+       },
+   },
+   Type: addrMap{},
+}
+
 var swarmConnectCmd = &cmds.Command{
    Helptext: cmds.HelpText{
        Tagline: "Open connection to a given address",
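The text marshaler above prints each peer ID with its address count, then one tab-indented multiaddr per line. A hypothetical `ipfs swarm addrs` run might therefore look like this (the peer ID and addresses here are made up for illustration):

    QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN (2)
        /ip4/10.0.0.5/tcp/4001
        /ip4/104.131.131.82/tcp/4001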
@@ -236,7 +302,7 @@ func peersWithAddresses(ps peer.Peerstore, addrs []string) (pids []peer.ID, err

    for _, iaddr := range iaddrs {
        pids = append(pids, iaddr.ID())
-       ps.AddAddress(iaddr.ID(), iaddr.Multiaddr())
+       ps.AddAddr(iaddr.ID(), iaddr.Multiaddr(), peer.TempAddrTTL)
    }
    return pids, nil
 }
@@ -476,16 +476,12 @@ func startListening(ctx context.Context, host p2phost.Host, cfg *config.Config)
        return err
    }

-   // explicitly set these as our listen addrs.
-   // (why not do it inside inet.NewNetwork? because this way we can
-   // listen on addresses without necessarily advertising those publicly.)
+   // list out our addresses
    addrs, err := host.Network().InterfaceListenAddresses()
    if err != nil {
        return debugerror.Wrap(err)
    }
    log.Infof("Swarm listening at: %s", addrs)
-
-   host.Peerstore().AddAddresses(host.ID(), addrs)
    return nil
 }
@@ -38,18 +38,24 @@ type impl struct {
    receiver Receiver
 }

+func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, error) {
+
+   // first, make sure we're connected.
+   // if this fails, we cannot connect to given peer.
+   //TODO(jbenet) move this into host.NewStream?
+   if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil {
+       return nil, err
+   }
+
+   return bsnet.host.NewStream(ProtocolBitswap, p)
+}
+
 func (bsnet *impl) SendMessage(
    ctx context.Context,
    p peer.ID,
    outgoing bsmsg.BitSwapMessage) error {

-   // ensure we're connected
-   //TODO(jbenet) move this into host.NewStream?
-   if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil {
-       return err
-   }
-
-   s, err := bsnet.host.NewStream(ProtocolBitswap, p)
+   s, err := bsnet.newStreamToPeer(ctx, p)
    if err != nil {
        return err
    }
@@ -68,13 +74,7 @@ func (bsnet *impl) SendRequest(
    p peer.ID,
    outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) {

-   // ensure we're connected
-   //TODO(jbenet) move this into host.NewStream?
-   if err := bsnet.host.Connect(ctx, peer.PeerInfo{ID: p}); err != nil {
-       return nil, err
-   }
-
-   s, err := bsnet.host.NewStream(ProtocolBitswap, p)
+   s, err := bsnet.newStreamToPeer(ctx, p)
    if err != nil {
        return nil, err
    }
@@ -123,7 +123,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int)
            if info.ID == bsnet.host.ID() {
                continue // ignore self as provider
            }
-           bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs)
+           bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peer.TempAddrTTL)
            select {
            case <-ctx.Done():
                return
@@ -192,7 +192,7 @@ func (r *etmReader) macCheckThenDecrypt(m []byte) (int, error) {

    // check mac. if failed, return error.
    if !hmac.Equal(macd, expected) {
-       log.Error("MAC Invalid:", expected, "!=", macd)
+       log.Debug("MAC Invalid:", expected, "!=", macd)
        return 0, ErrMACInvalid
    }

@@ -144,7 +144,7 @@ func (h *BasicHost) NewStream(pid protocol.ID, p peer.ID) (inet.Stream, error) {
 func (h *BasicHost) Connect(ctx context.Context, pi peer.PeerInfo) error {

    // absorb addresses into peerstore
-   h.Peerstore().AddPeerInfo(pi)
+   h.Peerstore().AddAddrs(pi.ID, pi.Addrs, peer.TempAddrTTL)

    cs := h.Network().ConnsToPeer(pi.ID)
    if len(cs) > 0 {
@@ -189,6 +189,10 @@ func (h *BasicHost) Addrs() []ma.Multiaddr {
        log.Debug("error retrieving network interface addrs")
    }

+   if h.ids != nil { // add external observed addresses
+       addrs = append(addrs, h.ids.OwnObservedAddrs()...)
+   }
+
    if h.natmgr != nil { // natmgr is nil if we do not use nat option.
        nat := h.natmgr.NAT()
        if nat != nil { // nat is nil if not ready, or no nat is available.
@@ -33,8 +33,8 @@ func (l *link) newConnPair(dialer *peernet) (*conn, *conn) {
    c.local = ln.peer
    c.remote = rn.peer

-   c.localAddr = ln.ps.Addresses(ln.peer)[0]
-   c.remoteAddr = rn.ps.Addresses(rn.peer)[0]
+   c.localAddr = ln.ps.Addrs(ln.peer)[0]
+   c.remoteAddr = rn.ps.Addrs(rn.peer)[0]

    c.localPrivKey = ln.ps.PrivKey(ln.peer)
    c.remotePubKey = rn.ps.PubKey(rn.peer)
|
|||||||
|
|
||||||
// create our own entirely, so that peers knowledge doesn't get shared
|
// create our own entirely, so that peers knowledge doesn't get shared
|
||||||
ps := peer.NewPeerstore()
|
ps := peer.NewPeerstore()
|
||||||
ps.AddAddress(p, a)
|
ps.AddAddr(p, a, peer.PermanentAddrTTL)
|
||||||
ps.AddPrivKey(p, k)
|
ps.AddPrivKey(p, k)
|
||||||
ps.AddPubKey(p, k.GetPublic())
|
ps.AddPubKey(p, k.GetPublic())
|
||||||
|
|
||||||
@ -307,13 +307,13 @@ func (pn *peernet) BandwidthTotals() (in uint64, out uint64) {
|
|||||||
|
|
||||||
// Listen tells the network to start listening on given multiaddrs.
|
// Listen tells the network to start listening on given multiaddrs.
|
||||||
func (pn *peernet) Listen(addrs ...ma.Multiaddr) error {
|
func (pn *peernet) Listen(addrs ...ma.Multiaddr) error {
|
||||||
pn.Peerstore().AddAddresses(pn.LocalPeer(), addrs)
|
pn.Peerstore().AddAddrs(pn.LocalPeer(), addrs, peer.PermanentAddrTTL)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListenAddresses returns a list of addresses at which this network listens.
|
// ListenAddresses returns a list of addresses at which this network listens.
|
||||||
func (pn *peernet) ListenAddresses() []ma.Multiaddr {
|
func (pn *peernet) ListenAddresses() []ma.Multiaddr {
|
||||||
return pn.Peerstore().Addresses(pn.LocalPeer())
|
return pn.Peerstore().Addrs(pn.LocalPeer())
|
||||||
}
|
}
|
||||||
|
|
||||||
// InterfaceListenAddresses returns a list of addresses at which this network
|
// InterfaceListenAddresses returns a list of addresses at which this network
|
||||||
|
@@ -48,7 +48,7 @@ func TestSimultDials(t *testing.T) {
    connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
        // copy for other peer
        log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.local, dst, addr)
-       s.peers.AddAddress(dst, addr)
+       s.peers.AddAddr(dst, addr, peer.TempAddrTTL)
        if _, err := s.Dial(ctx, dst); err != nil {
            t.Fatal("error swarm dialing to peer", err)
        }
@@ -125,7 +125,7 @@ func TestDialWait(t *testing.T) {
    s2p, s2addr, s2l := newSilentPeer(t)
    go acceptAndHang(s2l)
    defer s2l.Close()
-   s1.peers.AddAddress(s2p, s2addr)
+   s1.peers.AddAddr(s2p, s2addr, peer.PermanentAddrTTL)

    before := time.Now()
    if c, err := s1.Dial(ctx, s2p); err == nil {
@@ -171,13 +171,13 @@ func TestDialBackoff(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
-   s1.peers.AddAddresses(s2.local, s2addrs)
+   s1.peers.AddAddrs(s2.local, s2addrs, peer.PermanentAddrTTL)

    // dial to a non-existent peer.
    s3p, s3addr, s3l := newSilentPeer(t)
    go acceptAndHang(s3l)
    defer s3l.Close()
-   s1.peers.AddAddress(s3p, s3addr)
+   s1.peers.AddAddr(s3p, s3addr, peer.PermanentAddrTTL)

    // in this test we will:
    //   1) dial 10x to each node.
@@ -389,7 +389,7 @@ func TestDialBackoffClears(t *testing.T) {
    defer s2l.Close()

    // phase 1 -- dial to non-operational addresses
-   s1.peers.AddAddress(s2.local, s2bad)
+   s1.peers.AddAddr(s2.local, s2bad, peer.PermanentAddrTTL)

    before := time.Now()
    if c, err := s1.Dial(ctx, s2.local); err == nil {
@@ -419,7 +419,7 @@ func TestDialBackoffClears(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
-   s1.peers.AddAddresses(s2.local, ifaceAddrs1)
+   s1.peers.AddAddrs(s2.local, ifaceAddrs1, peer.PermanentAddrTTL)

    before = time.Now()
    if c, err := s1.Dial(ctx, s2.local); err != nil {
@@ -19,7 +19,7 @@ func TestPeers(t *testing.T) {

    connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
        // TODO: make a DialAddr func.
-       s.peers.AddAddress(dst, addr)
+       s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
        // t.Logf("connections from %s", s.LocalPeer())
        // for _, c := range s.ConnectionsToPeer(dst) {
        // 	t.Logf("connection from %s to %s: %v", s.LocalPeer(), dst, c)
@@ -25,7 +25,7 @@ func TestSimultOpen(t *testing.T) {
    connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
        // copy for other peer
        log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.local, dst, addr)
-       s.peers.AddAddress(dst, addr)
+       s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
        if _, err := s.Dial(ctx, dst); err != nil {
            t.Fatal("error swarm dialing to peer", err)
        }
@@ -110,7 +110,7 @@ func TestDialBadAddrs(t *testing.T) {

    test := func(a ma.Multiaddr) {
        p := testutil.RandPeerIDFatal(t)
-       s.peers.AddAddress(p, a)
+       s.peers.AddAddr(p, a, peer.PermanentAddrTTL)
        if _, err := s.Dial(ctx, p); err == nil {
            t.Error("swarm should not dial: %s", m)
        }
    }
@@ -3,6 +3,7 @@ package swarm
 import (
    "errors"
    "fmt"
+   "math/rand"
    "net"
    "sync"
    "time"
@@ -15,6 +16,9 @@ import (
    context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
    ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
    manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
+   process "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
+   procctx "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/context"
+   ratelimit "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess/ratelimit"
 )

 // Diagram of dial sync:
@@ -289,14 +293,14 @@ func (s *Swarm) dial(ctx context.Context, p peer.ID) (*Conn, error) {
    }

    // get remote peer addrs
-   remoteAddrs := s.peers.Addresses(p)
+   remoteAddrs := s.peers.Addrs(p)
    // make sure we can use the addresses.
    remoteAddrs = addrutil.FilterUsableAddrs(remoteAddrs)
    // drop out any addrs that would just dial ourselves. use ListenAddresses
    // as that is a more authoritative view than localAddrs.
    ila, _ := s.InterfaceListenAddresses()
    remoteAddrs = addrutil.Subtract(remoteAddrs, ila)
-   remoteAddrs = addrutil.Subtract(remoteAddrs, s.peers.Addresses(s.local))
+   remoteAddrs = addrutil.Subtract(remoteAddrs, s.peers.Addrs(s.local))
    log.Debugf("%s swarm dialing %s -- remote:%s local:%s", s.local, p, remoteAddrs, s.ListenAddresses())
    if len(remoteAddrs) == 0 {
        err := errors.New("peer has no addresses")
@@ -353,43 +357,63 @@ func (s *Swarm) dialAddrs(ctx context.Context, d *conn.Dialer, p peer.ID, remote
    conns := make(chan conn.Conn, len(remoteAddrs))
    errs := make(chan error, len(remoteAddrs))

-   //TODO: rate limiting just in case?
-   for _, addr := range remoteAddrs {
-       go func(addr ma.Multiaddr) {
-           connC, err := s.dialAddr(ctx, d, p, addr)
-
-           // check parent still wants our results
-           select {
-           case <-foundConn:
-               if connC != nil {
-                   connC.Close()
-               }
-               return
-           default:
-           }
-
-           if err != nil {
-               errs <- err
-           } else if connC == nil {
-               errs <- fmt.Errorf("failed to dial %s %s", p, addr)
-           } else {
-               conns <- connC
-           }
-       }(addr)
-   }
+   // dialSingleAddr is used in the rate-limited async thing below.
+   dialSingleAddr := func(addr ma.Multiaddr) {
+       connC, err := s.dialAddr(ctx, d, p, addr)
+
+       // check parent still wants our results
+       select {
+       case <-foundConn:
+           if connC != nil {
+               connC.Close()
+           }
+           return
+       default:
+       }
+
+       if err != nil {
+           errs <- err
+       } else if connC == nil {
+           errs <- fmt.Errorf("failed to dial %s %s", p, addr)
+       } else {
+           conns <- connC
+       }
+   }
+
+   // this whole thing is in a goroutine so we can use foundConn
+   // to end early.
+   go func() {
+       // rate limiting just in case. at most 10 addrs at once.
+       limiter := ratelimit.NewRateLimiter(procctx.WithContext(ctx), 10)
+
+       // permute addrs so we try different sets first each time.
+       for _, i := range rand.Perm(len(remoteAddrs)) {
+           select {
+           case <-foundConn: // if one of them succeeded already
+               break
+           default:
+           }
+
+           workerAddr := remoteAddrs[i] // shadow variable to avoid race
+           limiter.Go(func(worker process.Process) {
+               dialSingleAddr(workerAddr)
+           })
+       }
+   }()

-   err := fmt.Errorf("failed to dial %s", p)
+   // wait for the results.
+   exitErr := fmt.Errorf("failed to dial %s", p)
    for i := 0; i < len(remoteAddrs); i++ {
        select {
-       case err = <-errs:
-           log.Debug(err)
+       case exitErr = <-errs:
+           log.Debug(exitErr)
        case connC := <-conns:
            // take the first + return asap
            close(foundConn)
            return connC, nil
        }
    }
-   return nil, err
+   return nil, exitErr
 }

 func (s *Swarm) dialAddr(ctx context.Context, d *conn.Dialer, p peer.ID, addr ma.Multiaddr) (conn.Conn, error) {
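ratelimit.NewRateLimiter and limiter.Go above come from the vendored goprocess package. The underlying pattern — at most N dials in flight, first success wins, errors collected along the way — can be sketched with the standard library alone; this is an illustration of the idea under those assumptions, not the code used in the commit:

    package dialer

    import (
        "context"
        "errors"
        "net"
    )

    // dialFirstSuccess tries every address with at most 10 concurrent dials
    // and returns the first connection that succeeds.
    func dialFirstSuccess(ctx context.Context, addrs []string,
        dial func(context.Context, string) (net.Conn, error)) (net.Conn, error) {

        if len(addrs) == 0 {
            return nil, errors.New("no addresses to dial")
        }

        sem := make(chan struct{}, 10)           // rate limit: 10 dials in flight
        conns := make(chan net.Conn, len(addrs)) // buffered so workers never block
        errs := make(chan error, len(addrs))

        for _, a := range addrs {
            a := a // per-iteration copy for the goroutine
            go func() {
                sem <- struct{}{}
                defer func() { <-sem }()
                c, err := dial(ctx, a)
                if err != nil {
                    errs <- err
                    return
                }
                conns <- c
            }()
        }

        var lastErr error
        for range addrs {
            select {
            case c := <-conns:
                return c, nil // take the first + return asap
            case lastErr = <-errs:
            case <-ctx.Done():
                return nil, ctx.Err()
            }
        }
        return nil, lastErr
    }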
@@ -53,7 +53,7 @@ func (s *Swarm) setupListener(maddr ma.Multiaddr) error {
    // 	return err
    // }
    // for _, a := range resolved {
-   // 	s.peers.AddAddress(s.local, a)
+   // 	s.peers.AddAddr(s.local, a)
    // }

    sk := s.peers.PrivKey(s.local)
@@ -75,7 +75,7 @@ func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {
    var wg sync.WaitGroup
    connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
        // TODO: make a DialAddr func.
-       s.peers.AddAddress(dst, addr)
+       s.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)
        if _, err := s.Dial(ctx, dst); err != nil {
            t.Fatal("error swarm dialing to peer", err)
        }
p2p/peer/addr_manager.go (new file, 188 lines)
@@ -0,0 +1,188 @@
package peer

import (
    "sync"
    "time"

    ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)

const (

    // TempAddrTTL is the ttl used for a short lived address
    TempAddrTTL = time.Second * 10

    // ProviderAddrTTL is the TTL of an address we've received from a provider.
    // This is also a temporary address, but lasts longer. After this expires,
    // the records we return will require an extra lookup.
    ProviderAddrTTL = time.Minute * 10

    // RecentlyConnectedAddrTTL is used when we recently connected to a peer.
    // It means that we are reasonably certain of the peer's address.
    RecentlyConnectedAddrTTL = time.Minute * 10

    // OwnObservedAddrTTL is used for our own external addresses observed by peers.
    OwnObservedAddrTTL = time.Minute * 20

    // PermanentAddrTTL is the ttl for a "permanent address" (e.g. bootstrap nodes)
    // if we haven't shipped you an update to ipfs in 356 days
    // we probably arent running the same bootstrap nodes...
    PermanentAddrTTL = time.Hour * 24 * 356

    // ConnectedAddrTTL is the ttl used for the addresses of a peer to whom
    // we're connected directly. This is basically permanent, as we will
    // clear them + re-add under a TempAddrTTL after disconnecting.
    ConnectedAddrTTL = PermanentAddrTTL
)

type expiringAddr struct {
    Addr ma.Multiaddr
    TTL  time.Time
}

func (e *expiringAddr) ExpiredBy(t time.Time) bool {
    return t.After(e.TTL)
}

type addrSet map[string]expiringAddr

// AddrManager manages addresses.
// The zero-value is ready to be used.
type AddrManager struct {
    addrmu sync.Mutex // guards addrs
    addrs  map[ID]addrSet
}

// ensures the AddrManager is initialized.
// So we can use the zero value.
func (mgr *AddrManager) init() {
    if mgr.addrs == nil {
        mgr.addrs = make(map[ID]addrSet)
    }
}

func (mgr *AddrManager) Peers() []ID {
    mgr.addrmu.Lock()
    defer mgr.addrmu.Unlock()
    if mgr.addrs == nil {
        return nil
    }

    pids := make([]ID, 0, len(mgr.addrs))
    for pid := range mgr.addrs {
        pids = append(pids, pid)
    }
    return pids
}

// AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
func (mgr *AddrManager) AddAddr(p ID, addr ma.Multiaddr, ttl time.Duration) {
    mgr.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// AddAddrs gives AddrManager addresses to use, with a given ttl
// (time-to-live), after which the address is no longer valid.
// If the manager has a longer TTL, the operation is a no-op for that address
func (mgr *AddrManager) AddAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration) {
    mgr.addrmu.Lock()
    defer mgr.addrmu.Unlock()

    // if ttl is zero, exit. nothing to do.
    if ttl <= 0 {
        return
    }

    // so zero value can be used
    mgr.init()

    amap, found := mgr.addrs[p]
    if !found {
        amap = make(addrSet)
        mgr.addrs[p] = amap
    }

    // only expand ttls
    exp := time.Now().Add(ttl)
    for _, addr := range addrs {
        addrstr := addr.String()
        a, found := amap[addrstr]
        if !found || exp.After(a.TTL) {
            amap[addrstr] = expiringAddr{Addr: addr, TTL: exp}
        }
    }
}

// SetAddr calls mgr.SetAddrs(p, addr, ttl)
func (mgr *AddrManager) SetAddr(p ID, addr ma.Multiaddr, ttl time.Duration) {
    mgr.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// SetAddrs sets the ttl on addresses. This clears any TTL there previously.
// This is used when we receive the best estimate of the validity of an address.
func (mgr *AddrManager) SetAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration) {
    mgr.addrmu.Lock()
    defer mgr.addrmu.Unlock()

    // so zero value can be used
    mgr.init()

    amap, found := mgr.addrs[p]
    if !found {
        amap = make(addrSet)
        mgr.addrs[p] = amap
    }

    exp := time.Now().Add(ttl)
    for _, addr := range addrs {
        // re-set all of them for new ttl.
        addrs := addr.String()

        if ttl > 0 {
            amap[addrs] = expiringAddr{Addr: addr, TTL: exp}
        } else {
            delete(amap, addrs)
        }
    }
}

// Addresses returns all known (and valid) addresses for a given
func (mgr *AddrManager) Addrs(p ID) []ma.Multiaddr {
    mgr.addrmu.Lock()
    defer mgr.addrmu.Unlock()

    // not initialized? nothing to give.
    if mgr.addrs == nil {
        return nil
    }

    maddrs, found := mgr.addrs[p]
    if !found {
        return nil
    }

    now := time.Now()
    good := make([]ma.Multiaddr, 0, len(maddrs))
    var expired []string
    for s, m := range maddrs {
        if m.ExpiredBy(now) {
            expired = append(expired, s)
        } else {
            good = append(good, m.Addr)
        }
    }

    // clean up the expired ones.
    for _, s := range expired {
        delete(maddrs, s)
    }
    return good
}

// ClearAddresses removes all previously stored addresses
func (mgr *AddrManager) ClearAddrs(p ID) {
    mgr.addrmu.Lock()
    defer mgr.addrmu.Unlock()
    mgr.init()

    mgr.addrs[p] = make(addrSet) // clear what was there before
}
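A small sketch of the expiry behaviour the manager above implements (assuming the usual fmt/time imports and a valid peer.ID and multiaddr in scope; the TTL values are arbitrary):

    func demoExpiry(pid ID, maddr ma.Multiaddr) {
        mgr := AddrManager{}

        // A provider-style, short-lived address...
        mgr.AddAddr(pid, maddr, 50*time.Millisecond)
        fmt.Println(len(mgr.Addrs(pid))) // 1: still valid

        // ...silently disappears once the TTL passes; Addrs also
        // garbage-collects the expired entry on the way out.
        time.Sleep(100 * time.Millisecond)
        fmt.Println(len(mgr.Addrs(pid))) // 0

        // ClearAddrs drops everything for the peer immediately.
        mgr.ClearAddrs(pid)
    }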
p2p/peer/addr_manager_test.go (new file, 180 lines)
@@ -0,0 +1,180 @@
package peer

import (
    "testing"
    "time"

    ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)

func IDS(t *testing.T, ids string) ID {
    id, err := IDB58Decode(ids)
    if err != nil {
        t.Fatal(err)
    }
    return id
}

func MA(t *testing.T, m string) ma.Multiaddr {
    maddr, err := ma.NewMultiaddr(m)
    if err != nil {
        t.Fatal(err)
    }
    return maddr
}

func testHas(t *testing.T, exp, act []ma.Multiaddr) {
    if len(exp) != len(act) {
        t.Fatal("lengths not the same")
    }

    for _, a := range exp {
        found := false

        for _, b := range act {
            if a.Equal(b) {
                found = true
                break
            }
        }

        if !found {
            t.Fatal("expected address %s not found", a)
        }
    }
}

func TestAddresses(t *testing.T) {

    id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
    id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ")
    id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn")
    id4 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Kn")
    id5 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Km")

    ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
    ma21 := MA(t, "/ip4/2.2.3.2/tcp/1111")
    ma22 := MA(t, "/ip4/2.2.3.2/tcp/2222")
    ma31 := MA(t, "/ip4/3.2.3.3/tcp/1111")
    ma32 := MA(t, "/ip4/3.2.3.3/tcp/2222")
    ma33 := MA(t, "/ip4/3.2.3.3/tcp/3333")
    ma41 := MA(t, "/ip4/4.2.3.3/tcp/1111")
    ma42 := MA(t, "/ip4/4.2.3.3/tcp/2222")
    ma43 := MA(t, "/ip4/4.2.3.3/tcp/3333")
    ma44 := MA(t, "/ip4/4.2.3.3/tcp/4444")
    ma51 := MA(t, "/ip4/5.2.3.3/tcp/1111")
    ma52 := MA(t, "/ip4/5.2.3.3/tcp/2222")
    ma53 := MA(t, "/ip4/5.2.3.3/tcp/3333")
    ma54 := MA(t, "/ip4/5.2.3.3/tcp/4444")
    ma55 := MA(t, "/ip4/5.2.3.3/tcp/5555")

    ttl := time.Hour
    m := AddrManager{}
    m.AddAddr(id1, ma11, ttl)

    m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl)
    m.AddAddrs(id2, []ma.Multiaddr{ma21, ma22}, ttl) // idempotency

    m.AddAddr(id3, ma31, ttl)
    m.AddAddr(id3, ma32, ttl)
    m.AddAddr(id3, ma33, ttl)
    m.AddAddr(id3, ma33, ttl) // idempotency
    m.AddAddr(id3, ma33, ttl)

    m.AddAddrs(id4, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // multiple

    m.AddAddrs(id5, []ma.Multiaddr{ma21, ma22}, ttl)             // clearing
    m.AddAddrs(id5, []ma.Multiaddr{ma41, ma42, ma43, ma44}, ttl) // clearing
    m.ClearAddrs(id5)
    m.AddAddrs(id5, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, ttl) // clearing

    // test the Addresses return value
    testHas(t, []ma.Multiaddr{ma11}, m.Addrs(id1))
    testHas(t, []ma.Multiaddr{ma21, ma22}, m.Addrs(id2))
    testHas(t, []ma.Multiaddr{ma31, ma32, ma33}, m.Addrs(id3))
    testHas(t, []ma.Multiaddr{ma41, ma42, ma43, ma44}, m.Addrs(id4))
    testHas(t, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, m.Addrs(id5))

}

func TestAddressesExpire(t *testing.T) {

    id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
    id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
    ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
    ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
    ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
    ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
    ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")

    m := AddrManager{}
    m.AddAddr(id1, ma11, time.Hour)
    m.AddAddr(id1, ma12, time.Hour)
    m.AddAddr(id1, ma13, time.Hour)
    m.AddAddr(id2, ma24, time.Hour)
    m.AddAddr(id2, ma25, time.Hour)

    testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
    testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

    m.SetAddr(id1, ma11, 2*time.Hour)
    m.SetAddr(id1, ma12, 2*time.Hour)
    m.SetAddr(id1, ma13, 2*time.Hour)
    m.SetAddr(id2, ma24, 2*time.Hour)
    m.SetAddr(id2, ma25, 2*time.Hour)

    testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
    testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

    m.SetAddr(id1, ma11, time.Millisecond)
    <-time.After(time.Millisecond)
    testHas(t, []ma.Multiaddr{ma12, ma13}, m.Addrs(id1))
    testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

    m.SetAddr(id1, ma13, time.Millisecond)
    <-time.After(time.Millisecond)
    testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
    testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

    m.SetAddr(id2, ma24, time.Millisecond)
    <-time.After(time.Millisecond)
    testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
    testHas(t, []ma.Multiaddr{ma25}, m.Addrs(id2))

    m.SetAddr(id2, ma25, time.Millisecond)
    <-time.After(time.Millisecond)
    testHas(t, []ma.Multiaddr{ma12}, m.Addrs(id1))
    testHas(t, nil, m.Addrs(id2))

    m.SetAddr(id1, ma12, time.Millisecond)
    <-time.After(time.Millisecond)
    testHas(t, nil, m.Addrs(id1))
    testHas(t, nil, m.Addrs(id2))
}

func TestClearWorks(t *testing.T) {

    id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
    id2 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQM")
    ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
    ma12 := MA(t, "/ip4/2.2.3.2/tcp/2222")
    ma13 := MA(t, "/ip4/3.2.3.3/tcp/3333")
    ma24 := MA(t, "/ip4/4.2.3.3/tcp/4444")
    ma25 := MA(t, "/ip4/5.2.3.3/tcp/5555")

    m := AddrManager{}
    m.AddAddr(id1, ma11, time.Hour)
    m.AddAddr(id1, ma12, time.Hour)
    m.AddAddr(id1, ma13, time.Hour)
    m.AddAddr(id2, ma24, time.Hour)
    m.AddAddr(id2, ma25, time.Hour)

    testHas(t, []ma.Multiaddr{ma11, ma12, ma13}, m.Addrs(id1))
    testHas(t, []ma.Multiaddr{ma24, ma25}, m.Addrs(id2))

    m.ClearAddrs(id1)
    m.ClearAddrs(id2)

    testHas(t, nil, m.Addrs(id1))
    testHas(t, nil, m.Addrs(id2))
}
@@ -20,8 +20,8 @@ const (
 // Peerstore provides a threadsafe store of Peer related
 // information.
 type Peerstore interface {
+   AddrBook
    KeyBook
-   AddressBook
    Metrics

    // Peers returns a list of all peer.IDs in this Peerstore
@@ -32,9 +32,6 @@ type Peerstore interface {
    // that peer, useful to other services.
    PeerInfo(ID) PeerInfo

-   // AddPeerInfo absorbs the information listed in given PeerInfo.
-   AddPeerInfo(PeerInfo)
-
    // Get/Put is a simple registry for other peer-related key/value pairs.
    // if we find something we use often, it should become its own set of
    // methods. this is a last resort.
@@ -42,109 +39,30 @@ type Peerstore interface {
    Put(id ID, key string, val interface{}) error
 }

-// AddressBook tracks the addresses of Peers
-type AddressBook interface {
-   Addresses(ID) []ma.Multiaddr     // returns addresses for ID
-   AddAddress(ID, ma.Multiaddr)     // Adds given addr for ID
-   AddAddresses(ID, []ma.Multiaddr) // Adds given addrs for ID
-   SetAddresses(ID, []ma.Multiaddr) // Sets given addrs for ID (clears previously stored)
-}
-
-type expiringAddr struct {
-   Addr ma.Multiaddr
-   TTL  time.Time
-}
-
-func (e *expiringAddr) Expired() bool {
-   return time.Now().After(e.TTL)
-}
-
-type addressMap map[string]expiringAddr
-
-type addressbook struct {
-   sync.RWMutex // guards all fields
-
-   addrs map[ID]addressMap
-   ttl   time.Duration // initial ttl
-}
-
-func newAddressbook() *addressbook {
-   return &addressbook{
-       addrs: map[ID]addressMap{},
-       ttl:   AddressTTL,
-   }
-}
-
-func (ab *addressbook) Peers() []ID {
-   ab.RLock()
-   ps := make([]ID, 0, len(ab.addrs))
-   for p := range ab.addrs {
-       ps = append(ps, p)
-   }
-   ab.RUnlock()
-   return ps
-}
-
-func (ab *addressbook) Addresses(p ID) []ma.Multiaddr {
-   ab.Lock()
-   defer ab.Unlock()
-
-   maddrs, found := ab.addrs[p]
-   if !found {
-       return nil
-   }
-
-   good := make([]ma.Multiaddr, 0, len(maddrs))
-   var expired []string
-   for s, m := range maddrs {
-       if m.Expired() {
-           expired = append(expired, s)
-       } else {
-           good = append(good, m.Addr)
-       }
-   }
-
-   // clean up the expired ones.
-   for _, s := range expired {
-       delete(ab.addrs[p], s)
-   }
-   return good
-}
-
-func (ab *addressbook) AddAddress(p ID, m ma.Multiaddr) {
-   ab.AddAddresses(p, []ma.Multiaddr{m})
-}
-
-func (ab *addressbook) AddAddresses(p ID, ms []ma.Multiaddr) {
-   ab.Lock()
-   defer ab.Unlock()
-
-   amap, found := ab.addrs[p]
-   if !found {
-       amap = addressMap{}
-       ab.addrs[p] = amap
-   }
-
-   ttl := time.Now().Add(ab.ttl)
-   for _, m := range ms {
-       // re-set all of them for new ttl.
-       amap[m.String()] = expiringAddr{
-           Addr: m,
-           TTL:  ttl,
-       }
-   }
-}
-
-func (ab *addressbook) SetAddresses(p ID, ms []ma.Multiaddr) {
-   ab.Lock()
-   defer ab.Unlock()
-
-   amap := addressMap{}
-   ttl := time.Now().Add(ab.ttl)
-   for _, m := range ms {
-       amap[m.String()] = expiringAddr{Addr: m, TTL: ttl}
-   }
-   ab.addrs[p] = amap // clear what was there before
-}
+// AddrBook is an interface that fits the new AddrManager. I'm patching
+// it up in here to avoid changing a ton of the codebase.
+type AddrBook interface {
+
+   // AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
+   AddAddr(p ID, addr ma.Multiaddr, ttl time.Duration)
+
+   // AddAddrs gives AddrManager addresses to use, with a given ttl
+   // (time-to-live), after which the address is no longer valid.
+   // If the manager has a longer TTL, the operation is a no-op for that address
+   AddAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration)
+
+   // SetAddr calls mgr.SetAddrs(p, addr, ttl)
+   SetAddr(p ID, addr ma.Multiaddr, ttl time.Duration)
+
+   // SetAddrs sets the ttl on addresses. This clears any TTL there previously.
+   // This is used when we receive the best estimate of the validity of an address.
+   SetAddrs(p ID, addrs []ma.Multiaddr, ttl time.Duration)
+
+   // Addresses returns all known (and valid) addresses for a given
+   Addrs(p ID) []ma.Multiaddr
+
+   // ClearAddresses removes all previously stored addresses
+   ClearAddrs(p ID)
+}

 // KeyBook tracks the Public keys of Peers.
@@ -231,8 +149,8 @@ func (kb *keybook) AddPrivKey(p ID, sk ic.PrivKey) error {

 type peerstore struct {
    keybook
-   addressbook
    metrics
+   AddrManager

    // store other data, like versions
    ds ds.ThreadSafeDatastore
@@ -242,8 +160,8 @@ type peerstore struct {
 func NewPeerstore() Peerstore {
    return &peerstore{
        keybook:     *newKeybook(),
-       addressbook: *newAddressbook(),
        metrics:     *(NewMetrics()).(*metrics),
+       AddrManager: AddrManager{},
        ds:          dssync.MutexWrap(ds.NewMapDatastore()),
    }
 }
@@ -263,7 +181,7 @@ func (ps *peerstore) Peers() []ID {
    for _, p := range ps.keybook.Peers() {
        set[p] = struct{}{}
    }
-   for _, p := range ps.addressbook.Peers() {
+   for _, p := range ps.AddrManager.Peers() {
        set[p] = struct{}{}
    }

@@ -277,14 +195,10 @@ func (ps *peerstore) Peers() []ID {
 func (ps *peerstore) PeerInfo(p ID) PeerInfo {
    return PeerInfo{
        ID:    p,
-       Addrs: ps.addressbook.Addresses(p),
+       Addrs: ps.AddrManager.Addrs(p),
    }
 }

-func (ps *peerstore) AddPeerInfo(pi PeerInfo) {
-   ps.AddAddresses(pi.ID, pi.Addrs)
-}
-
 func PeerInfos(ps Peerstore, peers []ID) []PeerInfo {
    pi := make([]PeerInfo, len(peers))
    for i, p := range peers {
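Seen from the call sites changed throughout this diff, the migration from the old AddressBook to the new AddrBook is mechanical: every call gains a TTL argument (the value is chosen per call site — TempAddrTTL, PermanentAddrTTL, or ConnectedAddrTTL), and the removed AddPeerInfo helper is spelled out explicitly. In summary (TTL choices shown are examples drawn from the call sites above):

    // before                          // after
    ps.Addresses(p)                    ps.Addrs(p)
    ps.AddAddress(p, a)                ps.AddAddr(p, a, peer.TempAddrTTL)
    ps.AddAddresses(p, addrs)          ps.AddAddrs(p, addrs, peer.TempAddrTTL)
    ps.SetAddresses(p, addrs)          ps.SetAddrs(p, addrs, peer.ConnectedAddrTTL)
    ps.AddPeerInfo(pi)                 ps.AddAddrs(pi.ID, pi.Addrs, peer.TempAddrTTL)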
(deleted file: the old peerstore address tests, 185 lines removed)
@@ -1,185 +0,0 @@
package peer

import (
    "testing"
    "time"

    ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)

func IDS(t *testing.T, ids string) ID {
    id, err := IDB58Decode(ids)
    if err != nil {
        t.Fatal(err)
    }
    return id
}

func MA(t *testing.T, m string) ma.Multiaddr {
    maddr, err := ma.NewMultiaddr(m)
    if err != nil {
        t.Fatal(err)
    }
    return maddr
}

func TestAddresses(t *testing.T) {

    ps := NewPeerstore()

    id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
    id2 := IDS(t, "QmRmPL3FDZKE3Qiwv1RosLdwdvbvg17b2hB39QPScgWKKZ")
    id3 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ6Kn")
    id4 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Kn")
    id5 := IDS(t, "QmPhi7vBsChP7sjRoZGgg7bcKqF6MmCcQwvRbDte8aJ5Km")

    ma11 := MA(t, "/ip4/1.2.3.1/tcp/1111")
    ma21 := MA(t, "/ip4/2.2.3.2/tcp/1111")
    ma22 := MA(t, "/ip4/2.2.3.2/tcp/2222")
    ma31 := MA(t, "/ip4/3.2.3.3/tcp/1111")
    ma32 := MA(t, "/ip4/3.2.3.3/tcp/2222")
    ma33 := MA(t, "/ip4/3.2.3.3/tcp/3333")
    ma41 := MA(t, "/ip4/4.2.3.3/tcp/1111")
    ma42 := MA(t, "/ip4/4.2.3.3/tcp/2222")
    ma43 := MA(t, "/ip4/4.2.3.3/tcp/3333")
    ma44 := MA(t, "/ip4/4.2.3.3/tcp/4444")
    ma51 := MA(t, "/ip4/5.2.3.3/tcp/1111")
    ma52 := MA(t, "/ip4/5.2.3.3/tcp/2222")
    ma53 := MA(t, "/ip4/5.2.3.3/tcp/3333")
    ma54 := MA(t, "/ip4/5.2.3.3/tcp/4444")
    ma55 := MA(t, "/ip4/5.2.3.3/tcp/5555")

    ps.AddAddress(id1, ma11)
    ps.AddAddresses(id2, []ma.Multiaddr{ma21, ma22})
    ps.AddAddresses(id2, []ma.Multiaddr{ma21, ma22}) // idempotency
    ps.AddAddress(id3, ma31)
    ps.AddAddress(id3, ma32)
    ps.AddAddress(id3, ma33)
    ps.AddAddress(id3, ma33) // idempotency
    ps.AddAddress(id3, ma33)
    ps.AddAddresses(id4, []ma.Multiaddr{ma41, ma42, ma43, ma44}) // multiple
    ps.AddAddresses(id5, []ma.Multiaddr{ma21, ma22})             // clearing
    ps.AddAddresses(id5, []ma.Multiaddr{ma41, ma42, ma43, ma44}) // clearing
    ps.SetAddresses(id5, []ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}) // clearing

    test := func(exp, act []ma.Multiaddr) {
        if len(exp) != len(act) {
            t.Fatal("lengths not the same")
        }

        for _, a := range exp {
            found := false

            for _, b := range act {
                if a.Equal(b) {
                    found = true
                    break
                }
            }

            if !found {
                t.Fatal("expected address %s not found", a)
            }
        }
    }

    // test the Addresses return value
    test([]ma.Multiaddr{ma11}, ps.Addresses(id1))
    test([]ma.Multiaddr{ma21, ma22}, ps.Addresses(id2))
    test([]ma.Multiaddr{ma31, ma32, ma33}, ps.Addresses(id3))
    test([]ma.Multiaddr{ma41, ma42, ma43, ma44}, ps.Addresses(id4))
    test([]ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, ps.Addresses(id5))

    // test also the PeerInfo return
    test([]ma.Multiaddr{ma11}, ps.PeerInfo(id1).Addrs)
    test([]ma.Multiaddr{ma21, ma22}, ps.PeerInfo(id2).Addrs)
    test([]ma.Multiaddr{ma31, ma32, ma33}, ps.PeerInfo(id3).Addrs)
    test([]ma.Multiaddr{ma41, ma42, ma43, ma44}, ps.PeerInfo(id4).Addrs)
    test([]ma.Multiaddr{ma51, ma52, ma53, ma54, ma55}, ps.PeerInfo(id5).Addrs)
}

func TestAddressTTL(t *testing.T) {

    ps := NewPeerstore()
    id1 := IDS(t, "QmcNstKuwBBoVTpSCSDrwzjgrRcaYXK833Psuz2EMHwyQN")
    ma1 := MA(t, "/ip4/1.2.3.1/tcp/1111")
    ma2 := MA(t, "/ip4/2.2.3.2/tcp/2222")
    ma3 := MA(t, "/ip4/3.2.3.3/tcp/3333")
    ma4 := MA(t, "/ip4/4.2.3.3/tcp/4444")
    ma5 := MA(t, "/ip4/5.2.3.3/tcp/5555")

    ps.AddAddress(id1, ma1)
    ps.AddAddress(id1, ma2)
    ps.AddAddress(id1, ma3)
    ps.AddAddress(id1, ma4)
    ps.AddAddress(id1, ma5)

    test := func(exp, act []ma.Multiaddr) {
        if len(exp) != len(act) {
            t.Fatal("lengths not the same")
        }

        for _, a := range exp {
            found := false

            for _, b := range act {
                if a.Equal(b) {
                    found = true
                    break
                }
            }

            if !found {
                t.Fatal("expected address %s not found", a)
            }
        }
    }

    testTTL := func(ttle time.Duration, id ID, addr ma.Multiaddr) {
        ab := ps.(*peerstore).addressbook
        ttlat := ab.addrs[id][addr.String()].TTL
        ttla := ttlat.Sub(time.Now())
        if ttla > ttle {
            t.Error("ttl is greater than expected", ttle, ttla)
        }
        if ttla < (ttle / 2) {
            t.Error("ttl is smaller than expected", ttle/2, ttla)
        }
    }

    // should they are there
    ab := ps.(*peerstore).addressbook
    if len(ab.addrs[id1]) != 5 {
        t.Error("incorrect addr count", len(ab.addrs[id1]), ab.addrs[id1])
    }

    // test the Addresses return value
    test([]ma.Multiaddr{ma1, ma2, ma3, ma4, ma5}, ps.Addresses(id1))
    test([]ma.Multiaddr{ma1, ma2, ma3, ma4, ma5}, ps.PeerInfo(id1).Addrs)

    // check the addr TTL is a bit smaller than the init TTL
    testTTL(AddressTTL, id1, ma1)
    testTTL(AddressTTL, id1, ma2)
    testTTL(AddressTTL, id1, ma3)
    testTTL(AddressTTL, id1, ma4)
    testTTL(AddressTTL, id1, ma5)

    // change the TTL
    setTTL := func(id ID, addr ma.Multiaddr, ttl time.Time) {
        a := ab.addrs[id][addr.String()]
        a.TTL = ttl
        ab.addrs[id][addr.String()] = a
    }
    setTTL(id1, ma1, time.Now().Add(-1*time.Second))
    setTTL(id1, ma2, time.Now().Add(-1*time.Hour))
    setTTL(id1, ma3, time.Now().Add(-1*AddressTTL))

    // should no longer list those
    test([]ma.Multiaddr{ma4, ma5}, ps.Addresses(id1))
    test([]ma.Multiaddr{ma4, ma5}, ps.PeerInfo(id1).Addrs)

    // should no longer be there
    if len(ab.addrs[id1]) != 2 {
        t.Error("incorrect addr count", len(ab.addrs[id1]), ab.addrs[id1])
    }
}
@@ -1,7 +1,7 @@
 package identify

 import (
-   "fmt"
+   "strings"
    "sync"

    context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
@@ -11,10 +11,12 @@ import (

    host "github.com/jbenet/go-ipfs/p2p/host"
    inet "github.com/jbenet/go-ipfs/p2p/net"
+   peer "github.com/jbenet/go-ipfs/p2p/peer"
    protocol "github.com/jbenet/go-ipfs/p2p/protocol"
    pb "github.com/jbenet/go-ipfs/p2p/protocol/identify/pb"
    config "github.com/jbenet/go-ipfs/repo/config"
    eventlog "github.com/jbenet/go-ipfs/thirdparty/eventlog"
+   lgbl "github.com/jbenet/go-ipfs/util/eventlog/loggables"
 )

 var log = eventlog.Logger("net/identify")
@@ -23,16 +25,9 @@ var log = eventlog.Logger("net/identify")
 const ID protocol.ID = "/ipfs/identify"

 // IpfsVersion holds the current protocol version for a client running this code
-var IpfsVersion *semver.Version
-var ClientVersion = "go-ipfs/" + config.CurrentVersionNumber
-
-func init() {
-   var err error
-   IpfsVersion, err = semver.NewVersion("0.0.1")
-   if err != nil {
-       panic(fmt.Errorf("invalid protocol version: %v", err))
-   }
-}
+// TODO(jbenet): fix the versioning mess.
+const IpfsVersion = "ipfs/0.1.0"
+const ClientVersion = "go-ipfs/" + config.CurrentVersionNumber

 // IDService is a structure that implements ProtocolIdentify.
 // It is a trivial service that gives the other peer some
@ -49,6 +44,10 @@ type IDService struct {
|
|||||||
// for wait purposes
|
// for wait purposes
|
||||||
currid map[inet.Conn]chan struct{}
|
currid map[inet.Conn]chan struct{}
|
||||||
currmu sync.RWMutex
|
currmu sync.RWMutex
|
||||||
|
|
||||||
|
// our own observed addresses.
|
||||||
|
// TODO: instead of expiring, remove these when we disconnect
|
||||||
|
addrs peer.AddrManager
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewIDService(h host.Host) *IDService {
|
func NewIDService(h host.Host) *IDService {
|
||||||
@ -60,6 +59,11 @@ func NewIDService(h host.Host) *IDService {
|
|||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// OwnObservedAddrs returns the addresses peers have reported we've dialed from
|
||||||
|
func (ids *IDService) OwnObservedAddrs() []ma.Multiaddr {
|
||||||
|
return ids.addrs.Addrs(ids.Host.ID())
|
||||||
|
}
|
||||||
|
|
||||||
func (ids *IDService) IdentifyConn(c inet.Conn) {
|
func (ids *IDService) IdentifyConn(c inet.Conn) {
|
||||||
ids.currmu.Lock()
|
ids.currmu.Lock()
|
||||||
if wait, found := ids.currid[c]; found {
|
if wait, found := ids.currid[c]; found {
|
||||||
@ -148,9 +152,10 @@ func (ids *IDService) populateMessage(mes *pb.Identify, c inet.Conn) {
|
|||||||
log.Debugf("%s sent listen addrs to %s: %s", c.LocalPeer(), c.RemotePeer(), laddrs)
|
log.Debugf("%s sent listen addrs to %s: %s", c.LocalPeer(), c.RemotePeer(), laddrs)
|
||||||
|
|
||||||
// set protocol versions
|
// set protocol versions
|
||||||
s := IpfsVersion.String()
|
pv := IpfsVersion
|
||||||
mes.ProtocolVersion = &s
|
av := ClientVersion
|
||||||
mes.AgentVersion = &ClientVersion
|
mes.ProtocolVersion = &pv
|
||||||
|
mes.AgentVersion = &av
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ids *IDService) consumeMessage(mes *pb.Identify, c inet.Conn) {
|
func (ids *IDService) consumeMessage(mes *pb.Identify, c inet.Conn) {
|
||||||
@ -176,12 +181,22 @@ func (ids *IDService) consumeMessage(mes *pb.Identify, c inet.Conn) {
|
|||||||
|
|
||||||
// update our peerstore with the addresses. here, we SET the addresses, clearing old ones.
|
// update our peerstore with the addresses. here, we SET the addresses, clearing old ones.
|
||||||
// We are receiving from the peer itself. this is current address ground truth.
|
// We are receiving from the peer itself. this is current address ground truth.
|
||||||
ids.Host.Peerstore().SetAddresses(p, lmaddrs)
|
ids.Host.Peerstore().SetAddrs(p, lmaddrs, peer.ConnectedAddrTTL)
|
||||||
log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)
|
log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)
|
||||||
|
|
||||||
// get protocol versions
|
// get protocol versions
|
||||||
pv := mes.GetProtocolVersion()
|
pv := mes.GetProtocolVersion()
|
||||||
av := mes.GetAgentVersion()
|
av := mes.GetAgentVersion()
|
||||||
|
|
||||||
|
// version check. if we shouldn't talk, bail.
|
||||||
|
// TODO: at this point, we've already exchanged information.
|
||||||
|
// move this into a first handshake before the connection can open streams.
|
||||||
|
if !protocolVersionsAreCompatible(pv, IpfsVersion) {
|
||||||
|
logProtocolMismatchDisconnect(c, pv, av)
|
||||||
|
c.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
|
ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
|
||||||
ids.Host.Peerstore().Put(p, "AgentVersion", av)
|
ids.Host.Peerstore().Put(p, "AgentVersion", av)
|
||||||
}
|
}
|
||||||
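The comment in the hunk above carries the behavioural point of this change: addresses a peer reports about itself through identify are treated as ground truth, so the peerstore entry is replaced (SetAddrs) rather than merged into (AddAddrs), and stamped with the connected-peer TTL. A small, self-contained sketch of that replace-versus-merge distinction; the toy types below are placeholders for the real peer.AddrManager used elsewhere in this diff:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	addr    string
	expires time.Time
}

// toyPeerstore is a stand-in for the peerstore interface seen in this diff.
type toyPeerstore map[string][]entry

// addAddrs merges: addresses already on file are kept alongside the new ones.
func (ps toyPeerstore) addAddrs(p string, addrs []string, ttl time.Duration) {
	for _, a := range addrs {
		ps[p] = append(ps[p], entry{a, time.Now().Add(ttl)})
	}
}

// setAddrs replaces: the previous set is dropped first, mirroring the
// "current address ground truth" comment above.
func (ps toyPeerstore) setAddrs(p string, addrs []string, ttl time.Duration) {
	ps[p] = nil
	ps.addAddrs(p, addrs, ttl)
}

func main() {
	ps := toyPeerstore{}
	ps.addAddrs("QmPeer", []string{"/ip4/10.0.0.1/tcp/4001"}, 10*time.Second) // heard from a third party
	ps.setAddrs("QmPeer", []string{"/ip4/1.2.3.4/tcp/4001"}, time.Hour)       // the peer told us itself
	for _, e := range ps["QmPeer"] {
		fmt.Println(e.addr) // only /ip4/1.2.3.4/tcp/4001 remains
	}
}

The Disconnected notifiee added later in this file then re-stamps the same set with peer.RecentlyConnectedAddrTTL, so addresses learned this way age out once the peer goes away.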
@@ -235,7 +250,7 @@ func (ids *IDService) consumeObservedAddress(observed []byte, c inet.Conn) {
 
 	// ok! we have the observed version of one of our ListenAddresses!
 	log.Debugf("added own observed listen addr: %s --> %s", c.LocalMultiaddr(), maddr)
-	ids.Host.Peerstore().AddAddress(ids.Host.ID(), maddr)
+	ids.addrs.AddAddr(ids.Host.ID(), maddr, peer.OwnObservedAddrTTL)
 }
 
 func addrInAddrs(a ma.Multiaddr, as []ma.Multiaddr) bool {
@@ -246,3 +261,63 @@ func addrInAddrs(a ma.Multiaddr, as []ma.Multiaddr) bool {
 		}
 	}
 	return false
 }
+
+// protocolVersionsAreCompatible checks that the two implementations
+// can talk to each other. It will use semver, but for now while
+// we're in tight development, we will return false for minor version
+// changes too.
+func protocolVersionsAreCompatible(v1, v2 string) bool {
+	if strings.HasPrefix(v1, "ipfs/") {
+		v1 = v1[5:]
+	}
+	if strings.HasPrefix(v2, "ipfs/") {
+		v2 = v2[5:]
+	}
+
+	v1s, err := semver.NewVersion(v1)
+	if err != nil {
+		return false
+	}
+
+	v2s, err := semver.NewVersion(v2)
+	if err != nil {
+		return false
+	}
+
+	return v1s.Major == v2s.Major && v1s.Minor == v2s.Minor
+}
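protocolVersionsAreCompatible strips an optional "ipfs/" prefix, parses both strings with the vendored semver package, and only reports compatibility when the major and minor components match; anything that fails to parse is treated as incompatible. A standalone sketch of the same rule, re-implemented with the strings package so the example runs without the vendored dependency:

package main

import (
	"fmt"
	"strings"
)

// compatible mirrors the rule above: strip an "ipfs/" prefix, then require
// identical major and minor components. The real function uses semver.NewVersion;
// this copy splits the string by hand so the example is self-contained.
func compatible(v1, v2 string) bool {
	v1 = strings.TrimPrefix(v1, "ipfs/")
	v2 = strings.TrimPrefix(v2, "ipfs/")
	p1 := strings.SplitN(v1, ".", 3)
	p2 := strings.SplitN(v2, ".", 3)
	if len(p1) < 2 || len(p2) < 2 {
		return false // unparseable versions are rejected, like the semver error path
	}
	return p1[0] == p2[0] && p1[1] == p2[1]
}

func main() {
	fmt.Println(compatible("ipfs/0.1.0", "ipfs/0.1.5"))     // true: patch-level differences are fine
	fmt.Println(compatible("ipfs/0.1.0", "ipfs/0.2.0"))     // false: a minor bump disconnects
	fmt.Println(compatible("ipfs/0.1.0", "not-a-version"))  // false: unparseable
}

With IpfsVersion now the string constant "ipfs/0.1.0", two nodes only refuse to talk when one of them bumps the minor or major component.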
+
+// netNotifiee defines methods to be used with the IpfsDHT
+type netNotifiee IDService
+
+func (nn *netNotifiee) IDService() *IDService {
+	return (*IDService)(nn)
+}
+
+func (nn *netNotifiee) Connected(n inet.Network, v inet.Conn) {
+	// TODO: deprecate the setConnHandler hook, and kick off
+	// identification here.
+}
+
+func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) {
+	// undo the setting of addresses to peer.ConnectedAddrTTL we did
+	ids := nn.IDService()
+	ps := ids.Host.Peerstore()
+	addrs := ps.Addrs(v.RemotePeer())
+	ps.SetAddrs(v.RemotePeer(), addrs, peer.RecentlyConnectedAddrTTL)
+}
+
+func (nn *netNotifiee) OpenedStream(n inet.Network, v inet.Stream) {}
+func (nn *netNotifiee) ClosedStream(n inet.Network, v inet.Stream) {}
+func (nn *netNotifiee) Listen(n inet.Network, a ma.Multiaddr)      {}
+func (nn *netNotifiee) ListenClose(n inet.Network, a ma.Multiaddr) {}
+
+func logProtocolMismatchDisconnect(c inet.Conn, protocol, agent string) {
+	lm := make(lgbl.DeferredMap)
+	lm["remotePeer"] = func() interface{} { return c.RemotePeer().Pretty() }
+	lm["remoteAddr"] = func() interface{} { return c.RemoteMultiaddr().String() }
+	lm["protocolVersion"] = protocol
+	lm["agentVersion"] = agent
+	log.Event(context.TODO(), "IdentifyProtocolMismatch", lm)
+	log.Debug("IdentifyProtocolMismatch %s %s %s (disconnected)", c.RemotePeer(), protocol, agent)
+}
@@ -38,7 +38,7 @@ func subtestIDService(t *testing.T, postDialWait time.Duration) {
 
 	// the IDService should be opened automatically, by the network.
 	// what we should see now is that both peers know about each others listen addresses.
-	testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addresses(h2p)) // has them
+	testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p)) // has them
 	testHasProtocolVersions(t, h1, h2p)
 
 	// now, this wait we do have to do. it's the wait for the Listening side
@@ -50,12 +50,12 @@ func subtestIDService(t *testing.T, postDialWait time.Duration) {
 	<-h2.IDService().IdentifyWait(c[0])
 
 	// and the protocol versions.
-	testKnowsAddrs(t, h2, h1p, h1.Peerstore().Addresses(h1p)) // has them
+	testKnowsAddrs(t, h2, h1p, h1.Peerstore().Addrs(h1p)) // has them
 	testHasProtocolVersions(t, h2, h1p)
 }
 
 func testKnowsAddrs(t *testing.T, h host.Host, p peer.ID, expected []ma.Multiaddr) {
-	actual := h.Peerstore().Addresses(p)
+	actual := h.Peerstore().Addrs(p)
 
 	if len(actual) != len(expected) {
 		t.Error("dont have the same addresses")
@@ -79,7 +79,7 @@ func testHasProtocolVersions(t *testing.T, h host.Host, p peer.ID) {
 		t.Error("no protocol version")
 		return
 	}
-	if v.(string) != identify.IpfsVersion.String() {
+	if v.(string) != identify.IpfsVersion {
 		t.Error("protocol mismatch", err)
 	}
 	v, err = h.Peerstore().Get(p, "AgentVersion")
@@ -22,14 +22,14 @@ func GenSwarmNetwork(t *testing.T, ctx context.Context) *swarm.Network {
 	if err != nil {
 		t.Fatal(err)
 	}
-	ps.AddAddresses(p.ID, n.ListenAddresses())
+	ps.AddAddrs(p.ID, n.ListenAddresses(), peer.PermanentAddrTTL)
 	return n
 }
 
 func DivulgeAddresses(a, b inet.Network) {
 	id := a.LocalPeer()
-	addrs := a.Peerstore().Addresses(id)
-	b.Peerstore().AddAddresses(id, addrs)
+	addrs := a.Peerstore().Addrs(id)
+	b.Peerstore().AddAddrs(id, addrs, peer.PermanentAddrTTL)
 }
 
 func GenHostSwarm(t *testing.T, ctx context.Context) *bhost.BasicHost {
@@ -142,16 +142,20 @@ func (dht *IpfsDHT) putValueToPeer(ctx context.Context, p peer.ID,
 func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.ID, key string) error {
 
 	// add self as the provider
-	pi := dht.peerstore.PeerInfo(dht.self)
+	pi := peer.PeerInfo{
+		ID:    dht.self,
+		Addrs: dht.host.Addrs(),
+	}
+
 	// // only share WAN-friendly addresses ??
 	// pi.Addrs = addrutil.WANShareableAddrs(pi.Addrs)
 	if len(pi.Addrs) < 1 {
-		log.Infof("%s putProvider: %s for %s error: no wan-friendly addresses", dht.self, p, u.Key(key), pi.Addrs)
+		// log.Infof("%s putProvider: %s for %s error: no wan-friendly addresses", dht.self, p, u.Key(key), pi.Addrs)
 		return fmt.Errorf("no known addresses for self. cannot put provider.")
 	}
 
 	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, string(key), 0)
-	pmes.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), []peer.PeerInfo{pi})
+	pmes.ProviderPeers = pb.RawPeerInfosToPBPeers([]peer.PeerInfo{pi})
 	err := dht.sendMessage(ctx, p, pmes)
 	if err != nil {
 		return err
@@ -55,7 +55,7 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer
 	for i := 0; i < n; i++ {
 		dhts[i] = setupDHT(ctx, t)
 		peers[i] = dhts[i].self
-		addrs[i] = dhts[i].peerstore.Addresses(dhts[i].self)[0]
+		addrs[i] = dhts[i].peerstore.Addrs(dhts[i].self)[0]
 	}
 
 	return addrs, peers, dhts
@@ -64,12 +64,12 @@ func setupDHTS(ctx context.Context, n int, t *testing.T) ([]ma.Multiaddr, []peer
 func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
 
 	idB := b.self
-	addrB := b.peerstore.Addresses(idB)
+	addrB := b.peerstore.Addrs(idB)
 	if len(addrB) == 0 {
 		t.Fatal("peers setup incorrectly: no local address")
 	}
 
-	a.peerstore.AddAddresses(idB, addrB)
+	a.peerstore.AddAddrs(idB, addrB, peer.TempAddrTTL)
 	if err := a.Connect(ctx, idB); err != nil {
 		t.Fatal(err)
 	}
@@ -754,20 +754,20 @@ func TestConnectCollision(t *testing.T) {
 	dhtA := setupDHT(ctx, t)
 	dhtB := setupDHT(ctx, t)
 
-	addrA := dhtA.peerstore.Addresses(dhtA.self)[0]
-	addrB := dhtB.peerstore.Addresses(dhtB.self)[0]
+	addrA := dhtA.peerstore.Addrs(dhtA.self)[0]
+	addrB := dhtB.peerstore.Addrs(dhtB.self)[0]
 
 	peerA := dhtA.self
 	peerB := dhtB.self
 
 	errs := make(chan error)
 	go func() {
-		dhtA.peerstore.AddAddress(peerB, addrB)
+		dhtA.peerstore.AddAddr(peerB, addrB, peer.TempAddrTTL)
 		err := dhtA.Connect(ctx, peerB)
 		errs <- err
 	}()
 	go func() {
-		dhtB.peerstore.AddAddress(peerA, addrA)
+		dhtB.peerstore.AddAddr(peerA, addrA, peer.TempAddrTTL)
 		err := dhtB.Connect(ctx, peerA)
 		errs <- err
 	}()
@@ -238,7 +238,7 @@ func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.M
 	log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
 	if pi.ID != dht.self { // dont add own addrs.
 		// add the received addresses to our peerstore.
-		dht.peerstore.AddPeerInfo(pi)
+		dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
 	}
 	dht.providers.AddProvider(key, p)
 }
@@ -100,7 +100,7 @@ func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key u.Key, p peer.ID)
 	for _, pbp := range pmes.GetCloserPeers() {
 		pid := peer.ID(pbp.GetId())
 		if pid != dht.self { // dont add self
-			dht.peerstore.AddAddresses(pid, pbp.Addresses())
+			dht.peerstore.AddAddrs(pid, pbp.Addresses(), peer.TempAddrTTL)
 			out = append(out, pid)
 		}
 	}
@@ -253,7 +253,7 @@ func (r *dhtQueryRunner) queryPeer(cg ctxgroup.ContextGroup, p peer.ID) {
 	}
 
 	// add their addresses to the dialer's peerstore
-	r.query.dht.peerstore.AddPeerInfo(next)
+	r.query.dht.peerstore.AddAddrs(next.ID, next.Addrs, peer.TempAddrTTL)
 	r.addPeerToQuery(cg.Context(), next.ID)
 	log.Debugf("PEERS CLOSER -- worker for: %v added %v (%v)", p, next.ID, next.Addrs)
 }
@@ -96,7 +96,7 @@ func (s *Server) handleMessage(
 		}
 		for _, maddr := range provider.Addresses() {
 			// FIXME do we actually want to store to peerstore
-			s.peerstore.AddAddress(p, maddr)
+			s.peerstore.AddAddr(p, maddr, peer.TempAddrTTL)
 		}
 	}
 	var providers []dhtpb.Message_Peer
@@ -64,11 +64,14 @@ test_cmp_repeat_10_sec() {
 	test_cmp "$1" "$2"
 }
 
-test_run_repeat_10_sec() {
-	for i in 1 2 3 4 5 6 7 8 9 10
+test_run_repeat_60_sec() {
+	for i in 1 2 3 4 5 6
 	do
-		(test_eval_ "$1") && return
-		sleep 1
+		for i in 1 2 3 4 5 6 7 8 9 10
+		do
+			(test_eval_ "$1") && return
+			sleep 1
+		done
 	done
 	return 1 # failed
 }
@@ -177,13 +180,13 @@ test_launch_ipfs_daemon() {
 	test_expect_success "'ipfs daemon' is ready" '
 		IPFS_PID=$! &&
 		test_wait_output_n_lines_60_sec actual_daemon 2 &&
-		test_run_repeat_10_sec "grep \"API server listening on $ADDR_API\" actual_daemon" ||
+		test_run_repeat_60_sec "grep \"API server listening on $ADDR_API\" actual_daemon" ||
		fsh cat actual_daemon || fsh cat daemon_err
 	'
 
 	if test "$ADDR_GWAY" != ""; then
 		test_expect_success "'ipfs daemon' output includes Gateway address" '
-			test_run_repeat_10_sec "grep \"Gateway server listening on $ADDR_GWAY\" actual_daemon" ||
+			test_run_repeat_60_sec "grep \"Gateway server listening on $ADDR_GWAY\" actual_daemon" ||
 			fsh cat daemon_err
 		'
 	fi