mirror of https://github.com/ipfs/kubo.git, synced 2025-06-25 23:21:54 +08:00

commit: more work implementing coral type lookups
@@ -2,7 +2,6 @@ package dht
 
 import (
     "bytes"
-    "encoding/json"
     "errors"
     "sync"
     "time"
@@ -28,7 +27,7 @@ type IpfsDHT struct {
     // NOTE: (currently, only a single table is used)
     routes []*kb.RoutingTable
 
-    network *swarm.Swarm
+    network swarm.Network
 
     // Local peer (yourself)
     self *peer.Peer
@@ -95,7 +94,7 @@ func (dht *IpfsDHT) Start() {
     go dht.handleMessages()
 }
 
-// Connect to a new peer at the given address
+// Connect to a new peer at the given address, ping and add to the routing table
 func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
     maddrstr, _ := addr.String()
     u.DOut("Connect to new peer: %s", maddrstr)
@@ -104,8 +103,6 @@ func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
         return nil, err
     }
 
-    dht.Update(npeer)
-
     // Ping new peer to register in their routing table
     // NOTE: this should be done better...
     err = dht.Ping(npeer, time.Second*2)
@@ -113,6 +110,8 @@ func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
         return nil, errors.New("failed to ping newly connected peer")
     }
 
+    dht.Update(npeer)
+
     return npeer, nil
 }
 
@@ -122,9 +121,10 @@ func (dht *IpfsDHT) handleMessages() {
     u.DOut("Begin message handling routine")
 
     checkTimeouts := time.NewTicker(time.Minute * 5)
+    ch := dht.network.GetChan()
     for {
         select {
-        case mes, ok := <-dht.network.Chan.Incoming:
+        case mes, ok := <-ch.Incoming:
             if !ok {
                 u.DOut("handleMessages closing, bad recv on incoming")
                 return
@@ -184,8 +184,8 @@ func (dht *IpfsDHT) handleMessages() {
                 dht.handleDiagnostic(mes.Peer, pmes)
             }
 
-        case err := <-dht.network.Chan.Errors:
-            u.DErr("dht err: %s", err)
+        case err := <-ch.Errors:
+            u.PErr("dht err: %s", err)
         case <-dht.shutdown:
             checkTimeouts.Stop()
             return
@@ -235,7 +235,7 @@ func (dht *IpfsDHT) putValueToNetwork(p *peer.Peer, key string, value []byte) er
     }
 
     mes := swarm.NewMessage(p, pmes.ToProtobuf())
-    dht.network.Chan.Outgoing <- mes
+    dht.network.Send(mes)
     return nil
 }
 
@@ -260,17 +260,26 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) {
             resp.Success = true
         } else {
             // No providers?
-            // Find closest peer(s) to desired key and reply with that info
-            closer := dht.routes[0].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
-            resp.Peers = []*peer.Peer{closer}
+            // Find closest peer on given cluster to desired key and reply with that info
+            level := pmes.GetValue()[0] // Using value field to specify cluster level
+
+            closer := dht.routes[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
+
+            // If this peer is closer than the one from the table, return nil
+            if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
+                resp.Peers = nil
+            } else {
+                resp.Peers = []*peer.Peer{closer}
+            }
         }
     } else {
-        //temp: what other errors can a datastore throw?
+        //temp: what other errors can a datastore return?
        panic(err)
     }
 
     mes := swarm.NewMessage(p, resp.ToProtobuf())
-    dht.network.Chan.Outgoing <- mes
+    dht.network.Send(mes)
 }
 
 // Store a value in this peer local storage
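The hunk above is where the Coral-style "cluster" levels enter the wire protocol: a GET_VALUE request carries the cluster level as a single byte in the message's Value field, and the handler consults the routing table for that level, replying with no peers when it is itself the closest node it knows. A minimal sketch of the requester-side convention (DHTMessage, PBDHTMessage_GET_VALUE and GenerateMessageID are the names this commit already uses; the helper itself is hypothetical):

    // Sketch: encode the cluster level on a GET_VALUE request, mirroring
    // pmes.GetValue()[0] on the handler side. newGetValueRequest is a
    // hypothetical helper, not part of the commit.
    func newGetValueRequest(key string, level int) DHTMessage {
        return DHTMessage{
            Type:  PBDHTMessage_GET_VALUE,
            Key:   key,
            Value: []byte{byte(level)}, // one byte: which routing table to consult
            Id:    GenerateMessageID(),
        }
    }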
@@ -290,84 +299,66 @@ func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *PBDHTMessage) {
         Id: pmes.GetId(),
     }
 
-    dht.network.Chan.Outgoing <- swarm.NewMessage(p, resp.ToProtobuf())
+    dht.network.Send(swarm.NewMessage(p, resp.ToProtobuf()))
 }
 
 func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
-    success := true
-    u.POut("handleFindPeer: searching for '%s'", peer.ID(pmes.GetKey()).Pretty())
-    closest := dht.routes[0].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
+    resp := DHTMessage{
+        Type:     pmes.GetType(),
+        Id:       pmes.GetId(),
+        Response: true,
+    }
+    defer func() {
+        mes := swarm.NewMessage(p, resp.ToProtobuf())
+        dht.network.Send(mes)
+    }()
+    level := pmes.GetValue()[0]
+    u.DOut("handleFindPeer: searching for '%s'", peer.ID(pmes.GetKey()).Pretty())
+    closest := dht.routes[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
     if closest == nil {
         u.PErr("handleFindPeer: could not find anything.")
-        success = false
+        return
     }
 
     if len(closest.Addresses) == 0 {
         u.PErr("handleFindPeer: no addresses for connected peer...")
-        success = false
+        return
     }
 
-    u.POut("handleFindPeer: sending back '%s'", closest.ID.Pretty())
-    addr, err := closest.Addresses[0].String()
-    if err != nil {
-        u.PErr(err.Error())
-        success = false
+    // If the found peer further away than this peer...
+    if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
+        return
     }
 
-    resp := DHTMessage{
-        Type:     pmes.GetType(),
-        Response: true,
-        Id:       pmes.GetId(),
-        Value:    []byte(addr),
-        Success:  success,
-    }
-
-    mes := swarm.NewMessage(p, resp.ToProtobuf())
-    dht.network.Chan.Outgoing <- mes
+    u.DOut("handleFindPeer: sending back '%s'", closest.ID.Pretty())
+    resp.Peers = []*peer.Peer{closest}
+    resp.Success = true
 }
 
 func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *PBDHTMessage) {
+    resp := DHTMessage{
+        Type:     PBDHTMessage_GET_PROVIDERS,
+        Key:      pmes.GetKey(),
+        Id:       pmes.GetId(),
+        Response: true,
+    }
+
     dht.providerLock.RLock()
     providers := dht.providers[u.Key(pmes.GetKey())]
     dht.providerLock.RUnlock()
     if providers == nil || len(providers) == 0 {
-        // ?????
-        u.DOut("No known providers for requested key.")
-    }
-
-    // This is just a quick hack, formalize method of sending addrs later
-    addrs := make(map[u.Key]string)
-    for _, prov := range providers {
-        ma := prov.Value.NetAddress("tcp")
-        str, err := ma.String()
-        if err != nil {
-            u.PErr("Error: %s", err)
-            continue
+        // TODO: work on tiering this
+        closer := dht.routes[0].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
+        resp.Peers = []*peer.Peer{closer}
+    } else {
+        for _, prov := range providers {
+            resp.Peers = append(resp.Peers, prov.Value)
         }
+        resp.Success = true
-
-        addrs[prov.Value.Key()] = str
-    }
-
-    success := true
-    data, err := json.Marshal(addrs)
-    if err != nil {
-        u.POut("handleGetProviders: error marshalling struct to JSON: %s", err)
-        data = nil
-        success = false
-    }
-
-    resp := DHTMessage{
-        Type:     PBDHTMessage_GET_PROVIDERS,
-        Key:      pmes.GetKey(),
-        Value:    data,
-        Id:       pmes.GetId(),
-        Response: true,
-        Success:  success,
     }
 
     mes := swarm.NewMessage(p, resp.ToProtobuf())
-    dht.network.Chan.Outgoing <- mes
+    dht.network.Send(mes)
 }
 
 type providerInfo struct {
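Two things in the hunk above are worth calling out. handleFindPeer now builds its response up front and sends it from a deferred closure, so every early return still answers the caller, with Success left false to signal a dead end. A condensed sketch of that idiom (findClosest stands in for the routing-table lookup and is hypothetical):

    // Sketch: the reply-via-defer idiom adopted by handleFindPeer above.
    // The response always goes out; early returns leave Success unset.
    func (dht *IpfsDHT) replyFindPeer(p *peer.Peer, pmes *PBDHTMessage,
        findClosest func() *peer.Peer) {
        resp := DHTMessage{
            Type:     pmes.GetType(),
            Id:       pmes.GetId(),
            Response: true,
        }
        defer func() {
            dht.network.Send(swarm.NewMessage(p, resp.ToProtobuf()))
        }()

        closest := findClosest()
        if closest == nil {
            return // reply still goes out, with Success == false
        }
        resp.Peers = []*peer.Peer{closest}
        resp.Success = true
    }

Second, handleGetProviders gains the same fallback as the GET_VALUE path: when no providers are known it returns the nearest peer from table 0, so the querier can keep walking instead of receiving an empty answer.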
@@ -445,7 +436,7 @@ func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *PBDHTMessage) {
 
     for _, ps := range seq {
         mes := swarm.NewMessage(ps, pmes)
-        dht.network.Chan.Outgoing <- mes
+        dht.network.Send(mes)
     }
 
     buf := new(bytes.Buffer)
@@ -481,19 +472,21 @@ out:
     }
 
     mes := swarm.NewMessage(p, resp.ToProtobuf())
-    dht.network.Chan.Outgoing <- mes
+    dht.network.Send(mes)
 }
 
-func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration) ([]byte, error) {
+// getValueSingle simply performs the get value RPC with the given parameters
+func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration, level int) (*PBDHTMessage, error) {
     pmes := DHTMessage{
         Type:  PBDHTMessage_GET_VALUE,
         Key:   string(key),
-        Id:    GenerateMessageID(),
+        Value: []byte{byte(level)},
+        Id:    GenerateMessageID(),
     }
     response_chan := dht.ListenFor(pmes.Id, 1, time.Minute)
 
     mes := swarm.NewMessage(p, pmes.ToProtobuf())
-    dht.network.Chan.Outgoing <- mes
+    dht.network.Send(mes)
 
     // Wait for either the response or a timeout
     timeup := time.After(timeout)
@@ -511,46 +504,41 @@ func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duratio
         if err != nil {
             return nil, err
         }
-        // TODO: debate moving this logic out of this function to be handled by the caller
-        if pmes_out.GetSuccess() {
-            if pmes_out.Value == nil {
-                // We were given provider[s]
-                return dht.getFromProviderList(key, timeout, pmes_out.GetPeers())
-            }
-            // We were given the value
-            return pmes_out.GetValue(), nil
-        } else {
-            return pmes_out.GetValue(), u.ErrSearchIncomplete
-        }
+        return pmes_out, nil
     }
 }
 
-// TODO: Im not certain on this implementation, we get a list of providers from someone
-// what do we do with it? Connect to each of them? randomly pick one to get the value from?
-// Or just connect to one at a time until we get a successful connection and request the
-// value from it?
-func (dht *IpfsDHT) getFromProviderList(key u.Key, timeout time.Duration, provlist []*PBDHTMessage_PBPeer) ([]byte, error) {
-    for _, prov := range provlist {
-        prov_p, _ := dht.Find(peer.ID(prov.GetId()))
-        if prov_p == nil {
-            maddr, err := ma.NewMultiaddr(prov.GetAddr())
+// TODO: Im not certain on this implementation, we get a list of peers/providers
+// from someone what do we do with it? Connect to each of them? randomly pick
+// one to get the value from? Or just connect to one at a time until we get a
+// successful connection and request the value from it?
+func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration,
+    peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) {
+    for _, pinfo := range peerlist {
+        p, _ := dht.Find(peer.ID(pinfo.GetId()))
+        if p == nil {
+            maddr, err := ma.NewMultiaddr(pinfo.GetAddr())
             if err != nil {
                 u.PErr("getValue error: %s", err)
                 continue
             }
-            prov_p, err = dht.Connect(maddr)
+            p, err = dht.Connect(maddr)
             if err != nil {
                 u.PErr("getValue error: %s", err)
                 continue
             }
         }
-        data, err := dht.getValueSingle(prov_p, key, timeout)
+        pmes, err := dht.getValueSingle(p, key, timeout, level)
         if err != nil {
-            u.DErr("getFromProvs error: %s", err)
+            u.DErr("getFromPeers error: %s", err)
             continue
         }
-
-        return data, nil
+        dht.addProviderEntry(key, p)
+
+        // Make sure it was a successful get
+        if pmes.GetSuccess() && pmes.Value != nil {
+            return pmes.GetValue(), nil
+        }
     }
     return nil, u.ErrNotFound
 }
@@ -584,3 +572,30 @@ func (dht *IpfsDHT) Find(id peer.ID) (*peer.Peer, *kb.RoutingTable) {
         }
     }
     return nil, nil
 }
+
+func (dht *IpfsDHT) findPeerSingle(p *peer.Peer, id peer.ID, timeout time.Duration, level int) (*PBDHTMessage, error) {
+    pmes := DHTMessage{
+        Type:  PBDHTMessage_FIND_NODE,
+        Key:   string(id),
+        Id:    GenerateMessageID(),
+        Value: []byte{byte(level)},
+    }
+
+    mes := swarm.NewMessage(p, pmes.ToProtobuf())
+    listenChan := dht.ListenFor(pmes.Id, 1, time.Minute)
+    dht.network.Send(mes)
+    after := time.After(timeout)
+    select {
+    case <-after:
+        dht.Unlisten(pmes.Id)
+        return nil, u.ErrTimeout
+    case resp := <-listenChan:
+        pmes_out := new(PBDHTMessage)
+        err := proto.Unmarshal(resp.Data, pmes_out)
+        if err != nil {
+            return nil, err
+        }
+
+        return pmes_out, nil
+    }
+}
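findPeerSingle repeats the blocking-RPC recipe already used by getValueSingle and Ping: register a listener keyed by the message id, send, then race the reply against a timeout. A generic sketch of that recipe (sendAndWait is a hypothetical generalization; every call it makes appears elsewhere in this commit):

    // Sketch: the listen/send/select recipe shared by getValueSingle and
    // findPeerSingle. sendAndWait is a hypothetical helper, not in the commit.
    func (dht *IpfsDHT) sendAndWait(p *peer.Peer, pmes DHTMessage,
        timeout time.Duration) (*PBDHTMessage, error) {
        listenChan := dht.ListenFor(pmes.Id, 1, time.Minute)
        dht.network.Send(swarm.NewMessage(p, pmes.ToProtobuf()))

        select {
        case <-time.After(timeout):
            dht.Unlisten(pmes.Id) // stop tracking the abandoned request
            return nil, u.ErrTimeout
        case resp := <-listenChan:
            pmesOut := new(PBDHTMessage)
            if err := proto.Unmarshal(resp.Data, pmesOut); err != nil {
                return nil, err
            }
            return pmesOut, nil
        }
    }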
@@ -11,6 +11,37 @@ import (
     "time"
 )
 
+func setupDHTS(n int, t *testing.T) ([]*ma.Multiaddr, []*peer.Peer, []*IpfsDHT) {
+    var addrs []*ma.Multiaddr
+    for i := 0; i < 4; i++ {
+        a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
+        if err != nil {
+            t.Fatal(err)
+        }
+        addrs = append(addrs, a)
+    }
+
+    var peers []*peer.Peer
+    for i := 0; i < 4; i++ {
+        p := new(peer.Peer)
+        p.AddAddress(addrs[i])
+        p.ID = peer.ID([]byte(fmt.Sprintf("peer_%d", i)))
+        peers = append(peers, p)
+    }
+
+    var dhts []*IpfsDHT
+    for i := 0; i < 4; i++ {
+        d, err := NewDHT(peers[i])
+        if err != nil {
+            t.Fatal(err)
+        }
+        dhts = append(dhts, d)
+        d.Start()
+    }
+
+    return addrs, peers, dhts
+}
+
 func TestPing(t *testing.T) {
     u.Debug = false
     addr_a, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2222")
@@ -90,11 +121,13 @@ func TestValueGetSet(t *testing.T) {
     dht_a.Start()
     dht_b.Start()
 
+    errsa := dht_a.network.GetChan().Errors
+    errsb := dht_b.network.GetChan().Errors
     go func() {
         select {
-        case err := <-dht_a.network.Chan.Errors:
+        case err := <-errsa:
             t.Fatal(err)
-        case err := <-dht_b.network.Chan.Errors:
+        case err := <-errsb:
             t.Fatal(err)
         }
     }()
@@ -118,32 +151,8 @@ func TestValueGetSet(t *testing.T) {
 
 func TestProvides(t *testing.T) {
     u.Debug = false
-    var addrs []*ma.Multiaddr
-    for i := 0; i < 4; i++ {
-        a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
-        if err != nil {
-            t.Fatal(err)
-        }
-        addrs = append(addrs, a)
-    }
-
-    var peers []*peer.Peer
-    for i := 0; i < 4; i++ {
-        p := new(peer.Peer)
-        p.AddAddress(addrs[i])
-        p.ID = peer.ID([]byte(fmt.Sprintf("peer_%d", i)))
-        peers = append(peers, p)
-    }
-
-    var dhts []*IpfsDHT
-    for i := 0; i < 4; i++ {
-        d, err := NewDHT(peers[i])
-        if err != nil {
-            t.Fatal(err)
-        }
-        dhts = append(dhts, d)
-        d.Start()
-    }
+    addrs, _, dhts := setupDHTS(4, t)
 
     _, err := dhts[0].Connect(addrs[1])
     if err != nil {
@@ -217,7 +226,7 @@ func TestLayeredGet(t *testing.T) {
 
     _, err := dhts[0].Connect(addrs[1])
     if err != nil {
-        t.Fatal(err)
+        t.Fatalf("Failed to connect: %s", err)
     }
 
     _, err = dhts[1].Connect(addrs[2])
@@ -255,3 +264,65 @@ func TestLayeredGet(t *testing.T) {
         dhts[i].Halt()
     }
 }
+
+func TestFindPeer(t *testing.T) {
+    u.Debug = false
+    var addrs []*ma.Multiaddr
+    for i := 0; i < 4; i++ {
+        a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
+        if err != nil {
+            t.Fatal(err)
+        }
+        addrs = append(addrs, a)
+    }
+
+    var peers []*peer.Peer
+    for i := 0; i < 4; i++ {
+        p := new(peer.Peer)
+        p.AddAddress(addrs[i])
+        p.ID = peer.ID([]byte(fmt.Sprintf("peer_%d", i)))
+        peers = append(peers, p)
+    }
+
+    var dhts []*IpfsDHT
+    for i := 0; i < 4; i++ {
+        d, err := NewDHT(peers[i])
+        if err != nil {
+            t.Fatal(err)
+        }
+        dhts = append(dhts, d)
+        d.Start()
+    }
+
+    _, err := dhts[0].Connect(addrs[1])
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    _, err = dhts[1].Connect(addrs[2])
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    _, err = dhts[1].Connect(addrs[3])
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    p, err := dhts[0].FindPeer(peers[2].ID, time.Second)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if p == nil {
+        t.Fatal("Failed to find peer.")
+    }
+
+    if !p.ID.Equal(peers[2].ID) {
+        t.Fatal("Didnt find expected peer.")
+    }
+
+    for i := 0; i < 4; i++ {
+        dhts[i].Halt()
+    }
+}
@@ -3,8 +3,6 @@ package dht
 import (
     "bytes"
     "encoding/json"
-    "errors"
-    "fmt"
     "math/rand"
     "time"
 
@@ -35,19 +33,19 @@ func GenerateMessageID() uint64 {
 // This is the top level "Store" operation of the DHT
 func (s *IpfsDHT) PutValue(key u.Key, value []byte) {
     complete := make(chan struct{})
-    for i, route := range s.routes {
+    for _, route := range s.routes {
         p := route.NearestPeer(kb.ConvertKey(key))
         if p == nil {
-            s.network.Chan.Errors <- fmt.Errorf("No peer found on level %d", i)
-            continue
+            s.network.Error(kb.ErrLookupFailure)
             go func() {
                 complete <- struct{}{}
             }()
+            continue
         }
         go func() {
             err := s.putValueToNetwork(p, string(key), value)
             if err != nil {
-                s.network.Chan.Errors <- err
+                s.network.Error(err)
             }
             complete <- struct{}{}
         }()
@@ -61,19 +59,46 @@ func (s *IpfsDHT) PutValue(key u.Key, value []byte) {
 // If the search does not succeed, a multiaddr string of a closer peer is
 // returned along with util.ErrSearchIncomplete
 func (s *IpfsDHT) GetValue(key u.Key, timeout time.Duration) ([]byte, error) {
-    for _, route := range s.routes {
-        var p *peer.Peer
-        p = route.NearestPeer(kb.ConvertKey(key))
-        if p == nil {
-            return nil, errors.New("Table returned nil peer!")
+    route_level := 0
+    p := s.routes[route_level].NearestPeer(kb.ConvertKey(key))
+    if p == nil {
+        return nil, kb.ErrLookupFailure
+    }
+
+    for route_level < len(s.routes) && p != nil {
+        pmes, err := s.getValueSingle(p, key, timeout, route_level)
+        if err != nil {
+            return nil, u.WrapError(err, "getValue Error")
         }
 
-        b, err := s.getValueSingle(p, key, timeout)
-        if err == nil {
-            return b, nil
-        }
-        if err != u.ErrSearchIncomplete {
-            return nil, err
+        if pmes.GetSuccess() {
+            if pmes.Value == nil { // We were given provider[s]
+                return s.getFromPeerList(key, timeout, pmes.GetPeers(), route_level)
+            }
+
+            // Success! We were given the value
+            return pmes.GetValue(), nil
+        } else {
+            // We were given a closer node
+            closers := pmes.GetPeers()
+            if len(closers) > 0 {
+                maddr, err := ma.NewMultiaddr(closers[0].GetAddr())
+                if err != nil {
+                    // ??? Move up route level???
+                    panic("not yet implemented")
+                }
+
+                // TODO: dht.Connect has overhead due to an internal
+                // ping to the target. Use something else
+                p, err = s.Connect(maddr)
+                if err != nil {
+                    // Move up route level
+                    panic("not yet implemented.")
+                }
+            } else {
+                route_level++
+            }
         }
     }
     return nil, u.ErrNotFound
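The rewritten GetValue above is the core of the Coral-type lookup: query the nearest known peer at the current route level, follow any closer peer it returns within that level, and advance to the next routing table when the level is exhausted. Stripped of wire details, the control flow reduces to roughly this sketch (queryLevel is a hypothetical stand-in for getValueSingle plus the response decoding):

    // Sketch of GetValue's descent: follow closer peers within a level;
    // when a level returns nothing closer, move to the next one.
    func lookup(levels int, start *peer.Peer,
        queryLevel func(p *peer.Peer, level int) (value []byte, closer *peer.Peer)) []byte {
        level := 0
        p := start
        for level < levels && p != nil {
            value, closer := queryLevel(p, level)
            if value != nil {
                return value // success: this peer held the value
            }
            if closer != nil {
                p = closer // keep walking within this cluster level
            } else {
                level++ // level exhausted; try the next routing table
            }
        }
        return nil
    }

The two panic("not yet implemented") branches mark the error paths that should eventually also advance a level rather than crash.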
@@ -86,7 +111,7 @@ func (s *IpfsDHT) GetValue(key u.Key, timeout time.Duration) ([]byte, error) {
 func (s *IpfsDHT) Provide(key u.Key) error {
     peers := s.routes[0].NearestPeers(kb.ConvertKey(key), PoolSize)
     if len(peers) == 0 {
-        //return an error
+        return kb.ErrLookupFailure
     }
 
     pmes := DHTMessage{
@@ -97,7 +122,7 @@ func (s *IpfsDHT) Provide(key u.Key) error {
 
     for _, p := range peers {
         mes := swarm.NewMessage(p, pbmes)
-        s.network.Chan.Outgoing <- mes
+        s.network.Send(mes)
     }
     return nil
 }
@@ -105,6 +130,9 @@ func (s *IpfsDHT) Provide(key u.Key) error {
 // FindProviders searches for peers who can provide the value for given key.
 func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer, error) {
     p := s.routes[0].NearestPeer(kb.ConvertKey(key))
+    if p == nil {
+        return nil, kb.ErrLookupFailure
+    }
 
     pmes := DHTMessage{
         Type: PBDHTMessage_GET_PROVIDERS,
@@ -116,7 +144,7 @@ func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer,
 
     listenChan := s.ListenFor(pmes.Id, 1, time.Minute)
     u.DOut("Find providers for: '%s'", key)
-    s.network.Chan.Outgoing <- mes
+    s.network.Send(mes)
     after := time.After(timeout)
     select {
     case <-after:
@@ -129,17 +157,12 @@ func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer,
         if err != nil {
             return nil, err
         }
-        var addrs map[u.Key]string
-        err = json.Unmarshal(pmes_out.GetValue(), &addrs)
-        if err != nil {
-            return nil, err
-        }
 
         var prov_arr []*peer.Peer
-        for pid, addr := range addrs {
-            p := s.network.Find(pid)
+        for _, prov := range pmes_out.GetPeers() {
+            p := s.network.Find(u.Key(prov.GetId()))
             if p == nil {
-                maddr, err := ma.NewMultiaddr(addr)
+                maddr, err := ma.NewMultiaddr(prov.GetAddr())
                 if err != nil {
                     u.PErr("error connecting to new peer: %s", err)
                     continue
@@ -162,48 +185,36 @@ func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer,
 
 // FindPeer searches for a peer with given ID.
 func (s *IpfsDHT) FindPeer(id peer.ID, timeout time.Duration) (*peer.Peer, error) {
-    p := s.routes[0].NearestPeer(kb.ConvertPeerID(id))
-    pmes := DHTMessage{
-        Type: PBDHTMessage_FIND_NODE,
-        Key:  string(id),
-        Id:   GenerateMessageID(),
+    route_level := 0
+    p := s.routes[route_level].NearestPeer(kb.ConvertPeerID(id))
+    if p == nil {
+        return nil, kb.ErrLookupFailure
     }
 
-    mes := swarm.NewMessage(p, pmes.ToProtobuf())
-    listenChan := s.ListenFor(pmes.Id, 1, time.Minute)
-    s.network.Chan.Outgoing <- mes
-    after := time.After(timeout)
-    select {
-    case <-after:
-        s.Unlisten(pmes.Id)
-        return nil, u.ErrTimeout
-    case resp := <-listenChan:
-        pmes_out := new(PBDHTMessage)
-        err := proto.Unmarshal(resp.Data, pmes_out)
+    for route_level < len(s.routes) {
+        pmes, err := s.findPeerSingle(p, id, timeout, route_level)
+        plist := pmes.GetPeers()
+        if len(plist) == 0 {
+            route_level++
+        }
+        found := plist[0]
+
+        addr, err := ma.NewMultiaddr(found.GetAddr())
         if err != nil {
-            return nil, err
+            return nil, u.WrapError(err, "FindPeer received bad info")
         }
-        addr := string(pmes_out.GetValue())
-        maddr, err := ma.NewMultiaddr(addr)
+
+        nxtPeer, err := s.Connect(addr)
         if err != nil {
-            return nil, err
+            return nil, u.WrapError(err, "FindPeer failed to connect to new peer.")
         }
-
-        found_peer, err := s.Connect(maddr)
-        if err != nil {
-            u.POut("Found peer but couldnt connect.")
-            return nil, err
-        }
-
-        if !found_peer.ID.Equal(id) {
-            u.POut("FindPeer: searching for '%s' but found '%s'", id.Pretty(), found_peer.ID.Pretty())
-            return found_peer, u.ErrSearchIncomplete
-        }
-
-        return found_peer, nil
+        if pmes.GetSuccess() {
+            return nxtPeer, nil
+        } else {
+            p = nxtPeer
+        }
     }
+    return nil, u.ErrNotFound
 }
 
 // Ping a peer, log the time it took
@@ -216,14 +227,14 @@ func (dht *IpfsDHT) Ping(p *peer.Peer, timeout time.Duration) error {
 
     before := time.Now()
     response_chan := dht.ListenFor(pmes.Id, 1, time.Minute)
-    dht.network.Chan.Outgoing <- mes
+    dht.network.Send(mes)
 
     tout := time.After(timeout)
     select {
     case <-response_chan:
         roundtrip := time.Since(before)
         p.SetLatency(roundtrip)
-        u.POut("Ping took %s.", roundtrip.String())
+        u.DOut("Ping took %s.", roundtrip.String())
         return nil
     case <-tout:
         // Timed out, think about removing peer from network
@@ -249,7 +260,7 @@ func (dht *IpfsDHT) GetDiagnostic(timeout time.Duration) ([]*diagInfo, error) {
     pbmes := pmes.ToProtobuf()
     for _, p := range targets {
         mes := swarm.NewMessage(p, pbmes)
-        dht.network.Chan.Outgoing <- mes
+        dht.network.Send(mes)
     }
 
     var out []*diagInfo
@@ -3,11 +3,16 @@ package dht
 import (
     "bytes"
     "crypto/sha256"
+    "errors"
 
     peer "github.com/jbenet/go-ipfs/peer"
     u "github.com/jbenet/go-ipfs/util"
 )
 
+// Returned if a routing table query returns no results. This is NOT expected
+// behaviour
+var ErrLookupFailure = errors.New("failed to find any peer in table")
+
 // ID for IpfsDHT should be a byte slice, to allow for simpler operations
 // (xor). DHT ids are based on the peer.IDs.
 //
@@ -19,8 +24,8 @@ func (id ID) Equal(other ID) bool {
     return bytes.Equal(id, other)
 }
 
-func (id ID) Less(other interface{}) bool {
-    a, b := equalizeSizes(id, other.(ID))
+func (id ID) Less(other ID) bool {
+    a, b := equalizeSizes(id, other)
     for i := 0; i < len(a); i++ {
         if a[i] != b[i] {
             return a[i] < b[i]
@@ -80,3 +85,14 @@ func ConvertKey(id u.Key) ID {
     hash := sha256.Sum256([]byte(id))
     return hash[:]
 }
+
+// Returns true if a is closer to key than b is
+func Closer(a, b peer.ID, key u.Key) bool {
+    aid := ConvertPeerID(a)
+    bid := ConvertPeerID(b)
+    tgt := ConvertKey(key)
+    adist := xor(aid, tgt)
+    bdist := xor(bid, tgt)
+
+    return adist.Less(bdist)
+}
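Closer is a straight application of the Kademlia XOR metric: hash both peer IDs and the key into the ID space, XOR each against the target, and compare the distances byte-wise from the most significant end. A self-contained rendition over raw byte slices, independent of the package's ID and xor helpers:

    // Sketch: the XOR-distance comparison behind kb.Closer, written out
    // over raw byte slices. Assumes equal-length SHA-256 digests, as
    // produced by ConvertPeerID/ConvertKey above.
    func closer(a, b, target []byte) bool {
        for i := range target {
            da := a[i] ^ target[i] // distance of a from target, this byte
            db := b[i] ^ target[i] // distance of b from target, this byte
            if da != db {
                return da < db // first differing byte decides
            }
        }
        return false // equal distances: a is not strictly closer
    }

handleGetValue and handleFindPeer use this to stop a lookup early once the local node is already the closest peer it knows for the key.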
@@ -355,3 +355,17 @@ func (s *Swarm) Drop(p *peer.Peer) error {
 
     return conn.Close()
 }
+
+func (s *Swarm) Send(mes *Message) {
+    s.Chan.Outgoing <- mes
+}
+
+func (s *Swarm) Error(e error) {
+    s.Chan.Errors <- e
+}
+
+func (s *Swarm) GetChan() *Chan {
+    return s.Chan
+}
+
+var _ Network = &Swarm{}
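The trailing var _ Network = &Swarm{} line is a compile-time assertion that *Swarm implements the new Network interface; it is what lets IpfsDHT hold a swarm.Network instead of a concrete *swarm.Swarm (first hunk of this commit). The interface definition itself is not shown in this diff; judging only from the calls the DHT makes, it plausibly reads something like:

    // Hypothetical reconstruction of swarm.Network from the methods this
    // commit calls (Send, Error, GetChan, Find); the actual definition is
    // not part of the diff.
    type Network interface {
        Send(mes *Message)         // queue an outgoing message
        Error(e error)             // surface an asynchronous error
        GetChan() *Chan            // expose the Incoming/Errors channels
        Find(key u.Key) *peer.Peer // look up an already-connected peer
    }

Decoupling the DHT from the concrete swarm also explains the test change above, where the error channels are now reached through GetChan().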
util/util.go (26 lines changed)
@@ -1,10 +1,12 @@
 package util
 
 import (
+    "bytes"
     "errors"
     "fmt"
     "os"
     "os/user"
+    "runtime"
     "strings"
 
     b58 "github.com/jbenet/go-base58"
@@ -34,6 +36,30 @@ func (k Key) Pretty() string {
     return b58.Encode([]byte(k))
 }
 
+type IpfsError struct {
+    Inner error
+    Note  string
+    Stack string
+}
+
+func (ie *IpfsError) Error() string {
+    buf := new(bytes.Buffer)
+    fmt.Fprintln(buf, ie.Inner)
+    fmt.Fprintln(buf, ie.Note)
+    fmt.Fprintln(buf, ie.Stack)
+    return buf.String()
+}
+
+func WrapError(err error, note string) error {
+    ie := new(IpfsError)
+    ie.Inner = err
+    ie.Note = note
+    stack := make([]byte, 2048)
+    n := runtime.Stack(stack, false)
+    ie.Stack = string(stack[:n])
+    return ie
+}
+
 // Hash is the global IPFS hash function. uses multihash SHA2_256, 256 bits
 func Hash(data []byte) (mh.Multihash, error) {
     return mh.Sum(data, mh.SHA2_256, -1)
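WrapError annotates a failure with a note and the stack of the wrapping site (capped at 2048 bytes) while keeping the original error inside IpfsError. Callers in this commit use it as u.WrapError(err, "getValue Error"); a usage sketch with hypothetical helpers:

    // Usage sketch for WrapError; fetch and loadFromStore are hypothetical,
    // but the wrapping call matches how this commit uses it.
    func fetch(key Key) ([]byte, error) {
        data, err := loadFromStore(key)
        if err != nil {
            return nil, WrapError(err, "fetch: store lookup failed")
        }
        return data, nil
    }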