
more work implementing coral type lookups

Jeromy
2014-08-10 21:02:05 -07:00
parent 67ddab1e4e
commit a43886245e
6 changed files with 346 additions and 193 deletions
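For orientation: "coral type lookups" refers to Coral-style hierarchical clustering, where a node keeps one routing table per cluster level and widens the search to the next level when a lookup stalls. In this diff that maps onto the routes slice on IpfsDHT; the sketch below is an interpretation of the fields touched in dht.go, not code from the commit.

// Sketch: one Kademlia-style table per Coral cluster level. Handlers
// index routes[level], where the level rides along in the first byte
// of the message's Value field (see handleGetValue and findPeerSingle).
type IpfsDHT struct {
    routes  []*kb.RoutingTable // one table per cluster level
    network swarm.Network      // message transport (see swarm.go)
    self    *peer.Peer         // the local peer
}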

View File

@ -2,7 +2,6 @@ package dht
import (
"bytes"
"encoding/json"
"errors"
"sync"
"time"
@ -28,7 +27,7 @@ type IpfsDHT struct {
// NOTE: (currently, only a single table is used)
routes []*kb.RoutingTable
network *swarm.Swarm
network swarm.Network
// Local peer (yourself)
self *peer.Peer
@ -95,7 +94,7 @@ func (dht *IpfsDHT) Start() {
go dht.handleMessages()
}
// Connect to a new peer at the given address
// Connect to a new peer at the given address, ping and add to the routing table
func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
maddrstr, _ := addr.String()
u.DOut("Connect to new peer: %s", maddrstr)
@ -104,8 +103,6 @@ func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
return nil, err
}
dht.Update(npeer)
// Ping new peer to register in their routing table
// NOTE: this should be done better...
err = dht.Ping(npeer, time.Second*2)
@ -113,6 +110,8 @@ func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
return nil, errors.New("failed to ping newly connected peer")
}
dht.Update(npeer)
return npeer, nil
}
@ -122,9 +121,10 @@ func (dht *IpfsDHT) handleMessages() {
u.DOut("Begin message handling routine")
checkTimeouts := time.NewTicker(time.Minute * 5)
ch := dht.network.GetChan()
for {
select {
case mes, ok := <-dht.network.Chan.Incoming:
case mes, ok := <-ch.Incoming:
if !ok {
u.DOut("handleMessages closing, bad recv on incoming")
return
@ -184,8 +184,8 @@ func (dht *IpfsDHT) handleMessages() {
dht.handleDiagnostic(mes.Peer, pmes)
}
case err := <-dht.network.Chan.Errors:
u.DErr("dht err: %s", err)
case err := <-ch.Errors:
u.PErr("dht err: %s", err)
case <-dht.shutdown:
checkTimeouts.Stop()
return
@ -235,7 +235,7 @@ func (dht *IpfsDHT) putValueToNetwork(p *peer.Peer, key string, value []byte) er
}
mes := swarm.NewMessage(p, pmes.ToProtobuf())
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
return nil
}
@ -260,17 +260,26 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) {
resp.Success = true
} else {
// No providers?
// Find closest peer(s) to desired key and reply with that info
closer := dht.routes[0].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
resp.Peers = []*peer.Peer{closer}
// Find closest peer on given cluster to desired key and reply with that info
level := pmes.GetValue()[0] // Using value field to specify cluster level
closer := dht.routes[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
// If this peer is closer than the one from the table, return nil
if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
resp.Peers = nil
} else {
resp.Peers = []*peer.Peer{closer}
}
}
} else {
//temp: what other errors can a datastore throw?
//temp: what other errors can a datastore return?
panic(err)
}
mes := swarm.NewMessage(p, resp.ToProtobuf())
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
}
// Store a value in this peer's local storage
@ -290,84 +299,66 @@ func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *PBDHTMessage) {
Id: pmes.GetId(),
}
dht.network.Chan.Outgoing <- swarm.NewMessage(p, resp.ToProtobuf())
dht.network.Send(swarm.NewMessage(p, resp.ToProtobuf()))
}
func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
success := true
u.POut("handleFindPeer: searching for '%s'", peer.ID(pmes.GetKey()).Pretty())
closest := dht.routes[0].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
resp := DHTMessage{
Type: pmes.GetType(),
Id: pmes.GetId(),
Response: true,
}
defer func() {
mes := swarm.NewMessage(p, resp.ToProtobuf())
dht.network.Send(mes)
}()
level := pmes.GetValue()[0]
u.DOut("handleFindPeer: searching for '%s'", peer.ID(pmes.GetKey()).Pretty())
closest := dht.routes[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
if closest == nil {
u.PErr("handleFindPeer: could not find anything.")
success = false
return
}
if len(closest.Addresses) == 0 {
u.PErr("handleFindPeer: no addresses for connected peer...")
success = false
return
}
u.POut("handleFindPeer: sending back '%s'", closest.ID.Pretty())
addr, err := closest.Addresses[0].String()
if err != nil {
u.PErr(err.Error())
success = false
// If the found peer is further away than this peer...
if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
return
}
resp := DHTMessage{
Type: pmes.GetType(),
Response: true,
Id: pmes.GetId(),
Value: []byte(addr),
Success: success,
}
mes := swarm.NewMessage(p, resp.ToProtobuf())
dht.network.Chan.Outgoing <- mes
u.DOut("handleFindPeer: sending back '%s'", closest.ID.Pretty())
resp.Peers = []*peer.Peer{closest}
resp.Success = true
}
func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *PBDHTMessage) {
resp := DHTMessage{
Type: PBDHTMessage_GET_PROVIDERS,
Key: pmes.GetKey(),
Id: pmes.GetId(),
Response: true,
}
dht.providerLock.RLock()
providers := dht.providers[u.Key(pmes.GetKey())]
dht.providerLock.RUnlock()
if providers == nil || len(providers) == 0 {
// ?????
u.DOut("No known providers for requested key.")
}
// This is just a quick hack, formalize method of sending addrs later
addrs := make(map[u.Key]string)
for _, prov := range providers {
ma := prov.Value.NetAddress("tcp")
str, err := ma.String()
if err != nil {
u.PErr("Error: %s", err)
continue
// TODO: work on tiering this
closer := dht.routes[0].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
resp.Peers = []*peer.Peer{closer}
} else {
for _, prov := range providers {
resp.Peers = append(resp.Peers, prov.Value)
}
addrs[prov.Value.Key()] = str
}
success := true
data, err := json.Marshal(addrs)
if err != nil {
u.POut("handleGetProviders: error marshalling struct to JSON: %s", err)
data = nil
success = false
}
resp := DHTMessage{
Type: PBDHTMessage_GET_PROVIDERS,
Key: pmes.GetKey(),
Value: data,
Id: pmes.GetId(),
Response: true,
Success: success,
resp.Success = true
}
mes := swarm.NewMessage(p, resp.ToProtobuf())
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
}
type providerInfo struct {
@ -445,7 +436,7 @@ func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *PBDHTMessage) {
for _, ps := range seq {
mes := swarm.NewMessage(ps, pmes)
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
}
buf := new(bytes.Buffer)
@ -481,19 +472,21 @@ out:
}
mes := swarm.NewMessage(p, resp.ToProtobuf())
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
}
func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration) ([]byte, error) {
// getValueSingle simply performs the get value RPC with the given parameters
func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration, level int) (*PBDHTMessage, error) {
pmes := DHTMessage{
Type: PBDHTMessage_GET_VALUE,
Key: string(key),
Id: GenerateMessageID(),
Type: PBDHTMessage_GET_VALUE,
Key: string(key),
Value: []byte{byte(level)},
Id: GenerateMessageID(),
}
response_chan := dht.ListenFor(pmes.Id, 1, time.Minute)
mes := swarm.NewMessage(p, pmes.ToProtobuf())
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
// Wait for either the response or a timeout
timeup := time.After(timeout)
@ -511,46 +504,41 @@ func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duratio
if err != nil {
return nil, err
}
// TODO: debate moving this logic out of this function to be handled by the caller
if pmes_out.GetSuccess() {
if pmes_out.Value == nil {
// We were given provider[s]
return dht.getFromProviderList(key, timeout, pmes_out.GetPeers())
}
// We were given the value
return pmes_out.GetValue(), nil
} else {
return pmes_out.GetValue(), u.ErrSearchIncomplete
}
return pmes_out, nil
}
}
// TODO: Im not certain on this implementation, we get a list of providers from someone
// what do we do with it? Connect to each of them? randomly pick one to get the value from?
// Or just connect to one at a time until we get a successful connection and request the
// value from it?
func (dht *IpfsDHT) getFromProviderList(key u.Key, timeout time.Duration, provlist []*PBDHTMessage_PBPeer) ([]byte, error) {
for _, prov := range provlist {
prov_p, _ := dht.Find(peer.ID(prov.GetId()))
if prov_p == nil {
maddr, err := ma.NewMultiaddr(prov.GetAddr())
// TODO: I'm not certain about this implementation: we get a list of peers/providers
// from someone; what do we do with it? Connect to each of them? Randomly pick
// one to get the value from? Or just connect to one at a time until we get a
// successful connection, then request the value from it?
func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration,
peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) {
for _, pinfo := range peerlist {
p, _ := dht.Find(peer.ID(pinfo.GetId()))
if p == nil {
maddr, err := ma.NewMultiaddr(pinfo.GetAddr())
if err != nil {
u.PErr("getValue error: %s", err)
continue
}
prov_p, err = dht.Connect(maddr)
p, err = dht.Connect(maddr)
if err != nil {
u.PErr("getValue error: %s", err)
continue
}
}
data, err := dht.getValueSingle(prov_p, key, timeout)
pmes, err := dht.getValueSingle(p, key, timeout, level)
if err != nil {
u.DErr("getFromProvs error: %s", err)
u.DErr("getFromPeers error: %s", err)
continue
}
dht.addProviderEntry(key, p)
return data, nil
// Make sure it was a successful get
if pmes.GetSuccess() && pmes.Value != nil {
return pmes.GetValue(), nil
}
}
return nil, u.ErrNotFound
}
@ -584,3 +572,30 @@ func (dht *IpfsDHT) Find(id peer.ID) (*peer.Peer, *kb.RoutingTable) {
}
return nil, nil
}
func (dht *IpfsDHT) findPeerSingle(p *peer.Peer, id peer.ID, timeout time.Duration, level int) (*PBDHTMessage, error) {
pmes := DHTMessage{
Type: PBDHTMessage_FIND_NODE,
Key: string(id),
Id: GenerateMessageID(),
Value: []byte{byte(level)},
}
mes := swarm.NewMessage(p, pmes.ToProtobuf())
listenChan := dht.ListenFor(pmes.Id, 1, time.Minute)
dht.network.Send(mes)
after := time.After(timeout)
select {
case <-after:
dht.Unlisten(pmes.Id)
return nil, u.ErrTimeout
case resp := <-listenChan:
pmes_out := new(PBDHTMessage)
err := proto.Unmarshal(resp.Data, pmes_out)
if err != nil {
return nil, err
}
return pmes_out, nil
}
}
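One sharp edge in the handlers above: handleGetValue and handleFindPeer read pmes.GetValue()[0] to recover the cluster level, which panics if a sender omits the Value field. A defensive decode, as a sketch; clusterLevel is a hypothetical helper, not part of this commit.

// clusterLevel recovers the cluster level encoded as the first byte of
// the message's Value field, defaulting to 0 when no level was sent.
// Callers should still clamp the result to len(dht.routes)-1.
func clusterLevel(pmes *PBDHTMessage) int {
    v := pmes.GetValue()
    if len(v) < 1 {
        return 0
    }
    return int(v[0])
}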

View File

@ -11,6 +11,37 @@ import (
"time"
)
func setupDHTS(n int, t *testing.T) ([]*ma.Multiaddr, []*peer.Peer, []*IpfsDHT) {
var addrs []*ma.Multiaddr
for i := 0; i < n; i++ {
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, a)
}
var peers []*peer.Peer
for i := 0; i < n; i++ {
p := new(peer.Peer)
p.AddAddress(addrs[i])
p.ID = peer.ID([]byte(fmt.Sprintf("peer_%d", i)))
peers = append(peers, p)
}
var dhts []*IpfsDHT
for i := 0; i < n; i++ {
d, err := NewDHT(peers[i])
if err != nil {
t.Fatal(err)
}
dhts = append(dhts, d)
d.Start()
}
return addrs, peers, dhts
}
func TestPing(t *testing.T) {
u.Debug = false
addr_a, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2222")
@ -90,11 +121,13 @@ func TestValueGetSet(t *testing.T) {
dht_a.Start()
dht_b.Start()
errsa := dht_a.network.GetChan().Errors
errsb := dht_b.network.GetChan().Errors
go func() {
select {
case err := <-dht_a.network.Chan.Errors:
case err := <-errsa:
t.Fatal(err)
case err := <-dht_b.network.Chan.Errors:
case err := <-errsb:
t.Fatal(err)
}
}()
@ -118,32 +151,8 @@ func TestValueGetSet(t *testing.T) {
func TestProvides(t *testing.T) {
u.Debug = false
var addrs []*ma.Multiaddr
for i := 0; i < 4; i++ {
a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 5000+i))
if err != nil {
t.Fatal(err)
}
addrs = append(addrs, a)
}
var peers []*peer.Peer
for i := 0; i < 4; i++ {
p := new(peer.Peer)
p.AddAddress(addrs[i])
p.ID = peer.ID([]byte(fmt.Sprintf("peer_%d", i)))
peers = append(peers, p)
}
var dhts []*IpfsDHT
for i := 0; i < 4; i++ {
d, err := NewDHT(peers[i])
if err != nil {
t.Fatal(err)
}
dhts = append(dhts, d)
d.Start()
}
addrs, _, dhts := setupDHTS(4, t)
_, err := dhts[0].Connect(addrs[1])
if err != nil {
@ -217,7 +226,7 @@ func TestLayeredGet(t *testing.T) {
_, err := dhts[0].Connect(addrs[1])
if err != nil {
t.Fatal(err)
t.Fatalf("Failed to connect: %s", err)
}
_, err = dhts[1].Connect(addrs[2])
@ -255,3 +264,65 @@ func TestLayeredGet(t *testing.T) {
dhts[i].Halt()
}
}
func TestFindPeer(t *testing.T) {
u.Debug = false
addrs, peers, dhts := setupDHTS(4, t)
_, err := dhts[0].Connect(addrs[1])
if err != nil {
t.Fatal(err)
}
_, err = dhts[1].Connect(addrs[2])
if err != nil {
t.Fatal(err)
}
_, err = dhts[1].Connect(addrs[3])
if err != nil {
t.Fatal(err)
}
p, err := dhts[0].FindPeer(peers[2].ID, time.Second)
if err != nil {
t.Fatal(err)
}
if p == nil {
t.Fatal("Failed to find peer.")
}
if !p.ID.Equal(peers[2].ID) {
t.Fatal("Didnt find expected peer.")
}
for i := 0; i < 4; i++ {
dhts[i].Halt()
}
}

View File

@ -3,8 +3,6 @@ package dht
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/rand"
"time"
@ -35,19 +33,19 @@ func GenerateMessageID() uint64 {
// This is the top level "Store" operation of the DHT
func (s *IpfsDHT) PutValue(key u.Key, value []byte) {
complete := make(chan struct{})
for i, route := range s.routes {
for _, route := range s.routes {
p := route.NearestPeer(kb.ConvertKey(key))
if p == nil {
s.network.Chan.Errors <- fmt.Errorf("No peer found on level %d", i)
continue
s.network.Error(kb.ErrLookupFailure)
go func() {
complete <- struct{}{}
}()
continue
}
go func() {
err := s.putValueToNetwork(p, string(key), value)
if err != nil {
s.network.Chan.Errors <- err
s.network.Error(err)
}
complete <- struct{}{}
}()
@ -61,19 +59,46 @@ func (s *IpfsDHT) PutValue(key u.Key, value []byte) {
// If the search does not succeed, a multiaddr string of a closer peer is
// returned along with util.ErrSearchIncomplete
func (s *IpfsDHT) GetValue(key u.Key, timeout time.Duration) ([]byte, error) {
for _, route := range s.routes {
var p *peer.Peer
p = route.NearestPeer(kb.ConvertKey(key))
if p == nil {
return nil, errors.New("Table returned nil peer!")
route_level := 0
p := s.routes[route_level].NearestPeer(kb.ConvertKey(key))
if p == nil {
return nil, kb.ErrLookupFailure
}
for route_level < len(s.routes) && p != nil {
pmes, err := s.getValueSingle(p, key, timeout, route_level)
if err != nil {
return nil, u.WrapError(err, "getValue Error")
}
b, err := s.getValueSingle(p, key, timeout)
if err == nil {
return b, nil
}
if err != u.ErrSearchIncomplete {
return nil, err
if pmes.GetSuccess() {
if pmes.Value == nil { // We were given provider[s]
return s.getFromPeerList(key, timeout, pmes.GetPeers(), route_level)
}
// Success! We were given the value
return pmes.GetValue(), nil
} else {
// We were given a closer node
closers := pmes.GetPeers()
if len(closers) > 0 {
maddr, err := ma.NewMultiaddr(closers[0].GetAddr())
if err != nil {
// ??? Move up route level???
panic("not yet implemented")
}
// TODO: dht.Connect has overhead due to an internal
// ping to the target. Use something else
p, err = s.Connect(maddr)
if err != nil {
// Move up route level
panic("not yet implemented.")
}
} else {
route_level++
}
}
}
return nil, u.ErrNotFound
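The two panic("not yet implemented") branches above mark the intended fallback: when the closer peer's address fails to parse, or the connection fails, move up a routing level instead of crashing. A sketch of the loop with those branches filled in, as one interpretation rather than code from the commit:

for route_level < len(s.routes) && p != nil {
    pmes, err := s.getValueSingle(p, key, timeout, route_level)
    if err != nil {
        return nil, u.WrapError(err, "getValue Error")
    }
    if pmes.GetSuccess() {
        if pmes.Value == nil { // we were given provider[s]
            return s.getFromPeerList(key, timeout, pmes.GetPeers(), route_level)
        }
        return pmes.GetValue(), nil // we were given the value
    }
    if closers := pmes.GetPeers(); len(closers) > 0 {
        maddr, err := ma.NewMultiaddr(closers[0].GetAddr())
        if err != nil {
            route_level++ // bad peer info; try a wider cluster
            continue
        }
        next, err := s.Connect(maddr)
        if err != nil {
            route_level++ // unreachable closer peer; try a wider cluster
            continue
        }
        p = next
    } else {
        route_level++ // no closer peers at this level
    }
}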
@ -86,7 +111,7 @@ func (s *IpfsDHT) GetValue(key u.Key, timeout time.Duration) ([]byte, error) {
func (s *IpfsDHT) Provide(key u.Key) error {
peers := s.routes[0].NearestPeers(kb.ConvertKey(key), PoolSize)
if len(peers) == 0 {
//return an error
return kb.ErrLookupFailure
}
pmes := DHTMessage{
@ -97,7 +122,7 @@ func (s *IpfsDHT) Provide(key u.Key) error {
for _, p := range peers {
mes := swarm.NewMessage(p, pbmes)
s.network.Chan.Outgoing <- mes
s.network.Send(mes)
}
return nil
}
@ -105,6 +130,9 @@ func (s *IpfsDHT) Provide(key u.Key) error {
// FindProviders searches for peers who can provide the value for given key.
func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer, error) {
p := s.routes[0].NearestPeer(kb.ConvertKey(key))
if p == nil {
return nil, kb.ErrLookupFailure
}
pmes := DHTMessage{
Type: PBDHTMessage_GET_PROVIDERS,
@ -116,7 +144,7 @@ func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer,
listenChan := s.ListenFor(pmes.Id, 1, time.Minute)
u.DOut("Find providers for: '%s'", key)
s.network.Chan.Outgoing <- mes
s.network.Send(mes)
after := time.After(timeout)
select {
case <-after:
@ -129,17 +157,12 @@ func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer,
if err != nil {
return nil, err
}
var addrs map[u.Key]string
err = json.Unmarshal(pmes_out.GetValue(), &addrs)
if err != nil {
return nil, err
}
var prov_arr []*peer.Peer
for pid, addr := range addrs {
p := s.network.Find(pid)
for _, prov := range pmes_out.GetPeers() {
p := s.network.Find(u.Key(prov.GetId()))
if p == nil {
maddr, err := ma.NewMultiaddr(addr)
maddr, err := ma.NewMultiaddr(prov.GetAddr())
if err != nil {
u.PErr("error connecting to new peer: %s", err)
continue
@ -162,48 +185,36 @@ func (s *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer,
// FindPeer searches for a peer with given ID.
func (s *IpfsDHT) FindPeer(id peer.ID, timeout time.Duration) (*peer.Peer, error) {
p := s.routes[0].NearestPeer(kb.ConvertPeerID(id))
pmes := DHTMessage{
Type: PBDHTMessage_FIND_NODE,
Key: string(id),
Id: GenerateMessageID(),
route_level := 0
p := s.routes[route_level].NearestPeer(kb.ConvertPeerID(id))
if p == nil {
return nil, kb.ErrLookupFailure
}
mes := swarm.NewMessage(p, pmes.ToProtobuf())
for route_level < len(s.routes) {
pmes, err := s.findPeerSingle(p, id, timeout, route_level)
if err != nil {
return nil, err
}
plist := pmes.GetPeers()
if len(plist) == 0 {
route_level++
continue
}
found := plist[0]
listenChan := s.ListenFor(pmes.Id, 1, time.Minute)
s.network.Chan.Outgoing <- mes
after := time.After(timeout)
select {
case <-after:
s.Unlisten(pmes.Id)
return nil, u.ErrTimeout
case resp := <-listenChan:
pmes_out := new(PBDHTMessage)
err := proto.Unmarshal(resp.Data, pmes_out)
addr, err := ma.NewMultiaddr(found.GetAddr())
if err != nil {
return nil, err
return nil, u.WrapError(err, "FindPeer received bad info")
}
addr := string(pmes_out.GetValue())
maddr, err := ma.NewMultiaddr(addr)
nxtPeer, err := s.Connect(addr)
if err != nil {
return nil, err
return nil, u.WrapError(err, "FindPeer failed to connect to new peer.")
}
found_peer, err := s.Connect(maddr)
if err != nil {
u.POut("Found peer but couldnt connect.")
return nil, err
if pmes.GetSuccess() {
return nxtPeer, nil
} else {
p = nxtPeer
}
if !found_peer.ID.Equal(id) {
u.POut("FindPeer: searching for '%s' but found '%s'", id.Pretty(), found_peer.ID.Pretty())
return found_peer, u.ErrSearchIncomplete
}
return found_peer, nil
}
return nil, u.ErrNotFound
}
// Ping a peer, log the time it took
@ -216,14 +227,14 @@ func (dht *IpfsDHT) Ping(p *peer.Peer, timeout time.Duration) error {
before := time.Now()
response_chan := dht.ListenFor(pmes.Id, 1, time.Minute)
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
tout := time.After(timeout)
select {
case <-response_chan:
roundtrip := time.Since(before)
p.SetLatency(roundtrip)
u.POut("Ping took %s.", roundtrip.String())
u.DOut("Ping took %s.", roundtrip.String())
return nil
case <-tout:
// Timed out, think about removing peer from network
@ -249,7 +260,7 @@ func (dht *IpfsDHT) GetDiagnostic(timeout time.Duration) ([]*diagInfo, error) {
pbmes := pmes.ToProtobuf()
for _, p := range targets {
mes := swarm.NewMessage(p, pbmes)
dht.network.Chan.Outgoing <- mes
dht.network.Send(mes)
}
var out []*diagInfo

View File

@ -3,11 +3,16 @@ package dht
import (
"bytes"
"crypto/sha256"
"errors"
peer "github.com/jbenet/go-ipfs/peer"
u "github.com/jbenet/go-ipfs/util"
)
// Returned if a routing table query returns no results. This is NOT expected
// behaviour
var ErrLookupFailure = errors.New("failed to find any peer in table")
// ID for IpfsDHT should be a byte slice, to allow for simpler operations
// (xor). DHT ids are based on the peer.IDs.
//
@ -19,8 +24,8 @@ func (id ID) Equal(other ID) bool {
return bytes.Equal(id, other)
}
func (id ID) Less(other interface{}) bool {
a, b := equalizeSizes(id, other.(ID))
func (id ID) Less(other ID) bool {
a, b := equalizeSizes(id, other)
for i := 0; i < len(a); i++ {
if a[i] != b[i] {
return a[i] < b[i]
@ -80,3 +85,14 @@ func ConvertKey(id u.Key) ID {
hash := sha256.Sum256([]byte(id))
return hash[:]
}
// Returns true if a is closer to key than b is
func Closer(a, b peer.ID, key u.Key) bool {
aid := ConvertPeerID(a)
bid := ConvertPeerID(b)
tgt := ConvertKey(key)
adist := xor(aid, tgt)
bdist := xor(bid, tgt)
return adist.Less(bdist)
}
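Closer reduces to comparing XOR distances between the SHA-256-converted IDs. A small usage sketch; pickCloser is a hypothetical helper, not part of this commit:

// pickCloser returns whichever of two candidate peers is nearer to the
// target key under the XOR metric that Closer implements.
func pickCloser(a, b peer.ID, key u.Key) peer.ID {
    if Closer(a, b, key) {
        return a // sha256(a) XOR sha256(key) is the smaller distance
    }
    return b
}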

View File

@ -355,3 +355,17 @@ func (s *Swarm) Drop(p *peer.Peer) error {
return conn.Close()
}
func (s *Swarm) Send(mes *Message) {
s.Chan.Outgoing <- mes
}
func (s *Swarm) Error(e error) {
s.Chan.Errors <- e
}
func (s *Swarm) GetChan() *Chan {
return s.Chan
}
var _ Network = &Swarm{}

View File

@ -1,10 +1,12 @@
package util
import (
"bytes"
"errors"
"fmt"
"os"
"os/user"
"runtime"
"strings"
b58 "github.com/jbenet/go-base58"
@ -34,6 +36,30 @@ func (k Key) Pretty() string {
return b58.Encode([]byte(k))
}
type IpfsError struct {
Inner error
Note string
Stack string
}
func (ie *IpfsError) Error() string {
buf := new(bytes.Buffer)
fmt.Fprintln(buf, ie.Inner)
fmt.Fprintln(buf, ie.Note)
fmt.Fprintln(buf, ie.Stack)
return buf.String()
}
func WrapError(err error, note string) error {
ie := new(IpfsError)
ie.Inner = err
ie.Note = note
stack := make([]byte, 2048)
n := runtime.Stack(stack, false)
ie.Stack = string(stack[:n])
return ie
}
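A quick usage sketch of WrapError; lookup and doQuery here are illustrative only. Printing the returned error yields the inner error, the note, and the stack captured at wrap time, one per line.

// Hypothetical caller: annotate a failure with context plus a stack.
func lookup(key Key) ([]byte, error) {
    data, err := doQuery(key) // doQuery stands in for any failing call
    if err != nil {
        return nil, WrapError(err, "lookup failed for "+key.Pretty())
    }
    return data, nil
}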
// Hash is the global IPFS hash function. uses multihash SHA2_256, 256 bits
func Hash(data []byte) (mh.Multihash, error) {
return mh.Sum(data, mh.SHA2_256, -1)