mirror of https://github.com/ipfs/kubo.git synced 2025-09-10 09:52:20 +08:00
kubo/routing/dht/lookup.go
Jeromy 235a9ec5fc Fix dht queries
Queries would previously sometimes query only three peers (the alpha
value) before halting the operation. This PR changes the number of peers
grabbed from the routing table to start a query from alpha to K.

DHT nodes would also not respond with enough peers; per the Kademlia
paper, this has been changed from 4 to 'K'.
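
As a rough illustration of the two K-related changes above, here is a
self-contained toy sketch (not the go-ipfs code; in the dht package the
constants are KValue and AlphaValue) of seeding a lookup with the K
closest known peers by XOR distance instead of only alpha of them:

	package main

	import (
		"bytes"
		"crypto/sha256"
		"fmt"
		"sort"
	)

	// Usual Kademlia defaults; purely illustrative values.
	const (
		k     = 20
		alpha = 3
	)

	// nearest sorts ids by XOR distance to target and returns at most n of them.
	func nearest(ids [][32]byte, target [32]byte, n int) [][32]byte {
		sorted := append([][32]byte(nil), ids...)
		sort.Slice(sorted, func(i, j int) bool {
			var di, dj [32]byte
			for b := range target {
				di[b] = sorted[i][b] ^ target[b]
				dj[b] = sorted[j][b] ^ target[b]
			}
			return bytes.Compare(di[:], dj[:]) < 0
		})
		if len(sorted) > n {
			sorted = sorted[:n]
		}
		return sorted
	}

	func main() {
		// Build a toy routing table of 64 peer IDs.
		var table [][32]byte
		for i := 0; i < 64; i++ {
			table = append(table, sha256.Sum256([]byte(fmt.Sprintf("peer-%d", i))))
		}
		target := sha256.Sum256([]byte("lookup key"))

		fmt.Println("seed peers before (alpha):", len(nearest(table, target, alpha)))
		fmt.Println("seed peers after (K):     ", len(nearest(table, target, k)))
	}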

The query mechanism itself was also flawed in that it would pull all of
the peers it had yet to query out of the queue and 'start' the query for
them at once. The concurrency rate limiting was done inside the
'queryPeer' method after the goroutine was spawned, which did not allow
peers received from query replies to be properly queried in order of
distance.
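
In other words, the fix amounts to bounding concurrency before a peer is
taken off the queue rather than after a goroutine has already been
spawned for it. A minimal, self-contained sketch of that worker-pool
shape (hypothetical names, not the actual dhtQuery code; the real queue
is ordered by distance, which a plain channel is not):

	package main

	import (
		"fmt"
		"sync"
	)

	const alpha = 3 // concurrency factor

	// runQuery starts a fixed pool of alpha workers. Rate limiting happens
	// where each worker pulls a peer off the queue, so peers added later
	// can still be picked up in the order the queue yields them. The broken
	// pattern was to spawn one goroutine per queued peer immediately and
	// only then block on a rate limiter inside it.
	func runQuery(todo chan string) {
		var wg sync.WaitGroup
		for i := 0; i < alpha; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for p := range todo {
					fmt.Println("querying", p)
					// a real worker would push closer peers learned from
					// the reply back onto the (distance-ordered) queue here
				}
			}()
		}
		wg.Wait()
	}

	func main() {
		todo := make(chan string, 8)
		for _, p := range []string{"peerA", "peerB", "peerC", "peerD", "peerE"} {
			todo <- p
		}
		close(todo)
		runQuery(todo)
	}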

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
2015-09-29 09:47:47 -07:00


package dht

import (
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	key "github.com/ipfs/go-ipfs/blocks/key"
	notif "github.com/ipfs/go-ipfs/notifications"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	kb "github.com/ipfs/go-ipfs/routing/kbucket"
	pset "github.com/ipfs/go-ipfs/util/peerset"
)

// Required in order for proper JSON marshaling
func pointerizePeerInfos(pis []peer.PeerInfo) []*peer.PeerInfo {
	out := make([]*peer.PeerInfo, len(pis))
	for i, p := range pis {
		np := p
		out[i] = &np
	}
	return out
}

// Kademlia 'node lookup' operation. Returns a channel of the K closest peers
// to the given key
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key key.Key) (<-chan peer.ID, error) {
	e := log.EventBegin(ctx, "getClosestPeers", &key)
	tablepeers := dht.routingTable.NearestPeers(kb.ConvertKey(key), KValue)
	if len(tablepeers) == 0 {
		return nil, kb.ErrLookupFailure
	}

	out := make(chan peer.ID, KValue)
	peerset := pset.NewLimited(KValue)

	for _, p := range tablepeers {
		select {
		case out <- p:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		peerset.Add(p)
	}

	// since the query doesnt actually pass our context down
	// we have to hack this here. whyrusleeping isnt a huge fan of goprocess
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		closer, err := dht.closerPeersSingle(ctx, key, p)
		if err != nil {
			log.Debugf("error getting closer peers: %s", err)
			return nil, err
		}

		var filtered []peer.PeerInfo
		for _, clp := range closer {
			if kb.Closer(clp, dht.self, key) && peerset.TryAdd(clp) {
				select {
				case out <- clp:
				case <-ctx.Done():
					return nil, ctx.Err()
				}
				filtered = append(filtered, dht.peerstore.PeerInfo(clp))
			}
		}

		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(filtered),
		})

		return &dhtQueryResult{closerPeers: filtered}, nil
	})

	go func() {
		defer close(out)
		defer e.Done()
		// run it!
		_, err := query.Run(ctx, tablepeers)
		if err != nil {
			log.Debugf("closestPeers query run error: %s", err)
		}
	}()

	return out, nil
}
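
// For reference, a caller of GetClosestPeers typically drains the returned
// channel until the query goroutine above closes it. The helper below is a
// caller-side sketch added for illustration, not part of the original file;
// the name closestPeerIDs is hypothetical, and it is written as if it lived
// in this package so it can reuse the imports above.
func closestPeerIDs(ctx context.Context, dht *IpfsDHT, k key.Key) ([]peer.ID, error) {
	ch, err := dht.GetClosestPeers(ctx, k)
	if err != nil {
		return nil, err
	}
	var peers []peer.ID
	for p := range ch { // the query goroutine closes the channel when it finishes
		peers = append(peers, p)
	}
	return peers, nil
}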

func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key key.Key, p peer.ID) ([]peer.ID, error) {
	pmes, err := dht.findPeerSingle(ctx, p, peer.ID(key))
	if err != nil {
		return nil, err
	}

	var out []peer.ID
	for _, pbp := range pmes.GetCloserPeers() {
		pid := peer.ID(pbp.GetId())
		if pid != dht.self { // dont add self
			dht.peerstore.AddAddrs(pid, pbp.Addresses(), peer.TempAddrTTL)
			out = append(out, pid)
		}
	}
	return out, nil
}