
Queries would previously sometimes contact only three peers (the alpha value) before halting the operation. This PR changes the number of peers taken from the routing table to start a query from alpha to K. DHT nodes would also not respond with enough peers; per the Kademlia paper, this has been changed from 4 to 'K'. The query mechanism itself was also flawed: it pulled every peer it had yet to query out of the queue and 'started' the query for all of them, and the concurrency rate limiting was done inside the 'queryPeer' method only after the goroutine was spawned. This prevented peers received from query replies from being properly queried in order of distance.

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
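In practice the fix comes down to how a lookup is seeded and how replies feed back into it. A minimal sketch of the seeding step, using the KValue constant and the routing-table calls that appear in the file below (the surrounding query setup is elided):

    // take the K closest peers we know of as the starting set, not just alpha
    peers := dht.routingTable.NearestPeers(kb.ConvertKey(key), KValue)
    _, err := query.Run(ctx, peers)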
package dht

import (
	"bytes"
	"sync"

	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

	key "github.com/ipfs/go-ipfs/blocks/key"
	notif "github.com/ipfs/go-ipfs/notifications"
	inet "github.com/ipfs/go-ipfs/p2p/net"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	"github.com/ipfs/go-ipfs/routing"
	pb "github.com/ipfs/go-ipfs/routing/dht/pb"
	kb "github.com/ipfs/go-ipfs/routing/kbucket"
	record "github.com/ipfs/go-ipfs/routing/record"
	pset "github.com/ipfs/go-ipfs/util/peerset"
)

// asyncQueryBuffer is the size of buffered channels in async queries. This
// buffer allows multiple queries to execute simultaneously, return their
// results and continue querying closer peers. Note that different query
// results will wait for the channel to drain.
var asyncQueryBuffer = 10

// This file implements the Routing interface for the IpfsDHT struct.

// Basic Put/Get

// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT
func (dht *IpfsDHT) PutValue(ctx context.Context, key key.Key, value []byte) error {
	log.Debugf("PutValue %s", key)
	sk, err := dht.getOwnPrivateKey()
	if err != nil {
		return err
	}

	sign, err := dht.Validator.IsSigned(key)
	if err != nil {
		return err
	}

	rec, err := record.MakePutRecord(sk, key, value, sign)
	if err != nil {
		log.Debug("Creation of record failed!")
		return err
	}

	err = dht.putLocal(key, rec)
	if err != nil {
		return err
	}

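	// Hand the record to each of the closest peers GetClosestPeers finds,
	// one goroutine per peer, and wait for all of the puts to finish.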
	pchan, err := dht.GetClosestPeers(ctx, key)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	for p := range pchan {
		wg.Add(1)
		go func(p peer.ID) {
			defer wg.Done()
			notif.PublishQueryEvent(ctx, &notif.QueryEvent{
				Type: notif.Value,
				ID:   p,
			})

			err := dht.putValueToPeer(ctx, p, key, rec)
			if err != nil {
				log.Debugf("failed putting value to peer: %s", err)
			}
		}(p)
	}
	wg.Wait()
	return nil
}

// GetValue searches for the value corresponding to given Key.
func (dht *IpfsDHT) GetValue(ctx context.Context, key key.Key) ([]byte, error) {
	vals, err := dht.GetValues(ctx, key, 3)
	if err != nil {
		return nil, err
	}

	var recs [][]byte
	for _, v := range vals {
		recs = append(recs, v.Val)
	}

	i, err := dht.Selector.BestRecord(key, recs)
	if err != nil {
		return nil, err
	}

	best := recs[i]
	log.Debugf("GetValue %v %v", key, best)
	if best == nil {
		log.Errorf("GetValue yielded correct record with nil value.")
		return nil, routing.ErrNotFound
	}

	fixupRec, err := record.MakePutRecord(dht.peerstore.PrivKey(dht.self), key, best, true)
	if err != nil {
		// probably shouldn't actually 'error' here as we have found a value we like,
		// but this call failing probably isn't something we want to ignore
		return nil, err
	}

	for _, v := range vals {
		// if someone sent us a different 'less-valid' record, let's correct them
		if !bytes.Equal(v.Val, best) {
			go func(v routing.RecvdVal) {
				err := dht.putValueToPeer(ctx, v.From, key, fixupRec)
				if err != nil {
					log.Error("Error correcting DHT entry: ", err)
				}
			}(v)
		}
	}

	return best, nil
}

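// GetValues collects up to 'nvals' records for the given key from the DHT,
// returning each value together with the peer it was received from. A record
// stored locally counts toward the total.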
func (dht *IpfsDHT) GetValues(ctx context.Context, key key.Key, nvals int) ([]routing.RecvdVal, error) {
	var vals []routing.RecvdVal
	var valslock sync.Mutex

	// If we have it local, don't bother doing an RPC!
	lrec, err := dht.getLocal(key)
	if err == nil {
		// TODO: this is tricky, we don't always want to trust our own value
		// what if the authoritative source updated it?
		log.Debug("have it locally")
		vals = append(vals, routing.RecvdVal{
			Val:  lrec.GetValue(),
			From: dht.self,
		})

		if nvals <= 1 {
			return vals, nil
		}
	} else if nvals == 0 {
		return nil, err
	}

	// get closest peers in the routing table
	rtp := dht.routingTable.NearestPeers(kb.ConvertKey(key), KValue)
	log.Debugf("peers in rt: %d %s", len(rtp), rtp)
	if len(rtp) == 0 {
		log.Warning("No peers from routing table!")
		return nil, kb.ErrLookupFailure
	}

	// setup the Query
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		rec, peers, err := dht.getValueOrPeers(ctx, p, key)
		if err != nil {
			return nil, err
		}

		res := &dhtQueryResult{closerPeers: peers}

		if rec.GetValue() != nil {
			rv := routing.RecvdVal{
				Val:  rec.GetValue(),
				From: p,
			}
			valslock.Lock()
			vals = append(vals, rv)

			// If we've collected enough records, we're done
			if len(vals) >= nvals {
				res.success = true
			}
			valslock.Unlock()
		}

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(peers),
		})

		return res, nil
	})

	// run it!
	_, err = query.Run(ctx, rtp)
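	// a query error is only fatal if we came away with no values at all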
	if len(vals) == 0 {
		if err != nil {
			return nil, err
		}
	}

	return vals, nil
}

// Value provider layer of indirection.
// This is what DSHTs (Coral and MainlineDHT) do to store large values in a DHT.

// Provide makes this node announce that it can provide a value for the given key
func (dht *IpfsDHT) Provide(ctx context.Context, key key.Key) error {
	defer log.EventBegin(ctx, "provide", &key).Done()

	// add self locally
	dht.providers.AddProvider(ctx, key, dht.self)

	peers, err := dht.GetClosestPeers(ctx, key)
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	for p := range peers {
		wg.Add(1)
		go func(p peer.ID) {
			defer wg.Done()
			log.Debugf("putProvider(%s, %s)", key, p)
			err := dht.putProvider(ctx, p, string(key))
			if err != nil {
				log.Debug(err)
			}
		}(p)
	}
	wg.Wait()
	return nil
}

// FindProviders searches until the context expires.
func (dht *IpfsDHT) FindProviders(ctx context.Context, key key.Key) ([]peer.PeerInfo, error) {
	var providers []peer.PeerInfo
	for p := range dht.FindProvidersAsync(ctx, key, KValue) {
		providers = append(providers, p)
	}
	return providers, nil
}

// FindProvidersAsync is the same thing as FindProviders, but returns a channel.
// Peers will be returned on the channel as soon as they are found, even before
// the search query completes.
func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key key.Key, count int) <-chan peer.PeerInfo {
	log.Event(ctx, "findProviders", &key)
	peerOut := make(chan peer.PeerInfo, count)
	go dht.findProvidersAsyncRoutine(ctx, key, count, peerOut)
	return peerOut
}

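// findProvidersAsyncRoutine does the actual work for FindProvidersAsync: it
// sends locally known providers first, then queries the DHT for more, writing
// results to peerOut until 'count' unique providers have been found, the query
// finishes, or the context is cancelled. It closes peerOut when done.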
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key key.Key, count int, peerOut chan peer.PeerInfo) {
	defer log.EventBegin(ctx, "findProvidersAsync", &key).Done()
	defer close(peerOut)

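	// ps de-duplicates providers and caps how many we will return (at most 'count')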
	ps := pset.NewLimited(count)
	provs := dht.providers.GetProviders(ctx, key)
	for _, p := range provs {
		// NOTE: assuming that this list of peers is unique
		if ps.TryAdd(p) {
			select {
			case peerOut <- dht.peerstore.PeerInfo(p):
			case <-ctx.Done():
				return
			}
		}

		// If we have enough peers locally, don't bother with remote RPC
		if ps.Size() >= count {
			return
		}
	}

	// setup the Query
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})
		pmes, err := dht.findProvidersSingle(ctx, p, key)
		if err != nil {
			return nil, err
		}

		log.Debugf("%d provider entries", len(pmes.GetProviderPeers()))
		provs := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
		log.Debugf("%d provider entries decoded", len(provs))

		// Add unique providers from request, up to 'count'
		for _, prov := range provs {
			log.Debugf("got provider: %s", prov)
			if ps.TryAdd(prov.ID) {
				log.Debugf("using provider: %s", prov)
				select {
				case peerOut <- prov:
				case <-ctx.Done():
					log.Debug("Context timed out sending more providers")
					return nil, ctx.Err()
				}
			}
			if ps.Size() >= count {
				log.Debugf("got enough providers (%d/%d)", ps.Size(), count)
				return &dhtQueryResult{success: true}, nil
			}
		}

		// Give closer peers back to the query to be queried
		closer := pmes.GetCloserPeers()
		clpeers := pb.PBPeersToPeerInfos(closer)
		log.Debugf("got closer peers: %d %s", len(clpeers), clpeers)

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: pointerizePeerInfos(clpeers),
		})
		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

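	// Seed the query with the K nearest peers from our routing table.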
	peers := dht.routingTable.NearestPeers(kb.ConvertKey(key), KValue)
	_, err := query.Run(ctx, peers)
	if err != nil {
		log.Debugf("Query error: %s", err)
		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
		})
	}
}

// FindPeer searches for a peer with given ID.
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.PeerInfo, error) {
	defer log.EventBegin(ctx, "FindPeer", id).Done()

	// Check if we're already connected to them
	if pi := dht.FindLocal(id); pi.ID != "" {
		return pi, nil
	}

	peers := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), KValue)
	if len(peers) == 0 {
		return peer.PeerInfo{}, kb.ErrLookupFailure
	}

	// Sanity check: if the target is already among our closest known peers, return it directly.
	for _, p := range peers {
		if p == id {
			log.Debug("Found target peer in list of closest peers...")
			return dht.peerstore.PeerInfo(p), nil
		}
	}

	// setup the Query
	parent := ctx
	query := dht.newQuery(key.Key(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		pmes, err := dht.findPeerSingle(ctx, p, id)
		if err != nil {
			return nil, err
		}

		closer := pmes.GetCloserPeers()
		clpeerInfos := pb.PBPeersToPeerInfos(closer)

		// see if we got the peer here
		for _, npi := range clpeerInfos {
			if npi.ID == id {
				return &dhtQueryResult{
					peer:    npi,
					success: true,
				}, nil
			}
		}

		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			Responses: pointerizePeerInfos(clpeerInfos),
		})

		return &dhtQueryResult{closerPeers: clpeerInfos}, nil
	})

	// run it!
	result, err := query.Run(ctx, peers)
	if err != nil {
		return peer.PeerInfo{}, err
	}

	log.Debugf("FindPeer %v %v", id, result.success)
	if result.peer.ID == "" {
		return peer.PeerInfo{}, routing.ErrNotFound
	}

	return result.peer, nil
}

// FindPeersConnectedToPeer searches for peers directly connected to a given peer.
func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<-chan peer.PeerInfo, error) {

	peerchan := make(chan peer.PeerInfo, asyncQueryBuffer)
	peersSeen := peer.Set{}

	peers := dht.routingTable.NearestPeers(kb.ConvertPeerID(id), KValue)
	if len(peers) == 0 {
		return nil, kb.ErrLookupFailure
	}

	// setup the Query
	query := dht.newQuery(key.Key(id), func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {

		pmes, err := dht.findPeerSingle(ctx, p, id)
		if err != nil {
			return nil, err
		}

		var clpeers []peer.PeerInfo
		closer := pmes.GetCloserPeers()
		for _, pbp := range closer {
			pi := pb.PBPeerToPeerInfo(pbp)

			// skip peers already seen
			if _, found := peersSeen[pi.ID]; found {
				continue
			}
			peersSeen[pi.ID] = struct{}{}

			// if peer is connected, send it to our client.
			if pb.Connectedness(*pbp.Connection) == inet.Connected {
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case peerchan <- pi:
				}
			}

			// if peer is the peer we're looking for, don't bother querying it.
			// TODO maybe query it?
			if pb.Connectedness(*pbp.Connection) != inet.Connected {
				clpeers = append(clpeers, pi)
			}
		}

		return &dhtQueryResult{closerPeers: clpeers}, nil
	})

	// run it! run it asynchronously to gen peers as results are found.
	// this does no error checking
	go func() {
		if _, err := query.Run(ctx, peers); err != nil {
			log.Debug(err)
		}

		// close the peerchan channel when done.
		close(peerchan)
	}()

	return peerchan, nil
}