
really ugly impl of 'ipfs dht query' command

Author: Jeromy
Date: 2015-01-22 07:59:57 +00:00
parent 13b2003ea9
commit ec5276c29c
5 changed files with 297 additions and 75 deletions

core/commands/dht.go (new file, 108 additions)

@@ -0,0 +1,108 @@
package commands
import (
"bytes"
"errors"
"fmt"
"io"
"time"
cmds "github.com/jbenet/go-ipfs/commands"
ipdht "github.com/jbenet/go-ipfs/routing/dht"
u "github.com/jbenet/go-ipfs/util"
)
var DhtCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Issue commands directly through the DHT",
ShortDescription: ``,
},
Subcommands: map[string]*cmds.Command{
"query": queryDhtCmd,
},
}
var queryDhtCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Run a 'findClosestPeers' query through the DHT",
ShortDescription: ``,
},
Arguments: []cmds.Argument{
cmds.StringArg("peerID", true, true, "The peerID to run the query against"),
},
Options: []cmds.Option{
cmds.BoolOption("verbose", "v", "Write extra information"),
},
Run: func(req cmds.Request) (interface{}, error) {
n, err := req.Context().GetNode()
if err != nil {
return nil, err
}
dht, ok := n.Routing.(*ipdht.IpfsDHT)
if !ok {
return nil, errors.New("routing service was not a DHT")
}
events := make(chan *ipdht.QueryEvent)
closestPeers, err := dht.GetClosestPeers(req.Context().Context, u.Key(req.Arguments()[0]), events)
if err != nil {
	return nil, err
}
go func() {
defer close(events)
for p := range closestPeers {
events <- &ipdht.QueryEvent{
ID: p,
Type: ipdht.FinalPeer,
}
}
}()
outChan := make(chan interface{})
go func() {
defer close(outChan)
for e := range events {
outChan <- e
}
}()
// Hand back a receive-only view so the text marshaler's
// assertion to <-chan interface{} succeeds.
return (<-chan interface{})(outChan), nil
},
Marshalers: cmds.MarshalerMap{
cmds.Text: func(res cmds.Response) (io.Reader, error) {
outChan, ok := res.Output().(<-chan interface{})
if !ok {
return nil, u.ErrCast()
}
marshal := func(v interface{}) (io.Reader, error) {
obj, ok := v.(*ipdht.QueryEvent)
if !ok {
return nil, u.ErrCast()
}
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "%s: ", time.Now().Format("15:04:05.000"))
switch obj.Type {
case ipdht.FinalPeer:
fmt.Fprintf(buf, "%s\n", obj.ID)
case ipdht.PeerResponse:
fmt.Fprintf(buf, "* %s says use ", obj.ID)
for _, p := range obj.Responses {
fmt.Fprintf(buf, "%s ", p.ID)
}
fmt.Fprintln(buf)
case ipdht.SendingQuery:
fmt.Fprintf(buf, "* querying %s\n", obj.ID)
}
return buf, nil
}
return &cmds.ChannelMarshaler{
Channel: outChan,
Marshaler: marshal,
}, nil
},
},
Type: ipdht.QueryEvent{},
}
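
Putting the pieces together: Run streams *QueryEvent values, and the text marshaler above stamps each one with the current time. A hypothetical session (all peer IDs below are placeholders, not real output) might look like:

$ ipfs dht query <peerID>
15:04:05.000: * querying <peerA>
15:04:05.112: * <peerA> says use <peerB> <peerC>
15:04:05.271: * querying <peerB>
15:04:05.503: <peerB>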

core/commands/root.go

@@ -71,6 +71,7 @@ var rootSubcommands = map[string]*cmds.Command{
"cat": CatCmd,
"commands": CommandsDaemonCmd,
"config": ConfigCmd,
"dht": DhtCmd,
"diag": DiagCmd,
"id": IDCmd,
"log": LogCmd,

p2p/peer/peer.go

@@ -3,6 +3,7 @@ package peer
import (
"encoding/hex"
"encoding/json"
"fmt"
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
@@ -131,6 +132,35 @@ type PeerInfo struct {
Addrs []ma.Multiaddr
}
func (pi *PeerInfo) MarshalJSON() ([]byte, error) {
out := make(map[string]interface{})
out["ID"] = IDB58Encode(pi.ID)
var addrs []string
for _, a := range pi.Addrs {
addrs = append(addrs, a.String())
}
out["Addrs"] = addrs
return json.Marshal(out)
}
func (pi *PeerInfo) UnmarshalJSON(b []byte) error {
var data map[string]interface{}
err := json.Unmarshal(b, &data)
if err != nil {
return err
}
ids, ok := data["ID"].(string)
if !ok {
	return fmt.Errorf("peer info: expected string ID")
}
pid, err := IDB58Decode(ids)
if err != nil {
	return err
}
pi.ID = pid
addrs, ok := data["Addrs"].([]interface{})
if !ok {
	return fmt.Errorf("peer info: expected array of addrs")
}
for _, a := range addrs {
	as, ok := a.(string)
	if !ok {
		return fmt.Errorf("peer info: expected string multiaddr")
	}
	pi.Addrs = append(pi.Addrs, ma.StringCast(as))
}
return nil
}
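
A standalone sketch (not part of this commit) of round-tripping the JSON these methods produce; the peer ID and multiaddr are placeholder examples:

package main

import (
	"fmt"

	peer "github.com/jbenet/go-ipfs/p2p/peer"
)

func main() {
	// Hypothetical wire form produced by (*PeerInfo).MarshalJSON.
	blob := []byte(`{"Addrs":["/ip4/127.0.0.1/tcp/4001"],"ID":"QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N"}`)

	var pi peer.PeerInfo
	if err := pi.UnmarshalJSON(blob); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(pi.ID.Pretty(), pi.Addrs) // prints the same ID and address back
}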
// IDSlice for sorting peers
type IDSlice []ID

routing/dht/lookup.go (new file, 156 additions)

@@ -0,0 +1,156 @@
package dht
import (
"encoding/json"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
peer "github.com/jbenet/go-ipfs/p2p/peer"
kb "github.com/jbenet/go-ipfs/routing/kbucket"
u "github.com/jbenet/go-ipfs/util"
errors "github.com/jbenet/go-ipfs/util/debugerror"
pset "github.com/jbenet/go-ipfs/util/peerset"
)
type QueryEventType int
const (
SendingQuery QueryEventType = iota
PeerResponse
FinalPeer
)
type QueryEvent struct {
ID peer.ID
Type QueryEventType
Responses []*peer.PeerInfo
}
func pointerizePeerInfos(pis []peer.PeerInfo) []*peer.PeerInfo {
out := make([]*peer.PeerInfo, len(pis))
for i, p := range pis {
np := p
out[i] = &np
}
return out
}
// GetClosestPeers runs the Kademlia 'node lookup' operation. It returns a
// channel of the K closest peers to the given key; if events is non-nil,
// query progress is reported on it.
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key u.Key, events chan<- *QueryEvent) (<-chan peer.ID, error) {
e := log.EventBegin(ctx, "getClosestPeers", &key)
tablepeers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
if len(tablepeers) == 0 {
return nil, errors.Wrap(kb.ErrLookupFailure)
}
out := make(chan peer.ID, KValue)
peerset := pset.NewLimited(KValue)
for _, p := range tablepeers {
select {
case out <- p:
case <-ctx.Done():
return nil, ctx.Err()
}
peerset.Add(p)
}
query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
// For the DHT query command; events is nil when the caller (e.g.
// PutValue or Provide) does not want progress reporting, and a send
// on a nil channel would block forever.
if events != nil {
	select {
	case events <- &QueryEvent{
		Type: SendingQuery,
		ID:   p,
	}:
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
closer, err := dht.closerPeersSingle(ctx, key, p)
if err != nil {
log.Errorf("error getting closer peers: %s", err)
return nil, err
}
var filtered []peer.PeerInfo
for _, clp := range closer {
if kb.Closer(clp, dht.self, key) && peerset.TryAdd(clp) {
select {
case out <- clp:
log.Debugf("sending out peer: %s", clp.Pretty())
case <-ctx.Done():
return nil, ctx.Err()
}
filtered = append(filtered, dht.peerstore.PeerInfo(clp))
}
}
log.Errorf("filtered: %v", filtered)
// For the DHT query command; see the nil check above.
if events != nil {
	select {
	case events <- &QueryEvent{
		Type:      PeerResponse,
		ID:        p,
		Responses: pointerizePeerInfos(filtered),
	}:
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
return &dhtQueryResult{closerPeers: filtered}, nil
})
go func() {
defer close(out)
defer e.Done()
// run it!
_, err := query.Run(ctx, tablepeers)
if err != nil {
log.Debugf("closestPeers query run error: %s", err)
}
}()
return out, nil
}
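
A minimal sketch (not part of this commit) of driving the lookup directly, the way queryDhtCmd does; exampleLookup is a hypothetical helper assumed to live in this package, with dht already bootstrapped:

func exampleLookup(ctx context.Context, dht *IpfsDHT, key u.Key) error {
	// A non-nil channel opts in to progress events; PutValue and
	// Provide pass nil and skip event reporting entirely.
	events := make(chan *QueryEvent)
	go func() {
		for ev := range events {
			log.Debugf("query event %d from %s", ev.Type, ev.ID)
		}
	}()
	out, err := dht.GetClosestPeers(ctx, key, events)
	if err != nil {
		return err // e.g. kb.ErrLookupFailure on an empty routing table
	}
	for p := range out {
		log.Debugf("closest peer: %s", p)
	}
	close(events) // the query has finished once out is drained
	return nil
}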
func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key u.Key, p peer.ID) ([]peer.ID, error) {
pmes, err := dht.findPeerSingle(ctx, p, peer.ID(key))
if err != nil {
return nil, err
}
var out []peer.ID
for _, pbp := range pmes.GetCloserPeers() {
pid := peer.ID(pbp.GetId())
if pid != dht.self { // don't add self
dht.peerstore.AddAddresses(pid, pbp.Addresses())
out = append(out, pid)
}
}
return out, nil
}
func (qe *QueryEvent) MarshalJSON() ([]byte, error) {
out := make(map[string]interface{})
out["ID"] = peer.IDB58Encode(qe.ID)
out["Type"] = int(qe.Type)
out["Responses"] = qe.Responses
return json.Marshal(out)
}
func (qe *QueryEvent) UnmarshalJSON(b []byte) error {
temp := struct {
ID string
Type int
Responses []*peer.PeerInfo
}{}
err := json.Unmarshal(b, &temp)
if err != nil {
return err
}
pid, err := peer.IDB58Decode(temp.ID)
if err != nil {
return err
}
qe.ID = pid
qe.Type = QueryEventType(temp.Type)
qe.Responses = temp.Responses
return nil
}
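
For reference, a hypothetical PeerResponse event (Type 1) with placeholder IDs and address serializes as follows; json.Marshal sorts the map keys, so ID, Responses and Type appear alphabetically:

{"ID":"<base58 peer id>","Responses":[{"Addrs":["/ip4/127.0.0.1/tcp/4001"],"ID":"<base58 peer id>"}],"Type":1}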

routing/dht/routing.go

@@ -48,7 +48,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
return err
}
-	pchan, err := dht.getClosestPeers(ctx, key)
+	pchan, err := dht.GetClosestPeers(ctx, key, nil)
if err != nil {
return err
}
@@ -134,7 +134,7 @@ func (dht *IpfsDHT) Provide(ctx context.Context, key u.Key) error {
// add self locally
dht.providers.AddProvider(key, dht.self)
-	peers, err := dht.getClosestPeers(ctx, key)
+	peers, err := dht.GetClosestPeers(ctx, key, nil)
if err != nil {
return err
}
@@ -164,79 +164,6 @@ func (dht *IpfsDHT) FindProviders(ctx context.Context, key u.Key) ([]peer.PeerIn
return providers, nil
}
-// Kademlia 'node lookup' operation. Returns a channel of the K closest peers
-// to the given key
-func (dht *IpfsDHT) getClosestPeers(ctx context.Context, key u.Key) (<-chan peer.ID, error) {
-	e := log.EventBegin(ctx, "getClosestPeers", &key)
-	tablepeers := dht.routingTable.ListPeers()
-	if len(tablepeers) == 0 {
-		return nil, errors.Wrap(kb.ErrLookupFailure)
-	}
-	out := make(chan peer.ID, KValue)
-	peerset := pset.NewLimited(KValue)
-	for _, p := range tablepeers {
-		select {
-		case out <- p:
-		case <-ctx.Done():
-			return nil, ctx.Err()
-		}
-		peerset.Add(p)
-	}
-	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
-		closer, err := dht.closerPeersSingle(ctx, key, p)
-		if err != nil {
-			log.Errorf("error getting closer peers: %s", err)
-			return nil, err
-		}
-		var filtered []peer.PeerInfo
-		for _, p := range closer {
-			if kb.Closer(p, dht.self, key) && peerset.TryAdd(p) {
-				select {
-				case out <- p:
-				case <-ctx.Done():
-					return nil, ctx.Err()
-				}
-				filtered = append(filtered, dht.peerstore.PeerInfo(p))
-			}
-		}
-		return &dhtQueryResult{closerPeers: filtered}, nil
-	})
-	go func() {
-		defer close(out)
-		defer e.Done()
-		// run it!
-		_, err := query.Run(ctx, tablepeers)
-		if err != nil {
-			log.Debugf("closestPeers query run error: %s", err)
-		}
-	}()
-	return out, nil
-}
-func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key u.Key, p peer.ID) ([]peer.ID, error) {
-	pmes, err := dht.findPeerSingle(ctx, p, peer.ID(key))
-	if err != nil {
-		return nil, err
-	}
-	var out []peer.ID
-	for _, pbp := range pmes.GetCloserPeers() {
-		pid := peer.ID(pbp.GetId())
-		if pid != dht.self { // dont add self
-			dht.peerstore.AddAddresses(pid, pbp.Addresses())
-			out = append(out, pid)
-		}
-	}
-	return out, nil
-}
// FindProvidersAsync is the same thing as FindProviders, but returns a channel.
// Peers will be returned on the channel as soon as they are found, even before
// the search query completes.