mirror of https://github.com/ipfs/kubo.git (synced 2025-06-29 09:34:03 +08:00)
comment out diagnostic
it'll have to change lots since the listener is gone
committed by Brian Tiger Chow
parent 69ed45c555
commit c4536d127d
@@ -1,7 +1,6 @@
 package dht
 
 import (
-	"bytes"
 	"crypto/rand"
 	"errors"
 	"fmt"
@@ -341,62 +340,67 @@ type providerInfo struct {
 
 func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *Message) {
 	key := u.Key(pmes.GetKey())
-	u.DOut("[%s] Adding [%s] as a provider for '%s'\n", dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty())
+	u.DOut("[%s] Adding [%s] as a provider for '%s'\n",
+		dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty())
 	dht.providers.AddProvider(key, p)
 }
 
 // Halt stops all communications from this peer and shut down
+// TODO -- remove this in favor of context
 func (dht *IpfsDHT) Halt() {
 	dht.shutdown <- struct{}{}
 	dht.network.Close()
 	dht.providers.Halt()
-	dht.listener.Halt()
 }
 
 // NOTE: not yet finished, low priority
-func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *Message) {
+func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *Message) (*Message, error) {
 	seq := dht.routingTables[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10)
-	listenChan := dht.listener.Listen(pmes.GetId(), len(seq), time.Second*30)
 
 	for _, ps := range seq {
-		mes := swarm.NewMessage(ps, pmes)
-		dht.netChan.Outgoing <- mes
-	}
-
-	buf := new(bytes.Buffer)
-	di := dht.getDiagInfo()
-	buf.Write(di.Marshal())
-
-	// NOTE: this shouldnt be a hardcoded value
-	after := time.After(time.Second * 20)
-	count := len(seq)
-	for count > 0 {
-		select {
-		case <-after:
-			//Timeout, return what we have
-			goto out
-		case reqResp := <-listenChan:
-			pmesOut := new(Message)
-			err := proto.Unmarshal(reqResp.Data, pmesOut)
-			if err != nil {
-				// It broke? eh, whatever, keep going
-				continue
-			}
-			buf.Write(reqResp.Data)
-			count--
-		}
-	}
-
-out:
-	resp := Message{
-		Type: Message_DIAGNOSTIC,
-		ID: pmes.GetId(),
-		Value: buf.Bytes(),
-		Response: true,
-	}
-
-	mes := swarm.NewMessage(p, resp.ToProtobuf())
-	dht.netChan.Outgoing <- mes
+		mes, err := msg.FromObject(ps, pmes)
+		if err != nil {
+			u.PErr("handleDiagnostics error creating message: %v\n", err)
+			continue
+		}
+		// dht.sender.SendRequest(context.TODO(), mes)
+	}
+	return nil, errors.New("not yet ported back")
+
+	// buf := new(bytes.Buffer)
+	// di := dht.getDiagInfo()
+	// buf.Write(di.Marshal())
+	//
+	// // NOTE: this shouldnt be a hardcoded value
+	// after := time.After(time.Second * 20)
+	// count := len(seq)
+	// for count > 0 {
+	// 	select {
+	// 	case <-after:
+	// 		//Timeout, return what we have
+	// 		goto out
+	// 	case reqResp := <-listenChan:
+	// 		pmesOut := new(Message)
+	// 		err := proto.Unmarshal(reqResp.Data, pmesOut)
+	// 		if err != nil {
+	// 			// It broke? eh, whatever, keep going
+	// 			continue
+	// 		}
+	// 		buf.Write(reqResp.Data)
+	// 		count--
+	// 	}
+	// }
+	//
+	// out:
+	// resp := Message{
+	// 	Type: Message_DIAGNOSTIC,
+	// 	ID: pmes.GetId(),
+	// 	Value: buf.Bytes(),
+	// 	Response: true,
+	// }
+	//
+	// mes := swarm.NewMessage(p, resp.ToProtobuf())
+	// dht.netChan.Outgoing <- mes
 }
 
 func (dht *IpfsDHT) getValueOrPeers(p *peer.Peer, key u.Key, timeout time.Duration, level int) ([]byte, []*peer.Peer, error) {
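The body being commented out here was a small fan-in with a deadline: broadcast the diagnostic request to the nearest peers, then drain the listener channel until every peer has replied or the hard-coded 20-second timer fires, appending each reply to a buffer. Since the listener is gone, the sketch below reproduces only that collection pattern with plain channels and a made-up diagResponse type; it is an illustration of the technique, not the DHT's actual listener, message, or peer types.

// Standalone sketch of the collect-with-timeout pattern from the old
// handleDiagnostic body. All names here (diagResponse, collectDiagnostics)
// are illustrative, not part of the DHT code.
package main

import (
	"bytes"
	"fmt"
	"math/rand"
	"time"
)

type diagResponse struct {
	PeerID string
	Data   []byte
}

// collectDiagnostics waits for up to count responses on respChan, gives up
// once timeout elapses, and concatenates whatever arrived by then.
func collectDiagnostics(respChan <-chan diagResponse, count int, timeout time.Duration) []byte {
	buf := new(bytes.Buffer)
	after := time.After(timeout)
	for count > 0 {
		select {
		case <-after:
			// Timeout: return what we have, mirroring the old "goto out" path.
			return buf.Bytes()
		case resp := <-respChan:
			buf.Write(resp.Data)
			count--
		}
	}
	return buf.Bytes()
}

func main() {
	peers := []string{"peerA", "peerB", "peerC"}
	// Buffered so late responders never block after the deadline has passed.
	respChan := make(chan diagResponse, len(peers))

	// Simulate peers answering after random delays; some may miss the deadline.
	for _, p := range peers {
		go func(id string) {
			time.Sleep(time.Duration(rand.Intn(300)) * time.Millisecond)
			respChan <- diagResponse{PeerID: id, Data: []byte(id + " ok\n")}
		}(p)
	}

	out := collectDiagnostics(respChan, len(peers), 200*time.Millisecond)
	fmt.Printf("collected %d bytes:\n%s", len(out), out)
}

The deadline branch deliberately returns whatever has been collected so far, the same partial-result behaviour the original goto out path implemented.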