
changed logging, in dht and elsewhere

- use log.* instead of u.*
- use automatic type conversions to .String()
  (Peer.String() prints nicely, and avoids calling b58 encoding
  until needed)
Juan Batiz-Benet
2014-10-07 21:29:03 -07:00
parent 7c5679536c
commit 66839fa1de
9 changed files with 39 additions and 40 deletions
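
Before the per-file diffs, the second bullet of the commit message deserves a quick illustration. The sketch below is purely illustrative (Peer, peerID, and debugf are stand-ins, not the actual kubo types or logger): fmt only invokes a value's String() method when it actually formats the message, so a level-gated logger that drops the line never pays for the b58 encoding, whereas calling .Pretty() at the call site always does.

package main

import "fmt"

// peerID stands in for the real peer.ID type; Pretty simulates the
// b58 encoding step that the commit wants to defer.
type peerID []byte

func (id peerID) Pretty() string {
	fmt.Println("  (encoding ran)")
	return fmt.Sprintf("%x", []byte(id)) // placeholder for b58 encoding
}

// Peer mirrors the shape of the patched Peer.String() shown below.
type Peer struct{ ID peerID }

func (p *Peer) String() string { return "[Peer " + p.ID.Pretty() + "]" }

// debugf is a stand-in for a level-gated logger such as log.Debug.
var debugEnabled = false

func debugf(format string, args ...interface{}) {
	if !debugEnabled {
		return // line dropped: fmt never sees args, so String() never runs
	}
	fmt.Printf(format+"\n", args...)
}

func main() {
	p := &Peer{ID: peerID{0x12, 0x20, 0xaa}}

	// Old style: Pretty() runs here even though the debug line is dropped.
	debugf("dialing %s", p.ID.Pretty())

	// New style: p is passed as-is; with debug disabled, no encoding happens.
	debugf("dialing %s", p)
}

With debugEnabled left false, only the first call prints "(encoding ran)", which is exactly the cost the commit message is trying to avoid.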

View File

@@ -101,7 +101,7 @@ func (s *SecurePipe) handshake() error {
if err != nil {
return err
}
- u.DOut("[%s] Remote Peer Identified as %s\n", s.local, s.remote)
+ u.DOut("%s Remote Peer Identified as %s\n", s.local, s.remote)
exchange, err := selectBest(SupportedExchanges, proposeResp.GetExchanges())
if err != nil {
@@ -205,7 +205,7 @@ func (s *SecurePipe) handshake() error {
return errors.New("Negotiation failed.")
}
- u.DOut("[%s] handshake: Got node id: %s\n", s.local, s.remote)
+ u.DOut("%s handshake: Got node id: %s\n", s.local, s.remote)
return nil
}

View File

@@ -55,7 +55,7 @@ type Peer struct {
// String prints out the peer.
func (p *Peer) String() string {
- return "[Peer " + p.ID.Pretty() + "]"
+ return "[Peer " + p.ID.String() + "]"
}
// Key returns the ID as a Key (string) for maps.

View File

@@ -221,7 +221,7 @@ func (dht *IpfsDHT) putProvider(ctx context.Context, p *peer.Peer, key string) e
return err
}
- log.Debug("[%s] putProvider: %s for %s", dht.self, p, key)
+ log.Debug("%s putProvider: %s for %s", dht.self, p, key)
if *rpmes.Key != *pmes.Key {
return errors.New("provider not added correctly")
}
@@ -345,7 +345,7 @@ func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
// Update signals to all routingTables to Update their last-seen status
// on the given peer.
func (dht *IpfsDHT) Update(p *peer.Peer) {
- log.Debug("updating peer: [%s] latency = %f\n", p, p.GetLatency().Seconds())
+ log.Debug("updating peer: %s latency = %f\n", p, p.GetLatency().Seconds())
removedCount := 0
for _, route := range dht.routingTables {
removed := route.Update(p)
@@ -401,7 +401,7 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []*peer.Peer
continue
}
- log.Debug("[%s] adding provider: %s for %s", dht.self, p, key)
+ log.Debug("%s adding provider: %s for %s", dht.self, p, key)
// Dont add outselves to the list
if p.ID.Equal(dht.self.ID) {

View File

@@ -287,7 +287,7 @@ func TestProvidesAsync(t *testing.T) {
select {
case p := <-provs:
if !p.ID.Equal(dhts[3].self.ID) {
- t.Fatalf("got a provider, but not the right one. %v", p.ID.Pretty())
+ t.Fatalf("got a provider, but not the right one. %s", p)
}
case <-ctx.Done():
t.Fatal("Didnt get back providers")

View File

@@ -38,7 +38,7 @@ func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler {
}
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error) {
- u.DOut("[%s] handleGetValue for key: %s\n", dht.self.ID.Pretty(), pmes.GetKey())
+ log.Debug("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())
// setup response
resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
@@ -50,10 +50,10 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error
}
// let's first check if we have the value locally.
- u.DOut("[%s] handleGetValue looking into ds\n", dht.self.ID.Pretty())
+ log.Debug("%s handleGetValue looking into ds\n", dht.self)
dskey := u.Key(pmes.GetKey()).DsKey()
iVal, err := dht.datastore.Get(dskey)
- u.DOut("[%s] handleGetValue looking into ds GOT %v\n", dht.self.ID.Pretty(), iVal)
+ log.Debug("%s handleGetValue looking into ds GOT %v\n", dht.self, iVal)
// if we got an unexpected error, bail.
if err != nil && err != ds.ErrNotFound {
@@ -65,7 +65,7 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error
// if we have the value, send it back
if err == nil {
- u.DOut("[%s] handleGetValue success!\n", dht.self.ID.Pretty())
+ log.Debug("%s handleGetValue success!\n", dht.self)
byts, ok := iVal.([]byte)
if !ok {
@@ -78,14 +78,14 @@ func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error
// if we know any providers for the requested value, return those.
provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
if len(provs) > 0 {
- u.DOut("handleGetValue returning %d provider[s]\n", len(provs))
+ log.Debug("handleGetValue returning %d provider[s]\n", len(provs))
resp.ProviderPeers = peersToPBPeers(provs)
}
// Find closest peer on given cluster to desired key and reply with that info
closer := dht.betterPeerToQuery(pmes)
if closer != nil {
- u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty())
+ log.Debug("handleGetValue returning a closer peer: '%s'\n", closer)
resp.CloserPeers = peersToPBPeers([]*peer.Peer{closer})
}
@@ -98,12 +98,12 @@ func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *Message) (*Message, error
defer dht.dslock.Unlock()
dskey := u.Key(pmes.GetKey()).DsKey()
err := dht.datastore.Put(dskey, pmes.GetValue())
- u.DOut("[%s] handlePutValue %v %v\n", dht.self.ID.Pretty(), dskey, pmes.GetValue())
+ log.Debug("%s handlePutValue %v %v\n", dht.self, dskey, pmes.GetValue())
return pmes, err
}
func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *Message) (*Message, error) {
- u.DOut("[%s] Responding to ping from [%s]!\n", dht.self.ID.Pretty(), p.ID.Pretty())
+ log.Debug("%s Responding to ping from %s!\n", dht.self, p)
return pmes, nil
}
@@ -119,16 +119,16 @@ func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *Message) (*Message, error
}
if closest == nil {
- u.PErr("handleFindPeer: could not find anything.\n")
+ log.Error("handleFindPeer: could not find anything.\n")
return resp, nil
}
if len(closest.Addresses) == 0 {
- u.PErr("handleFindPeer: no addresses for connected peer...\n")
+ log.Error("handleFindPeer: no addresses for connected peer...\n")
return resp, nil
}
- u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty())
+ log.Debug("handleFindPeer: sending back '%s'\n", closest)
resp.CloserPeers = peersToPBPeers([]*peer.Peer{closest})
return resp, nil
}
@@ -140,7 +140,7 @@ func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *Message) (*Message, e
dsk := u.Key(pmes.GetKey()).DsKey()
has, err := dht.datastore.Has(dsk)
if err != nil && err != ds.ErrNotFound {
- u.PErr("unexpected datastore error: %v\n", err)
+ log.Error("unexpected datastore error: %v\n", err)
has = false
}
@@ -172,8 +172,7 @@ type providerInfo struct {
func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *Message) (*Message, error) {
key := u.Key(pmes.GetKey())
- u.DOut("[%s] Adding [%s] as a provider for '%s'\n",
- dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty())
+ log.Debug("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))
dht.providers.AddProvider(key, p)
return pmes, nil // send back same msg as confirmation.
@@ -192,7 +191,7 @@ func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *Message) (*Message, err
for _, ps := range seq {
_, err := msg.FromObject(ps, pmes)
if err != nil {
- u.PErr("handleDiagnostics error creating message: %v\n", err)
+ log.Error("handleDiagnostics error creating message: %v\n", err)
continue
}
// dht.sender.SendRequest(context.TODO(), mes)

View File

@@ -151,7 +151,7 @@ func (r *dhtQueryRunner) Run(peers []*peer.Peer) (*dhtQueryResult, error) {
func (r *dhtQueryRunner) addPeerToQuery(next *peer.Peer, benchmark *peer.Peer) {
if next == nil {
// wtf why are peers nil?!?
- u.PErr("Query getting nil peers!!!\n")
+ log.Error("Query getting nil peers!!!\n")
return
}
@@ -170,7 +170,7 @@ func (r *dhtQueryRunner) addPeerToQuery(next *peer.Peer, benchmark *peer.Peer) {
r.peersSeen[next.Key()] = next
r.Unlock()
- log.Debug("adding peer to query: %v\n", next.ID.Pretty())
+ log.Debug("adding peer to query: %v\n", next)
// do this after unlocking to prevent possible deadlocks.
r.peersRemaining.Increment(1)
@@ -194,14 +194,14 @@ func (r *dhtQueryRunner) spawnWorkers() {
if !more {
return // channel closed.
}
- u.DOut("spawning worker for: %v\n", p.ID.Pretty())
+ log.Debug("spawning worker for: %v\n", p)
go r.queryPeer(p)
}
}
}
func (r *dhtQueryRunner) queryPeer(p *peer.Peer) {
- u.DOut("spawned worker for: %v\n", p.ID.Pretty())
+ log.Debug("spawned worker for: %v\n", p)
// make sure we rate limit concurrency.
select {
@@ -211,33 +211,33 @@ func (r *dhtQueryRunner) queryPeer(p *peer.Peer) {
return
}
- u.DOut("running worker for: %v\n", p.ID.Pretty())
+ log.Debug("running worker for: %v\n", p)
// finally, run the query against this peer
res, err := r.query.qfunc(r.ctx, p)
if err != nil {
- u.DOut("ERROR worker for: %v %v\n", p.ID.Pretty(), err)
+ log.Debug("ERROR worker for: %v %v\n", p, err)
r.Lock()
r.errs = append(r.errs, err)
r.Unlock()
} else if res.success {
- u.DOut("SUCCESS worker for: %v\n", p.ID.Pretty(), res)
+ log.Debug("SUCCESS worker for: %v\n", p, res)
r.Lock()
r.result = res
r.Unlock()
r.cancel() // signal to everyone that we're done.
} else if res.closerPeers != nil {
- u.DOut("PEERS CLOSER -- worker for: %v\n", p.ID.Pretty())
+ log.Debug("PEERS CLOSER -- worker for: %v\n", p)
for _, next := range res.closerPeers {
r.addPeerToQuery(next, p)
}
}
// signal we're done proccessing peer p
- u.DOut("completing worker for: %v\n", p.ID.Pretty())
+ log.Debug("completing worker for: %v\n", p)
r.peersRemaining.Decrement(1)
r.rateLimit <- struct{}{}
}

View File

@@ -18,7 +18,7 @@ import (
// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT
func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error {
- log.Debug("PutValue %s", key.Pretty())
+ log.Debug("PutValue %s", key)
err := dht.putLocal(key, value)
if err != nil {
return err
@@ -31,7 +31,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
}
query := newQuery(key, func(ctx context.Context, p *peer.Peer) (*dhtQueryResult, error) {
- log.Debug("[%s] PutValue qry part %v", dht.self.ID.Pretty(), p.ID.Pretty())
+ log.Debug("%s PutValue qry part %v", dht.self, p)
err := dht.putValueToNetwork(ctx, p, string(key), value)
if err != nil {
return nil, err
@@ -47,7 +47,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
// If the search does not succeed, a multiaddr string of a closer peer is
// returned along with util.ErrSearchIncomplete
func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
- log.Debug("Get Value [%s]", key.Pretty())
+ log.Debug("Get Value [%s]", key)
// If we have it local, dont bother doing an RPC!
// NOTE: this might not be what we want to do...
@@ -189,7 +189,7 @@ func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*Message_Peer, ps *peerSet
// FindProviders searches for peers who can provide the value for given key.
func (dht *IpfsDHT) FindProviders(ctx context.Context, key u.Key) ([]*peer.Peer, error) {
// get closest peer
- log.Debug("Find providers for: '%s'", key.Pretty())
+ log.Debug("Find providers for: '%s'", key)
p := dht.routingTables[0].NearestPeer(kb.ConvertKey(key))
if p == nil {
return nil, nil
@@ -333,11 +333,11 @@ func (dht *IpfsDHT) findPeerMultiple(ctx context.Context, id peer.ID) (*peer.Pee
// Ping a peer, log the time it took
func (dht *IpfsDHT) Ping(ctx context.Context, p *peer.Peer) error {
// Thoughts: maybe this should accept an ID and do a peer lookup?
- log.Info("ping %s start", p.ID.Pretty())
+ log.Info("ping %s start", p)
pmes := newMessage(Message_PING, "", 0)
_, err := dht.sendRequest(ctx, p, pmes)
- log.Info("ping %s end (err = %s)", p.ID.Pretty(), err)
+ log.Info("ping %s end (err = %s)", p, err)
return err
}

View File

@@ -101,7 +101,7 @@ func TestTableFind(t *testing.T) {
rt.Update(peers[i])
}
- t.Logf("Searching for peer: '%s'", peers[2].ID.Pretty())
+ t.Logf("Searching for peer: '%s'", peers[2])
found := rt.NearestPeer(ConvertPeerID(peers[2].ID))
if !found.ID.Equal(peers[2].ID) {
t.Fatalf("Failed to lookup known node...")
@@ -118,7 +118,7 @@ func TestTableFindMultiple(t *testing.T) {
rt.Update(peers[i])
}
- t.Logf("Searching for peer: '%s'", peers[2].ID.Pretty())
+ t.Logf("Searching for peer: '%s'", peers[2])
found := rt.NearestPeers(ConvertPeerID(peers[2].ID), 15)
if len(found) != 15 {
t.Fatalf("Got back different number of peers than we expected.")

View File

@@ -41,7 +41,7 @@ type Key string
// String is utililty function for printing out keys as strings (Pretty).
func (k Key) String() string {
- return key.Pretty()
+ return k.Pretty()
}
// Pretty returns Key in a b58 encoded string
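
One detail in this last hunk is easy to miss: both the old and the new line go through Pretty(), but the new one calls it on the method's receiver k, where the old body referred to key. A minimal, self-contained sketch of the resulting shape (hex stands in for the b58 encoding here so the snippet needs no external package; per the doc comment above, the real Pretty is b58):

package main

import (
	"encoding/hex"
	"fmt"
)

// Key mirrors the Key type in the hunk above.
type Key string

// String prints the key via Pretty, using the receiver k as in the fix above.
func (k Key) String() string {
	return k.Pretty()
}

// Pretty returns the key in an encoded, human-readable form. kubo encodes
// with b58; hex is substituted so this sketch stays dependency-free.
func (k Key) Pretty() string {
	return hex.EncodeToString([]byte(k))
}

func main() {
	fmt.Println(Key("example-key")) // fmt's default formatting calls String()
}

Since Key now satisfies fmt.Stringer, the log calls earlier in the commit can pass a Key directly instead of key.Pretty().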