mirror of https://github.com/ipfs/kubo.git (synced 2025-08-06 11:31:54 +08:00)
dropped down log.Errors
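
The diff below demotes log calls for routine, per-peer failures from Error down to Debug or Info, keeps operator-actionable conditions at Warning, and in passing fixes a few calls that passed printf-style verbs to the non-formatting Error variant. As a minimal sketch of the same level policy, here is the idea written against Go's standard log/slog package purely for illustration (the original code uses go-ipfs's own logger, whose Errorf/Warningf/Debugf methods appear in the hunks):

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	// Surface Debug output so the demoted messages remain visible here.
	h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})
	slog.SetDefault(slog.New(h))

	err := errors.New("connection reset by peer")

	// Routine per-peer failure: expected during normal DHT churn,
	// so it is worth Debug, not Error.
	slog.Debug("handle message error", "err", err)

	// Noticeable but non-fatal condition (e.g. a failed bootstrap round):
	// Warn, the middle ground this commit reaches via log.Warning.
	slog.Warn("bootstrap round failed", "err", err)
}
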
@@ -31,7 +31,7 @@ func (s *Swarm) InterfaceListenAddresses() ([]ma.Multiaddr, error) {
 func checkNATWarning(s *Swarm, observed ma.Multiaddr, expected ma.Multiaddr) {
 	listen, err := s.InterfaceListenAddresses()
 	if err != nil {
-		log.Errorf("Error retrieving swarm.InterfaceListenAddresses: %s", err)
+		log.Debugf("Error retrieving swarm.InterfaceListenAddresses: %s", err)
 		return
 	}
 
@@ -73,7 +73,7 @@ func (ids *IDService) IdentifyConn(c inet.Conn) {
 
 	s, err := c.NewStream()
 	if err != nil {
-		log.Error("error opening initial stream for %s", ID)
+		log.Debugf("error opening initial stream for %s", ID)
 		log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer())
 	} else {
 
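
Note that the removed line above passed a %s verb to the non-formatting log.Error, so the verb would have been printed literally rather than substituted; the demotion to Debugf fixes that in passing (the same repair appears in a later hunk, where log.Error becomes log.Errorf at unchanged level). A small sketch of the difference using the standard library's analogous log.Print/log.Printf pair, whose semantics mirror fmt.Sprint vs. fmt.Sprintf:

package main

import "log"

func main() {
	id := "QmPeer"

	// fmt.Sprint semantics: the verb is not interpreted.
	log.Print("error opening initial stream for %s", id)
	// Output: error opening initial stream for %sQmPeer

	// fmt.Sprintf semantics: the verb is substituted.
	log.Printf("error opening initial stream for %s", id)
	// Output: error opening initial stream for QmPeer
}
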
@@ -91,7 +91,7 @@ func (ids *IDService) IdentifyConn(c inet.Conn) {
 	ids.currmu.Unlock()
 
 	if !found {
-		log.Errorf("IdentifyConn failed to find channel (programmer error) for %s", c)
+		log.Debugf("IdentifyConn failed to find channel (programmer error) for %s", c)
 		return
 	}
 
@@ -109,7 +109,6 @@ func (m *Mux) HandleSync(s inet.Stream) {
 	name, handler, err := m.readHeader(s)
 	if err != nil {
 		err = fmt.Errorf("protocol mux error: %s", err)
-		log.Error(err)
 		log.Event(ctx, "muxError", lgbl.Error(err))
 		s.Close()
 		return
@@ -46,7 +46,7 @@ func NewRelayService(h host.Host, sh inet.StreamHandler) *RelayService {
 // requestHandler is the function called by clients
 func (rs *RelayService) requestHandler(s inet.Stream) {
 	if err := rs.handleStream(s); err != nil {
-		log.Error("RelayService error:", err)
+		log.Debugf("RelayService error:", err)
 	}
 }
 
@@ -104,7 +104,7 @@ func (s AutoUpdateSetting) String() string {
 func (v *Version) checkPeriodDuration() time.Duration {
 	d, err := strconv.Atoi(v.CheckPeriod)
 	if err != nil {
-		log.Error("config.Version.CheckPeriod parse error. Using default.")
+		log.Warning("config.Version.CheckPeriod parse error. Using default.")
 		return defaultCheckPeriod
 	}
 	return time.Duration(d)
@@ -151,7 +151,7 @@ func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.ID, key string) erro
 	// // only share WAN-friendly addresses ??
 	// pi.Addrs = addrutil.WANShareableAddrs(pi.Addrs)
 	if len(pi.Addrs) < 1 {
-		log.Errorf("%s putProvider: %s for %s error: no wan-friendly addresses", dht.self, p, u.Key(key), pi.Addrs)
+		log.Infof("%s putProvider: %s for %s error: no wan-friendly addresses", dht.self, p, u.Key(key), pi.Addrs)
 		return fmt.Errorf("no known addresses for self. cannot put provider.")
 	}
 
@@ -185,7 +185,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.ID,
 		// make sure record is valid.
 		err = dht.verifyRecordOnline(ctx, record)
 		if err != nil {
-			log.Error("Received invalid record!")
+			log.Info("Received invalid record! (discarded)")
 			return nil, nil, err
 		}
 		return record.GetValue(), nil, nil
@@ -235,7 +235,7 @@ func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) {
 	if u.Debug {
 		err = dht.verifyRecordLocally(rec)
 		if err != nil {
-			log.Errorf("local record verify failed: %s", err)
+			log.Debugf("local record verify failed: %s (discarded)", err)
 			return nil, err
 		}
 	}
@@ -248,7 +248,7 @@ func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) {
 func (dht *IpfsDHT) getOwnPrivateKey() (ci.PrivKey, error) {
 	sk := dht.peerstore.PrivKey(dht.self)
 	if sk == nil {
-		log.Errorf("%s dht cannot get own private key!", dht.self)
+		log.Warningf("%s dht cannot get own private key!", dht.self)
 		return nil, fmt.Errorf("cannot get private key to sign record!")
 	}
 	return sk, nil
@@ -323,7 +323,7 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) [
 	// == to self? thats bad
 	for _, p := range closer {
 		if p == dht.self {
-			log.Error("Attempted to return self! this shouldnt happen...")
+			log.Debug("Attempted to return self! this shouldnt happen...")
 			return nil
 		}
 	}
@@ -370,7 +370,7 @@ func (dht *IpfsDHT) PingRoutine(t time.Duration) {
 				ctx, _ := context.WithTimeout(dht.Context(), time.Second*5)
 				_, err := dht.Ping(ctx, p)
 				if err != nil {
-					log.Errorf("Ping error: %s", err)
+					log.Debugf("Ping error: %s", err)
 				}
 			}
 		case <-dht.Closing():
@@ -76,7 +76,7 @@ func (dht *IpfsDHT) BootstrapOnSignal(cfg BootstrapConfig, signal <-chan time.Ti
 
 		ctx := dht.Context()
 		if err := dht.runBootstrap(ctx, cfg); err != nil {
-			log.Error(err)
+			log.Warning(err)
 			// A bootstrapping error is important to notice but not fatal.
 		}
 	})
@@ -117,7 +117,7 @@ func (dht *IpfsDHT) runBootstrap(ctx context.Context, cfg BootstrapConfig) error
 			// woah, actually found a peer with that ID? this shouldn't happen normally
 			// (as the ID we use is not a real ID). this is an odd error worth logging.
 			err := fmt.Errorf("Bootstrap peer error: Actually FOUND peer. (%s, %s)", id, p)
-			log.Errorf("%s", err)
+			log.Warningf("%s", err)
 			merr = append(merr, err)
 		}
 	}
@@ -31,7 +31,7 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
 	// receive msg
 	pmes := new(pb.Message)
 	if err := r.ReadMsg(pmes); err != nil {
-		log.Errorf("Error unmarshaling data: %s", err)
+		log.Debugf("Error unmarshaling data: %s", err)
 		return
 	}
 
@@ -41,14 +41,14 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
 	// get handler for this msg type.
 	handler := dht.handlerForMsgType(pmes.GetType())
 	if handler == nil {
-		log.Error("got back nil handler from handlerForMsgType")
+		log.Debug("got back nil handler from handlerForMsgType")
 		return
 	}
 
 	// dispatch handler.
 	rpmes, err := handler(ctx, mPeer, pmes)
 	if err != nil {
-		log.Errorf("handle message error: %s", err)
+		log.Debugf("handle message error: %s", err)
 		return
 	}
 
@@ -60,7 +60,7 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
 
 	// send out response msg
 	if err := w.WriteMsg(rpmes); err != nil {
-		log.Errorf("send response error: %s", err)
+		log.Debugf("send response error: %s", err)
 		return
 	}
 
@@ -262,7 +262,7 @@ func waitForWellFormedTables(t *testing.T, dhts []*IpfsDHT, minPeers, avgPeers i
 	for {
 		select {
 		case <-timeoutA:
-			log.Error("did not reach well-formed routing tables by %s", timeout)
+			log.Errorf("did not reach well-formed routing tables by %s", timeout)
 			return false // failed
 		case <-time.After(5 * time.Millisecond):
 			if checkTables() {
@@ -78,7 +78,7 @@ func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Mess
 		rec := new(pb.Record)
 		err := proto.Unmarshal(byts, rec)
 		if err != nil {
-			log.Error("Failed to unmarshal dht record from datastore")
+			log.Debug("Failed to unmarshal dht record from datastore")
 			return nil, err
 		}
 
@@ -119,7 +119,7 @@ func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Mess
 	dskey := u.Key(pmes.GetKey()).DsKey()
 
 	if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil {
-		log.Errorf("Bad dht record in PUT from: %s. %s", u.Key(pmes.GetRecord().GetAuthor()), err)
+		log.Debugf("Bad dht record in PUT from: %s. %s", u.Key(pmes.GetRecord().GetAuthor()), err)
 		return nil, err
 	}
 
@@ -181,7 +181,7 @@ func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.
 	// check if we have this value, to add ourselves as provider.
 	has, err := dht.datastore.Has(key.DsKey())
 	if err != nil && err != ds.ErrNotFound {
-		log.Errorf("unexpected datastore error: %v\n", err)
+		log.Debugf("unexpected datastore error: %v\n", err)
 		has = false
 	}
 
@@ -226,12 +226,12 @@ func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.M
 		if pi.ID != p {
 			// we should ignore this provider reccord! not from originator.
 			// (we chould sign them and check signature later...)
-			log.Errorf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
+			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
 			continue
 		}
 
 		if len(pi.Addrs) < 1 {
-			log.Errorf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
+			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
 			continue
 		}
 
@@ -51,7 +51,7 @@ func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key u.Key) (<-chan peer
 
 		closer, err := dht.closerPeersSingle(ctx, key, p)
 		if err != nil {
-			log.Errorf("error getting closer peers: %s", err)
+			log.Debugf("error getting closer peers: %s", err)
 			return nil, err
 		}
 
@@ -84,7 +84,7 @@ func (m *Message_Peer) Addresses() []ma.Multiaddr {
 	for i, addr := range m.Addrs {
 		maddrs[i], err = ma.NewMultiaddrBytes(addr)
 		if err != nil {
-			log.Error("error decoding Multiaddr for peer: %s", m.GetId())
+			log.Debugf("error decoding Multiaddr for peer: %s", m.GetId())
 			continue
 		}
 	}
@@ -46,7 +46,7 @@ func (dht *IpfsDHT) getPublicKeyOnline(ctx context.Context, p peer.ID) (ci.PubKe
 
 	pk, err = ci.UnmarshalPublicKey(val)
 	if err != nil {
-		log.Errorf("Failed to unmarshal public key: %s", err)
+		log.Debugf("Failed to unmarshal public key: %s", err)
 		return nil, err
 	}
 	return pk, nil
@@ -45,7 +45,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
 
 	rec, err := record.MakePutRecord(sk, key, value)
 	if err != nil {
-		log.Error("Creation of record failed!")
+		log.Debug("Creation of record failed!")
 		return err
 	}
 
@@ -61,7 +61,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
 			defer wg.Done()
 			err := dht.putValueToPeer(ctx, p, key, rec)
 			if err != nil {
-				log.Errorf("failed putting value to peer: %s", err)
+				log.Debugf("failed putting value to peer: %s", err)
 			}
 		}(p)
 	}
@@ -142,7 +142,7 @@ func (dht *IpfsDHT) Provide(ctx context.Context, key u.Key) error {
 			log.Debugf("putProvider(%s, %s)", key, p)
 			err := dht.putProvider(ctx, p, string(key))
 			if err != nil {
-				log.Error(err)
+				log.Debug(err)
 			}
 		}(p)
 	}
@@ -214,7 +214,7 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, co
 				select {
 				case peerOut <- prov:
 				case <-ctx.Done():
-					log.Error("Context timed out sending more providers")
+					log.Debug("Context timed out sending more providers")
 					return nil, ctx.Err()
 				}
 			}
@@ -240,7 +240,7 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key u.Key, co
 	peers := dht.routingTable.ListPeers()
 	_, err := query.Run(ctx, peers)
 	if err != nil {
-		log.Errorf("Query error: %s", err)
+		log.Debugf("Query error: %s", err)
 		notif.PublishQueryEvent(ctx, &notif.QueryEvent{
 			Type:  notif.QueryError,
 			Extra: err.Error(),
@@ -265,7 +265,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.PeerInfo, er
 	// Sanity...
 	for _, p := range peers {
 		if p == id {
-			log.Error("Found target peer in list of closest peers...")
+			log.Debug("Found target peer in list of closest peers...")
 			return dht.peerstore.PeerInfo(p), nil
 		}
 	}
@@ -370,7 +370,7 @@ func (dht *IpfsDHT) FindPeersConnectedToPeer(ctx context.Context, id peer.ID) (<
 	// this does no error checking
 	go func() {
 		if _, err := query.Run(ctx, peers); err != nil {
-			log.Error(err)
+			log.Debug(err)
 		}
 
 		// close the peerchan channel when done.
@@ -135,7 +135,7 @@ func (rt *RoutingTable) NearestPeer(id ID) peer.ID {
 		return peers[0]
 	}
 
-	log.Errorf("NearestPeer: Returning nil, table size = %d", rt.Size())
+	log.Debugf("NearestPeer: Returning nil, table size = %d", rt.Size())
 	return ""
 }
 
@@ -34,11 +34,11 @@ func (v Validator) VerifyRecord(r *pb.Record, pk ci.PubKey) error {
 	blob := RecordBlobForSig(r)
 	ok, err := pk.Verify(blob, r.GetSignature())
 	if err != nil {
-		log.Error("Signature verify failed.")
+		log.Info("Signature verify failed. (ignored)")
 		return err
 	}
 	if !ok {
-		log.Error("dht found a forged record! (ignored)")
+		log.Info("dht found a forged record! (ignored)")
 		return ErrBadRecord
 	}
 
@@ -51,7 +51,7 @@ func (v Validator) VerifyRecord(r *pb.Record, pk ci.PubKey) error {
 
 	fnc, ok := v[parts[1]]
 	if !ok {
-		log.Errorf("Unrecognized key prefix: %s", parts[1])
+		log.Infof("Unrecognized key prefix: %s", parts[1])
 		return ErrInvalidRecordType
 	}
 
@@ -147,7 +147,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
 		n := &mdag.Node{Data: ft.WrapData(sb)}
 		_, err := dm.dagserv.Add(n)
 		if err != nil {
-			log.Errorf("Failed adding node to DAG service: %s", err)
+			log.Warningf("Failed adding node to DAG service: %s", err)
 			return 0, err
 		}
 		lnk, err := mdag.MakeLink(n)
@@ -158,7 +158,7 @@ func ShouldAutoUpdate(setting config.AutoUpdateSetting, newVer string) bool {
 
 	nv, err := semver.NewVersion(newVer)
 	if err != nil {
-		log.Errorf("could not parse version string: %s", err)
+		log.Infof("could not parse version string: %s", err)
 		return false
 	}
 
@@ -229,7 +229,7 @@ func CliCheckForUpdates(cfg *config.Config, repoPath string) error {
 
 	// if another, unexpected error occurred, note it.
 	if err != nil {
-		log.Errorf("Error while checking for update: %v", err)
+		log.Debugf("Error while checking for update: %v", err)
 		return nil
 	}
 
@@ -242,7 +242,7 @@ func CliCheckForUpdates(cfg *config.Config, repoPath string) error {
 	log.Noticef("Applying update %s", u.Version)
 
 	if err = Apply(u); err != nil {
-		log.Error(err.Error())
+		log.Debug(err)
		return nil
 	}
 
@@ -56,7 +56,7 @@ func SetupLogging() {
 		var err error
 		lvl, err = logging.LogLevel(logenv)
 		if err != nil {
-			log.Errorf("logging.LogLevel() Error: %q", err)
+			log.Debugf("logging.LogLevel() Error: %q", err)
 			lvl = logging.ERROR // reset to ERROR, could be undefined now(?)
 		}
 	}
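
The last hunk is where these levels get wired up: SetupLogging parses a level name from the environment and falls back to ERROR on a bad value. A rough equivalent using log/slog, again only a sketch (the IPFS_LOGGING variable name matches the historical go-ipfs one; modern Kubo reads GOLOG_LOG_LEVEL instead):

package main

import (
	"log/slog"
	"os"
)

// parseLevel maps an environment value such as "debug" or "error" onto a
// slog.Level, falling back to ERROR the way SetupLogging does above.
func parseLevel(env string) slog.Level {
	var lvl slog.Level
	if err := lvl.UnmarshalText([]byte(env)); err != nil {
		return slog.LevelError
	}
	return lvl
}

func main() {
	lvl := parseLevel(os.Getenv("IPFS_LOGGING"))
	h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: lvl})
	slog.SetDefault(slog.New(h))

	slog.Debug("visible only when IPFS_LOGGING=debug") // where the demoted messages land
	slog.Error("always visible")
}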