mirror of https://github.com/ipfs/kubo.git (synced 2025-06-25 23:21:54 +08:00)
Merge pull request #2853 from ipfs/feature/decapitalize-Debug
Decapitalized log.Debug messages
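The change applies one logging convention across these files: log.Debug (and related) messages start with a lowercase letter, unless the first word is an identifier such as DHT or Blockservice, and drop any trailing period. Only the message strings change; call sites and log levels are untouched. A minimal sketch of the convention, assuming the go-log package (github.com/ipfs/go-log) and its logging.Logger constructor for the package-level log variable; the subsystem name "example" is illustrative only:

package main

import (
	logging "github.com/ipfs/go-log"
)

// each subsystem declares its own named logger
var log = logging.Logger("example")

func main() {
	// before this change: log.Debug("Getting diagnostic.")
	// after: lowercase first word, no trailing period
	log.Debug("getting diagnostic")
}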
@@ -86,7 +86,7 @@ func (s *BlockService) GetBlock(ctx context.Context, k key.Key) (blocks.Block, e
 	if err == blockstore.ErrNotFound && s.Exchange != nil {
 		// TODO be careful checking ErrNotFound. If the underlying
 		// implementation changes, this will break.
-		log.Debug("Blockservice: Searching bitswap.")
+		log.Debug("Blockservice: Searching bitswap")
 		blk, err := s.Exchange.GetBlock(ctx, k)
 		if err != nil {
 			if err == blockstore.ErrNotFound {
@@ -97,7 +97,7 @@ func (s *BlockService) GetBlock(ctx context.Context, k key.Key) (blocks.Block, e
 		return blk, nil
 	}
 
-	log.Debug("Blockservice GetBlock: Not found.")
+	log.Debug("Blockservice GetBlock: Not found")
 	if err == blockstore.ErrNotFound {
 		return nil, ErrNotFound
 	}
@@ -119,7 +119,7 @@ func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan block
 				misses = append(misses, k)
 				continue
 			}
-			log.Debug("Blockservice: Got data in datastore.")
+			log.Debug("Blockservice: Got data in datastore")
 			select {
 			case out <- hit:
 			case <-ctx.Done():
@@ -54,7 +54,7 @@ environment variable:
 
 	log.Info("checking if daemon is running...")
 	if daemonLocked {
-		log.Debug("Ipfs daemon is running.")
+		log.Debug("ipfs daemon is running")
 		e := "ipfs daemon is running. please stop it to run this command"
 		return cmds.ClientError(e)
 	}
@@ -288,7 +288,7 @@ func (i *cmdInvocation) requestedHelp() (short bool, long bool, err error) {
 func callPreCommandHooks(ctx context.Context, details cmdDetails, req cmds.Request, root *cmds.Command) error {
 
 	log.Event(ctx, "callPreCommandHooks", &details)
-	log.Debug("Calling pre-command hooks...")
+	log.Debug("calling pre-command hooks...")
 
 	return nil
 }
@@ -325,7 +325,7 @@ func callCommand(ctx context.Context, req cmds.Request, root *cmds.Command, cmd
 	}
 
 	if client != nil && !cmd.External {
-		log.Debug("Executing command via API")
+		log.Debug("executing command via API")
 		res, err = client.Send(req)
 		if err != nil {
 			if isConnRefused(err) {
@@ -335,7 +335,7 @@ func callCommand(ctx context.Context, req cmds.Request, root *cmds.Command, cmd
 		}
 
 	} else {
-		log.Debug("Executing command locally")
+		log.Debug("executing command locally")
 
 		err := req.SetRootContext(ctx)
 		if err != nil {
@@ -120,11 +120,11 @@ func (i Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 }
 
 func (i internalHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	log.Debug("Incoming API request: ", r.URL)
+	log.Debug("incoming API request: ", r.URL)
 
 	defer func() {
 		if r := recover(); r != nil {
-			log.Error("A panic has occurred in the commands handler!")
+			log.Error("a panic has occurred in the commands handler!")
 			log.Error(r)
 
 			debug.PrintStack()
@@ -58,7 +58,7 @@ Publish an <ipfs-path> to another public key (not implemented):
 		cmds.StringOption("ttl", "Time duration this record should be cached for (caution: experimental)."),
 	},
 	Run: func(req cmds.Request, res cmds.Response) {
-		log.Debug("Begin Publish")
+		log.Debug("begin publish")
 		n, err := req.InvocContext().GetNode()
 		if err != nil {
 			res.SetError(err, cmds.ErrNormal)
@@ -135,7 +135,7 @@ func newID() string {
 
 // GetDiagnostic runs a diagnostics request across the entire network
 func (d *Diagnostics) GetDiagnostic(ctx context.Context, timeout time.Duration) ([]*DiagInfo, error) {
-	log.Debug("Getting diagnostic.")
+	log.Debug("getting diagnostic")
 	ctx, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
 
@@ -144,7 +144,7 @@ func (d *Diagnostics) GetDiagnostic(ctx context.Context, timeout time.Duration)
 	d.diagMap[diagID] = time.Now()
 	d.diagLock.Unlock()
 
-	log.Debug("Begin Diagnostic")
+	log.Debug("begin diagnostic")
 
 	peers := d.getPeers()
 	log.Debugf("Sending diagnostic request to %d peers.", len(peers))
@@ -236,7 +236,7 @@ func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.ID, pmes
 		return
 	}
 	if rpmes == nil {
-		log.Debug("Got no response back from diag request.")
+		log.Debug("got no response back from diag request")
 		return
 	}
 
@@ -55,7 +55,7 @@ func NewFileSystem(ipfs *core.IpfsNode, sk ci.PrivKey, ipfspath, ipnspath string
 
 // Root constructs the Root of the filesystem, a Root object.
 func (f *FileSystem) Root() (fs.Node, error) {
-	log.Debug("Filesystem, get root")
+	log.Debug("filesystem, get root")
 	return f.RootNode, nil
 }
 
@@ -417,7 +417,7 @@ func (fi *FileNode) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
 func (fi *File) Forget() {
 	err := fi.fi.Sync()
 	if err != nil {
-		log.Debug("Forget file error: ", err)
+		log.Debug("forget file error: ", err)
 	}
 }
 
@@ -70,7 +70,7 @@ func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
 
 // ReadDirAll reads a particular directory. Disallowed for root.
 func (*Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
-	log.Debug("Read Root.")
+	log.Debug("read Root")
 	return nil, fuse.EPERM
 }
 
@@ -89,7 +89,7 @@ func (s *Node) loadData() error {
 
 // Attr returns the attributes of a given node.
 func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error {
-	log.Debug("Node attr.")
+	log.Debug("Node attr")
 	if s.cached == nil {
 		if err := s.loadData(); err != nil {
 			return fmt.Errorf("readonly: loadData() failed: %s", err)
@@ -314,7 +314,7 @@ func ValidateIpnsRecord(k key.Key, val []byte) error {
 	case pb.IpnsEntry_EOL:
 		t, err := u.ParseRFC3339(string(entry.GetValidity()))
 		if err != nil {
-			log.Debug("Failed parsing time for ipns record EOL")
+			log.Debug("failed parsing time for ipns record EOL")
 			return err
 		}
 		if time.Now().After(t) {
@@ -86,7 +86,7 @@ func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*me
 		return nil, err
 	}
 
-	log.Debug("Resolve dag get.")
+	log.Debug("resolve dag get")
 	nd, err := s.DAG.Get(ctx, key.Key(h))
 	if err != nil {
 		return nil, err
@@ -267,7 +267,7 @@ func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) [
 	// == to self? thats bad
 	for _, p := range closer {
 		if p == dht.self {
-			log.Debug("Attempted to return self! this shouldnt happen...")
+			log.Debug("attempted to return self! this shouldn't happen...")
 			return nil
 		}
 	}
@@ -54,7 +54,7 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
 
 		// if nil response, return it before serializing
 		if rpmes == nil {
-			log.Debug("Got back nil response from request.")
+			log.Debug("got back nil response from request")
 			continue
 		}
 
@@ -108,7 +108,7 @@ func (dht *IpfsDHT) checkLocalDatastore(k key.Key) (*pb.Record, error) {
 	rec := new(pb.Record)
 	err = proto.Unmarshal(byts, rec)
 	if err != nil {
-		log.Debug("Failed to unmarshal DHT record from datastore.")
+		log.Debug("failed to unmarshal DHT record from datastore")
 		return nil, err
 	}
 
@@ -84,7 +84,7 @@ func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.Pub
 	// validity because a) we can't. b) we know the hash of the
 	// key we're looking for.
 	val := record.GetValue()
-	log.Debug("DHT got a value from other peer.")
+	log.Debug("DHT got a value from other peer")
 
 	pk, err = ci.UnmarshalPublicKey(val)
 	if err != nil {
@@ -46,7 +46,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key key.Key, value []byte) err
 
 	rec, err := record.MakePutRecord(sk, key, value, sign)
 	if err != nil {
-		log.Debug("Creation of record failed!")
+		log.Debug("creation of record failed!")
 		return err
 	}
 
@@ -346,7 +346,7 @@ func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key key.Key,
 			select {
 			case peerOut <- prov:
 			case <-ctx.Done():
-				log.Debug("Context timed out sending more providers")
+				log.Debug("context timed out sending more providers")
 				return nil, ctx.Err()
 			}
 		}
@@ -397,7 +397,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (pstore.PeerInfo,
 	// Sanity...
 	for _, p := range peers {
 		if p == id {
-			log.Debug("Found target peer in list of closest peers...")
+			log.Debug("found target peer in list of closest peers...")
 			return dht.peerstore.PeerInfo(p), nil
 		}
 	}