Mirror of https://github.com/ipfs/kubo.git

Merge pull request #203 from jbenet/govet

go-vet friendly code
Juan Batiz-Benet committed 2014-10-25 03:55:40 -07:00
59 changed files with 278 additions and 209 deletions
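In short: `go vet`'s printf check flags format-string arguments passed to functions it does not recognize as formatting functions. The diff below therefore splits the vendored logger into Sprint-style methods (Error, Debug, ...) and Sprintf-style variants (Errorf, Debugf, ...), renames call sites accordingly (log.Error("%s", err) becomes log.Errorf(...), t.Error becomes t.Errorf where a format string is used), adds keys to composite literals, and fixes printf verb and method-value mistakes. A minimal sketch of the class of bug vet catches here (hypothetical helper, not from this repo):

    package main

    import (
        "errors"
        "log"
    )

    // notify is a hypothetical Sprint-style helper: args are joined, never formatted.
    func notify(args ...interface{}) { log.Println(args...) }

    func main() {
        err := errors.New("boom")
        log.Printf("failed: %s", err) // vet checks this: Printf is a known formatter
        notify("failed: %s", err)     // vet cannot check this; prints the literal "failed: %s boom"
    }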

Godeps/Godeps.json (generated)
View File

@@ -87,6 +87,10 @@
     "ImportPath": "github.com/jbenet/go-is-domain",
     "Rev": "93b717f2ae17838a265e30277275ee99ee7198d6"
 },
+{
+    "ImportPath": "github.com/jbenet/go-logging",
+    "Rev": "74bec4b83f6d45d1402c1e9d94c0c29e39f6e0ea"
+},
 {
     "ImportPath": "github.com/jbenet/go-msgio",
     "Rev": "c9069ab79c95aa0686347b516972c7329c4391f2"
@@ -109,10 +113,6 @@
     "ImportPath": "github.com/mitchellh/go-homedir",
     "Rev": "7d2d8c8a4e078ce3c58736ab521a40b37a504c52"
 },
-{
-    "ImportPath": "github.com/op/go-logging",
-    "Rev": "3df864a88c7f005e676db4f026a4fe2f14929be3"
-},
 {
     "ImportPath": "github.com/syndtr/goleveldb/leveldb",
     "Rev": "99056d50e56252fbe0021d5c893defca5a76baf8"

View File

@@ -80,4 +80,3 @@ You can use `go get -u` to update the package.
 For docs, see http://godoc.org/github.com/op/go-logging or run:
 
     $ godoc github.com/op/go-logging

View File

@@ -3,7 +3,7 @@ package main
 import (
     "os"
 
-    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
+    "github.com/op/go-logging"
 )
 
 var log = logging.MustGetLogger("example")

View File

(binary image changed: 12 KiB before, 12 KiB after)

View File

@@ -55,6 +55,7 @@ type Record struct {
     formatted string
 }
 
+// Formatted returns the string-formatted version of a record.
 func (r *Record) Formatted(calldepth int) string {
     if r.formatted == "" {
         var buf bytes.Buffer
@@ -64,6 +65,8 @@ func (r *Record) Formatted(calldepth int) string {
     return r.formatted
 }
 
+// Message returns a string message for outputting. Redacts any record args
+// that implement the Redactor interface
 func (r *Record) Message() string {
     if r.message == nil {
         // Redact the arguments that implements the Redactor interface
@@ -78,12 +81,22 @@ func (r *Record) Message() string {
     return *r.message
 }
 
+// Logger is a logging unit. It controls the flow of messages to a given
+// (swappable) backend.
 type Logger struct {
     Module string
+    backend LeveledBackend
+    haveBackend bool
+}
+
+// SetBackend changes the backend of the logger.
+func (l *Logger) SetBackend(backend LeveledBackend) {
+    l.backend = backend
+    l.haveBackend = true
 }
 
+// TODO call NewLogger and remove MustGetLogger?
 // GetLogger creates and returns a Logger object based on the module name.
-// TODO call NewLogger and remove MustGetLogger?
 func GetLogger(module string) (*Logger, error) {
     return &Logger{Module: module}, nil
 }
@@ -148,6 +161,11 @@ func (l *Logger) log(lvl Level, format string, args ...interface{}) {
     // calldepth=2 brings the stack up to the caller of the level
     // methods, Info(), Fatal(), etc.
+    if l.haveBackend {
+        l.backend.Log(lvl, 2, record)
+        return
+    }
+
     defaultBackend.Log(lvl, 2, record)
 }
@@ -178,33 +196,69 @@ func (l *Logger) Panicf(format string, args ...interface{}) {
     panic(s)
 }
 
-// Critical logs a message using CRITICAL as log level.
-func (l *Logger) Critical(format string, args ...interface{}) {
+// Critical logs a message using CRITICAL as log level. (fmt.Sprint())
+func (l *Logger) Critical(args ...interface{}) {
+    s := fmt.Sprint(args...)
+    l.log(CRITICAL, "%s", s)
+}
+
+// Criticalf logs a message using CRITICAL as log level.
+func (l *Logger) Criticalf(format string, args ...interface{}) {
     l.log(CRITICAL, format, args...)
 }
 
-// Error logs a message using ERROR as log level.
-func (l *Logger) Error(format string, args ...interface{}) {
+// Error logs a message using ERROR as log level. (fmt.Sprint())
+func (l *Logger) Error(args ...interface{}) {
+    s := fmt.Sprint(args...)
+    l.log(ERROR, "%s", s)
+}
+
+// Errorf logs a message using ERROR as log level.
+func (l *Logger) Errorf(format string, args ...interface{}) {
     l.log(ERROR, format, args...)
 }
 
 // Warning logs a message using WARNING as log level.
-func (l *Logger) Warning(format string, args ...interface{}) {
+func (l *Logger) Warning(args ...interface{}) {
+    s := fmt.Sprint(args...)
+    l.log(WARNING, "%s", s)
+}
+
+// Warningf logs a message using WARNING as log level.
+func (l *Logger) Warningf(format string, args ...interface{}) {
     l.log(WARNING, format, args...)
 }
 
 // Notice logs a message using NOTICE as log level.
-func (l *Logger) Notice(format string, args ...interface{}) {
+func (l *Logger) Notice(args ...interface{}) {
+    s := fmt.Sprint(args...)
+    l.log(NOTICE, "%s", s)
+}
+
+// Noticef logs a message using NOTICE as log level.
+func (l *Logger) Noticef(format string, args ...interface{}) {
     l.log(NOTICE, format, args...)
 }
 
 // Info logs a message using INFO as log level.
-func (l *Logger) Info(format string, args ...interface{}) {
+func (l *Logger) Info(args ...interface{}) {
+    s := fmt.Sprint(args...)
+    l.log(INFO, "%s", s)
+}
+
+// Infof logs a message using INFO as log level.
+func (l *Logger) Infof(format string, args ...interface{}) {
     l.log(INFO, format, args...)
 }
 
 // Debug logs a message using DEBUG as log level.
-func (l *Logger) Debug(format string, args ...interface{}) {
+func (l *Logger) Debug(args ...interface{}) {
+    s := fmt.Sprint(args...)
+    l.log(DEBUG, "%s", s)
+}
+
+// Debugf logs a message using DEBUG as log level.
+func (l *Logger) Debugf(format string, args ...interface{}) {
    l.log(DEBUG, format, args...)
 }
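With the split above, the vendored logger follows the fmt convention end to end: plain methods behave like fmt.Sprint, `f`-suffixed methods like fmt.Sprintf. A sketch of the resulting call sites, assuming the post-change API shown above:

    package main

    import (
        "errors"

        logging "github.com/op/go-logging"
    )

    func main() {
        log := logging.MustGetLogger("example")
        err := errors.New("boom")

        log.Error("dial failed: ", err)    // Sprint-style: arguments are concatenated
        log.Errorf("dial failed: %s", err) // Sprintf-style: go vet verifies the verbs
    }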

View File

@@ -29,8 +29,25 @@ func TestRedact(t *testing.T) {
     backend := InitForTesting(DEBUG)
     password := Password("123456")
     log := MustGetLogger("test")
-    log.Debug("foo %s", password)
+    log.Debugf("foo %s", password)
     if "foo ******" != MemoryRecordN(backend, 0).Formatted(0) {
         t.Errorf("redacted line: %v", MemoryRecordN(backend, 0))
     }
 }
+
+func TestPrivateBackend(t *testing.T) {
+    stdBackend := InitForTesting(DEBUG)
+    log := MustGetLogger("test")
+    privateBackend := NewMemoryBackend(10240)
+    lvlBackend := AddModuleLevel(privateBackend)
+    lvlBackend.SetLevel(DEBUG, "")
+    log.SetBackend(lvlBackend)
+    log.Debug("to private backend")
+    if stdBackend.size > 0 {
+        t.Errorf("something in stdBackend, size of backend: %d", stdBackend.size)
+    }
+    if "to private backend" == MemoryRecordN(privateBackend, 0).Formatted(0) {
+        t.Errorf("logged to defaultBackend: %s", MemoryRecordN(privateBackend, 0))
+    }
+}

View File

@@ -51,7 +51,7 @@ func TestMemoryBackend(t *testing.T) {
     // Run 13 times, the resulting vector should be [5..12]
     for i := 0; i < 13; i++ {
-        log.Info("%d", i)
+        log.Infof("%d", i)
     }
 
     if 8 != backend.size {
@@ -89,7 +89,7 @@ func TestChannelMemoryBackend(t *testing.T) {
     // Run 13 times, the resulting vector should be [5..12]
     for i := 0; i < 13; i++ {
-        log.Info("%d", i)
+        log.Infof("%d", i)
     }
     backend.Flush()

View File

@@ -11,6 +11,7 @@ type datastoreBlockSet struct {
     bset BlockSet
 }
 
+// NewDBWrapperSet returns a new blockset wrapping a given datastore
 func NewDBWrapperSet(d ds.Datastore, bset BlockSet) BlockSet {
     return &datastoreBlockSet{
         dstore: d,
@@ -21,7 +22,7 @@ func NewDBWrapperSet(d ds.Datastore, bset BlockSet) BlockSet {
 func (d *datastoreBlockSet) AddBlock(k util.Key) {
     err := d.dstore.Put(k.DsKey(), []byte{})
     if err != nil {
-        log.Error("blockset put error: %s", err)
+        log.Errorf("blockset put error: %s", err)
     }
 
     d.bset.AddBlock(k)

View File

@@ -26,10 +26,10 @@ func TestOptionParsing(t *testing.T) {
         t.Error("Should have passed")
     }
     if len(opts) != 4 || opts["beep"] != "" || opts["boop"] != "lol" || opts["c"] != "" || opts["foo"] != "5" {
-        t.Error("Returned options were defferent than expected: %v", opts)
+        t.Errorf("Returned options were defferent than expected: %v", opts)
     }
     if len(input) != 2 || input[0] != "test2" || input[1] != "beep" {
-        t.Error("Returned input was different than expected: %v", input)
+        t.Errorf("Returned input was different than expected: %v", input)
     }
 
     _, _, err = parseOptions([]string{"-beep=1", "-boop=2", "-beep=3"})
@@ -39,9 +39,9 @@ func TestOptionParsing(t *testing.T) {
     path, args := parsePath([]string{"test", "beep", "boop"}, cmd)
     if len(path) != 1 || path[0] != "test" {
-        t.Error("Returned path was defferent than expected: %v", path)
+        t.Errorf("Returned path was defferent than expected: %v", path)
     }
     if len(args) != 2 || args[0] != "beep" || args[1] != "boop" {
-        t.Error("Returned args were different than expected: %v", args)
+        t.Errorf("Returned args were different than expected: %v", args)
     }
 }

View File

@@ -73,8 +73,8 @@ func TestOptionValidation(t *testing.T) {
     req = NewEmptyRequest()
     req.SetOption("b", ":)")
     res = cmd.Call(req)
-    if res.Error == nil {
-        t.Error(res.Error, "Should have failed (string value not convertible to int)")
+    if res.Error() == nil {
+        t.Error(res.Error(), "Should have failed (string value not convertible to int)")
     }
 }

View File

@@ -43,7 +43,7 @@ func printRefs(n *core.IpfsNode, nd *mdag.Node, refSeen map[u.Key]bool, recursiv
     if recursive {
         nd, err := n.DAG.Get(u.Key(link.Hash))
         if err != nil {
-            log.Error("error: cannot retrieve %s (%s)\n", link.Hash.B58String(), err)
+            log.Errorf("error: cannot retrieve %s (%s)", link.Hash.B58String(), err)
             return
         }

View File

@@ -6,8 +6,8 @@ import (
     "fmt"
 
     context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
-    ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
     b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
+    ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
     ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
 
     bserv "github.com/jbenet/go-ipfs/blockservice"
@@ -230,25 +230,25 @@ func initIdentity(cfg *config.Config, peers peer.Peerstore, online bool) (peer.P
 func initConnections(ctx context.Context, cfg *config.Config, pstore peer.Peerstore, route *dht.IpfsDHT) {
     for _, p := range cfg.Bootstrap {
         if p.PeerID == "" {
-            log.Error("error: peer does not include PeerID. %v", p)
+            log.Errorf("error: peer does not include PeerID. %v", p)
         }
 
         maddr, err := ma.NewMultiaddr(p.Address)
         if err != nil {
-            log.Error("%s", err)
+            log.Error(err)
             continue
         }
 
         // setup peer
         npeer, err := pstore.Get(peer.DecodePrettyID(p.PeerID))
         if err != nil {
-            log.Error("Bootstrapping error: %v", err)
+            log.Errorf("Bootstrapping error: %v", err)
             continue
         }
 
         npeer.AddAddress(maddr)
         if _, err = route.Connect(ctx, npeer); err != nil {
-            log.Error("Bootstrapping error: %v", err)
+            log.Errorf("Bootstrapping error: %v", err)
         }
     }
 }

View File

@@ -53,13 +53,13 @@ func NewDaemonListener(ipfsnode *core.IpfsNode, addr ma.Multiaddr, confdir strin
     ofi, err := os.Create(confdir + "/rpcaddress")
     if err != nil {
-        log.Warning("Could not create rpcaddress file: %s", err)
+        log.Warningf("Could not create rpcaddress file: %s", err)
         return nil, err
     }
 
     _, err = ofi.Write([]byte(addr.String()))
     if err != nil {
-        log.Warning("Could not write to rpcaddress file: %s", err)
+        log.Warningf("Could not write to rpcaddress file: %s", err)
         return nil, err
     }
     ofi.Close()
@@ -148,7 +148,7 @@ func (dl *DaemonListener) handleConnection(conn manet.Conn) {
         err = fmt.Errorf("Invalid Command: '%s'", command.Command)
     }
 
     if err != nil {
-        log.Error("%s: %s", command.Command, err)
+        log.Errorf("%s: %s", command.Command, err)
         fmt.Fprintln(conn, err)
     }
 }

View File

@@ -72,7 +72,7 @@ func serverIsRunning(confdir string) bool {
     var err error
     confdir, err = u.TildeExpansion(confdir)
     if err != nil {
-        log.Error("Tilde Expansion Failed: %s", err)
+        log.Errorf("Tilde Expansion Failed: %s", err)
         return false
     }
     lk, err := daemonLock(confdir)

View File

@@ -105,7 +105,7 @@ func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error)
     log.Debug("Begin Diagnostic")
 
     peers := d.getPeers()
-    log.Debug("Sending diagnostic request to %d peers.", len(peers))
+    log.Debugf("Sending diagnostic request to %d peers.", len(peers))
 
     var out []*DiagInfo
     di := d.getDiagInfo()
@@ -116,12 +116,12 @@ func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error)
     respdata := make(chan []byte)
     sends := 0
     for _, p := range peers {
-        log.Debug("Sending getDiagnostic to: %s", p)
+        log.Debugf("Sending getDiagnostic to: %s", p)
         sends++
         go func(p peer.Peer) {
             data, err := d.getDiagnosticFromPeer(ctx, p, pmes)
             if err != nil {
-                log.Error("GetDiagnostic error: %v", err)
+                log.Errorf("GetDiagnostic error: %v", err)
                 respdata <- nil
                 return
             }
@@ -147,7 +147,7 @@ func AppendDiagnostics(data []byte, cur []*DiagInfo) []*DiagInfo {
         err := dec.Decode(di)
         if err != nil {
             if err != io.EOF {
-                log.Error("error decoding DiagInfo: %v", err)
+                log.Errorf("error decoding DiagInfo: %v", err)
             }
             break
         }
@@ -189,7 +189,7 @@ func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Mes
     }
 
     rtt := time.Since(start)
-    log.Info("diagnostic request took: %s", rtt.String())
+    log.Infof("diagnostic request took: %s", rtt.String())
 
     rpmes := new(pb.Message)
     if err := proto.Unmarshal(rmes.Data(), rpmes); err != nil {
@@ -200,7 +200,7 @@ func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Mes
 }
 
 func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
-    log.Debug("HandleDiagnostic from %s for id = %s", p, pmes.GetDiagID())
+    log.Debugf("HandleDiagnostic from %s for id = %s", p, pmes.GetDiagID())
     resp := newMessage(pmes.GetDiagID())
     d.diagLock.Lock()
     _, found := d.diagMap[pmes.GetDiagID()]
@@ -220,12 +220,12 @@ func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Messa
     respdata := make(chan []byte)
     sendcount := 0
     for _, p := range d.getPeers() {
-        log.Debug("Sending diagnostic request to peer: %s", p)
+        log.Debugf("Sending diagnostic request to peer: %s", p)
         sendcount++
         go func(p peer.Peer) {
             out, err := d.getDiagnosticFromPeer(ctx, p, pmes)
             if err != nil {
-                log.Error("getDiagnostic error: %v", err)
+                log.Errorf("getDiagnostic error: %v", err)
                 respdata <- nil
                 return
             }
@@ -237,7 +237,7 @@ func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Messa
         out := <-respdata
         _, err := buf.Write(out)
         if err != nil {
-            log.Error("getDiagnostic write output error: %v", err)
+            log.Errorf("getDiagnostic write output error: %v", err)
             continue
         }
     }
@@ -263,18 +263,18 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, mes msg.NetMessage) msg
     pmes := new(pb.Message)
     err := proto.Unmarshal(mData, pmes)
     if err != nil {
-        log.Error("Failed to decode protobuf message: %v", err)
+        log.Errorf("Failed to decode protobuf message: %v", err)
         return nil
     }
 
     // Print out diagnostic
-    log.Info("[peer: %s] Got message from [%s]\n",
+    log.Infof("[peer: %s] Got message from [%s]\n",
         d.self.ID().Pretty(), mPeer.ID().Pretty())
 
     // dispatch handler.
     rpmes, err := d.handleDiagnostic(mPeer, pmes)
     if err != nil {
-        log.Error("handleDiagnostic error: %s", err)
+        log.Errorf("handleDiagnostic error: %s", err)
         return nil
     }
@@ -286,7 +286,7 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, mes msg.NetMessage) msg
     // serialize response msg
     rmes, err := msg.FromObject(mPeer, rpmes)
     if err != nil {
-        log.Error("Failed to encode protobuf message: %v", err)
+        log.Errorf("Failed to encode protobuf message: %v", err)
         return nil
     }

View File

@@ -88,13 +88,13 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error)
     log.Debug("bitswap dialing peer: %s", p)
     err := bs.sender.DialPeer(p)
     if err != nil {
-        log.Error("Error sender.DialPeer(%s)", p)
+        log.Errorf("Error sender.DialPeer(%s)", p)
         return
     }
 
     response, err := bs.sender.SendRequest(ctx, p, message)
     if err != nil {
-        log.Error("Error sender.SendRequest(%s)", p)
+        log.Errorf("Error sender.SendRequest(%s)", p)
         return
     }
     // FIXME ensure accounting is handled correctly when

View File

@@ -254,7 +254,7 @@ func TestFastRepublish(t *testing.T) {
     hasPublished := func() bool {
         res, err := node.Namesys.Resolve(pubkeyHash)
         if err != nil {
-            t.Fatal("resolve err: %v", err)
+            t.Fatalf("resolve err: %v", err)
         }
         return res != resolvedHash
     }
@@ -264,7 +264,7 @@ func TestFastRepublish(t *testing.T) {
     // at this point, should not have written dataA and not have written dataB
     rbuf, err := ioutil.ReadFile(fname)
     if err != nil || !bytes.Equal(rbuf, dataA) {
-        t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
+        t.Fatalf("Data inconsistent! %v %v", err, string(rbuf))
     }
 
     if hasPublished() {
@@ -276,7 +276,7 @@ func TestFastRepublish(t *testing.T) {
     // at this point, should have written written dataB, but not published it
     rbuf, err = ioutil.ReadFile(fname)
     if err != nil || !bytes.Equal(rbuf, dataB) {
-        t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
+        t.Fatalf("Data inconsistent! %v %v", err, string(rbuf))
     }
 
     if hasPublished() {
@@ -288,7 +288,7 @@ func TestFastRepublish(t *testing.T) {
     // at this point, should have written written dataB, and published it
     rbuf, err = ioutil.ReadFile(fname)
     if err != nil || !bytes.Equal(rbuf, dataB) {
-        t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
+        t.Fatalf("Data inconsistent! %v %v", err, string(rbuf))
     }
 
     if !hasPublished() {

View File

@@ -62,7 +62,7 @@ func CreateRoot(n *core.IpfsNode, keys []ci.PrivKey, ipfsroot string) (*Root, er
     pub := k.GetPublic()
     hash, err := pub.Hash()
     if err != nil {
-        log.Error("Read Root Error: %s", err)
+        log.Errorf("Read Root Error: %s", err)
         return nil, err
     }
     root.LocalLink = &Link{u.Key(hash).Pretty()}
@@ -91,7 +91,7 @@ func CreateRoot(n *core.IpfsNode, keys []ci.PrivKey, ipfsroot string) (*Root, er
     }
 
     if !u.IsValidHash(pointsTo) {
-        log.Critical("Got back bad data from namesys resolve! [%s]", pointsTo)
+        log.Criticalf("Got back bad data from namesys resolve! [%s]", pointsTo)
         return nil, nil
     }
@@ -132,7 +132,7 @@ func (*Root) Attr() fuse.Attr {
 
 // Lookup performs a lookup under this node.
 func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
-    log.Debug("ipns: Root Lookup: '%s'", name)
+    log.Debugf("ipns: Root Lookup: '%s'", name)
     switch name {
     case "mach_kernel", ".hidden", "._.":
         // Just quiet some log noise on OS X.
@@ -151,10 +151,10 @@ func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
         return nd, nil
     }
 
-    log.Debug("ipns: Falling back to resolution for [%s].", name)
+    log.Debugf("ipns: Falling back to resolution for [%s].", name)
     resolved, err := s.Ipfs.Namesys.Resolve(name)
     if err != nil {
-        log.Warning("ipns: namesys resolve error: %s", err)
+        log.Warningf("ipns: namesys resolve error: %s", err)
         return nil, fuse.ENOENT
     }
@@ -174,7 +174,7 @@ func (r *Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
     pub := k.GetPublic()
     hash, err := pub.Hash()
     if err != nil {
-        log.Error("Read Root Error: %s", err)
+        log.Errorf("Read Root Error: %s", err)
         continue
     }
     ent := fuse.Dirent{
@@ -220,7 +220,7 @@ func (s *Node) Attr() fuse.Attr {
     if s.cached == nil {
         err := s.loadData()
         if err != nil {
-            log.Error("Error loading PBData for file: '%s'", s.name)
+            log.Errorf("Error loading PBData for file: '%s'", s.name)
         }
     }
     switch s.cached.GetType() {
@@ -229,7 +229,7 @@ func (s *Node) Attr() fuse.Attr {
     case ftpb.Data_File, ftpb.Data_Raw:
         size, err := ft.DataSize(s.Nd.Data)
         if err != nil {
-            log.Error("Error getting size of file: %s", err)
+            log.Errorf("Error getting size of file: %s", err)
             size = 0
         }
         return fuse.Attr{
@@ -245,7 +245,7 @@ func (s *Node) Attr() fuse.Attr {
 
 // Lookup performs a lookup under this node.
 func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
-    log.Debug("ipns: node[%s] Lookup '%s'", s.name, name)
+    log.Debugf("ipns: node[%s] Lookup '%s'", s.name, name)
     nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
     if err != nil {
         // todo: make this error more versatile.
@@ -294,7 +294,7 @@ func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
 
 // ReadAll reads the object data as file data
 func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
-    log.Debug("ipns: ReadAll [%s]", s.name)
+    log.Debugf("ipns: ReadAll [%s]", s.name)
     r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG)
     if err != nil {
         return nil, err
@@ -303,20 +303,20 @@ func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
     // what if i have a 6TB file? GG RAM.
     b, err := ioutil.ReadAll(r)
     if err != nil {
-        log.Error("[%s] Readall error: %s", s.name, err)
+        log.Errorf("[%s] Readall error: %s", s.name, err)
         return nil, err
     }
     return b, nil
 }
 
 func (n *Node) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
-    log.Debug("ipns: Node Write [%s]: flags = %s, offset = %d, size = %d", n.name, req.Flags.String(), req.Offset, len(req.Data))
+    log.Debugf("ipns: Node Write [%s]: flags = %s, offset = %d, size = %d", n.name, req.Flags.String(), req.Offset, len(req.Data))
     if n.dagMod == nil {
         // Create a DagModifier to allow us to change the existing dag node
         dmod, err := uio.NewDagModifier(n.Nd, n.Ipfs.DAG, chunk.DefaultSplitter)
         if err != nil {
-            log.Error("Error creating dag modifier: %s", err)
+            log.Errorf("Error creating dag modifier: %s", err)
             return err
         }
         n.dagMod = dmod
@@ -330,13 +330,13 @@ func (n *Node) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.I
 }
 
 func (n *Node) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error {
-    log.Debug("Got flush request [%s]!", n.name)
+    log.Debugf("Got flush request [%s]!", n.name)
 
     // If a write has happened
     if n.dagMod != nil {
         newNode, err := n.dagMod.GetNode()
         if err != nil {
-            log.Error("Error getting dag node from dagMod: %s", err)
+            log.Errorf("Error getting dag node from dagMod: %s", err)
             return err
         }
@@ -344,7 +344,7 @@ func (n *Node) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error {
             log.Debug("updating self in parent!")
             err := n.parent.update(n.name, newNode)
             if err != nil {
-                log.Critical("error in updating ipns dag tree: %s", err)
+                log.Criticalf("error in updating ipns dag tree: %s", err)
                 // return fuse.ETHISISPRETTYBAD
                 return err
             }
@@ -397,20 +397,20 @@ func (n *Node) republishRoot() error {
     // Add any nodes that may be new to the DAG service
     err := n.Ipfs.DAG.AddRecursive(root.Nd)
     if err != nil {
-        log.Critical("ipns: Dag Add Error: %s", err)
+        log.Criticalf("ipns: Dag Add Error: %s", err)
        return err
     }
 
     ndkey, err := root.Nd.Key()
     if err != nil {
-        log.Error("getKey error: %s", err)
+        log.Errorf("getKey error: %s", err)
         return err
     }
     log.Debug("Publishing changes!")
 
     err = n.Ipfs.Namesys.Publish(root.key, ndkey.Pretty())
     if err != nil {
-        log.Error("ipns: Publish Failed: %s", err)
+        log.Errorf("ipns: Publish Failed: %s", err)
         return err
     }
     return nil
@@ -442,7 +442,7 @@ func (n *Node) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error)
     if n.parent != nil {
         err := n.parent.update(n.name, nnode)
         if err != nil {
-            log.Critical("Error updating node: %s", err)
+            log.Criticalf("Error updating node: %s", err)
             return nil, err
         }
     }
@@ -472,7 +472,7 @@ func (n *Node) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error)
 }
 
 func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) {
-    log.Debug("Got create request: %s", req.Name)
+    log.Debugf("Got create request: %s", req.Name)
 
     // New 'empty' file
     nd := &mdag.Node{Data: ft.FilePBData(nil, 0)}
@@ -482,13 +482,13 @@ func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr f
     err := nnode.AddNodeLink(req.Name, nd)
     if err != nil {
-        log.Error("Error adding child to node: %s", err)
+        log.Errorf("Error adding child to node: %s", err)
         return nil, nil, err
     }
     if n.parent != nil {
         err := n.parent.update(n.name, nnode)
         if err != nil {
-            log.Critical("Error updating node: %s", err)
+            log.Criticalf("Error updating node: %s", err)
             // Can we panic, please?
             return nil, nil, err
         }
@@ -500,7 +500,7 @@ func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr f
 }
 
 func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {
-    log.Debug("[%s] Got Remove request: %s", n.name, req.Name)
+    log.Debugf("[%s] Got Remove request: %s", n.name, req.Name)
     nnode := n.Nd.Copy()
     err := nnode.RemoveNodeLink(req.Name)
     if err != nil {
@@ -511,7 +511,7 @@ func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {
     if n.parent != nil {
         err := n.parent.update(n.name, nnode)
         if err != nil {
-            log.Critical("Error updating node: %s", err)
+            log.Criticalf("Error updating node: %s", err)
             return err
         }
     }
@@ -521,7 +521,7 @@ func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {
 }
 
 func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error {
-    log.Debug("Got Rename request '%s' -> '%s'", req.OldName, req.NewName)
+    log.Debugf("Got Rename request '%s' -> '%s'", req.OldName, req.NewName)
     var mdn *mdag.Node
     for _, l := range n.Nd.Links {
         if l.Name == req.OldName {
@@ -538,7 +538,7 @@ func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fus
     case *Node:
         err := newDir.Nd.AddNodeLink(req.NewName, mdn)
         if err != nil {
-            log.Error("Error adding node to new dir on rename: %s", err)
+            log.Errorf("Error adding node to new dir on rename: %s", err)
             return err
         }
     default:
@@ -550,7 +550,7 @@ func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fus
 
 // Updates the child of this node, specified by name to the given newnode
 func (n *Node) update(name string, newnode *mdag.Node) error {
-    log.Debug("update '%s' in '%s'", name, n.name)
+    log.Debugf("update '%s' in '%s'", name, n.name)
     nnode := n.Nd.Copy()
     err := nnode.RemoveNodeLink(name)
     if err != nil {

View File

@@ -23,6 +23,7 @@ func Mount(ipfs *core.IpfsNode, fpath string, ipfspath string) error {
         syscall.SIGTERM, syscall.SIGQUIT)
 
     go func() {
+        defer ipfs.Network.Close()
         <-sigc
         for {
             err := Unmount(fpath)
@@ -31,7 +32,6 @@ func Mount(ipfs *core.IpfsNode, fpath string, ipfspath string) error {
             }
             time.Sleep(time.Millisecond * 100)
         }
-        ipfs.Network.Close()
     }()
 
     c, err := fuse.Mount(fpath)
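Moving `ipfs.Network.Close()` into a `defer` at the top of the goroutine (here and in the read-only mount below) guarantees the network is closed on every exit path, not only after the retry loop finishes. A minimal sketch of the pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            defer fmt.Println("network closed") // runs on any exit path of the goroutine
            fmt.Println("unmounting")
            // an early return here would still trigger the deferred close
        }()
        wg.Wait()
    }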

View File

@@ -54,7 +54,7 @@ func (*Root) Attr() fuse.Attr {
 
 // Lookup performs a lookup under this node.
 func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
-    log.Debug("Root Lookup: '%s'", name)
+    log.Debugf("Root Lookup: '%s'", name)
     switch name {
     case "mach_kernel", ".hidden", "._.":
         // Just quiet some log noise on OS X.
@@ -162,6 +162,7 @@ func Mount(ipfs *core.IpfsNode, fpath string) error {
         syscall.SIGTERM, syscall.SIGQUIT)
 
     go func() {
+        defer ipfs.Network.Close()
         <-sigc
         for {
             err := Unmount(fpath)
@@ -170,7 +171,6 @@ func Mount(ipfs *core.IpfsNode, fpath string) error {
             }
             time.Sleep(time.Millisecond * 10)
         }
-        ipfs.Network.Close()
     }()
 
     c, err := fuse.Mount(fpath)

View File

@@ -8,7 +8,7 @@ import (
 
 var log = util.Logger("chunk")
 
-var DefaultSplitter = &SizeSplitter{1024 * 512}
+var DefaultSplitter = &SizeSplitter{Size: 1024 * 512}
 
 type BlockSplitter interface {
     Split(r io.Reader) chan []byte
@@ -32,7 +32,7 @@ func (ss *SizeSplitter) Split(r io.Reader) chan []byte {
                 }
                 return
             }
-            log.Error("Block split error: %s", err)
+            log.Errorf("Block split error: %s", err)
             return
         }
         if nread < ss.Size {
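The `SizeSplitter{Size: ...}` edits here and in the test below address go vet's composites check, which warns when a composite literal of a type from another package relies on field order rather than names. A small illustration (a local copy of the struct for demonstration; vet's check actually fires on literals of imported types):

    package main

    import "fmt"

    // SizeSplitter stands in for the imported struct being constructed above.
    type SizeSplitter struct {
        Size int
    }

    func main() {
        a := SizeSplitter{1024 * 512}       // unkeyed: brittle if fields are added or reordered
        b := SizeSplitter{Size: 1024 * 512} // keyed: what the vet composites check wants
        fmt.Println(a.Size == b.Size)       // true
    }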

View File

@@ -38,9 +38,9 @@ func TestBuildDag(t *testing.T) {
 
 //Test where calls to read are smaller than the chunk size
 func TestSizeBasedSplit(t *testing.T) {
-    bs := &chunk.SizeSplitter{512}
+    bs := &chunk.SizeSplitter{Size: 512}
     testFileConsistency(t, bs, 32*512)
-    bs = &chunk.SizeSplitter{4096}
+    bs = &chunk.SizeSplitter{Size: 4096}
     testFileConsistency(t, bs, 32*4096)
 
     // Uneven offset

View File

@@ -138,8 +138,8 @@ func (c *singleConn) Out() chan<- []byte {
 
 // ID returns the ID of a given Conn.
 func ID(c Conn) string {
-    l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().ID)
-    r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().ID)
+    l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().ID())
+    r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().ID())
     lh := u.Hash([]byte(l))
     rh := u.Hash([]byte(r))
     ch := u.XOR(lh, rh)
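The `ID` to `ID()` fix above is another vet catch: without the parentheses, Sprintf receives the method value itself rather than its result. A sketch with a hypothetical type:

    package main

    import "fmt"

    type peerStub struct{}

    func (peerStub) ID() string { return "QmPeer" }

    func main() {
        p := peerStub{}
        fmt.Println(fmt.Sprintf("%s", p.ID))   // vet: p.ID is a func value, not called
        fmt.Println(fmt.Sprintf("%s", p.ID())) // prints QmPeer
    }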

View File

@@ -25,14 +25,14 @@ func (d *Dialer) Dial(ctx context.Context, network string, remote peer.Peer) (Co
     remote, err := d.Peerstore.Add(remote)
     if err != nil {
-        log.Error("Error putting peer into peerstore: %s", remote)
+        log.Errorf("Error putting peer into peerstore: %s", remote)
     }
 
     // TODO: try to get reusing addr/ports to work.
     // madialer := manet.Dialer{LocalAddr: laddr}
     madialer := manet.Dialer{}
 
-    log.Info("%s dialing %s %s", d.LocalPeer, remote, raddr)
+    log.Infof("%s dialing %s %s", d.LocalPeer, remote, raddr)
     maconn, err := madialer.Dial(raddr)
     if err != nil {
         return nil, err

View File

@@ -26,7 +26,7 @@ func Handshake1(ctx context.Context, c Conn) error {
     }
     c.Out() <- myVerBytes
 
-    log.Debug("Sent my version (%s) to %s", localH, rpeer)
+    log.Debugf("Sent my version (%s) to %s", localH, rpeer)
 
     select {
     case <-ctx.Done():
@@ -50,11 +50,11 @@ func Handshake1(ctx context.Context, c Conn) error {
     }
 
     if err := handshake.Handshake1Compatible(localH, remoteH); err != nil {
-        log.Info("%s (%s) incompatible version with %s (%s)", lpeer, localH, rpeer, remoteH)
+        log.Infof("%s (%s) incompatible version with %s (%s)", lpeer, localH, rpeer, remoteH)
         return err
     }
 
-    log.Debug("%s version handshake compatible %s", lpeer, rpeer)
+    log.Debugf("%s version handshake compatible %s", lpeer, rpeer)
     return nil
 }
@@ -71,7 +71,7 @@ func Handshake3(ctx context.Context, c Conn) error {
     }
     c.Out() <- localB
 
-    log.Debug("Handshake1: sent to %s", rpeer)
+    log.Debugf("Handshake1: sent to %s", rpeer)
 
     select {
     case <-ctx.Done():
@@ -91,11 +91,11 @@ func Handshake3(ctx context.Context, c Conn) error {
         return fmt.Errorf("Handshake3 could not decode remote msg: %q", err)
     }
 
-    log.Debug("Handshake3 received from %s", rpeer)
+    log.Debugf("Handshake3 received from %s", rpeer)
 }
 
 if err := handshake.Handshake3UpdatePeer(rpeer, remoteH); err != nil {
-    log.Error("Handshake3 failed to update %s", rpeer)
+    log.Errorf("Handshake3 failed to update %s", rpeer)
     return err
 }

View File

@@ -60,13 +60,13 @@ func (l *listener) listen() {
     c, err := newSingleConn(l.ctx, l.local, nil, maconn)
     if err != nil {
-        log.Error("Error accepting connection: %v", err)
+        log.Errorf("Error accepting connection: %v", err)
         return
     }
 
     sc, err := newSecureConn(l.ctx, c, l.peers)
     if err != nil {
-        log.Error("Error securing connection: %v", err)
+        log.Errorf("Error securing connection: %v", err)
         return
     }
@@ -84,7 +84,7 @@ func (l *listener) listen() {
     default:
     }
 
-    log.Error("Failed to accept connection: %v", err)
+    log.Errorf("Failed to accept connection: %v", err)
     continue
 }

View File

@@ -69,13 +69,13 @@ func (c *MultiConn) Add(conns ...Conn) {
     for _, c2 := range conns {
         log.Info("MultiConn: adding %s", c2)
         if c.LocalPeer() != c2.LocalPeer() || c.RemotePeer() != c2.RemotePeer() {
-            log.Error("%s", c2)
+            log.Error(c2)
             c.Unlock() // ok to unlock (to log). panicing.
-            log.Error("%s", c)
-            log.Error("c.LocalPeer: %s %p", c.LocalPeer(), c.LocalPeer())
-            log.Error("c2.LocalPeer: %s %p", c2.LocalPeer(), c2.LocalPeer())
-            log.Error("c.RemotePeer: %s %p", c.RemotePeer(), c.RemotePeer())
-            log.Error("c2.RemotePeer: %s %p", c2.RemotePeer(), c2.RemotePeer())
+            log.Error(c)
+            // log.Errorf("c.LocalPeer: %s %p", c.LocalPeer(), c.LocalPeer())
+            // log.Errorf("c2.LocalPeer: %s %p", c2.LocalPeer(), c2.LocalPeer())
+            // log.Errorf("c.RemotePeer: %s %p", c.RemotePeer(), c.RemotePeer())
+            // log.Errorf("c2.RemotePeer: %s %p", c2.RemotePeer(), c2.RemotePeer())
             c.Lock() // gotta relock to avoid lock panic from deferring.
             panic("connection addresses mismatch")
         }

View File

@@ -31,13 +31,13 @@ func newSecureConn(ctx context.Context, insecure Conn, peers peer.Peerstore) (Co
     }
     conn.ContextCloser = ctxc.NewContextCloser(ctx, conn.close)
 
-    log.Debug("newSecureConn: %v to %v", insecure.LocalPeer(), insecure.RemotePeer())
+    log.Debugf("newSecureConn: %v to %v", insecure.LocalPeer(), insecure.RemotePeer())
 
     // perform secure handshake before returning this connection.
     if err := conn.secureHandshake(peers); err != nil {
         conn.Close()
         return nil, err
     }
 
-    log.Debug("newSecureConn: %v to %v handshake success!", insecure.LocalPeer(), insecure.RemotePeer())
+    log.Debugf("newSecureConn: %v to %v handshake success!", insecure.LocalPeer(), insecure.RemotePeer())
 
     return conn, nil
 }
@@ -78,9 +78,9 @@ func (c *secureConn) secureHandshake(peers peer.Peerstore) error {
     // update: this actually might happen under normal operation-- should
     // perhaps return an error. TBD.
-    log.Error("secureConn peer mismatch. %v != %v", insecureSC.remote, c.secure.RemotePeer())
-    log.Error("insecureSC.remote: %s %#v", insecureSC.remote, insecureSC.remote)
-    log.Error("c.secure.LocalPeer: %s %#v", c.secure.RemotePeer(), c.secure.RemotePeer())
+    log.Errorf("secureConn peer mismatch. %v != %v", insecureSC.remote, c.secure.RemotePeer())
+    log.Errorf("insecureSC.remote: %s %#v", insecureSC.remote, insecureSC.remote)
+    log.Errorf("c.secure.LocalPeer: %s %#v", c.secure.RemotePeer(), c.secure.RemotePeer())
     panic("secureConn peer mismatch. consructed incorrectly?")
 }

View File

@@ -44,7 +44,7 @@ func Handshake3UpdatePeer(remotePeer peer.Peer, msg *pb.Handshake3) error {
     addr, err := ma.NewMultiaddrBytes(a)
     if err != nil {
         err = fmt.Errorf("remote peer address not a multiaddr: %s", err)
-        log.Error("Handshake3: error %s", err)
+        log.Errorf("Handshake3: error %s", err)
         return err
     }
     remotePeer.AddAddress(addr)

View File

@@ -156,21 +156,21 @@ func (m *Muxer) handleIncomingMessage(m1 msg.NetMessage) {
     data, pid, err := unwrapData(m1.Data())
     if err != nil {
-        log.Error("muxer de-serializing error: %v", err)
+        log.Errorf("muxer de-serializing error: %v", err)
         return
     }
 
     m2 := msg.New(m1.Peer(), data)
     proto, found := m.Protocols[pid]
     if !found {
-        log.Error("muxer unknown protocol %v", pid)
+        log.Errorf("muxer unknown protocol %v", pid)
         return
     }
 
     select {
     case proto.GetPipe().Incoming <- m2:
     case <-m.ctx.Done():
-        log.Error("%s", m.ctx.Err())
+        log.Error(m.ctx.Err())
         return
     }
 }
@@ -198,7 +198,7 @@ func (m *Muxer) handleOutgoingMessages(pid pb.ProtocolID, proto Protocol) {
 func (m *Muxer) handleOutgoingMessage(pid pb.ProtocolID, m1 msg.NetMessage) {
     data, err := wrapData(m1.Data(), pid)
     if err != nil {
-        log.Error("muxer serializing error: %v", err)
+        log.Errorf("muxer serializing error: %v", err)
         return
     }

View File

@@ -198,14 +198,14 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
     // unwrap the incoming message
     data, rid, err := unwrapData(m.Data())
     if err != nil {
-        log.Error("de-serializing error: %v", err)
+        log.Errorf("de-serializing error: %v", err)
     }
 
     m2 := msg.New(m.Peer(), data)
 
     // if it's a request (or has no RequestID), handle it
     if rid == nil || rid.IsRequest() {
         if s.Handler == nil {
-            log.Error("service dropped msg: %v", m)
+            log.Errorf("service dropped msg: %v", m)
             return // no handler, drop it.
         }
@@ -216,7 +216,7 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
         if r1 != nil {
             err := s.sendMessage(ctx, r1, rid.Response())
             if err != nil {
-                log.Error("error sending response message: %v", err)
+                log.Errorf("error sending response message: %v", err)
             }
         }
         return
@@ -224,7 +224,7 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
     // Otherwise, it is a response. handle it.
     if !rid.IsResponse() {
-        log.Error("RequestID should identify a response here.")
+        log.Errorf("RequestID should identify a response here.")
     }
 
     key := RequestKey(m.Peer().ID(), RequestID(rid))
@@ -233,7 +233,7 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
     s.RequestsLock.RUnlock()
     if !found {
-        log.Error("no request key %v (timeout?)", []byte(key))
+        log.Errorf("no request key %v (timeout?)", []byte(key))
         return
     }

View File

@@ -24,7 +24,7 @@ func (s *Swarm) listen() error {
     if err != nil {
         hasErr = true
         retErr.Errors[i] = err
-        log.Error("Failed to listen on: %s - %s", addr, err)
+        log.Errorf("Failed to listen on: %s - %s", addr, err)
     }
 }
@@ -116,7 +116,7 @@ func (s *Swarm) connSetup(c conn.Conn) (conn.Conn, error) {
     conns := []conn.Conn{c}
     mc, err := conn.NewMultiConn(s.Context(), s.local, c.RemotePeer(), conns)
     if err != nil {
-        log.Error("error creating multiconn: %s", err)
+        log.Errorf("error creating multiconn: %s", err)
         c.Close()
         return nil, err
     }
@@ -126,15 +126,15 @@ func (s *Swarm) connSetup(c conn.Conn) (conn.Conn, error) {
         // kick off reader goroutine
         go s.fanInSingle(mc)
-        log.Debug("added new multiconn: %s", mc)
+        log.Debugf("added new multiconn: %s", mc)
     } else {
         s.connsLock.Unlock() // unlock before adding new conn
 
         mc.Add(c)
-        log.Debug("multiconn found: %s", mc)
+        log.Debugf("multiconn found: %s", mc)
     }
 
-    log.Debug("multiconn added new conn %s", c)
+    log.Debugf("multiconn added new conn %s", c)
     return c, nil
 }
@@ -151,7 +151,7 @@ func (s *Swarm) fanOut() {
     case msg, ok := <-s.Outgoing:
         if !ok {
-            log.Info("%s outgoing channel closed", s)
+            log.Infof("%s outgoing channel closed", s)
             return
         }
@@ -162,12 +162,12 @@ func (s *Swarm) fanOut() {
         if !found {
            e := fmt.Errorf("Sent msg to peer without open conn: %v", msg.Peer())
            s.errChan <- e
-            log.Error("%s", e)
+            log.Error(e)
            continue
        }
 
        i++
-        log.Debug("%s sent message to %s (%d)", s.local, msg.Peer(), i)
+        log.Debugf("%s sent message to %s (%d)", s.local, msg.Peer(), i)
        // queue it in the connection's buffer
        c.Out() <- msg.Data()
    }
@@ -202,11 +202,11 @@ func (s *Swarm) fanInSingle(c conn.Conn) {
    case data, ok := <-c.In():
        if !ok {
-            log.Info("%s in channel closed", c)
+            log.Infof("%s in channel closed", c)
            return // channel closed.
        }
        i++
-        log.Debug("%s received message from %s (%d)", s.local, c.RemotePeer(), i)
+        log.Debugf("%s received message from %s (%d)", s.local, c.RemotePeer(), i)
        s.Incoming <- msg.New(c.RemotePeer(), data)
    }
 }

View File

@@ -238,7 +238,7 @@ func (p *peer) VerifyAndSetPrivKey(sk ic.PrivKey) error {
     // keys not equal. invariant violated. this warrants a panic.
     // these keys should be _the same_ because peer.ID = H(pk)
     // this mismatch should never happen.
-    log.Error("%s had PrivKey: %v -- got %v", p, p.privKey, sk)
+    log.Errorf("%s had PrivKey: %v -- got %v", p, p.privKey, sk)
     panic("invariant violated: unexpected key mismatch")
 }
@@ -270,7 +270,7 @@ func (p *peer) VerifyAndSetPubKey(pk ic.PubKey) error {
     // keys not equal. invariant violated. this warrants a panic.
     // these keys should be _the same_ because peer.ID = H(pk)
     // this mismatch should never happen.
-    log.Error("%s had PubKey: %v -- got %v", p, p.pubKey, pk)
+    log.Errorf("%s had PubKey: %v -- got %v", p, p.pubKey, pk)
     panic("invariant violated: unexpected key mismatch")
 }

View File

@@ -24,7 +24,7 @@ func TestPinnerBasic(t *testing.T) {
         t.Fatal(err)
     }
 
-    dserv := &mdag.DAGService{bserv}
+    dserv := &mdag.DAGService{Blocks: bserv}
 
     p := NewPinner(dstore, dserv)

View File

@ -84,7 +84,7 @@ func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, dialer inet.Dia
// Connect to a new peer at the given address, ping and add to the routing table // Connect to a new peer at the given address, ping and add to the routing table
func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) (peer.Peer, error) { func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) (peer.Peer, error) {
log.Debug("Connect to new peer: %s", npeer) log.Debugf("Connect to new peer: %s", npeer)
// TODO(jbenet,whyrusleeping) // TODO(jbenet,whyrusleeping)
// //
@ -139,7 +139,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
dht.Update(mPeer) dht.Update(mPeer)
// Print out diagnostic // Print out diagnostic
log.Debug("[peer: %s] Got message type: '%s' [from = %s]\n", log.Debugf("%s got message type: '%s' from %s",
dht.self, Message_MessageType_name[int32(pmes.GetType())], mPeer) dht.self, Message_MessageType_name[int32(pmes.GetType())], mPeer)
// get handler for this msg type. // get handler for this msg type.
@ -152,7 +152,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
// dispatch handler. // dispatch handler.
 	rpmes, err := handler(mPeer, pmes)
 	if err != nil {
-		log.Error("handle message error: %s", err)
+		log.Errorf("handle message error: %s", err)
 		return nil
 	}
@@ -165,7 +165,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 	// serialize response msg
 	rmes, err := msg.FromObject(mPeer, rpmes)
 	if err != nil {
-		log.Error("serialze response error: %s", err)
+		log.Errorf("serialze response error: %s", err)
 		return nil
 	}
@@ -184,7 +184,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *Message)
 	start := time.Now()

 	// Print out diagnostic
-	log.Debug("Sent message type: '%s' [to = %s]",
+	log.Debugf("Sent message type: '%s' to %s",
 		Message_MessageType_name[int32(pmes.GetType())], p)

 	rmes, err := dht.sender.SendRequest(ctx, mes)
@@ -235,7 +235,7 @@ func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.Peer, key string) er
 		return err
 	}

-	log.Debug("%s putProvider: %s for %s", dht.self, p, key)
+	log.Debugf("%s putProvider: %s for %s", dht.self, p, key)
 	if rpmes.GetKey() != pmes.GetKey() {
 		return errors.New("provider not added correctly")
 	}
@@ -251,7 +251,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
 		return nil, nil, err
 	}

-	log.Debug("pmes.GetValue() %v", pmes.GetValue())
+	log.Debugf("pmes.GetValue() %v", pmes.GetValue())
 	if value := pmes.GetValue(); value != nil {
 		// Success! We were given the value
 		log.Debug("getValueOrPeers: got value")
@@ -273,7 +273,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
 	for _, pb := range pmes.GetCloserPeers() {
 		pr, err := dht.peerFromInfo(pb)
 		if err != nil {
-			log.Error("%s", err)
+			log.Error(err)
 			continue
 		}
 		peers = append(peers, pr)
@@ -306,13 +306,13 @@ func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key,
 	for _, pinfo := range peerlist {
 		p, err := dht.ensureConnectedToPeer(pinfo)
 		if err != nil {
-			log.Error("getFromPeers error: %s", err)
+			log.Errorf("getFromPeers error: %s", err)
 			continue
 		}

 		pmes, err := dht.getValueSingle(ctx, p, key, level)
 		if err != nil {
-			log.Error("getFromPeers error: %s\n", err)
+			log.Errorf("getFromPeers error: %s\n", err)
 			continue
 		}
@@ -349,7 +349,7 @@ func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
 // Update signals to all routingTables to Update their last-seen status
 // on the given peer.
 func (dht *IpfsDHT) Update(p peer.Peer) {
-	log.Debug("updating peer: %s latency = %f\n", p, p.GetLatency().Seconds())
+	log.Debugf("updating peer: %s latency = %f\n", p, p.GetLatency().Seconds())
 	removedCount := 0
 	for _, route := range dht.routingTables {
 		removed := route.Update(p)
@@ -394,11 +394,11 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []peer.Peer {
 	for _, prov := range peers {
 		p, err := dht.peerFromInfo(prov)
 		if err != nil {
-			log.Error("error getting peer from info: %v", err)
+			log.Errorf("error getting peer from info: %v", err)
 			continue
 		}

-		log.Debug("%s adding provider: %s for %s", dht.self, p, key)
+		log.Debugf("%s adding provider: %s for %s", dht.self, p, key)

 		// Dont add outselves to the list
 		if p.ID().Equal(dht.self.ID()) {
@@ -456,7 +456,7 @@ func (dht *IpfsDHT) getPeer(id peer.ID) (peer.Peer, error) {
 	p, err := dht.peerstore.Get(id)
 	if err != nil {
 		err = fmt.Errorf("Failed to get peer from peerstore: %s", err)
-		log.Error("%s", err)
+		log.Error(err)
 		return nil, err
 	}
 	return p, nil
@@ -505,7 +505,7 @@ func (dht *IpfsDHT) loadProvidableKeys() error {
 	for _, dsk := range kl {
 		k := u.KeyFromDsKey(dsk)
 		if len(k) == 0 {
-			log.Error("loadProvidableKeys error: %v", dsk)
+			log.Errorf("loadProvidableKeys error: %v", dsk)
 		}

 		dht.providers.AddProvider(k, dht.self)
@@ -526,7 +526,7 @@ func (dht *IpfsDHT) PingRoutine(t time.Duration) {
 				ctx, _ := context.WithTimeout(dht.ctx, time.Second*5)
 				err := dht.Ping(ctx, p)
 				if err != nil {
-					log.Error("Ping error: %s", err)
+					log.Errorf("Ping error: %s", err)
 				}
 			}
 		case <-dht.ctx.Done():
@@ -541,6 +541,6 @@ func (dht *IpfsDHT) Bootstrap(ctx context.Context) {
 	rand.Read(id)
 	_, err := dht.FindPeer(ctx, peer.ID(id))
 	if err != nil {
-		log.Error("Bootstrap peer error: %s", err)
+		log.Errorf("Bootstrap peer error: %s", err)
 	}
 }
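The change repeated throughout this file is the core of the go-vet cleanup: go-logging exposes plain calls (Error, Debug, Info, Notice) that print their arguments as values, and printf-style calls (Errorf, Debugf, Infof, Noticef) that interpret the first argument as a format string. vet's printf check only validates format directives in functions it recognizes as printf-like, so a "%s" handed to a plain Error call was never checked and never expanded. A minimal sketch of the difference; the Error and Errorf stubs below are stand-ins for the logging API, not the vendored implementation:

package main

import "fmt"

// Error mimics a plain logging call: every argument is printed as a
// value, so a "%s" in the first argument comes out literally.
func Error(args ...interface{}) { fmt.Println(args...) }

// Errorf mimics the printf-style variant. Because it takes a format
// string and its name ends in "f", vet can match verbs to arguments.
func Errorf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) }

func main() {
	err := fmt.Errorf("connection refused")
	Error("handle message error: %s", err)  // handle message error: %s connection refused
	Errorf("handle message error: %s", err) // handle message error: connection refused
}

This also explains why the two log.Error("%s", err) calls become log.Error(err) rather than Errorf: with a lone error argument, the format string adds nothing.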


@@ -30,14 +30,14 @@ func (l *logDhtRPC) EndLog() {
 func (l *logDhtRPC) Print() {
 	b, err := json.Marshal(l)
 	if err != nil {
-		log.Debug("Error marshaling logDhtRPC object: %s", err)
+		log.Debugf("Error marshaling logDhtRPC object: %s", err)
 	} else {
 		log.Debug(string(b))
 	}
 }

 func (l *logDhtRPC) String() string {
-	return fmt.Sprintf("DHT RPC: %s took %s, success = %s", l.Type, l.Duration, l.Success)
+	return fmt.Sprintf("DHT RPC: %s took %s, success = %v", l.Type, l.Duration, l.Success)
 }

 func (l *logDhtRPC) EndAndPrint() {
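One fix in this hunk is more than a rename: l.Success is a bool, and fmt's %s verb on a bool does not print true or false, it prints an error notation. A quick self-contained illustration:

package main

import "fmt"

func main() {
	ok := true
	fmt.Printf("success = %s\n", ok) // success = %!s(bool=true)
	fmt.Printf("success = %v\n", ok) // success = true
}

Switching to %v is exactly the kind of verb/argument mismatch the printf check reports once the surrounding call is vet-visible.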


@@ -36,7 +36,7 @@ func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler {
 }

 func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error) {
-	log.Debug("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())
+	log.Debugf("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())

 	// setup response
 	resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
@@ -48,10 +48,10 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)
 	}

 	// let's first check if we have the value locally.
-	log.Debug("%s handleGetValue looking into ds", dht.self)
+	log.Debugf("%s handleGetValue looking into ds", dht.self)
 	dskey := u.Key(pmes.GetKey()).DsKey()
 	iVal, err := dht.datastore.Get(dskey)
-	log.Debug("%s handleGetValue looking into ds GOT %v", dht.self, iVal)
+	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

 	// if we got an unexpected error, bail.
 	if err != nil && err != ds.ErrNotFound {
@@ -63,7 +63,7 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)

 	// if we have the value, send it back
 	if err == nil {
-		log.Debug("%s handleGetValue success!", dht.self)
+		log.Debugf("%s handleGetValue success!", dht.self)

 		byts, ok := iVal.([]byte)
 		if !ok {
@@ -76,7 +76,7 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)
 	// if we know any providers for the requested value, return those.
 	provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
 	if len(provs) > 0 {
-		log.Debug("handleGetValue returning %d provider[s]\n", len(provs))
+		log.Debugf("handleGetValue returning %d provider[s]", len(provs))
 		resp.ProviderPeers = peersToPBPeers(provs)
 	}
@@ -84,7 +84,7 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)
 	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
 	if closer != nil {
 		for _, p := range closer {
-			log.Debug("handleGetValue returning closer peer: '%s'", p)
+			log.Debugf("handleGetValue returning closer peer: '%s'", p)
 			if len(p.Addresses()) < 1 {
 				log.Critical("no addresses on peer being sent!")
 			}
@@ -101,12 +101,12 @@ func (dht *IpfsDHT) handlePutValue(p peer.Peer, pmes *Message) (*Message, error)
 	defer dht.dslock.Unlock()
 	dskey := u.Key(pmes.GetKey()).DsKey()
 	err := dht.datastore.Put(dskey, pmes.GetValue())
-	log.Debug("%s handlePutValue %v %v\n", dht.self, dskey, pmes.GetValue())
+	log.Debugf("%s handlePutValue %v %v\n", dht.self, dskey, pmes.GetValue())
 	return pmes, err
 }

 func (dht *IpfsDHT) handlePing(p peer.Peer, pmes *Message) (*Message, error) {
-	log.Debug("%s Responding to ping from %s!\n", dht.self, p)
+	log.Debugf("%s Responding to ping from %s!\n", dht.self, p)
 	return pmes, nil
 }
@@ -122,7 +122,7 @@ func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *Message) (*Message, error)
 	}

 	if closest == nil {
-		log.Error("handleFindPeer: could not find anything.")
+		log.Errorf("handleFindPeer: could not find anything.")
 		return resp, nil
 	}
@@ -134,7 +134,7 @@ func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *Message) (*Message, error)
 	}

 	for _, p := range withAddresses {
-		log.Debug("handleFindPeer: sending back '%s'", p)
+		log.Debugf("handleFindPeer: sending back '%s'", p)
 	}
 	resp.CloserPeers = peersToPBPeers(withAddresses)
 	return resp, nil
@@ -144,11 +144,11 @@ func (dht *IpfsDHT) handleGetProviders(p peer.Peer, pmes *Message) (*Message, er
 	resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

 	// check if we have this value, to add ourselves as provider.
-	log.Debug("handling GetProviders: '%s'", pmes.GetKey())
+	log.Debugf("handling GetProviders: '%s'", pmes.GetKey())
 	dsk := u.Key(pmes.GetKey()).DsKey()
 	has, err := dht.datastore.Has(dsk)
 	if err != nil && err != ds.ErrNotFound {
-		log.Error("unexpected datastore error: %v\n", err)
+		log.Errorf("unexpected datastore error: %v\n", err)
 		has = false
 	}
@@ -180,7 +180,7 @@ type providerInfo struct {
 func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *Message) (*Message, error) {
 	key := u.Key(pmes.GetKey())

-	log.Debug("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))
+	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))

 	// add provider should use the address given in the message
 	for _, pb := range pmes.GetProviderPeers() {
@@ -189,16 +189,16 @@ func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *Message) (*Message, err
 			addr, err := pb.Address()
 			if err != nil {
-				log.Error("provider %s error with address %s", p, *pb.Addr)
+				log.Errorf("provider %s error with address %s", p, *pb.Addr)
 				continue
 			}

-			log.Info("received provider %s %s for %s", p, addr, key)
+			log.Infof("received provider %s %s for %s", p, addr, key)
 			p.AddAddress(addr)
 			dht.providers.AddProvider(key, p)

 		} else {
-			log.Error("handleAddProvider received provider %s from %s", pid, p)
+			log.Errorf("handleAddProvider received provider %s from %s", pid, p)
 		}
 	}


@@ -17,7 +17,7 @@ import (
 // PutValue adds value corresponding to given Key.
 // This is the top level "Store" operation of the DHT
 func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error {
-	log.Debug("PutValue %s", key)
+	log.Debugf("PutValue %s", key)
 	err := dht.putLocal(key, value)
 	if err != nil {
 		return err
@@ -30,7 +30,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
 	}

 	query := newQuery(key, dht.dialer, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) {
-		log.Debug("%s PutValue qry part %v", dht.self, p)
+		log.Debugf("%s PutValue qry part %v", dht.self, p)
 		err := dht.putValueToNetwork(ctx, p, string(key), value)
 		if err != nil {
 			return nil, err
@@ -46,7 +46,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
 // If the search does not succeed, a multiaddr string of a closer peer is
 // returned along with util.ErrSearchIncomplete
 func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
-	log.Debug("Get Value [%s]", key)
+	log.Debugf("Get Value [%s]", key)

 	// If we have it local, dont bother doing an RPC!
 	// NOTE: this might not be what we want to do...
@@ -86,7 +86,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
 		return nil, err
 	}

-	log.Debug("GetValue %v %v", key, result.value)
+	log.Debugf("GetValue %v %v", key, result.value)
 	if result.value == nil {
 		return nil, u.ErrNotFound
 	}
@@ -140,7 +140,7 @@ func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int
 			defer wg.Done()
 			pmes, err := dht.findProvidersSingle(ctx, p, key, 0)
 			if err != nil {
-				log.Error("%s", err)
+				log.Error(err)
 				return
 			}
 			dht.addPeerListAsync(key, pmes.GetProviderPeers(), ps, count, peerOut)
@@ -218,7 +218,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error)
 	for _, pbp := range closer {
 		np, err := dht.getPeer(peer.ID(pbp.GetId()))
 		if err != nil {
-			log.Warning("Received invalid peer from query")
+			log.Warningf("Received invalid peer from query: %v", err)
 			continue
 		}
 		ma, err := pbp.Address()
@@ -256,10 +256,10 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error)
 // Ping a peer, log the time it took
 func (dht *IpfsDHT) Ping(ctx context.Context, p peer.Peer) error {
 	// Thoughts: maybe this should accept an ID and do a peer lookup?
-	log.Info("ping %s start", p)
+	log.Infof("ping %s start", p)

 	pmes := newMessage(Message_PING, "", 0)
 	_, err := dht.sendRequest(ctx, p, pmes)
-	log.Info("ping %s end (err = %s)", p, err)
+	log.Infof("ping %s end (err = %s)", p, err)

 	return err
 }
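Beyond the mechanical Info-to-Infof renames, the FindPeer hunk also improves a message: the old warning dropped the error, so an operator could see that a peer was invalid but never why. A small sketch of the pattern using the standard library logger; the error text below is hypothetical:

package main

import (
	"errors"
	"log"
)

func main() {
	// Hypothetical failure from a peer lookup.
	err := errors.New("peer ID does not match public key")

	// Attaching the cause with %v keeps the log line actionable.
	log.Printf("Received invalid peer from query: %v", err)
}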


@@ -125,7 +125,7 @@ func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) pe
 		}
 		peerArr = append(peerArr, &pd)
 		if e == nil {
-			log.Debug("list element was nil.\n")
+			log.Debug("list element was nil")
 			return peerArr
 		}
 	}
@@ -148,7 +148,7 @@ func (rt *RoutingTable) NearestPeer(id ID) peer.Peer {
 		return peers[0]
 	}

-	log.Error("NearestPeer: Returning nil, table size = %d", rt.Size())
+	log.Errorf("NearestPeer: Returning nil, table size = %d", rt.Size())
 	return nil
 }


@@ -59,7 +59,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
 	origlen := len(b)

 	if end <= zeroblocklen {
-		log.Debug("Writing into zero block.")
+		log.Debug("Writing into zero block")
 		// Replacing zeroeth data block (embedded in the root node)
 		//TODO: check chunking here
 		copy(dm.pbdata.Data[offset:], b)
@@ -76,7 +76,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
 	traversed = uint64(zeroblocklen)
 	for i, size := range dm.pbdata.Blocksizes {
 		if uint64(offset) < traversed+size {
-			log.Debug("Starting mod at block %d. [%d < %d + %d]", i, offset, traversed, size)
+			log.Debugf("Starting mod at block %d. [%d < %d + %d]", i, offset, traversed, size)
 			// Here is where we start
 			startsubblk = i
 			lnk := dm.curNode.Links[i]
@@ -145,7 +145,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
 		n := &mdag.Node{Data: ft.WrapData(sb)}
 		_, err := dm.dagserv.Add(n)
 		if err != nil {
-			log.Error("Failed adding node to DAG service: %s", err)
+			log.Errorf("Failed adding node to DAG service: %s", err)
 			return 0, err
 		}
 		lnk, err := mdag.MakeLink(n)


@@ -6,7 +6,6 @@ import (
 	"io/ioutil"
 	"testing"

-	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
 	bs "github.com/jbenet/go-ipfs/blockservice"
 	"github.com/jbenet/go-ipfs/importer/chunk"
 	mdag "github.com/jbenet/go-ipfs/merkledag"
@@ -14,6 +13,7 @@ import (
 	u "github.com/jbenet/go-ipfs/util"

 	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
+	logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-logging"
 )

 func getMockDagServ(t *testing.T) *mdag.DAGService {
@@ -22,11 +22,11 @@ func getMockDagServ(t *testing.T) *mdag.DAGService {
 	if err != nil {
 		t.Fatal(err)
 	}
-	return &mdag.DAGService{bserv}
+	return &mdag.DAGService{Blocks: bserv}
 }

 func getNode(t *testing.T, dserv *mdag.DAGService, size int64) ([]byte, *mdag.Node) {
-	dw := NewDagWriter(dserv, &chunk.SizeSplitter{500})
+	dw := NewDagWriter(dserv, &chunk.SizeSplitter{Size: 500})

 	n, err := io.CopyN(dw, u.NewFastRand(), size)
 	if err != nil {
@@ -99,7 +99,7 @@ func TestDagModifierBasic(t *testing.T) {
 	dserv := getMockDagServ(t)
 	b, n := getNode(t, dserv, 50000)

-	dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512})
+	dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{Size: 512})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -142,7 +142,7 @@ func TestDagModifierBasic(t *testing.T) {
 	expected := uint64(50000 + 3500 + 3000)
 	if size != expected {
-		t.Fatal("Final reported size is incorrect [%d != %d]", size, expected)
+		t.Fatalf("Final reported size is incorrect [%d != %d]", size, expected)
 	}
 }
@@ -150,7 +150,7 @@ func TestMultiWrite(t *testing.T) {
 	dserv := getMockDagServ(t)
 	_, n := getNode(t, dserv, 0)

-	dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512})
+	dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{Size: 512})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -191,7 +191,7 @@ func TestMultiWriteCoal(t *testing.T) {
 	dserv := getMockDagServ(t)
 	_, n := getNode(t, dserv, 0)

-	dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512})
+	dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{Size: 512})
 	if err != nil {
 		t.Fatal(err)
 	}
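The test files pick up a second vet check: composite literals. &mdag.DAGService{bserv} and &chunk.SizeSplitter{500} rely on field position, which breaks silently when a field is added or reordered, so vet flags unkeyed literals of structs imported from other packages. The t.Fatal call with format directives is the printf problem again, cured by t.Fatalf. A standalone sketch of the literal fix; the Splitter type here is illustrative, not the real chunk.SizeSplitter:

package main

import "fmt"

// Splitter stands in for a struct defined in another package.
type Splitter struct {
	Size int
}

func main() {
	// Unkeyed: depends on field order, and vet's composite-literal
	// check flags this form for imported struct types.
	unkeyed := Splitter{512}

	// Keyed: survives field additions and reorderings.
	keyed := Splitter{Size: 512}

	fmt.Println(unkeyed, keyed)
}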


@@ -53,8 +53,8 @@ func TestDagWriter(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	dag := &mdag.DAGService{bserv}
-	dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
+	dag := &mdag.DAGService{Blocks: bserv}
+	dw := NewDagWriter(dag, &chunk.SizeSplitter{Size: 4096})

 	nbytes := int64(1024 * 1024 * 2)
 	n, err := io.CopyN(dw, &datasource{}, nbytes)
@@ -87,8 +87,8 @@ func TestMassiveWrite(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	dag := &mdag.DAGService{bserv}
-	dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
+	dag := &mdag.DAGService{Blocks: bserv}
+	dw := NewDagWriter(dag, &chunk.SizeSplitter{Size: 4096})

 	nbytes := int64(1024 * 1024 * 1024 * 16)
 	n, err := io.CopyN(dw, &datasource{}, nbytes)
@@ -107,13 +107,13 @@ func BenchmarkDagWriter(b *testing.B) {
 	if err != nil {
 		b.Fatal(err)
 	}
-	dag := &mdag.DAGService{bserv}
+	dag := &mdag.DAGService{Blocks: bserv}

 	b.ResetTimer()
 	nbytes := int64(100000)
 	for i := 0; i < b.N; i++ {
 		b.SetBytes(nbytes)
-		dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
+		dw := NewDagWriter(dag, &chunk.SizeSplitter{Size: 4096})
 		n, err := io.CopyN(dw, &datasource{}, nbytes)
 		if err != nil {
 			b.Fatal(err)


@@ -85,10 +85,10 @@ func init() {
 	var err error
 	currentVersion, err = parseVersion()
 	if err != nil {
-		log.Error("invalid version number in code (must be semver): %q\n", Version)
+		log.Errorf("invalid version number in code (must be semver): %q", Version)
 		os.Exit(1)
 	}
-	log.Info("go-ipfs Version: %s", currentVersion)
+	log.Infof("go-ipfs Version: %s", currentVersion)
 }

 func parseVersion() (*semver.Version, error) {
@@ -138,7 +138,7 @@ func ShouldAutoUpdate(setting config.AutoUpdateSetting, newVer string) bool {
 	nv, err := semver.NewVersion(newVer)
 	if err != nil {
-		log.Error("could not parse version string: %s", err)
+		log.Errorf("could not parse version string: %s", err)
 		return false
 	}
@@ -189,7 +189,7 @@ func CliCheckForUpdates(cfg *config.Config, confFile string) error {
 	u, err := CheckForUpdate()
 	// if there is no update available, record it, and exit.
 	if err == check.NoUpdateAvailable {
-		log.Notice("No update available, checked on %s", time.Now())
+		log.Noticef("No update available, checked on %s", time.Now())
 		config.RecordUpdateCheck(cfg, confFile) // only record if we checked successfully.
 		return nil
 	}
@@ -197,7 +197,7 @@ func CliCheckForUpdates(cfg *config.Config, confFile string) error {
 	// if another, unexpected error occurred, note it.
 	if err != nil {
 		if cfg.Version.Check == config.CheckError {
-			log.Error("Error while checking for update: %v\n", err)
+			log.Errorf("Error while checking for update: %v", err)
 			return nil
 		}
 		// when "warn" version.check mode we just show a warning message
@@ -211,7 +211,7 @@ func CliCheckForUpdates(cfg *config.Config, confFile string) error {
 	if cfg.Version.AutoUpdate != config.UpdateNever {
 		// and we should auto update
 		if ShouldAutoUpdate(cfg.Version.AutoUpdate, u.Version) {
-			log.Notice("Applying update %s", u.Version)
+			log.Noticef("Applying update %s", u.Version)

 			if err = Apply(u); err != nil {
 				log.Error(err.Error())


@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"

-	logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
+	logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-logging"
 )

 func init() {
@@ -55,7 +55,7 @@ func SetupLogging() {
 		var err error
 		lvl, err = logging.LogLevel(logenv)
 		if err != nil {
-			log.Error("logging.LogLevel() Error: %q", err)
+			log.Errorf("logging.LogLevel() Error: %q", err)
 			lvl = logging.ERROR // reset to ERROR, could be undefined now(?)
 		}
 	}
@@ -74,7 +74,7 @@ func SetAllLoggers(lvl logging.Level) {
 	logging.SetLevel(lvl, "")
 	for n, log := range loggers {
 		logging.SetLevel(lvl, n)
-		log.Notice("setting logger: %q to %v", n, lvl)
+		log.Noticef("setting logger: %q to %v", n, lvl)
 	}
 }


@@ -119,8 +119,6 @@ func (r *randGen) Read(p []byte) (n int, err error) {
 			val >>= 8
 		}
 	}
-
-	panic("unreachable")
 }

 // GetenvBool is the way to check an env var as a boolean
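The final change deletes a relic of pre-Go 1.1 code, when a function ending in a loop still needed a trailing return or panic to satisfy the compiler. Since Go 1.1, a for loop with no condition counts as a terminating statement, so the panic after randGen.Read's loop became dead code, which is what go vet's unreachable check reports. A compact illustration with a hypothetical function of the same shape:

package main

import "fmt"

// countTo mirrors the shape of randGen.Read: the condition-free for
// loop is a terminating statement, so the function needs no final
// return and tolerates nothing after the loop.
func countTo(limit int) int {
	n := 0
	for {
		n++
		if n >= limit {
			return n
		}
	}
	// panic("unreachable") // dead code; vet reports: unreachable code
}

func main() {
	fmt.Println(countTo(3))
}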