mirror of https://github.com/ipfs/kubo.git
Godeps/Godeps.json (generated)
@@ -87,6 +87,10 @@
 		{
 			"ImportPath": "github.com/jbenet/go-is-domain",
 			"Rev": "93b717f2ae17838a265e30277275ee99ee7198d6"
 		},
+		{
+			"ImportPath": "github.com/jbenet/go-logging",
+			"Rev": "74bec4b83f6d45d1402c1e9d94c0c29e39f6e0ea"
+		},
 		{
 			"ImportPath": "github.com/jbenet/go-msgio",
 			"Rev": "c9069ab79c95aa0686347b516972c7329c4391f2"
@@ -109,10 +113,6 @@
 		{
 			"ImportPath": "github.com/mitchellh/go-homedir",
 			"Rev": "7d2d8c8a4e078ce3c58736ab521a40b37a504c52"
 		},
-		{
-			"ImportPath": "github.com/op/go-logging",
-			"Rev": "3df864a88c7f005e676db4f026a4fe2f14929be3"
-		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
 			"Rev": "99056d50e56252fbe0021d5c893defca5a76baf8"
@@ -80,4 +80,3 @@ You can use `go get -u` to update the package.

 For docs, see http://godoc.org/github.com/op/go-logging or run:

     $ godoc github.com/op/go-logging
@@ -3,7 +3,7 @@ package main

 import (
 	"os"

-	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
+	"github.com/op/go-logging"
 )

 var log = logging.MustGetLogger("example")
(binary image file; 12 KiB both before and after)
@@ -55,6 +55,7 @@ type Record struct {
 	formatted string
 }

+// Formatted returns the string-formatted version of a record.
 func (r *Record) Formatted(calldepth int) string {
 	if r.formatted == "" {
 		var buf bytes.Buffer
@@ -64,6 +65,8 @@ func (r *Record) Formatted(calldepth int) string {
 	return r.formatted
 }

+// Message returns a string message for outputting. Redacts any record args
+// that implement the Redactor interface
 func (r *Record) Message() string {
 	if r.message == nil {
 		// Redact the arguments that implements the Redactor interface
@@ -78,12 +81,22 @@ func (r *Record) Message() string {
 	return *r.message
 }

+// Logger is a logging unit. It controls the flow of messages to a given
+// (swappable) backend.
 type Logger struct {
 	Module string
+	backend     LeveledBackend
+	haveBackend bool
 }

+// SetBackend changes the backend of the logger.
+func (l *Logger) SetBackend(backend LeveledBackend) {
+	l.backend = backend
+	l.haveBackend = true
+}
+
-// TODO call NewLogger and remove MustGetLogger?
+// GetLogger creates and returns a Logger object based on the module name.
+// TODO call NewLogger and remove MustGetLogger?
 func GetLogger(module string) (*Logger, error) {
 	return &Logger{Module: module}, nil
 }
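For context: the new SetBackend hook makes backends swappable per Logger rather than only process-wide. A minimal usage sketch, assuming the fork's API as changed in this diff (NewMemoryBackend and AddModuleLevel are existing go-logging helpers that also appear in the tests further down):

    log := logging.MustGetLogger("mymodule")
    mem := logging.NewMemoryBackend(1024)  // private ring buffer of records
    leveled := logging.AddModuleLevel(mem) // wrap it with level filtering
    leveled.SetLevel(logging.DEBUG, "")    // accept all levels for all modules
    log.SetBackend(leveled)                // only this logger now writes to mem
    log.Debug("goes to the private backend, not the default one")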
@@ -148,6 +161,11 @@ func (l *Logger) log(lvl Level, format string, args ...interface{}) {

 	// calldepth=2 brings the stack up to the caller of the level
 	// methods, Info(), Fatal(), etc.
+	if l.haveBackend {
+		l.backend.Log(lvl, 2, record)
+		return
+	}
+
 	defaultBackend.Log(lvl, 2, record)
 }
@@ -178,33 +196,69 @@ func (l *Logger) Panicf(format string, args ...interface{}) {
 	panic(s)
 }

-// Critical logs a message using CRITICAL as log level.
-func (l *Logger) Critical(format string, args ...interface{}) {
+// Critical logs a message using CRITICAL as log level. (fmt.Sprint())
+func (l *Logger) Critical(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	l.log(CRITICAL, "%s", s)
+}
+
+// Criticalf logs a message using CRITICAL as log level.
+func (l *Logger) Criticalf(format string, args ...interface{}) {
 	l.log(CRITICAL, format, args...)
 }

-// Error logs a message using ERROR as log level.
-func (l *Logger) Error(format string, args ...interface{}) {
+// Error logs a message using ERROR as log level. (fmt.Sprint())
+func (l *Logger) Error(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	l.log(ERROR, "%s", s)
+}
+
+// Errorf logs a message using ERROR as log level.
+func (l *Logger) Errorf(format string, args ...interface{}) {
 	l.log(ERROR, format, args...)
 }

 // Warning logs a message using WARNING as log level.
-func (l *Logger) Warning(format string, args ...interface{}) {
+func (l *Logger) Warning(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	l.log(WARNING, "%s", s)
+}
+
+// Warningf logs a message using WARNING as log level.
+func (l *Logger) Warningf(format string, args ...interface{}) {
 	l.log(WARNING, format, args...)
 }

 // Notice logs a message using NOTICE as log level.
-func (l *Logger) Notice(format string, args ...interface{}) {
+func (l *Logger) Notice(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	l.log(NOTICE, "%s", s)
+}
+
+// Noticef logs a message using NOTICE as log level.
+func (l *Logger) Noticef(format string, args ...interface{}) {
 	l.log(NOTICE, format, args...)
 }

 // Info logs a message using INFO as log level.
-func (l *Logger) Info(format string, args ...interface{}) {
+func (l *Logger) Info(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	l.log(INFO, "%s", s)
+}
+
+// Infof logs a message using INFO as log level.
+func (l *Logger) Infof(format string, args ...interface{}) {
 	l.log(INFO, format, args...)
 }

 // Debug logs a message using DEBUG as log level.
-func (l *Logger) Debug(format string, args ...interface{}) {
+func (l *Logger) Debug(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	l.log(DEBUG, "%s", s)
+}
+
+// Debugf logs a message using DEBUG as log level.
+func (l *Logger) Debugf(format string, args ...interface{}) {
 	l.log(DEBUG, format, args...)
 }
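The practical upshot of the hunk above: each level now has a fmt.Sprint-style method (Critical, Error, Warning, Notice, Info, Debug) and a fmt.Sprintf-style twin with an "f" suffix, so format verbs passed to the non-f variants are no longer interpreted. A before/after sketch with hypothetical call sites:

    // Old API: Error took a format string.
    log.Error("dial failed: %s", err)

    // New API: choose the variant that matches the arguments.
    log.Errorf("dial failed: %s", err) // formatted, as before
    log.Error("dial failed: ", err)    // fmt.Sprint-style concatenation
    log.Error(err)                     // also fine now: any values

This is the rule the bulk of the call-site changes below apply mechanically: any call that passes a format string moves to the "f" variant.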
@@ -29,8 +29,25 @@ func TestRedact(t *testing.T) {
 	backend := InitForTesting(DEBUG)
 	password := Password("123456")
 	log := MustGetLogger("test")
-	log.Debug("foo %s", password)
+	log.Debugf("foo %s", password)
 	if "foo ******" != MemoryRecordN(backend, 0).Formatted(0) {
 		t.Errorf("redacted line: %v", MemoryRecordN(backend, 0))
 	}
 }
+
+func TestPrivateBackend(t *testing.T) {
+	stdBackend := InitForTesting(DEBUG)
+	log := MustGetLogger("test")
+	privateBackend := NewMemoryBackend(10240)
+	lvlBackend := AddModuleLevel(privateBackend)
+	lvlBackend.SetLevel(DEBUG, "")
+	log.SetBackend(lvlBackend)
+	log.Debug("to private backend")
+	if stdBackend.size > 0 {
+		t.Errorf("something in stdBackend, size of backend: %d", stdBackend.size)
+	}
+	if "to private baсkend" == MemoryRecordN(privateBackend, 0).Formatted(0) {
+		t.Errorf("logged to defaultBackend: %s", MemoryRecordN(privateBackend, 0))
+	}
+
+}
@@ -51,7 +51,7 @@ func TestMemoryBackend(t *testing.T) {

 	// Run 13 times, the resulting vector should be [5..12]
 	for i := 0; i < 13; i++ {
-		log.Info("%d", i)
+		log.Infof("%d", i)
 	}

 	if 8 != backend.size {
@@ -89,7 +89,7 @@ func TestChannelMemoryBackend(t *testing.T) {

 	// Run 13 times, the resulting vector should be [5..12]
 	for i := 0; i < 13; i++ {
-		log.Info("%d", i)
+		log.Infof("%d", i)
 	}
 	backend.Flush()
@@ -11,6 +11,7 @@ type datastoreBlockSet struct {
 	bset BlockSet
 }

+// NewDBWrapperSet returns a new blockset wrapping a given datastore
 func NewDBWrapperSet(d ds.Datastore, bset BlockSet) BlockSet {
 	return &datastoreBlockSet{
 		dstore: d,
@@ -21,7 +22,7 @@ func NewDBWrapperSet(d ds.Datastore, bset BlockSet) BlockSet {
 func (d *datastoreBlockSet) AddBlock(k util.Key) {
 	err := d.dstore.Put(k.DsKey(), []byte{})
 	if err != nil {
-		log.Error("blockset put error: %s", err)
+		log.Errorf("blockset put error: %s", err)
 	}

 	d.bset.AddBlock(k)
@@ -26,10 +26,10 @@ func TestOptionParsing(t *testing.T) {
 		t.Error("Should have passed")
 	}
 	if len(opts) != 4 || opts["beep"] != "" || opts["boop"] != "lol" || opts["c"] != "" || opts["foo"] != "5" {
-		t.Error("Returned options were defferent than expected: %v", opts)
+		t.Errorf("Returned options were defferent than expected: %v", opts)
 	}
 	if len(input) != 2 || input[0] != "test2" || input[1] != "beep" {
-		t.Error("Returned input was different than expected: %v", input)
+		t.Errorf("Returned input was different than expected: %v", input)
 	}

 	_, _, err = parseOptions([]string{"-beep=1", "-boop=2", "-beep=3"})
@@ -39,9 +39,9 @@ func TestOptionParsing(t *testing.T) {

 	path, args := parsePath([]string{"test", "beep", "boop"}, cmd)
 	if len(path) != 1 || path[0] != "test" {
-		t.Error("Returned path was defferent than expected: %v", path)
+		t.Errorf("Returned path was defferent than expected: %v", path)
 	}
 	if len(args) != 2 || args[0] != "beep" || args[1] != "boop" {
-		t.Error("Returned args were different than expected: %v", args)
+		t.Errorf("Returned args were different than expected: %v", args)
 	}
 }
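These test fixes matter because testing.T.Error formats like fmt.Sprintln and never interprets format verbs; only t.Errorf does. A sketch of the distinction (hypothetical test):

    func TestVerbs(t *testing.T) {
        opts := map[string]string{"boop": "lol"}
        t.Error("options: %v", opts)  // logs the literal "%v" followed by the map
        t.Errorf("options: %v", opts) // formats opts via fmt.Sprintf
    }

Modern go vet's printf check flags the first form ("call has possible formatting directive"), which is presumably how these call sites were found.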
@@ -73,8 +73,8 @@ func TestOptionValidation(t *testing.T) {
 	req = NewEmptyRequest()
 	req.SetOption("b", ":)")
 	res = cmd.Call(req)
-	if res.Error == nil {
-		t.Error(res.Error, "Should have failed (string value not convertible to int)")
+	if res.Error() == nil {
+		t.Error(res.Error(), "Should have failed (string value not convertible to int)")
 	}
 }
@@ -43,7 +43,7 @@ func printRefs(n *core.IpfsNode, nd *mdag.Node, refSeen map[u.Key]bool, recursiv
 	if recursive {
 		nd, err := n.DAG.Get(u.Key(link.Hash))
 		if err != nil {
-			log.Error("error: cannot retrieve %s (%s)\n", link.Hash.B58String(), err)
+			log.Errorf("error: cannot retrieve %s (%s)", link.Hash.B58String(), err)
 			return
 		}
core/core.go
@@ -6,8 +6,8 @@ import (
 	"fmt"

 	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
-	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
 	b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
+	ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
 	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"

 	bserv "github.com/jbenet/go-ipfs/blockservice"
@@ -230,25 +230,25 @@ func initIdentity(cfg *config.Config, peers peer.Peerstore, online bool) (peer.P
 func initConnections(ctx context.Context, cfg *config.Config, pstore peer.Peerstore, route *dht.IpfsDHT) {
 	for _, p := range cfg.Bootstrap {
 		if p.PeerID == "" {
-			log.Error("error: peer does not include PeerID. %v", p)
+			log.Errorf("error: peer does not include PeerID. %v", p)
 		}

 		maddr, err := ma.NewMultiaddr(p.Address)
 		if err != nil {
-			log.Error("%s", err)
+			log.Error(err)
 			continue
 		}

 		// setup peer
 		npeer, err := pstore.Get(peer.DecodePrettyID(p.PeerID))
 		if err != nil {
-			log.Error("Bootstrapping error: %v", err)
+			log.Errorf("Bootstrapping error: %v", err)
 			continue
 		}
 		npeer.AddAddress(maddr)

 		if _, err = route.Connect(ctx, npeer); err != nil {
-			log.Error("Bootstrapping error: %v", err)
+			log.Errorf("Bootstrapping error: %v", err)
 		}
 	}
 }
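One detail worth noting in the hunk above: log.Error("%s", err) becomes log.Error(err) rather than log.Errorf("%s", err). With the new Sprint-style Error the two produce the same output, since the variadic form stringifies its arguments and routes them through a "%s" format internally. A small sketch of the equivalence (hypothetical error value):

    err := errors.New("no route to peer")
    log.Error(err)        // prints: no route to peer
    log.Errorf("%s", err) // prints the same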
@@ -53,13 +53,13 @@ func NewDaemonListener(ipfsnode *core.IpfsNode, addr ma.Multiaddr, confdir strin

 	ofi, err := os.Create(confdir + "/rpcaddress")
 	if err != nil {
-		log.Warning("Could not create rpcaddress file: %s", err)
+		log.Warningf("Could not create rpcaddress file: %s", err)
 		return nil, err
 	}

 	_, err = ofi.Write([]byte(addr.String()))
 	if err != nil {
-		log.Warning("Could not write to rpcaddress file: %s", err)
+		log.Warningf("Could not write to rpcaddress file: %s", err)
 		return nil, err
 	}
 	ofi.Close()
@@ -148,7 +148,7 @@ func (dl *DaemonListener) handleConnection(conn manet.Conn) {
 		err = fmt.Errorf("Invalid Command: '%s'", command.Command)
 	}
 	if err != nil {
-		log.Error("%s: %s", command.Command, err)
+		log.Errorf("%s: %s", command.Command, err)
 		fmt.Fprintln(conn, err)
 	}
 }
@@ -72,7 +72,7 @@ func serverIsRunning(confdir string) bool {
 	var err error
 	confdir, err = u.TildeExpansion(confdir)
 	if err != nil {
-		log.Error("Tilde Expansion Failed: %s", err)
+		log.Errorf("Tilde Expansion Failed: %s", err)
 		return false
 	}
 	lk, err := daemonLock(confdir)
@@ -105,7 +105,7 @@ func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error)
 	log.Debug("Begin Diagnostic")

 	peers := d.getPeers()
-	log.Debug("Sending diagnostic request to %d peers.", len(peers))
+	log.Debugf("Sending diagnostic request to %d peers.", len(peers))

 	var out []*DiagInfo
 	di := d.getDiagInfo()
@@ -116,12 +116,12 @@ func (d *Diagnostics) GetDiagnostic(timeout time.Duration) ([]*DiagInfo, error)
 	respdata := make(chan []byte)
 	sends := 0
 	for _, p := range peers {
-		log.Debug("Sending getDiagnostic to: %s", p)
+		log.Debugf("Sending getDiagnostic to: %s", p)
 		sends++
 		go func(p peer.Peer) {
 			data, err := d.getDiagnosticFromPeer(ctx, p, pmes)
 			if err != nil {
-				log.Error("GetDiagnostic error: %v", err)
+				log.Errorf("GetDiagnostic error: %v", err)
 				respdata <- nil
 				return
 			}
@@ -147,7 +147,7 @@ func AppendDiagnostics(data []byte, cur []*DiagInfo) []*DiagInfo {
 		err := dec.Decode(di)
 		if err != nil {
 			if err != io.EOF {
-				log.Error("error decoding DiagInfo: %v", err)
+				log.Errorf("error decoding DiagInfo: %v", err)
 			}
 			break
 		}
@@ -189,7 +189,7 @@ func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Mes
 	}

 	rtt := time.Since(start)
-	log.Info("diagnostic request took: %s", rtt.String())
+	log.Infof("diagnostic request took: %s", rtt.String())

 	rpmes := new(pb.Message)
 	if err := proto.Unmarshal(rmes.Data(), rpmes); err != nil {
@@ -200,7 +200,7 @@ func (d *Diagnostics) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Mes
 }

 func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
-	log.Debug("HandleDiagnostic from %s for id = %s", p, pmes.GetDiagID())
+	log.Debugf("HandleDiagnostic from %s for id = %s", p, pmes.GetDiagID())
 	resp := newMessage(pmes.GetDiagID())
 	d.diagLock.Lock()
 	_, found := d.diagMap[pmes.GetDiagID()]
@@ -220,12 +220,12 @@ func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Messa
 	respdata := make(chan []byte)
 	sendcount := 0
 	for _, p := range d.getPeers() {
-		log.Debug("Sending diagnostic request to peer: %s", p)
+		log.Debugf("Sending diagnostic request to peer: %s", p)
 		sendcount++
 		go func(p peer.Peer) {
 			out, err := d.getDiagnosticFromPeer(ctx, p, pmes)
 			if err != nil {
-				log.Error("getDiagnostic error: %v", err)
+				log.Errorf("getDiagnostic error: %v", err)
 				respdata <- nil
 				return
 			}
@@ -237,7 +237,7 @@ func (d *Diagnostics) handleDiagnostic(p peer.Peer, pmes *pb.Message) (*pb.Messa
 		out := <-respdata
 		_, err := buf.Write(out)
 		if err != nil {
-			log.Error("getDiagnostic write output error: %v", err)
+			log.Errorf("getDiagnostic write output error: %v", err)
 			continue
 		}
 	}
@@ -263,18 +263,18 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, mes msg.NetMessage) msg
 	pmes := new(pb.Message)
 	err := proto.Unmarshal(mData, pmes)
 	if err != nil {
-		log.Error("Failed to decode protobuf message: %v", err)
+		log.Errorf("Failed to decode protobuf message: %v", err)
 		return nil
 	}

 	// Print out diagnostic
-	log.Info("[peer: %s] Got message from [%s]\n",
+	log.Infof("[peer: %s] Got message from [%s]\n",
 		d.self.ID().Pretty(), mPeer.ID().Pretty())

 	// dispatch handler.
 	rpmes, err := d.handleDiagnostic(mPeer, pmes)
 	if err != nil {
-		log.Error("handleDiagnostic error: %s", err)
+		log.Errorf("handleDiagnostic error: %s", err)
 		return nil
 	}

@@ -286,7 +286,7 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, mes msg.NetMessage) msg
 	// serialize response msg
 	rmes, err := msg.FromObject(mPeer, rpmes)
 	if err != nil {
-		log.Error("Failed to encode protobuf message: %v", err)
+		log.Errorf("Failed to encode protobuf message: %v", err)
 		return nil
 	}
@@ -88,13 +88,13 @@ func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error)
 			log.Debug("bitswap dialing peer: %s", p)
 			err := bs.sender.DialPeer(p)
 			if err != nil {
-				log.Error("Error sender.DialPeer(%s)", p)
+				log.Errorf("Error sender.DialPeer(%s)", p)
 				return
 			}

 			response, err := bs.sender.SendRequest(ctx, p, message)
 			if err != nil {
-				log.Error("Error sender.SendRequest(%s)", p)
+				log.Errorf("Error sender.SendRequest(%s)", p)
 				return
 			}
 			// FIXME ensure accounting is handled correctly when
@@ -254,7 +254,7 @@ func TestFastRepublish(t *testing.T) {
 	hasPublished := func() bool {
 		res, err := node.Namesys.Resolve(pubkeyHash)
 		if err != nil {
-			t.Fatal("resolve err: %v", err)
+			t.Fatalf("resolve err: %v", err)
 		}
 		return res != resolvedHash
 	}
@@ -264,7 +264,7 @@ func TestFastRepublish(t *testing.T) {
 	// at this point, should not have written dataA and not have written dataB
 	rbuf, err := ioutil.ReadFile(fname)
 	if err != nil || !bytes.Equal(rbuf, dataA) {
-		t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
+		t.Fatalf("Data inconsistent! %v %v", err, string(rbuf))
 	}

 	if hasPublished() {
@@ -276,7 +276,7 @@ func TestFastRepublish(t *testing.T) {
 	// at this point, should have written written dataB, but not published it
 	rbuf, err = ioutil.ReadFile(fname)
 	if err != nil || !bytes.Equal(rbuf, dataB) {
-		t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
+		t.Fatalf("Data inconsistent! %v %v", err, string(rbuf))
 	}

 	if hasPublished() {
@@ -288,7 +288,7 @@ func TestFastRepublish(t *testing.T) {
 	// at this point, should have written written dataB, and published it
 	rbuf, err = ioutil.ReadFile(fname)
 	if err != nil || !bytes.Equal(rbuf, dataB) {
-		t.Fatal("Data inconsistent! %v %v", err, string(rbuf))
+		t.Fatalf("Data inconsistent! %v %v", err, string(rbuf))
 	}

 	if !hasPublished() {
@@ -62,7 +62,7 @@ func CreateRoot(n *core.IpfsNode, keys []ci.PrivKey, ipfsroot string) (*Root, er
 		pub := k.GetPublic()
 		hash, err := pub.Hash()
 		if err != nil {
-			log.Error("Read Root Error: %s", err)
+			log.Errorf("Read Root Error: %s", err)
 			return nil, err
 		}
 		root.LocalLink = &Link{u.Key(hash).Pretty()}
@@ -91,7 +91,7 @@ func CreateRoot(n *core.IpfsNode, keys []ci.PrivKey, ipfsroot string) (*Root, er
 	}

 	if !u.IsValidHash(pointsTo) {
-		log.Critical("Got back bad data from namesys resolve! [%s]", pointsTo)
+		log.Criticalf("Got back bad data from namesys resolve! [%s]", pointsTo)
 		return nil, nil
 	}

@@ -132,7 +132,7 @@ func (*Root) Attr() fuse.Attr {

 // Lookup performs a lookup under this node.
 func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
-	log.Debug("ipns: Root Lookup: '%s'", name)
+	log.Debugf("ipns: Root Lookup: '%s'", name)
 	switch name {
 	case "mach_kernel", ".hidden", "._.":
 		// Just quiet some log noise on OS X.
@@ -151,10 +151,10 @@ func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
 		return nd, nil
 	}

-	log.Debug("ipns: Falling back to resolution for [%s].", name)
+	log.Debugf("ipns: Falling back to resolution for [%s].", name)
 	resolved, err := s.Ipfs.Namesys.Resolve(name)
 	if err != nil {
-		log.Warning("ipns: namesys resolve error: %s", err)
+		log.Warningf("ipns: namesys resolve error: %s", err)
 		return nil, fuse.ENOENT
 	}

@@ -174,7 +174,7 @@ func (r *Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
 		pub := k.GetPublic()
 		hash, err := pub.Hash()
 		if err != nil {
-			log.Error("Read Root Error: %s", err)
+			log.Errorf("Read Root Error: %s", err)
 			continue
 		}
 		ent := fuse.Dirent{
@@ -220,7 +220,7 @@ func (s *Node) Attr() fuse.Attr {
 	if s.cached == nil {
 		err := s.loadData()
 		if err != nil {
-			log.Error("Error loading PBData for file: '%s'", s.name)
+			log.Errorf("Error loading PBData for file: '%s'", s.name)
 		}
 	}
 	switch s.cached.GetType() {
@@ -229,7 +229,7 @@ func (s *Node) Attr() fuse.Attr {
 	case ftpb.Data_File, ftpb.Data_Raw:
 		size, err := ft.DataSize(s.Nd.Data)
 		if err != nil {
-			log.Error("Error getting size of file: %s", err)
+			log.Errorf("Error getting size of file: %s", err)
 			size = 0
 		}
 		return fuse.Attr{
@@ -245,7 +245,7 @@ func (s *Node) Attr() fuse.Attr {

 // Lookup performs a lookup under this node.
 func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
-	log.Debug("ipns: node[%s] Lookup '%s'", s.name, name)
+	log.Debugf("ipns: node[%s] Lookup '%s'", s.name, name)
 	nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
 	if err != nil {
 		// todo: make this error more versatile.
@@ -294,7 +294,7 @@ func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {

 // ReadAll reads the object data as file data
 func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
-	log.Debug("ipns: ReadAll [%s]", s.name)
+	log.Debugf("ipns: ReadAll [%s]", s.name)
 	r, err := uio.NewDagReader(s.Nd, s.Ipfs.DAG)
 	if err != nil {
 		return nil, err
@@ -303,20 +303,20 @@ func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
 	// what if i have a 6TB file? GG RAM.
 	b, err := ioutil.ReadAll(r)
 	if err != nil {
-		log.Error("[%s] Readall error: %s", s.name, err)
+		log.Errorf("[%s] Readall error: %s", s.name, err)
 		return nil, err
 	}
 	return b, nil
 }

 func (n *Node) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.Intr) fuse.Error {
-	log.Debug("ipns: Node Write [%s]: flags = %s, offset = %d, size = %d", n.name, req.Flags.String(), req.Offset, len(req.Data))
+	log.Debugf("ipns: Node Write [%s]: flags = %s, offset = %d, size = %d", n.name, req.Flags.String(), req.Offset, len(req.Data))

 	if n.dagMod == nil {
 		// Create a DagModifier to allow us to change the existing dag node
 		dmod, err := uio.NewDagModifier(n.Nd, n.Ipfs.DAG, chunk.DefaultSplitter)
 		if err != nil {
-			log.Error("Error creating dag modifier: %s", err)
+			log.Errorf("Error creating dag modifier: %s", err)
 			return err
 		}
 		n.dagMod = dmod
@@ -330,13 +330,13 @@ func (n *Node) Write(req *fuse.WriteRequest, resp *fuse.WriteResponse, intr fs.I
 }

 func (n *Node) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error {
-	log.Debug("Got flush request [%s]!", n.name)
+	log.Debugf("Got flush request [%s]!", n.name)

 	// If a write has happened
 	if n.dagMod != nil {
 		newNode, err := n.dagMod.GetNode()
 		if err != nil {
-			log.Error("Error getting dag node from dagMod: %s", err)
+			log.Errorf("Error getting dag node from dagMod: %s", err)
 			return err
 		}

@@ -344,7 +344,7 @@ func (n *Node) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error {
 			log.Debug("updating self in parent!")
 			err := n.parent.update(n.name, newNode)
 			if err != nil {
-				log.Critical("error in updating ipns dag tree: %s", err)
+				log.Criticalf("error in updating ipns dag tree: %s", err)
 				// return fuse.ETHISISPRETTYBAD
 				return err
 			}
@@ -397,20 +397,20 @@ func (n *Node) republishRoot() error {
 	// Add any nodes that may be new to the DAG service
 	err := n.Ipfs.DAG.AddRecursive(root.Nd)
 	if err != nil {
-		log.Critical("ipns: Dag Add Error: %s", err)
+		log.Criticalf("ipns: Dag Add Error: %s", err)
 		return err
 	}

 	ndkey, err := root.Nd.Key()
 	if err != nil {
-		log.Error("getKey error: %s", err)
+		log.Errorf("getKey error: %s", err)
 		return err
 	}
 	log.Debug("Publishing changes!")

 	err = n.Ipfs.Namesys.Publish(root.key, ndkey.Pretty())
 	if err != nil {
-		log.Error("ipns: Publish Failed: %s", err)
+		log.Errorf("ipns: Publish Failed: %s", err)
 		return err
 	}
 	return nil
@@ -442,7 +442,7 @@ func (n *Node) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error)
 	if n.parent != nil {
 		err := n.parent.update(n.name, nnode)
 		if err != nil {
-			log.Critical("Error updating node: %s", err)
+			log.Criticalf("Error updating node: %s", err)
 			return nil, err
 		}
 	}
@@ -472,7 +472,7 @@ func (n *Node) Mknod(req *fuse.MknodRequest, intr fs.Intr) (fs.Node, fuse.Error)
 }

 func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) {
-	log.Debug("Got create request: %s", req.Name)
+	log.Debugf("Got create request: %s", req.Name)

 	// New 'empty' file
 	nd := &mdag.Node{Data: ft.FilePBData(nil, 0)}
@@ -482,13 +482,13 @@ func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr f

 	err := nnode.AddNodeLink(req.Name, nd)
 	if err != nil {
-		log.Error("Error adding child to node: %s", err)
+		log.Errorf("Error adding child to node: %s", err)
 		return nil, nil, err
 	}
 	if n.parent != nil {
 		err := n.parent.update(n.name, nnode)
 		if err != nil {
-			log.Critical("Error updating node: %s", err)
+			log.Criticalf("Error updating node: %s", err)
 			// Can we panic, please?
 			return nil, nil, err
 		}
@@ -500,7 +500,7 @@ func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr f
 }

 func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {
-	log.Debug("[%s] Got Remove request: %s", n.name, req.Name)
+	log.Debugf("[%s] Got Remove request: %s", n.name, req.Name)
 	nnode := n.Nd.Copy()
 	err := nnode.RemoveNodeLink(req.Name)
 	if err != nil {
@@ -511,7 +511,7 @@ func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {
 	if n.parent != nil {
 		err := n.parent.update(n.name, nnode)
 		if err != nil {
-			log.Critical("Error updating node: %s", err)
+			log.Criticalf("Error updating node: %s", err)
 			return err
 		}
 	}
@@ -521,7 +521,7 @@ func (n *Node) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {
 }

 func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fuse.Error {
-	log.Debug("Got Rename request '%s' -> '%s'", req.OldName, req.NewName)
+	log.Debugf("Got Rename request '%s' -> '%s'", req.OldName, req.NewName)
 	var mdn *mdag.Node
 	for _, l := range n.Nd.Links {
 		if l.Name == req.OldName {
@@ -538,7 +538,7 @@ func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fus
 	case *Node:
 		err := newDir.Nd.AddNodeLink(req.NewName, mdn)
 		if err != nil {
-			log.Error("Error adding node to new dir on rename: %s", err)
+			log.Errorf("Error adding node to new dir on rename: %s", err)
 			return err
 		}
 	default:
@@ -550,7 +550,7 @@ func (n *Node) Rename(req *fuse.RenameRequest, newDir fs.Node, intr fs.Intr) fus

 // Updates the child of this node, specified by name to the given newnode
 func (n *Node) update(name string, newnode *mdag.Node) error {
-	log.Debug("update '%s' in '%s'", name, n.name)
+	log.Debugf("update '%s' in '%s'", name, n.name)
 	nnode := n.Nd.Copy()
 	err := nnode.RemoveNodeLink(name)
 	if err != nil {
@@ -23,6 +23,7 @@ func Mount(ipfs *core.IpfsNode, fpath string, ipfspath string) error {
 		syscall.SIGTERM, syscall.SIGQUIT)

 	go func() {
+		defer ipfs.Network.Close()
 		<-sigc
 		for {
 			err := Unmount(fpath)
@@ -31,7 +32,6 @@ func Mount(ipfs *core.IpfsNode, fpath string, ipfspath string) error {
 			}
 			time.Sleep(time.Millisecond * 100)
 		}
-		ipfs.Network.Close()
 	}()

 	c, err := fuse.Mount(fpath)
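Moving ipfs.Network.Close() into a defer (here and in the readonly Mount below) guarantees the network is closed on every exit path of the signal-handling goroutine, not only when control reaches the end of the retry loop. The shape of the change as a sketch, assuming the elided lines return once Unmount succeeds:

    go func() {
        defer ipfs.Network.Close() // runs however the goroutine exits
        <-sigc
        for {
            if err := Unmount(fpath); err == nil {
                return // previously this path skipped the trailing Close()
            }
            time.Sleep(time.Millisecond * 100)
        }
    }()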
@@ -54,7 +54,7 @@ func (*Root) Attr() fuse.Attr {

 // Lookup performs a lookup under this node.
 func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
-	log.Debug("Root Lookup: '%s'", name)
+	log.Debugf("Root Lookup: '%s'", name)
 	switch name {
 	case "mach_kernel", ".hidden", "._.":
 		// Just quiet some log noise on OS X.
@@ -162,6 +162,7 @@ func Mount(ipfs *core.IpfsNode, fpath string) error {
 		syscall.SIGTERM, syscall.SIGQUIT)

 	go func() {
+		defer ipfs.Network.Close()
 		<-sigc
 		for {
 			err := Unmount(fpath)
@@ -170,7 +171,6 @@ func Mount(ipfs *core.IpfsNode, fpath string) error {
 			}
 			time.Sleep(time.Millisecond * 10)
 		}
-		ipfs.Network.Close()
 	}()

 	c, err := fuse.Mount(fpath)
@@ -8,7 +8,7 @@ import (

 var log = util.Logger("chunk")

-var DefaultSplitter = &SizeSplitter{1024 * 512}
+var DefaultSplitter = &SizeSplitter{Size: 1024 * 512}

 type BlockSplitter interface {
 	Split(r io.Reader) chan []byte
@@ -32,7 +32,7 @@ func (ss *SizeSplitter) Split(r io.Reader) chan []byte {
 			}
 			return
 		}
-		log.Error("Block split error: %s", err)
+		log.Errorf("Block split error: %s", err)
 		return
 	}
 	if nread < ss.Size {
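The DefaultSplitter change is purely a switch to a keyed composite literal: naming the field means the literal stays valid if fields are ever added to or reordered in SizeSplitter, and it satisfies the unkeyed-fields check that go vet applies to composite literals of types from other packages (as in the importer test below). A sketch of the two forms:

    type SizeSplitter struct {
        Size int
    }

    var a = &SizeSplitter{1024 * 512}       // positional: breaks if a field is added before Size
    var b = &SizeSplitter{Size: 1024 * 512} // keyed: robust to struct changes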
@@ -38,9 +38,9 @@ func TestBuildDag(t *testing.T) {

 //Test where calls to read are smaller than the chunk size
 func TestSizeBasedSplit(t *testing.T) {
-	bs := &chunk.SizeSplitter{512}
+	bs := &chunk.SizeSplitter{Size: 512}
 	testFileConsistency(t, bs, 32*512)
-	bs = &chunk.SizeSplitter{4096}
+	bs = &chunk.SizeSplitter{Size: 4096}
 	testFileConsistency(t, bs, 32*4096)

 	// Uneven offset
@@ -138,8 +138,8 @@ func (c *singleConn) Out() chan<- []byte {

 // ID returns the ID of a given Conn.
 func ID(c Conn) string {
-	l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().ID)
-	r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().ID)
+	l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().ID())
+	r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().ID())
 	lh := u.Hash([]byte(l))
 	rh := u.Hash([]byte(r))
 	ch := u.XOR(lh, rh)
@@ -25,14 +25,14 @@ func (d *Dialer) Dial(ctx context.Context, network string, remote peer.Peer) (Co

 	remote, err := d.Peerstore.Add(remote)
 	if err != nil {
-		log.Error("Error putting peer into peerstore: %s", remote)
+		log.Errorf("Error putting peer into peerstore: %s", remote)
 	}

 	// TODO: try to get reusing addr/ports to work.
 	// madialer := manet.Dialer{LocalAddr: laddr}
 	madialer := manet.Dialer{}

-	log.Info("%s dialing %s %s", d.LocalPeer, remote, raddr)
+	log.Infof("%s dialing %s %s", d.LocalPeer, remote, raddr)
 	maconn, err := madialer.Dial(raddr)
 	if err != nil {
 		return nil, err
@@ -26,7 +26,7 @@ func Handshake1(ctx context.Context, c Conn) error {
 	}

 	c.Out() <- myVerBytes
-	log.Debug("Sent my version (%s) to %s", localH, rpeer)
+	log.Debugf("Sent my version (%s) to %s", localH, rpeer)

 	select {
 	case <-ctx.Done():
@@ -50,11 +50,11 @@ func Handshake1(ctx context.Context, c Conn) error {
 	}

 	if err := handshake.Handshake1Compatible(localH, remoteH); err != nil {
-		log.Info("%s (%s) incompatible version with %s (%s)", lpeer, localH, rpeer, remoteH)
+		log.Infof("%s (%s) incompatible version with %s (%s)", lpeer, localH, rpeer, remoteH)
 		return err
 	}

-	log.Debug("%s version handshake compatible %s", lpeer, rpeer)
+	log.Debugf("%s version handshake compatible %s", lpeer, rpeer)
 	return nil
 }

@@ -71,7 +71,7 @@ func Handshake3(ctx context.Context, c Conn) error {
 	}

 	c.Out() <- localB
-	log.Debug("Handshake1: sent to %s", rpeer)
+	log.Debugf("Handshake1: sent to %s", rpeer)

 	select {
 	case <-ctx.Done():
@@ -91,11 +91,11 @@ func Handshake3(ctx context.Context, c Conn) error {
 		return fmt.Errorf("Handshake3 could not decode remote msg: %q", err)
 	}

-	log.Debug("Handshake3 received from %s", rpeer)
+	log.Debugf("Handshake3 received from %s", rpeer)
 	}

 	if err := handshake.Handshake3UpdatePeer(rpeer, remoteH); err != nil {
-		log.Error("Handshake3 failed to update %s", rpeer)
+		log.Errorf("Handshake3 failed to update %s", rpeer)
 		return err
 	}
@@ -60,13 +60,13 @@ func (l *listener) listen() {

 		c, err := newSingleConn(l.ctx, l.local, nil, maconn)
 		if err != nil {
-			log.Error("Error accepting connection: %v", err)
+			log.Errorf("Error accepting connection: %v", err)
 			return
 		}

 		sc, err := newSecureConn(l.ctx, c, l.peers)
 		if err != nil {
-			log.Error("Error securing connection: %v", err)
+			log.Errorf("Error securing connection: %v", err)
 			return
 		}

@@ -84,7 +84,7 @@ func (l *listener) listen() {
 			default:
 			}

-			log.Error("Failed to accept connection: %v", err)
+			log.Errorf("Failed to accept connection: %v", err)
 			continue
 		}
@@ -69,13 +69,13 @@ func (c *MultiConn) Add(conns ...Conn) {
 	for _, c2 := range conns {
 		log.Info("MultiConn: adding %s", c2)
 		if c.LocalPeer() != c2.LocalPeer() || c.RemotePeer() != c2.RemotePeer() {
-			log.Error("%s", c2)
+			log.Error(c2)
 			c.Unlock() // ok to unlock (to log). panicing.
-			log.Error("%s", c)
-			log.Error("c.LocalPeer: %s %p", c.LocalPeer(), c.LocalPeer())
-			log.Error("c2.LocalPeer: %s %p", c2.LocalPeer(), c2.LocalPeer())
-			log.Error("c.RemotePeer: %s %p", c.RemotePeer(), c.RemotePeer())
-			log.Error("c2.RemotePeer: %s %p", c2.RemotePeer(), c2.RemotePeer())
+			log.Error(c)
+			// log.Errorf("c.LocalPeer: %s %p", c.LocalPeer(), c.LocalPeer())
+			// log.Errorf("c2.LocalPeer: %s %p", c2.LocalPeer(), c2.LocalPeer())
+			// log.Errorf("c.RemotePeer: %s %p", c.RemotePeer(), c.RemotePeer())
+			// log.Errorf("c2.RemotePeer: %s %p", c2.RemotePeer(), c2.RemotePeer())
 			c.Lock() // gotta relock to avoid lock panic from deferring.
 			panic("connection addresses mismatch")
 		}
@@ -31,13 +31,13 @@ func newSecureConn(ctx context.Context, insecure Conn, peers peer.Peerstore) (Co
 	}
 	conn.ContextCloser = ctxc.NewContextCloser(ctx, conn.close)

-	log.Debug("newSecureConn: %v to %v", insecure.LocalPeer(), insecure.RemotePeer())
+	log.Debugf("newSecureConn: %v to %v", insecure.LocalPeer(), insecure.RemotePeer())
 	// perform secure handshake before returning this connection.
 	if err := conn.secureHandshake(peers); err != nil {
 		conn.Close()
 		return nil, err
 	}
-	log.Debug("newSecureConn: %v to %v handshake success!", insecure.LocalPeer(), insecure.RemotePeer())
+	log.Debugf("newSecureConn: %v to %v handshake success!", insecure.LocalPeer(), insecure.RemotePeer())

 	return conn, nil
 }
@@ -78,9 +78,9 @@ func (c *secureConn) secureHandshake(peers peer.Peerstore) error {
 		// update: this actually might happen under normal operation-- should
 		// perhaps return an error. TBD.

-		log.Error("secureConn peer mismatch. %v != %v", insecureSC.remote, c.secure.RemotePeer())
-		log.Error("insecureSC.remote: %s %#v", insecureSC.remote, insecureSC.remote)
-		log.Error("c.secure.LocalPeer: %s %#v", c.secure.RemotePeer(), c.secure.RemotePeer())
+		log.Errorf("secureConn peer mismatch. %v != %v", insecureSC.remote, c.secure.RemotePeer())
+		log.Errorf("insecureSC.remote: %s %#v", insecureSC.remote, insecureSC.remote)
+		log.Errorf("c.secure.LocalPeer: %s %#v", c.secure.RemotePeer(), c.secure.RemotePeer())
 		panic("secureConn peer mismatch. consructed incorrectly?")
 	}
@@ -44,7 +44,7 @@ func Handshake3UpdatePeer(remotePeer peer.Peer, msg *pb.Handshake3) error {
 		addr, err := ma.NewMultiaddrBytes(a)
 		if err != nil {
 			err = fmt.Errorf("remote peer address not a multiaddr: %s", err)
-			log.Error("Handshake3: error %s", err)
+			log.Errorf("Handshake3: error %s", err)
 			return err
 		}
 		remotePeer.AddAddress(addr)
@@ -156,21 +156,21 @@ func (m *Muxer) handleIncomingMessage(m1 msg.NetMessage) {

 	data, pid, err := unwrapData(m1.Data())
 	if err != nil {
-		log.Error("muxer de-serializing error: %v", err)
+		log.Errorf("muxer de-serializing error: %v", err)
 		return
 	}

 	m2 := msg.New(m1.Peer(), data)
 	proto, found := m.Protocols[pid]
 	if !found {
-		log.Error("muxer unknown protocol %v", pid)
+		log.Errorf("muxer unknown protocol %v", pid)
 		return
 	}

 	select {
 	case proto.GetPipe().Incoming <- m2:
 	case <-m.ctx.Done():
-		log.Error("%s", m.ctx.Err())
+		log.Error(m.ctx.Err())
 		return
 	}
 }
@@ -198,7 +198,7 @@ func (m *Muxer) handleOutgoingMessages(pid pb.ProtocolID, proto Protocol) {
 func (m *Muxer) handleOutgoingMessage(pid pb.ProtocolID, m1 msg.NetMessage) {
 	data, err := wrapData(m1.Data(), pid)
 	if err != nil {
-		log.Error("muxer serializing error: %v", err)
+		log.Errorf("muxer serializing error: %v", err)
 		return
 	}
@@ -198,14 +198,14 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
 	// unwrap the incoming message
 	data, rid, err := unwrapData(m.Data())
 	if err != nil {
-		log.Error("de-serializing error: %v", err)
+		log.Errorf("de-serializing error: %v", err)
 	}
 	m2 := msg.New(m.Peer(), data)

 	// if it's a request (or has no RequestID), handle it
 	if rid == nil || rid.IsRequest() {
 		if s.Handler == nil {
-			log.Error("service dropped msg: %v", m)
+			log.Errorf("service dropped msg: %v", m)
 			return // no handler, drop it.
 		}

@@ -216,7 +216,7 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
 		if r1 != nil {
 			err := s.sendMessage(ctx, r1, rid.Response())
 			if err != nil {
-				log.Error("error sending response message: %v", err)
+				log.Errorf("error sending response message: %v", err)
 			}
 		}
 		return
@@ -224,7 +224,7 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {

 	// Otherwise, it is a response. handle it.
 	if !rid.IsResponse() {
-		log.Error("RequestID should identify a response here.")
+		log.Errorf("RequestID should identify a response here.")
 	}

 	key := RequestKey(m.Peer().ID(), RequestID(rid))
@@ -233,7 +233,7 @@ func (s *service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) {
 	s.RequestsLock.RUnlock()

 	if !found {
-		log.Error("no request key %v (timeout?)", []byte(key))
+		log.Errorf("no request key %v (timeout?)", []byte(key))
 		return
 	}
@@ -24,7 +24,7 @@ func (s *Swarm) listen() error {
 		if err != nil {
 			hasErr = true
 			retErr.Errors[i] = err
-			log.Error("Failed to listen on: %s - %s", addr, err)
+			log.Errorf("Failed to listen on: %s - %s", addr, err)
 		}
 	}

@@ -116,7 +116,7 @@ func (s *Swarm) connSetup(c conn.Conn) (conn.Conn, error) {
 		conns := []conn.Conn{c}
 		mc, err := conn.NewMultiConn(s.Context(), s.local, c.RemotePeer(), conns)
 		if err != nil {
-			log.Error("error creating multiconn: %s", err)
+			log.Errorf("error creating multiconn: %s", err)
 			c.Close()
 			return nil, err
 		}
@@ -126,15 +126,15 @@ func (s *Swarm) connSetup(c conn.Conn) (conn.Conn, error) {

 		// kick off reader goroutine
 		go s.fanInSingle(mc)
-		log.Debug("added new multiconn: %s", mc)
+		log.Debugf("added new multiconn: %s", mc)
 	} else {
 		s.connsLock.Unlock() // unlock before adding new conn

 		mc.Add(c)
-		log.Debug("multiconn found: %s", mc)
+		log.Debugf("multiconn found: %s", mc)
 	}

-	log.Debug("multiconn added new conn %s", c)
+	log.Debugf("multiconn added new conn %s", c)
 	return c, nil
 }

@@ -151,7 +151,7 @@ func (s *Swarm) fanOut() {

 		case msg, ok := <-s.Outgoing:
 			if !ok {
-				log.Info("%s outgoing channel closed", s)
+				log.Infof("%s outgoing channel closed", s)
 				return
 			}

@@ -162,12 +162,12 @@ func (s *Swarm) fanOut() {
 			if !found {
 				e := fmt.Errorf("Sent msg to peer without open conn: %v", msg.Peer())
 				s.errChan <- e
-				log.Error("%s", e)
+				log.Error(e)
 				continue
 			}

 			i++
-			log.Debug("%s sent message to %s (%d)", s.local, msg.Peer(), i)
+			log.Debugf("%s sent message to %s (%d)", s.local, msg.Peer(), i)
 			// queue it in the connection's buffer
 			c.Out() <- msg.Data()
 		}
@@ -202,11 +202,11 @@ func (s *Swarm) fanInSingle(c conn.Conn) {

 		case data, ok := <-c.In():
 			if !ok {
-				log.Info("%s in channel closed", c)
+				log.Infof("%s in channel closed", c)
 				return // channel closed.
 			}
 			i++
-			log.Debug("%s received message from %s (%d)", s.local, c.RemotePeer(), i)
+			log.Debugf("%s received message from %s (%d)", s.local, c.RemotePeer(), i)
 			s.Incoming <- msg.New(c.RemotePeer(), data)
 		}
 	}
@@ -238,7 +238,7 @@ func (p *peer) VerifyAndSetPrivKey(sk ic.PrivKey) error {
 	// keys not equal. invariant violated. this warrants a panic.
 	// these keys should be _the same_ because peer.ID = H(pk)
 	// this mismatch should never happen.
-	log.Error("%s had PrivKey: %v -- got %v", p, p.privKey, sk)
+	log.Errorf("%s had PrivKey: %v -- got %v", p, p.privKey, sk)
 	panic("invariant violated: unexpected key mismatch")
 }

@@ -270,7 +270,7 @@ func (p *peer) VerifyAndSetPubKey(pk ic.PubKey) error {
 	// keys not equal. invariant violated. this warrants a panic.
 	// these keys should be _the same_ because peer.ID = H(pk)
 	// this mismatch should never happen.
-	log.Error("%s had PubKey: %v -- got %v", p, p.pubKey, pk)
+	log.Errorf("%s had PubKey: %v -- got %v", p, p.pubKey, pk)
 	panic("invariant violated: unexpected key mismatch")
 }
@@ -24,7 +24,7 @@ func TestPinnerBasic(t *testing.T) {
 		t.Fatal(err)
 	}

-	dserv := &mdag.DAGService{bserv}
+	dserv := &mdag.DAGService{Blocks: bserv}

 	p := NewPinner(dstore, dserv)
@@ -84,7 +84,7 @@ func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, dialer inet.Dia

 // Connect to a new peer at the given address, ping and add to the routing table
 func (dht *IpfsDHT) Connect(ctx context.Context, npeer peer.Peer) (peer.Peer, error) {
-	log.Debug("Connect to new peer: %s", npeer)
+	log.Debugf("Connect to new peer: %s", npeer)

 	// TODO(jbenet,whyrusleeping)
 	//
@@ -139,7 +139,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 	dht.Update(mPeer)

 	// Print out diagnostic
-	log.Debug("[peer: %s] Got message type: '%s' [from = %s]\n",
+	log.Debugf("%s got message type: '%s' from %s",
 		dht.self, Message_MessageType_name[int32(pmes.GetType())], mPeer)

 	// get handler for this msg type.
@@ -152,7 +152,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 	// dispatch handler.
 	rpmes, err := handler(mPeer, pmes)
 	if err != nil {
-		log.Error("handle message error: %s", err)
+		log.Errorf("handle message error: %s", err)
 		return nil
 	}

@@ -165,7 +165,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
 	// serialize response msg
 	rmes, err := msg.FromObject(mPeer, rpmes)
 	if err != nil {
-		log.Error("serialze response error: %s", err)
+		log.Errorf("serialze response error: %s", err)
 		return nil
 	}

@@ -184,7 +184,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *Message)
 	start := time.Now()

 	// Print out diagnostic
-	log.Debug("Sent message type: '%s' [to = %s]",
+	log.Debugf("Sent message type: '%s' to %s",
 		Message_MessageType_name[int32(pmes.GetType())], p)

 	rmes, err := dht.sender.SendRequest(ctx, mes)
@@ -235,7 +235,7 @@ func (dht *IpfsDHT) putProvider(ctx context.Context, p peer.Peer, key string) er
 		return err
 	}

-	log.Debug("%s putProvider: %s for %s", dht.self, p, key)
+	log.Debugf("%s putProvider: %s for %s", dht.self, p, key)
 	if rpmes.GetKey() != pmes.GetKey() {
 		return errors.New("provider not added correctly")
 	}
@@ -251,7 +251,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
 		return nil, nil, err
 	}

-	log.Debug("pmes.GetValue() %v", pmes.GetValue())
+	log.Debugf("pmes.GetValue() %v", pmes.GetValue())
 	if value := pmes.GetValue(); value != nil {
 		// Success! We were given the value
 		log.Debug("getValueOrPeers: got value")
@@ -273,7 +273,7 @@ func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.Peer,
 	for _, pb := range pmes.GetCloserPeers() {
 		pr, err := dht.peerFromInfo(pb)
 		if err != nil {
-			log.Error("%s", err)
+			log.Error(err)
 			continue
 		}
 		peers = append(peers, pr)
@@ -306,13 +306,13 @@ func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key,
 	for _, pinfo := range peerlist {
 		p, err := dht.ensureConnectedToPeer(pinfo)
 		if err != nil {
-			log.Error("getFromPeers error: %s", err)
+			log.Errorf("getFromPeers error: %s", err)
 			continue
 		}

 		pmes, err := dht.getValueSingle(ctx, p, key, level)
 		if err != nil {
-			log.Error("getFromPeers error: %s\n", err)
+			log.Errorf("getFromPeers error: %s\n", err)
 			continue
 		}

@@ -349,7 +349,7 @@ func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error {
 // Update signals to all routingTables to Update their last-seen status
 // on the given peer.
 func (dht *IpfsDHT) Update(p peer.Peer) {
-	log.Debug("updating peer: %s latency = %f\n", p, p.GetLatency().Seconds())
+	log.Debugf("updating peer: %s latency = %f\n", p, p.GetLatency().Seconds())
 	removedCount := 0
 	for _, route := range dht.routingTables {
 		removed := route.Update(p)
@@ -394,11 +394,11 @@ func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []peer.Peer {
 	for _, prov := range peers {
 		p, err := dht.peerFromInfo(prov)
 		if err != nil {
-			log.Error("error getting peer from info: %v", err)
+			log.Errorf("error getting peer from info: %v", err)
 			continue
 		}

-		log.Debug("%s adding provider: %s for %s", dht.self, p, key)
+		log.Debugf("%s adding provider: %s for %s", dht.self, p, key)

 		// Dont add outselves to the list
 		if p.ID().Equal(dht.self.ID()) {
@@ -456,7 +456,7 @@ func (dht *IpfsDHT) getPeer(id peer.ID) (peer.Peer, error) {
 	p, err := dht.peerstore.Get(id)
 	if err != nil {
 		err = fmt.Errorf("Failed to get peer from peerstore: %s", err)
-		log.Error("%s", err)
+		log.Error(err)
 		return nil, err
 	}
 	return p, nil
@@ -505,7 +505,7 @@ func (dht *IpfsDHT) loadProvidableKeys() error {
 	for _, dsk := range kl {
 		k := u.KeyFromDsKey(dsk)
 		if len(k) == 0 {
-			log.Error("loadProvidableKeys error: %v", dsk)
+			log.Errorf("loadProvidableKeys error: %v", dsk)
 		}

 		dht.providers.AddProvider(k, dht.self)
@@ -526,7 +526,7 @@ func (dht *IpfsDHT) PingRoutine(t time.Duration) {
 			ctx, _ := context.WithTimeout(dht.ctx, time.Second*5)
 			err := dht.Ping(ctx, p)
 			if err != nil {
-				log.Error("Ping error: %s", err)
+				log.Errorf("Ping error: %s", err)
 			}
 		}
 	case <-dht.ctx.Done():
@@ -541,6 +541,6 @@ func (dht *IpfsDHT) Bootstrap(ctx context.Context) {
 	rand.Read(id)
 	_, err := dht.FindPeer(ctx, peer.ID(id))
 	if err != nil {
-		log.Error("Bootstrap peer error: %s", err)
+		log.Errorf("Bootstrap peer error: %s", err)
 	}
 }
@@ -30,14 +30,14 @@ func (l *logDhtRPC) EndLog() {
 func (l *logDhtRPC) Print() {
 	b, err := json.Marshal(l)
 	if err != nil {
-		log.Debug("Error marshaling logDhtRPC object: %s", err)
+		log.Debugf("Error marshaling logDhtRPC object: %s", err)
 	} else {
 		log.Debug(string(b))
 	}
 }

 func (l *logDhtRPC) String() string {
-	return fmt.Sprintf("DHT RPC: %s took %s, success = %s", l.Type, l.Duration, l.Success)
+	return fmt.Sprintf("DHT RPC: %s took %s, success = %v", l.Type, l.Duration, l.Success)
 }

 func (l *logDhtRPC) EndAndPrint() {
@ -36,7 +36,7 @@ func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler {
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error) {
|
||||
log.Debug("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())
|
||||
log.Debugf("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())
|
||||
|
||||
// setup response
|
||||
resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
|
||||
@ -48,10 +48,10 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)
|
||||
}
|
||||
|
||||
// let's first check if we have the value locally.
|
||||
log.Debug("%s handleGetValue looking into ds", dht.self)
|
||||
log.Debugf("%s handleGetValue looking into ds", dht.self)
|
||||
dskey := u.Key(pmes.GetKey()).DsKey()
|
||||
iVal, err := dht.datastore.Get(dskey)
|
||||
log.Debug("%s handleGetValue looking into ds GOT %v", dht.self, iVal)
|
||||
log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)
|
||||
|
||||
// if we got an unexpected error, bail.
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
@ -63,7 +63,7 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)
|
||||
|
||||
// if we have the value, send it back
|
||||
if err == nil {
|
||||
log.Debug("%s handleGetValue success!", dht.self)
|
||||
log.Debugf("%s handleGetValue success!", dht.self)
|
||||
|
||||
byts, ok := iVal.([]byte)
|
||||
if !ok {
|
||||
@ -76,7 +76,7 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)
|
||||
// if we know any providers for the requested value, return those.
|
||||
provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
|
||||
if len(provs) > 0 {
|
||||
log.Debug("handleGetValue returning %d provider[s]\n", len(provs))
|
||||
log.Debugf("handleGetValue returning %d provider[s]", len(provs))
|
||||
resp.ProviderPeers = peersToPBPeers(provs)
|
||||
}
|
||||
|
||||
@ -84,7 +84,7 @@ func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *Message) (*Message, error)
|
||||
closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
|
||||
if closer != nil {
|
||||
for _, p := range closer {
|
||||
log.Debug("handleGetValue returning closer peer: '%s'", p)
|
||||
log.Debugf("handleGetValue returning closer peer: '%s'", p)
|
||||
if len(p.Addresses()) < 1 {
|
||||
log.Critical("no addresses on peer being sent!")
|
||||
}
|
||||
@ -101,12 +101,12 @@ func (dht *IpfsDHT) handlePutValue(p peer.Peer, pmes *Message) (*Message, error)
|
||||
defer dht.dslock.Unlock()
|
||||
dskey := u.Key(pmes.GetKey()).DsKey()
|
||||
err := dht.datastore.Put(dskey, pmes.GetValue())
|
||||
log.Debug("%s handlePutValue %v %v\n", dht.self, dskey, pmes.GetValue())
|
||||
log.Debugf("%s handlePutValue %v %v\n", dht.self, dskey, pmes.GetValue())
|
||||
return pmes, err
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handlePing(p peer.Peer, pmes *Message) (*Message, error) {
|
||||
log.Debug("%s Responding to ping from %s!\n", dht.self, p)
|
||||
log.Debugf("%s Responding to ping from %s!\n", dht.self, p)
|
||||
return pmes, nil
|
||||
}
|
||||
|
||||
@ -122,7 +122,7 @@ func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *Message) (*Message, error)
|
||||
}
|
||||
|
||||
if closest == nil {
|
||||
log.Error("handleFindPeer: could not find anything.")
|
||||
log.Errorf("handleFindPeer: could not find anything.")
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@ -134,7 +134,7 @@ func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *Message) (*Message, error)
|
||||
}
|
||||
|
||||
for _, p := range withAddresses {
|
||||
log.Debug("handleFindPeer: sending back '%s'", p)
|
||||
log.Debugf("handleFindPeer: sending back '%s'", p)
|
||||
}
|
||||
resp.CloserPeers = peersToPBPeers(withAddresses)
|
||||
return resp, nil
|
||||
@ -144,11 +144,11 @@ func (dht *IpfsDHT) handleGetProviders(p peer.Peer, pmes *Message) (*Message, er
|
||||
resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
|
||||
|
||||
// check if we have this value, to add ourselves as provider.
|
||||
log.Debug("handling GetProviders: '%s'", pmes.GetKey())
|
||||
log.Debugf("handling GetProviders: '%s'", pmes.GetKey())
|
||||
dsk := u.Key(pmes.GetKey()).DsKey()
|
||||
has, err := dht.datastore.Has(dsk)
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("unexpected datastore error: %v\n", err)
|
||||
log.Errorf("unexpected datastore error: %v\n", err)
|
||||
has = false
|
||||
}
|
||||
|
||||
@ -180,7 +180,7 @@ type providerInfo struct {
|
||||
func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *Message) (*Message, error) {
|
||||
key := u.Key(pmes.GetKey())
|
||||
|
||||
log.Debug("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))
|
||||
log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))
|
||||
|
||||
// add provider should use the address given in the message
|
||||
for _, pb := range pmes.GetProviderPeers() {
|
||||
@ -189,16 +189,16 @@ func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *Message) (*Message, err
|
||||
|
||||
addr, err := pb.Address()
|
||||
if err != nil {
|
||||
log.Error("provider %s error with address %s", p, *pb.Addr)
|
||||
log.Errorf("provider %s error with address %s", p, *pb.Addr)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Info("received provider %s %s for %s", p, addr, key)
|
||||
log.Infof("received provider %s %s for %s", p, addr, key)
|
||||
p.AddAddress(addr)
|
||||
dht.providers.AddProvider(key, p)
|
||||
|
||||
} else {
|
||||
log.Error("handleAddProvider received provider %s from %s", pid, p)
|
||||
log.Errorf("handleAddProvider received provider %s from %s", pid, p)
|
||||
}
|
||||
}
|
||||
|
||||
|
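The change running through the hunks above is mechanical but worth spelling out: judging by this conversion, the bare level methods (Debug, Info, Error) in the jbenet/go-logging fork are Sprint-style, so printf directives in their first argument are no longer expanded; the new f-suffixed variants (Debugf, Errorf) take over formatting. A minimal sketch of the failure mode, using only the standard library log package (names are illustrative, not from this codebase):

package main

import "log"

func main() {
	self := "peerA" // stand-in for dht.self

	// Sprint-style: the directive is not expanded; this prints the literal
	// text "%s handleGetValue success!" with "peerA" concatenated after it.
	log.Print("%s handleGetValue success!", self)

	// Printf-style: the directive is expanded as intended.
	log.Printf("%s handleGetValue success!", self)
}

go vet's printf check reports the first call as containing a possible formatting directive, which is presumably how these call sites were flushed out.
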
@@ -17,7 +17,7 @@ import (
// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT
func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error {
log.Debug("PutValue %s", key)
log.Debugf("PutValue %s", key)
err := dht.putLocal(key, value)
if err != nil {
return err
@@ -30,7 +30,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
}

query := newQuery(key, dht.dialer, func(ctx context.Context, p peer.Peer) (*dhtQueryResult, error) {
log.Debug("%s PutValue qry part %v", dht.self, p)
log.Debugf("%s PutValue qry part %v", dht.self, p)
err := dht.putValueToNetwork(ctx, p, string(key), value)
if err != nil {
return nil, err
@@ -46,7 +46,7 @@ func (dht *IpfsDHT) PutValue(ctx context.Context, key u.Key, value []byte) error
// If the search does not succeed, a multiaddr string of a closer peer is
// returned along with util.ErrSearchIncomplete
func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
log.Debug("Get Value [%s]", key)
log.Debugf("Get Value [%s]", key)

// If we have it local, dont bother doing an RPC!
// NOTE: this might not be what we want to do...
@@ -86,7 +86,7 @@ func (dht *IpfsDHT) GetValue(ctx context.Context, key u.Key) ([]byte, error) {
return nil, err
}

log.Debug("GetValue %v %v", key, result.value)
log.Debugf("GetValue %v %v", key, result.value)
if result.value == nil {
return nil, u.ErrNotFound
}
@@ -140,7 +140,7 @@ func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key u.Key, count int
defer wg.Done()
pmes, err := dht.findProvidersSingle(ctx, p, key, 0)
if err != nil {
log.Error("%s", err)
log.Error(err)
return
}
dht.addPeerListAsync(key, pmes.GetProviderPeers(), ps, count, peerOut)
@@ -218,7 +218,7 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error)
for _, pbp := range closer {
np, err := dht.getPeer(peer.ID(pbp.GetId()))
if err != nil {
log.Warning("Received invalid peer from query")
log.Warningf("Received invalid peer from query: %v", err)
continue
}
ma, err := pbp.Address()
@@ -256,10 +256,10 @@ func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (peer.Peer, error)
// Ping a peer, log the time it took
func (dht *IpfsDHT) Ping(ctx context.Context, p peer.Peer) error {
// Thoughts: maybe this should accept an ID and do a peer lookup?
log.Info("ping %s start", p)
log.Infof("ping %s start", p)

pmes := newMessage(Message_PING, "", 0)
_, err := dht.sendRequest(ctx, p, pmes)
log.Info("ping %s end (err = %s)", p, err)
log.Infof("ping %s end (err = %s)", p, err)
return err
}

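Two call sites in this file change behavior rather than just names. log.Error("%s", err) collapses to log.Error(err): with a Sprint-style method the error's text prints directly, so the wrapper format string was dead weight. The FindPeer warning also gains the underlying error, turning an opaque log line into a diagnosable one. A stdlib sketch of both moves (getPeer below is a hypothetical stand-in, not the DHT's method):

package main

import (
	"errors"
	"log"
)

// getPeer is a hypothetical stand-in for a lookup that can fail.
func getPeer(id string) (string, error) {
	return "", errors.New("peer ID failed to parse")
}

func main() {
	if _, err := getPeer("bad-id"); err != nil {
		// Before: the operator learns only that *something* was invalid.
		log.Print("Received invalid peer from query")
		// After: the cause rides along with the message.
		log.Printf("Received invalid peer from query: %v", err)
		// And when the error is the whole message, pass it directly:
		log.Print(err)
	}
}
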
@@ -125,7 +125,7 @@ func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) peerSorterArr {
}
peerArr = append(peerArr, &pd)
if e == nil {
log.Debug("list element was nil.\n")
log.Debug("list element was nil")
return peerArr
}
}
@@ -148,7 +148,7 @@ func (rt *RoutingTable) NearestPeer(id ID) peer.Peer {
return peers[0]
}

log.Error("NearestPeer: Returning nil, table size = %d", rt.Size())
log.Errorf("NearestPeer: Returning nil, table size = %d", rt.Size())
return nil
}

@@ -59,7 +59,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
origlen := len(b)

if end <= zeroblocklen {
log.Debug("Writing into zero block.")
log.Debug("Writing into zero block")
// Replacing zeroeth data block (embedded in the root node)
//TODO: check chunking here
copy(dm.pbdata.Data[offset:], b)
@@ -76,7 +76,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
traversed = uint64(zeroblocklen)
for i, size := range dm.pbdata.Blocksizes {
if uint64(offset) < traversed+size {
log.Debug("Starting mod at block %d. [%d < %d + %d]", i, offset, traversed, size)
log.Debugf("Starting mod at block %d. [%d < %d + %d]", i, offset, traversed, size)
// Here is where we start
startsubblk = i
lnk := dm.curNode.Links[i]
@@ -145,7 +145,7 @@ func (dm *DagModifier) WriteAt(b []byte, offset uint64) (int, error) {
n := &mdag.Node{Data: ft.WrapData(sb)}
_, err := dm.dagserv.Add(n)
if err != nil {
log.Error("Failed adding node to DAG service: %s", err)
log.Errorf("Failed adding node to DAG service: %s", err)
return 0, err
}
lnk, err := mdag.MakeLink(n)

@@ -6,7 +6,6 @@ import (
"io/ioutil"
"testing"

"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
bs "github.com/jbenet/go-ipfs/blockservice"
"github.com/jbenet/go-ipfs/importer/chunk"
mdag "github.com/jbenet/go-ipfs/merkledag"
@@ -14,6 +13,7 @@ import (
u "github.com/jbenet/go-ipfs/util"

ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-logging"
)

func getMockDagServ(t *testing.T) *mdag.DAGService {
@@ -22,11 +22,11 @@ func getMockDagServ(t *testing.T) *mdag.DAGService {
if err != nil {
t.Fatal(err)
}
return &mdag.DAGService{bserv}
return &mdag.DAGService{Blocks: bserv}
}

func getNode(t *testing.T, dserv *mdag.DAGService, size int64) ([]byte, *mdag.Node) {
dw := NewDagWriter(dserv, &chunk.SizeSplitter{500})
dw := NewDagWriter(dserv, &chunk.SizeSplitter{Size: 500})

n, err := io.CopyN(dw, u.NewFastRand(), size)
if err != nil {
@@ -99,7 +99,7 @@ func TestDagModifierBasic(t *testing.T) {
dserv := getMockDagServ(t)
b, n := getNode(t, dserv, 50000)

dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512})
dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{Size: 512})
if err != nil {
t.Fatal(err)
}
@@ -142,7 +142,7 @@ func TestDagModifierBasic(t *testing.T) {

expected := uint64(50000 + 3500 + 3000)
if size != expected {
t.Fatal("Final reported size is incorrect [%d != %d]", size, expected)
t.Fatalf("Final reported size is incorrect [%d != %d]", size, expected)
}
}

@@ -150,7 +150,7 @@ func TestMultiWrite(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)

dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512})
dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{Size: 512})
if err != nil {
t.Fatal(err)
}
@@ -191,7 +191,7 @@ func TestMultiWriteCoal(t *testing.T) {
dserv := getMockDagServ(t)
_, n := getNode(t, dserv, 0)

dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{512})
dagmod, err := NewDagModifier(n, dserv, &chunk.SizeSplitter{Size: 512})
if err != nil {
t.Fatal(err)
}

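One genuine bug is fixed amid the mechanical edits above: t.Fatal("... [%d != %d]", size, expected) never formatted its arguments, since testing.T's Fatal is Sprint-style and only Fatalf formats, so the failure message would have shown raw directives instead of the two sizes. The corrected pattern as a self-contained sketch:

package demo

import "testing"

func TestReportedSize(t *testing.T) {
	size, expected := uint64(56000), uint64(56500)
	if size != expected {
		// t.Fatal would emit "[%d != %d]" literally; Fatalf expands the
		// directives so the failure message carries the actual values.
		t.Fatalf("Final reported size is incorrect [%d != %d]", size, expected)
	}
}
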
@@ -53,8 +53,8 @@ func TestDagWriter(t *testing.T) {
if err != nil {
t.Fatal(err)
}
dag := &mdag.DAGService{bserv}
dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
dag := &mdag.DAGService{Blocks: bserv}
dw := NewDagWriter(dag, &chunk.SizeSplitter{Size: 4096})

nbytes := int64(1024 * 1024 * 2)
n, err := io.CopyN(dw, &datasource{}, nbytes)
@@ -87,8 +87,8 @@ func TestMassiveWrite(t *testing.T) {
if err != nil {
t.Fatal(err)
}
dag := &mdag.DAGService{bserv}
dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
dag := &mdag.DAGService{Blocks: bserv}
dw := NewDagWriter(dag, &chunk.SizeSplitter{Size: 4096})

nbytes := int64(1024 * 1024 * 1024 * 16)
n, err := io.CopyN(dw, &datasource{}, nbytes)
@@ -107,13 +107,13 @@ func BenchmarkDagWriter(b *testing.B) {
if err != nil {
b.Fatal(err)
}
dag := &mdag.DAGService{bserv}
dag := &mdag.DAGService{Blocks: bserv}

b.ResetTimer()
nbytes := int64(100000)
for i := 0; i < b.N; i++ {
b.SetBytes(nbytes)
dw := NewDagWriter(dag, &chunk.SizeSplitter{4096})
dw := NewDagWriter(dag, &chunk.SizeSplitter{Size: 4096})
n, err := io.CopyN(dw, &datasource{}, nbytes)
if err != nil {
b.Fatal(err)

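The {bserv} to {Blocks: bserv} and {4096} to {Size: 4096} edits running through both test files switch positional composite literals to keyed ones. Keyed literals keep compiling as a struct grows, read unambiguously, and satisfy go vet's composites check, which flags unkeyed literals of structs defined in other packages. A self-contained sketch (SizeSplitter here merely mirrors the shape of the real chunker type):

package main

import "fmt"

// SizeSplitter mirrors the shape of chunk.SizeSplitter as used above.
type SizeSplitter struct {
	Size int
}

func main() {
	// Positional: stops compiling when a field is added, and silently
	// changes meaning if same-typed fields are reordered.
	a := SizeSplitter{512}
	// Keyed: robust to struct evolution, and self-documenting.
	b := SizeSplitter{Size: 512}
	fmt.Println(a.Size == b.Size)
}
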
@@ -85,10 +85,10 @@ func init() {
var err error
currentVersion, err = parseVersion()
if err != nil {
log.Error("invalid version number in code (must be semver): %q\n", Version)
log.Errorf("invalid version number in code (must be semver): %q", Version)
os.Exit(1)
}
log.Info("go-ipfs Version: %s", currentVersion)
log.Infof("go-ipfs Version: %s", currentVersion)
}

func parseVersion() (*semver.Version, error) {
@@ -138,7 +138,7 @@ func ShouldAutoUpdate(setting config.AutoUpdateSetting, newVer string) bool {

nv, err := semver.NewVersion(newVer)
if err != nil {
log.Error("could not parse version string: %s", err)
log.Errorf("could not parse version string: %s", err)
return false
}

@@ -189,7 +189,7 @@ func CliCheckForUpdates(cfg *config.Config, confFile string) error {
u, err := CheckForUpdate()
// if there is no update available, record it, and exit.
if err == check.NoUpdateAvailable {
log.Notice("No update available, checked on %s", time.Now())
log.Noticef("No update available, checked on %s", time.Now())
config.RecordUpdateCheck(cfg, confFile) // only record if we checked successfully.
return nil
}
@@ -197,7 +197,7 @@ func CliCheckForUpdates(cfg *config.Config, confFile string) error {
// if another, unexpected error occurred, note it.
if err != nil {
if cfg.Version.Check == config.CheckError {
log.Error("Error while checking for update: %v\n", err)
log.Errorf("Error while checking for update: %v", err)
return nil
}
// when "warn" version.check mode we just show a warning message
@@ -211,7 +211,7 @@ func CliCheckForUpdates(cfg *config.Config, confFile string) error {
if cfg.Version.AutoUpdate != config.UpdateNever {
// and we should auto update
if ShouldAutoUpdate(cfg.Version.AutoUpdate, u.Version) {
log.Notice("Applying update %s", u.Version)
log.Noticef("Applying update %s", u.Version)

if err = Apply(u); err != nil {
log.Error(err.Error())

@@ -4,7 +4,7 @@ import (
"fmt"
"os"

logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/op/go-logging"
logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-logging"
)

func init() {
@@ -55,7 +55,7 @@ func SetupLogging() {
var err error
lvl, err = logging.LogLevel(logenv)
if err != nil {
log.Error("logging.LogLevel() Error: %q", err)
log.Errorf("logging.LogLevel() Error: %q", err)
lvl = logging.ERROR // reset to ERROR, could be undefined now(?)
}
}
@@ -74,7 +74,7 @@ func SetAllLoggers(lvl logging.Level) {
logging.SetLevel(lvl, "")
for n, log := range loggers {
logging.SetLevel(lvl, n)
log.Notice("setting logger: %q to %v", n, lvl)
log.Noticef("setting logger: %q to %v", n, lvl)
}
}

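SetupLogging above parses a level name from the environment and falls back to ERROR when the value does not parse. Below is a condensed sketch of that flow against the upstream op/go-logging API (the vendored fork may differ in details, and the IPFS_LOGGING variable name is an assumption; the hunk only shows the value already read into logenv):

package main

import (
	"os"

	logging "github.com/op/go-logging"
)

var log = logging.MustGetLogger("util")

func main() {
	lvl := logging.ERROR // default, and the fallback on a bad value
	// IPFS_LOGGING is assumed here; the diff reads it into `logenv` earlier.
	if logenv := os.Getenv("IPFS_LOGGING"); logenv != "" {
		parsed, err := logging.LogLevel(logenv)
		if err != nil {
			log.Errorf("logging.LogLevel() Error: %q", err)
		} else {
			lvl = parsed
		}
	}
	logging.SetLevel(lvl, "") // empty module name sets the default for all modules
	log.Debug("visible only when the level is DEBUG")
}
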
@@ -119,8 +119,6 @@ func (r *randGen) Read(p []byte) (n int, err error) {
val >>= 8
}
}

panic("unreachable")
}

// GetenvBool is the way to check an env var as a boolean