mirror of https://github.com/ipfs/kubo.git synced 2025-06-30 18:13:54 +08:00

Merge pull request #3824 from ipfs/fix/govet/some-error

fix: multiple govet warnings
Author: Jeromy Johnson (committed by GitHub)
Date:   2017-03-23 20:56:10 -07:00

13 changed files with 32 additions and 21 deletions
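
The changes line up with three go vet checks: the printf check (format strings whose verbs do not match the arguments in Printf-style calls such as t.Fatalf and log.Errorf), the lostcancel check (a context.CancelFunc returned by context.WithTimeout or context.WithCancel that is discarded or never called), and the unreachable-code check (statements that can never execute). A minimal sketch, not taken from this repository, that go vet ./... reports on all three counts:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// printf: the format string has no verb for err, so vet reports that the
	// call has arguments but no formatting directives.
	err := fmt.Errorf("boom")
	fmt.Printf("got err:", err)

	// lostcancel: the CancelFunc is thrown away, so the context and its timer
	// may leak; vet reports that the cancel function is discarded.
	ctx, _ := context.WithTimeout(context.Background(), time.Second)
	_ = ctx

	// unreachable: nothing after an infinite loop can run.
	for {
	}
	fmt.Println("done")
}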


@@ -6,6 +6,7 @@ exclude_paths:
 - test/
 - Godeps/
 - thirdparty/
+- "**/*.pb.go"
 engines:
   fixme:


@@ -198,14 +198,14 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error)
 			return
 		}
 		if e.Error != nil {
-			log.Errorf("blockstore.AllKeysChan got err:", e.Error)
+			log.Errorf("blockstore.AllKeysChan got err: %s", e.Error)
 			return
 		}
 		// need to convert to key.Key using key.KeyFromDsKey.
 		k, err := dshelp.DsKeyToCid(ds.RawKey(e.Key))
 		if err != nil {
-			log.Warningf("error parsing key from DsKey: ", err)
+			log.Warningf("error parsing key from DsKey: %s", err)
 			continue
 		}
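
Both of the old calls above show the mismatch the printf check targets: a format string with no verbs plus an extra argument. Such a call still succeeds at runtime; the stray value is just appended with an %!(EXTRA ...) marker that is easy to overlook in logs. A small illustration using fmt.Sprintf (the message text is only for the example):

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("not found")

	// Without a %s verb the argument is tacked on as an EXTRA marker:
	//   blockstore.AllKeysChan got err:%!(EXTRA *errors.errorString=not found)
	fmt.Println(fmt.Sprintf("blockstore.AllKeysChan got err:", err))

	// With the verb the message reads as intended:
	//   blockstore.AllKeysChan got err: not found
	fmt.Println(fmt.Sprintf("blockstore.AllKeysChan got err: %s", err))
}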


@@ -31,7 +31,9 @@ func testBloomCached(bs Blockstore, ctx context.Context) (*bloomcache, error) {
 func TestPutManyAddsToBloom(t *testing.T) {
 	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
-	ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
 	cachedbs, err := testBloomCached(bs, ctx)
 	select {
@@ -75,7 +77,9 @@ func TestHasIsBloomCached(t *testing.T) {
 	for i := 0; i < 1000; i++ {
 		bs.Put(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
 	}
-	ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
 	cachedbs, err := testBloomCached(bs, ctx)
 	if err != nil {
 		t.Fatal(err)
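
These test fixes address the lostcancel check: context.WithTimeout returns a CancelFunc that should be called on every path, otherwise the context and its timer can outlive the work they guard. Deferring the cancel right after creation is the usual idiom; a minimal sketch of the pattern (doWork is a stand-in, not code from this repository):

package main

import (
	"context"
	"fmt"
	"time"
)

// doWork blocks until the context is done and reports why it stopped.
func doWork(ctx context.Context) error {
	<-ctx.Done()
	return ctx.Err()
}

func main() {
	// Keep the CancelFunc and defer it so the timer is released even on early
	// returns; assigning it to _ is exactly what vet flags.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	fmt.Println(doWork(ctx)) // context deadline exceeded, after about a second
}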


@@ -32,7 +32,7 @@ func TestWriteThroughWorks(t *testing.T) {
 	bserv.AddBlock(block)
 	if bstore.PutCounter != 2 {
-		t.Fatal("Put should have called again, should be 2 is: %d", bstore.PutCounter)
+		t.Fatalf("Put should have called again, should be 2 is: %d", bstore.PutCounter)
 	}
 }


@@ -1,21 +1,24 @@
 package main

 import (
+	"context"
 	"flag"
 	"log"
 	"os"
 	"os/signal"
 	"path/filepath"
-	context "context"

+	homedir "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mitchellh/go-homedir"
 	commands "github.com/ipfs/go-ipfs/commands"
 	core "github.com/ipfs/go-ipfs/core"
 	corehttp "github.com/ipfs/go-ipfs/core/corehttp"
 	coreunix "github.com/ipfs/go-ipfs/core/coreunix"
 	config "github.com/ipfs/go-ipfs/repo/config"
 	fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
-	homedir "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mitchellh/go-homedir"

 	process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
 	fsnotify "gx/ipfs/QmczzCMvJ3HV57WBKDy8b4ucp7quT325JjDbixYRS5Pwvv/fsnotify.v1"
 )
@@ -141,7 +144,6 @@ func run(ipfsPath, watchPath string) error {
 			log.Println(err)
 		}
 	}
-	return nil
 }

 func addTree(w *fsnotify.Watcher, root string) error {
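
The deleted return nil presumably sat after a loop that never falls through, which is what vet's unreachable-code check reports; removing the statement silences the warning without changing behaviour. A function shaped roughly like that (not the real run function) draws the same report:

package main

import "fmt"

// watch mimics the shape vet objects to: the for/select loop can only be left
// through the return inside it, so anything after the loop never runs.
func watch(events, errs chan string) error {
	for {
		select {
		case e := <-events:
			fmt.Println("event:", e)
		case msg := <-errs:
			return fmt.Errorf("watcher error: %s", msg)
		}
	}
	return nil // vet: unreachable code
}

func main() {
	events := make(chan string, 1)
	errs := make(chan string, 1)
	errs <- "closed"
	fmt.Println(watch(events, errs))
}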


@@ -1,6 +1,7 @@
 package http

 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -11,11 +12,10 @@ import (
 	"strings"
 	"sync"

-	context "context"
-	"github.com/ipfs/go-ipfs/repo/config"
-	cors "gx/ipfs/QmPG2kW5t27LuHgHnvhUwbHCNHAt2eUcb4gPHqofrESUdB/cors"
 	cmds "github.com/ipfs/go-ipfs/commands"
+	"github.com/ipfs/go-ipfs/repo/config"
+	cors "gx/ipfs/QmPG2kW5t27LuHgHnvhUwbHCNHAt2eUcb4gPHqofrESUdB/cors"
 	logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
 )
@@ -323,7 +323,6 @@ func flushCopy(w io.Writer, r io.Reader) error {
 		f.Flush()
 	}
-	return nil
 }

 func sanitizedErrStr(err error) string {


@@ -205,7 +205,7 @@ func TestCatOffline(t *testing.T) {
 	_, err = api.Cat(ctx, coreapi.ResolvedPath("/ipns/Qmfoobar", nil, nil))
 	if err != coreiface.ErrOffline {
-		t.Fatalf("expected ErrOffline, got: %", err)
+		t.Fatalf("expected ErrOffline, got: %s", err)
 	}
 }
@@ -223,7 +223,7 @@ func TestLs(t *testing.T) {
 	}
 	parts := strings.Split(k, "/")
 	if len(parts) != 2 {
-		t.Errorf("unexpected path:", k)
+		t.Errorf("unexpected path: %s", k)
 	}
 	p := coreapi.ResolvedPath("/ipfs/"+parts[0], nil, nil)


@@ -36,6 +36,7 @@ func (f *Filestore) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error) {
 	a, err := f.bs.AllKeysChan(ctx)
 	if err != nil {
+		cancel()
 		return nil, err
 	}


@@ -125,8 +125,6 @@ func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string
 	default:
 		return nil, errors.New("unrecognized type")
 	}
-
-	panic("not reached")
 }

 type keyRoot struct {


@@ -24,7 +24,7 @@ func TestPathParsing(t *testing.T) {
 		_, err := ParsePath(p)
 		valid := (err == nil)
 		if valid != expected {
-			t.Fatalf("expected %s to have valid == %s", p, expected)
+			t.Fatalf("expected %s to have valid == %t", p, expected)
 		}
 	}
 }
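
Here the printf check objects to the verb's type rather than its count: expected is a bool and %s is the string verb, so vet reports a wrong-type argument; %t is the boolean verb. Without the fix the message still prints, just with a type-mismatch marker (the path below is a placeholder):

package main

import "fmt"

func main() {
	expected := true

	// %s with a bool prints a mismatch marker:
	//   expected /ipfs/foo to have valid == %!s(bool=true)
	fmt.Println(fmt.Sprintf("expected /ipfs/foo to have valid == %s", expected))

	// %t prints the boolean itself:
	//   expected /ipfs/foo to have valid == true
	fmt.Println(fmt.Sprintf("expected /ipfs/foo to have valid == %t", expected))
}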


@@ -341,7 +341,9 @@ func TestPinRecursiveFail(t *testing.T) {
 	}

 	// NOTE: This isnt a time based test, we expect the pin to fail
-	mctx, _ := context.WithTimeout(ctx, time.Millisecond)
+	mctx, cancel := context.WithTimeout(ctx, time.Millisecond)
+	defer cancel()
 	err = p.Pin(mctx, a, true)
 	if err == nil {
 		t.Fatal("should have failed to pin here")
@@ -358,7 +360,8 @@ func TestPinRecursiveFail(t *testing.T) {
 	}

 	// this one is time based... but shouldnt cause any issues
-	mctx, _ = context.WithTimeout(ctx, time.Second)
+	mctx, cancel = context.WithTimeout(ctx, time.Second)
+	defer cancel()
 	err = p.Pin(mctx, a, true)
 	if err != nil {
 		t.Fatal(err)


@@ -95,7 +95,7 @@ func TestSet(t *testing.T) {
 	for _, c := range inputs {
 		if !seen.Has(c) {
-			t.Fatalf("expected to have %s, didnt find it")
+			t.Fatalf("expected to have '%s', didnt find it", c)
 		}
 	}
 }
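
This case is the inverse of the earlier mismatches: the format string asks for an argument (%s) but none is passed, so vet reports the missing argument and the output would contain %!s(MISSING). The fix supplies the value and quotes it. For illustration (QmExample is a placeholder):

package main

import "fmt"

func main() {
	// Missing argument:
	//   expected to have %!s(MISSING), didnt find it
	fmt.Println(fmt.Sprintf("expected to have %s, didnt find it"))

	// Argument supplied, as in the fixed test:
	//   expected to have 'QmExample', didnt find it
	fmt.Println(fmt.Sprintf("expected to have '%s', didnt find it", "QmExample"))
}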


@@ -336,15 +336,18 @@ func (dm *DagModifier) readPrep() error {
 	ctx, cancel := context.WithCancel(dm.ctx)
 	dr, err := uio.NewDagReader(ctx, dm.curNode, dm.dagserv)
 	if err != nil {
+		cancel()
 		return err
 	}

 	i, err := dr.Seek(int64(dm.curWrOff), os.SEEK_SET)
 	if err != nil {
+		cancel()
 		return err
 	}

 	if i != int64(dm.curWrOff) {
+		cancel()
 		return ErrSeekFail
 	}
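
Unlike the tests earlier in the diff, readPrep cannot simply defer cancel(): on success the reader built from the context is presumably kept for use after the function returns, so the context has to stay alive and cancel() is called explicitly on each early-return path instead. A condensed sketch of that pattern with placeholder names:

package main

import (
	"context"
	"errors"
	"fmt"
)

// openReader stands in for a setup function whose result keeps using ctx after
// the call returns, so cancel may only run on the error paths; on success the
// caller takes ownership of both ctx and cancel.
func openReader(parent context.Context, fail bool) (context.Context, context.CancelFunc, error) {
	ctx, cancel := context.WithCancel(parent)
	if fail {
		cancel() // release the context before abandoning it
		return nil, nil, errors.New("seek failed")
	}
	return ctx, cancel, nil
}

func main() {
	ctx, cancel, err := openReader(context.Background(), false)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer cancel()
	fmt.Println("reader context alive:", ctx.Err() == nil)
}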