Mirror of https://github.com/ipfs/kubo.git (synced 2025-07-15 07:58:15 +08:00)
Extract blocks/blockstore package to go-ipfs-blockstore
This extracts the blocks/blockstore package and renames the blocks/blockstore/util package to blocks/blockstoreutil (because util depends on Pin, and I don't plan to extract Pin and its dependencies). The history of blocks/blockstore has been preserved. It has been gx'ed and imported. Imports have been rewritten accordingly and re-ordered. License: MIT Signed-off-by: Hector Sanjuan <hector@protocol.ai>
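For illustration, the import rewrite in the consumers amounts to swapping the in-tree path for the gx-imported one; this is a minimal sketch of the typical change, using the gx hash that appears throughout the diff below:

	// Before: in-tree package inside go-ipfs
	import bstore "github.com/ipfs/go-ipfs/blocks/blockstore"

	// After: extracted package, vendored through gx
	import bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"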
@@ -1,156 +0,0 @@
package blockstore

import (
	"context"

	"gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"

	ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
	"gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
	lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru"
	cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
)

// arccache wraps a BlockStore with an Adaptive Replacement Cache (ARC) for
// block Cids. This provides block access-time improvements, allowing
// to short-cut many searches without query-ing the underlying datastore.
type arccache struct {
	arc        *lru.ARCCache
	blockstore Blockstore

	hits  metrics.Counter
	total metrics.Counter
}

func newARCCachedBS(ctx context.Context, bs Blockstore, lruSize int) (*arccache, error) {
	arc, err := lru.NewARC(lruSize)
	if err != nil {
		return nil, err
	}
	c := &arccache{arc: arc, blockstore: bs}
	c.hits = metrics.NewCtx(ctx, "arc.hits_total", "Number of ARC cache hits").Counter()
	c.total = metrics.NewCtx(ctx, "arc_total", "Total number of ARC cache requests").Counter()

	return c, nil
}

func (b *arccache) DeleteBlock(k *cid.Cid) error {
	if has, ok := b.hasCached(k); ok && !has {
		return ErrNotFound
	}

	b.arc.Remove(k) // Invalidate cache before deleting.
	err := b.blockstore.DeleteBlock(k)
	switch err {
	case nil, ds.ErrNotFound, ErrNotFound:
		b.addCache(k, false)
		return err
	default:
		return err
	}
}

// if ok == false has is inconclusive
// if ok == true then has respons to question: is it contained
func (b *arccache) hasCached(k *cid.Cid) (has bool, ok bool) {
	b.total.Inc()
	if k == nil {
		log.Error("nil cid in arccache")
		// Return cache invalid so the call to blockstore happens
		// in case of invalid key and correct error is created.
		return false, false
	}

	h, ok := b.arc.Get(k.KeyString())
	if ok {
		b.hits.Inc()
		return h.(bool), true
	}
	return false, false
}

func (b *arccache) Has(k *cid.Cid) (bool, error) {
	if has, ok := b.hasCached(k); ok {
		return has, nil
	}

	res, err := b.blockstore.Has(k)
	if err == nil {
		b.addCache(k, res)
	}
	return res, err
}

func (b *arccache) Get(k *cid.Cid) (blocks.Block, error) {
	if k == nil {
		log.Error("nil cid in arc cache")
		return nil, ErrNotFound
	}

	if has, ok := b.hasCached(k); ok && !has {
		return nil, ErrNotFound
	}

	bl, err := b.blockstore.Get(k)
	if bl == nil && err == ErrNotFound {
		b.addCache(k, false)
	} else if bl != nil {
		b.addCache(k, true)
	}
	return bl, err
}

func (b *arccache) Put(bl blocks.Block) error {
	if has, ok := b.hasCached(bl.Cid()); ok && has {
		return nil
	}

	err := b.blockstore.Put(bl)
	if err == nil {
		b.addCache(bl.Cid(), true)
	}
	return err
}

func (b *arccache) PutMany(bs []blocks.Block) error {
	var good []blocks.Block
	for _, block := range bs {
		// call put on block if result is inconclusive or we are sure that
		// the block isn't in storage
		if has, ok := b.hasCached(block.Cid()); !ok || (ok && !has) {
			good = append(good, block)
		}
	}
	err := b.blockstore.PutMany(good)
	if err != nil {
		return err
	}
	for _, block := range good {
		b.addCache(block.Cid(), true)
	}
	return nil
}

func (b *arccache) HashOnRead(enabled bool) {
	b.blockstore.HashOnRead(enabled)
}

func (b *arccache) addCache(c *cid.Cid, has bool) {
	b.arc.Add(c.KeyString(), has)
}

func (b *arccache) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error) {
	return b.blockstore.AllKeysChan(ctx)
}

func (b *arccache) GCLock() Unlocker {
	return b.blockstore.(GCBlockstore).GCLock()
}

func (b *arccache) PinLock() Unlocker {
	return b.blockstore.(GCBlockstore).PinLock()
}

func (b *arccache) GCRequested() bool {
	return b.blockstore.(GCBlockstore).GCRequested()
}
@@ -1,201 +0,0 @@
package blockstore

import (
	"context"
	"testing"

	"gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"

	ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
	syncds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
	cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
)

var exampleBlock = blocks.NewBlock([]byte("foo"))

func testArcCached(ctx context.Context, bs Blockstore) (*arccache, error) {
	if ctx == nil {
		ctx = context.TODO()
	}
	opts := DefaultCacheOpts()
	opts.HasBloomFilterSize = 0
	opts.HasBloomFilterHashes = 0
	bbs, err := CachedBlockstore(ctx, bs, opts)
	if err == nil {
		return bbs.(*arccache), nil
	}
	return nil, err
}

func createStores(t *testing.T) (*arccache, Blockstore, *callbackDatastore) {
	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
	bs := NewBlockstore(syncds.MutexWrap(cd))
	arc, err := testArcCached(context.TODO(), bs)
	if err != nil {
		t.Fatal(err)
	}
	return arc, bs, cd
}

func trap(message string, cd *callbackDatastore, t *testing.T) {
	cd.SetFunc(func() {
		t.Fatal(message)
	})
}
func untrap(cd *callbackDatastore) {
	cd.SetFunc(func() {})
}

func TestRemoveCacheEntryOnDelete(t *testing.T) {
	arc, _, cd := createStores(t)

	arc.Put(exampleBlock)

	cd.Lock()
	writeHitTheDatastore := false
	cd.Unlock()

	cd.SetFunc(func() {
		writeHitTheDatastore = true
	})

	arc.DeleteBlock(exampleBlock.Cid())
	arc.Put(exampleBlock)
	if !writeHitTheDatastore {
		t.Fail()
	}
}

func TestElideDuplicateWrite(t *testing.T) {
	arc, _, cd := createStores(t)

	arc.Put(exampleBlock)
	trap("write hit datastore", cd, t)
	arc.Put(exampleBlock)
}

func TestHasRequestTriggersCache(t *testing.T) {
	arc, _, cd := createStores(t)

	arc.Has(exampleBlock.Cid())
	trap("has hit datastore", cd, t)
	if has, err := arc.Has(exampleBlock.Cid()); has || err != nil {
		t.Fatal("has was true but there is no such block")
	}

	untrap(cd)
	err := arc.Put(exampleBlock)
	if err != nil {
		t.Fatal(err)
	}

	trap("has hit datastore", cd, t)

	if has, err := arc.Has(exampleBlock.Cid()); !has || err != nil {
		t.Fatal("has returned invalid result")
	}
}

func TestGetFillsCache(t *testing.T) {
	arc, _, cd := createStores(t)

	if bl, err := arc.Get(exampleBlock.Cid()); bl != nil || err == nil {
		t.Fatal("block was found or there was no error")
	}

	trap("has hit datastore", cd, t)

	if has, err := arc.Has(exampleBlock.Cid()); has || err != nil {
		t.Fatal("has was true but there is no such block")
	}

	untrap(cd)

	if err := arc.Put(exampleBlock); err != nil {
		t.Fatal(err)
	}

	trap("has hit datastore", cd, t)

	if has, err := arc.Has(exampleBlock.Cid()); !has || err != nil {
		t.Fatal("has returned invalid result")
	}
}

func TestGetAndDeleteFalseShortCircuit(t *testing.T) {
	arc, _, cd := createStores(t)

	arc.Has(exampleBlock.Cid())

	trap("get hit datastore", cd, t)

	if bl, err := arc.Get(exampleBlock.Cid()); bl != nil || err != ErrNotFound {
		t.Fatal("get returned invalid result")
	}

	if arc.DeleteBlock(exampleBlock.Cid()) != ErrNotFound {
		t.Fatal("expected ErrNotFound error")
	}
}

func TestArcCreationFailure(t *testing.T) {
	if arc, err := newARCCachedBS(context.TODO(), nil, -1); arc != nil || err == nil {
		t.Fatal("expected error and no cache")
	}
}

func TestInvalidKey(t *testing.T) {
	arc, _, _ := createStores(t)

	bl, err := arc.Get(nil)

	if bl != nil {
		t.Fatal("blocks should be nil")
	}
	if err == nil {
		t.Fatal("expected error")
	}
}

func TestHasAfterSucessfulGetIsCached(t *testing.T) {
	arc, bs, cd := createStores(t)

	bs.Put(exampleBlock)

	arc.Get(exampleBlock.Cid())

	trap("has hit datastore", cd, t)
	arc.Has(exampleBlock.Cid())
}

func TestDifferentKeyObjectsWork(t *testing.T) {
	arc, bs, cd := createStores(t)

	bs.Put(exampleBlock)

	arc.Get(exampleBlock.Cid())

	trap("has hit datastore", cd, t)
	cidstr := exampleBlock.Cid().String()

	ncid, err := cid.Decode(cidstr)
	if err != nil {
		t.Fatal(err)
	}

	arc.Has(ncid)
}

func TestPutManyCaches(t *testing.T) {
	arc, _, cd := createStores(t)
	arc.PutMany([]blocks.Block{exampleBlock})

	trap("has hit datastore", cd, t)
	arc.Has(exampleBlock.Cid())
	untrap(cd)
	arc.DeleteBlock(exampleBlock.Cid())

	arc.Put(exampleBlock)
	trap("PunMany has hit datastore", cd, t)
	arc.PutMany([]blocks.Block{exampleBlock})
}
@@ -1,282 +0,0 @@
// Package blockstore implements a thin wrapper over a datastore, giving a
// clean interface for Getting and Putting block objects.
package blockstore

import (
	"context"
	"errors"
	"sync"
	"sync/atomic"

	ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
	dsns "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/namespace"
	dsq "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/query"
	logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
	cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
	dshelp "gx/ipfs/QmdQTPWduSeyveSxeCAte33M592isSW5Z979g81aJphrgn/go-ipfs-ds-help"
	blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
)

var log = logging.Logger("blockstore")

// BlockPrefix namespaces blockstore datastores
var BlockPrefix = ds.NewKey("blocks")

// ErrValueTypeMismatch is an error returned when the item retrieved from
// the datatstore is not a block.
var ErrValueTypeMismatch = errors.New("the retrieved value is not a Block")

// ErrHashMismatch is an error returned when the hash of a block
// is different than expected.
var ErrHashMismatch = errors.New("block in storage has different hash than requested")

// ErrNotFound is an error returned when a block is not found.
var ErrNotFound = errors.New("blockstore: block not found")

// Blockstore wraps a Datastore block-centered methods and provides a layer
// of abstraction which allows to add different caching strategies.
type Blockstore interface {
	DeleteBlock(*cid.Cid) error
	Has(*cid.Cid) (bool, error)
	Get(*cid.Cid) (blocks.Block, error)

	// Put puts a given block to the underlying datastore
	Put(blocks.Block) error

	// PutMany puts a slice of blocks at the same time using batching
	// capabilities of the underlying datastore whenever possible.
	PutMany([]blocks.Block) error

	// AllKeysChan returns a channel from which
	// the CIDs in the Blockstore can be read. It should respect
	// the given context, closing the channel if it becomes Done.
	AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error)

	// HashOnRead specifies if every read block should be
	// rehashed to make sure it matches its CID.
	HashOnRead(enabled bool)
}

// GCLocker abstract functionality to lock a blockstore when performing
// garbage-collection operations.
type GCLocker interface {
	// GCLock locks the blockstore for garbage collection. No operations
	// that expect to finish with a pin should ocurr simultaneously.
	// Reading during GC is safe, and requires no lock.
	GCLock() Unlocker

	// PinLock locks the blockstore for sequences of puts expected to finish
	// with a pin (before GC). Multiple put->pin sequences can write through
	// at the same time, but no GC should not happen simulatenously.
	// Reading during Pinning is safe, and requires no lock.
	PinLock() Unlocker

	// GcRequested returns true if GCLock has been called and is waiting to
	// take the lock
	GCRequested() bool
}

// GCBlockstore is a blockstore that can safely run garbage-collection
// operations.
type GCBlockstore interface {
	Blockstore
	GCLocker
}

// NewGCBlockstore returns a default implementation of GCBlockstore
// using the given Blockstore and GCLocker.
func NewGCBlockstore(bs Blockstore, gcl GCLocker) GCBlockstore {
	return gcBlockstore{bs, gcl}
}

type gcBlockstore struct {
	Blockstore
	GCLocker
}

// NewBlockstore returns a default Blockstore implementation
// using the provided datastore.Batching backend.
func NewBlockstore(d ds.Batching) Blockstore {
	var dsb ds.Batching
	dd := dsns.Wrap(d, BlockPrefix)
	dsb = dd
	return &blockstore{
		datastore: dsb,
	}
}

type blockstore struct {
	datastore ds.Batching

	rehash bool
}

func (bs *blockstore) HashOnRead(enabled bool) {
	bs.rehash = enabled
}

func (bs *blockstore) Get(k *cid.Cid) (blocks.Block, error) {
	if k == nil {
		log.Error("nil cid in blockstore")
		return nil, ErrNotFound
	}

	maybeData, err := bs.datastore.Get(dshelp.CidToDsKey(k))
	if err == ds.ErrNotFound {
		return nil, ErrNotFound
	}
	if err != nil {
		return nil, err
	}
	bdata, ok := maybeData.([]byte)
	if !ok {
		return nil, ErrValueTypeMismatch
	}

	if bs.rehash {
		rbcid, err := k.Prefix().Sum(bdata)
		if err != nil {
			return nil, err
		}

		if !rbcid.Equals(k) {
			return nil, ErrHashMismatch
		}

		return blocks.NewBlockWithCid(bdata, rbcid)
	}
	return blocks.NewBlockWithCid(bdata, k)
}

func (bs *blockstore) Put(block blocks.Block) error {
	k := dshelp.CidToDsKey(block.Cid())

	// Has is cheaper than Put, so see if we already have it
	exists, err := bs.datastore.Has(k)
	if err == nil && exists {
		return nil // already stored.
	}
	return bs.datastore.Put(k, block.RawData())
}

func (bs *blockstore) PutMany(blocks []blocks.Block) error {
	t, err := bs.datastore.Batch()
	if err != nil {
		return err
	}
	for _, b := range blocks {
		k := dshelp.CidToDsKey(b.Cid())
		exists, err := bs.datastore.Has(k)
		if err == nil && exists {
			continue
		}

		err = t.Put(k, b.RawData())
		if err != nil {
			return err
		}
	}
	return t.Commit()
}

func (bs *blockstore) Has(k *cid.Cid) (bool, error) {
	return bs.datastore.Has(dshelp.CidToDsKey(k))
}

func (bs *blockstore) DeleteBlock(k *cid.Cid) error {
	err := bs.datastore.Delete(dshelp.CidToDsKey(k))
	if err == ds.ErrNotFound {
		return ErrNotFound
	}
	return err
}

// AllKeysChan runs a query for keys from the blockstore.
// this is very simplistic, in the future, take dsq.Query as a param?
//
// AllKeysChan respects context.
func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error) {

	// KeysOnly, because that would be _a lot_ of data.
	q := dsq.Query{KeysOnly: true}
	res, err := bs.datastore.Query(q)
	if err != nil {
		return nil, err
	}

	output := make(chan *cid.Cid, dsq.KeysOnlyBufSize)
	go func() {
		defer func() {
			res.Close() // ensure exit (signals early exit, too)
			close(output)
		}()

		for {
			e, ok := res.NextSync()
			if !ok {
				return
			}
			if e.Error != nil {
				log.Errorf("blockstore.AllKeysChan got err: %s", e.Error)
				return
			}

			// need to convert to key.Key using key.KeyFromDsKey.
			k, err := dshelp.DsKeyToCid(ds.RawKey(e.Key))
			if err != nil {
				log.Warningf("error parsing key from DsKey: %s", err)
				continue
			}

			select {
			case <-ctx.Done():
				return
			case output <- k:
			}
		}
	}()

	return output, nil
}

// NewGCLocker returns a default implementation of
// GCLocker using standard [RW] mutexes.
func NewGCLocker() GCLocker {
	return &gclocker{}
}

type gclocker struct {
	lk    sync.RWMutex
	gcreq int32
}

// Unlocker represents an object which can Unlock
// something.
type Unlocker interface {
	Unlock()
}

type unlocker struct {
	unlock func()
}

func (u *unlocker) Unlock() {
	u.unlock()
	u.unlock = nil // ensure its not called twice
}

func (bs *gclocker) GCLock() Unlocker {
	atomic.AddInt32(&bs.gcreq, 1)
	bs.lk.Lock()
	atomic.AddInt32(&bs.gcreq, -1)
	return &unlocker{bs.lk.Unlock}
}

func (bs *gclocker) PinLock() Unlocker {
	bs.lk.RLock()
	return &unlocker{bs.lk.RUnlock}
}

func (bs *gclocker) GCRequested() bool {
	return atomic.LoadInt32(&bs.gcreq) > 0
}
@@ -1,253 +0,0 @@
package blockstore

import (
	"bytes"
	"context"
	"fmt"
	"testing"

	u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
	ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
	dsq "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/query"
	ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
	cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
	dshelp "gx/ipfs/QmdQTPWduSeyveSxeCAte33M592isSW5Z979g81aJphrgn/go-ipfs-ds-help"
	blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
)

func TestGetWhenKeyNotPresent(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	c := cid.NewCidV0(u.Hash([]byte("stuff")))
	bl, err := bs.Get(c)

	if bl != nil {
		t.Error("nil block expected")
	}
	if err == nil {
		t.Error("error expected, got nil")
	}
}

func TestGetWhenKeyIsNil(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	_, err := bs.Get(nil)
	if err != ErrNotFound {
		t.Fail()
	}
}

func TestPutThenGetBlock(t *testing.T) {
	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	block := blocks.NewBlock([]byte("some data"))

	err := bs.Put(block)
	if err != nil {
		t.Fatal(err)
	}

	blockFromBlockstore, err := bs.Get(block.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(block.RawData(), blockFromBlockstore.RawData()) {
		t.Fail()
	}
}

func TestHashOnRead(t *testing.T) {
	orginalDebug := u.Debug
	defer (func() {
		u.Debug = orginalDebug
	})()
	u.Debug = false

	bs := NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
	bl := blocks.NewBlock([]byte("some data"))
	blBad, err := blocks.NewBlockWithCid([]byte("some other data"), bl.Cid())
	if err != nil {
		t.Fatal("debug is off, still got an error")
	}
	bl2 := blocks.NewBlock([]byte("some other data"))
	bs.Put(blBad)
	bs.Put(bl2)
	bs.HashOnRead(true)

	if _, err := bs.Get(bl.Cid()); err != ErrHashMismatch {
		t.Fatalf("expected '%v' got '%v'\n", ErrHashMismatch, err)
	}

	if b, err := bs.Get(bl2.Cid()); err != nil || b.String() != bl2.String() {
		t.Fatal("got wrong blocks")
	}
}

func newBlockStoreWithKeys(t *testing.T, d ds.Datastore, N int) (Blockstore, []*cid.Cid) {
	if d == nil {
		d = ds.NewMapDatastore()
	}
	bs := NewBlockstore(ds_sync.MutexWrap(d))

	keys := make([]*cid.Cid, N)
	for i := 0; i < N; i++ {
		block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
		err := bs.Put(block)
		if err != nil {
			t.Fatal(err)
		}
		keys[i] = block.Cid()
	}
	return bs, keys
}

func collect(ch <-chan *cid.Cid) []*cid.Cid {
	var keys []*cid.Cid
	for k := range ch {
		keys = append(keys, k)
	}
	return keys
}

func TestAllKeysSimple(t *testing.T) {
	bs, keys := newBlockStoreWithKeys(t, nil, 100)

	ctx := context.Background()
	ch, err := bs.AllKeysChan(ctx)
	if err != nil {
		t.Fatal(err)
	}
	keys2 := collect(ch)

	// for _, k2 := range keys2 {
	// 	t.Log("found ", k2.B58String())
	// }

	expectMatches(t, keys, keys2)
}

func TestAllKeysRespectsContext(t *testing.T) {
	N := 100

	d := &queryTestDS{ds: ds.NewMapDatastore()}
	bs, _ := newBlockStoreWithKeys(t, d, N)

	started := make(chan struct{}, 1)
	done := make(chan struct{}, 1)
	errors := make(chan error, 100)

	getKeys := func(ctx context.Context) {
		started <- struct{}{}
		ch, err := bs.AllKeysChan(ctx) // once without cancelling
		if err != nil {
			errors <- err
		}
		_ = collect(ch)
		done <- struct{}{}
		errors <- nil // a nil one to signal break
	}

	var results dsq.Results
	var resultsmu = make(chan struct{})
	resultChan := make(chan dsq.Result)
	d.SetFunc(func(q dsq.Query) (dsq.Results, error) {
		results = dsq.ResultsWithChan(q, resultChan)
		resultsmu <- struct{}{}
		return results, nil
	})

	go getKeys(context.Background())

	// make sure it's waiting.
	<-started
	<-resultsmu
	select {
	case <-done:
		t.Fatal("sync is wrong")
	case <-results.Process().Closing():
		t.Fatal("should not be closing")
	case <-results.Process().Closed():
		t.Fatal("should not be closed")
	default:
	}

	e := dsq.Entry{Key: BlockPrefix.ChildString("foo").String()}
	resultChan <- dsq.Result{Entry: e} // let it go.
	close(resultChan)
	<-done                       // should be done now.
	<-results.Process().Closed() // should be closed now

	// print any errors
	for err := range errors {
		if err == nil {
			break
		}
		t.Error(err)
	}

}

func TestErrValueTypeMismatch(t *testing.T) {
	block := blocks.NewBlock([]byte("some data"))

	datastore := ds.NewMapDatastore()
	k := BlockPrefix.Child(dshelp.CidToDsKey(block.Cid()))
	datastore.Put(k, "data that isn't a block!")

	blockstore := NewBlockstore(ds_sync.MutexWrap(datastore))

	_, err := blockstore.Get(block.Cid())
	if err != ErrValueTypeMismatch {
		t.Fatal(err)
	}
}

func expectMatches(t *testing.T, expect, actual []*cid.Cid) {

	if len(expect) != len(actual) {
		t.Errorf("expect and actual differ: %d != %d", len(expect), len(actual))
	}
	for _, ek := range expect {
		found := false
		for _, ak := range actual {
			if ek.Equals(ak) {
				found = true
			}
		}
		if !found {
			t.Error("expected key not found: ", ek)
		}
	}
}

type queryTestDS struct {
	cb func(q dsq.Query) (dsq.Results, error)
	ds ds.Datastore
}

func (c *queryTestDS) SetFunc(f func(dsq.Query) (dsq.Results, error)) { c.cb = f }

func (c *queryTestDS) Put(key ds.Key, value interface{}) (err error) {
	return c.ds.Put(key, value)
}

func (c *queryTestDS) Get(key ds.Key) (value interface{}, err error) {
	return c.ds.Get(key)
}

func (c *queryTestDS) Has(key ds.Key) (exists bool, err error) {
	return c.ds.Has(key)
}

func (c *queryTestDS) Delete(key ds.Key) (err error) {
	return c.ds.Delete(key)
}

func (c *queryTestDS) Query(q dsq.Query) (dsq.Results, error) {
	if c.cb != nil {
		return c.cb(q)
	}
	return c.ds.Query(q)
}

func (c *queryTestDS) Batch() (ds.Batch, error) {
	return ds.NewBasicBatch(c), nil
}
@@ -1,187 +0,0 @@
package blockstore

import (
	"context"
	"sync/atomic"
	"time"

	"gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"

	"gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
	bloom "gx/ipfs/QmXqKGu7QzfRzFC4yd5aL9sThYx22vY163VGwmxfp5qGHk/bbloom"
	cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
)

// bloomCached returns a Blockstore that caches Has requests using a Bloom
// filter. bloomSize is size of bloom filter in bytes. hashCount specifies the
// number of hashing functions in the bloom filter (usually known as k).
func bloomCached(ctx context.Context, bs Blockstore, bloomSize, hashCount int) (*bloomcache, error) {
	bl, err := bloom.New(float64(bloomSize), float64(hashCount))
	if err != nil {
		return nil, err
	}
	bc := &bloomcache{blockstore: bs, bloom: bl}
	bc.hits = metrics.NewCtx(ctx, "bloom.hits_total",
		"Number of cache hits in bloom cache").Counter()
	bc.total = metrics.NewCtx(ctx, "bloom_total",
		"Total number of requests to bloom cache").Counter()

	bc.Invalidate()
	go bc.Rebuild(ctx)
	if metrics.Active() {
		go func() {
			fill := metrics.NewCtx(ctx, "bloom_fill_ratio",
				"Ratio of bloom filter fullnes, (updated once a minute)").Gauge()

			<-bc.rebuildChan
			t := time.NewTicker(1 * time.Minute)
			for {
				select {
				case <-ctx.Done():
					t.Stop()
					return
				case <-t.C:
					fill.Set(bc.bloom.FillRatio())
				}
			}
		}()
	}
	return bc, nil
}

type bloomcache struct {
	bloom  *bloom.Bloom
	active int32

	// This chan is only used for testing to wait for bloom to enable
	rebuildChan chan struct{}
	blockstore  Blockstore

	// Statistics
	hits  metrics.Counter
	total metrics.Counter
}

func (b *bloomcache) Invalidate() {
	b.rebuildChan = make(chan struct{})
	atomic.StoreInt32(&b.active, 0)
}

func (b *bloomcache) BloomActive() bool {
	return atomic.LoadInt32(&b.active) != 0
}

func (b *bloomcache) Rebuild(ctx context.Context) {
	evt := log.EventBegin(ctx, "bloomcache.Rebuild")
	defer evt.Done()

	ch, err := b.blockstore.AllKeysChan(ctx)
	if err != nil {
		log.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err)
		return
	}
	finish := false
	for !finish {
		select {
		case key, ok := <-ch:
			if ok {
				b.bloom.AddTS(key.Bytes()) // Use binary key, the more compact the better
			} else {
				finish = true
			}
		case <-ctx.Done():
			log.Warning("Cache rebuild closed by context finishing.")
			return
		}
	}
	close(b.rebuildChan)
	atomic.StoreInt32(&b.active, 1)
}

func (b *bloomcache) DeleteBlock(k *cid.Cid) error {
	if has, ok := b.hasCached(k); ok && !has {
		return ErrNotFound
	}

	return b.blockstore.DeleteBlock(k)
}

// if ok == false has is inconclusive
// if ok == true then has respons to question: is it contained
func (b *bloomcache) hasCached(k *cid.Cid) (has bool, ok bool) {
	b.total.Inc()
	if k == nil {
		log.Error("nil cid in bloom cache")
		// Return cache invalid so call to blockstore
		// in case of invalid key is forwarded deeper
		return false, false
	}
	if b.BloomActive() {
		blr := b.bloom.HasTS(k.Bytes())
		if !blr { // not contained in bloom is only conclusive answer bloom gives
			b.hits.Inc()
			return false, true
		}
	}
	return false, false
}

func (b *bloomcache) Has(k *cid.Cid) (bool, error) {
	if has, ok := b.hasCached(k); ok {
		return has, nil
	}

	return b.blockstore.Has(k)
}

func (b *bloomcache) Get(k *cid.Cid) (blocks.Block, error) {
	if has, ok := b.hasCached(k); ok && !has {
		return nil, ErrNotFound
	}

	return b.blockstore.Get(k)
}

func (b *bloomcache) Put(bl blocks.Block) error {
	// See comment in PutMany
	err := b.blockstore.Put(bl)
	if err == nil {
		b.bloom.AddTS(bl.Cid().Bytes())
	}
	return err
}

func (b *bloomcache) PutMany(bs []blocks.Block) error {
	// bloom cache gives only conclusive resulty if key is not contained
	// to reduce number of puts we need conclusive information if block is contained
	// this means that PutMany can't be improved with bloom cache so we just
	// just do a passthrough.
	err := b.blockstore.PutMany(bs)
	if err != nil {
		return err
	}
	for _, bl := range bs {
		b.bloom.AddTS(bl.Cid().Bytes())
	}
	return nil
}

func (b *bloomcache) HashOnRead(enabled bool) {
	b.blockstore.HashOnRead(enabled)
}

func (b *bloomcache) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error) {
	return b.blockstore.AllKeysChan(ctx)
}

func (b *bloomcache) GCLock() Unlocker {
	return b.blockstore.(GCBlockstore).GCLock()
}

func (b *bloomcache) PinLock() Unlocker {
	return b.blockstore.(GCBlockstore).PinLock()
}

func (b *bloomcache) GCRequested() bool {
	return b.blockstore.(GCBlockstore).GCRequested()
}
@@ -1,180 +0,0 @@
package blockstore

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"

	context "context"
	ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
	dsq "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/query"
	syncds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
)

func testBloomCached(ctx context.Context, bs Blockstore) (*bloomcache, error) {
	if ctx == nil {
		ctx = context.Background()
	}
	opts := DefaultCacheOpts()
	opts.HasARCCacheSize = 0
	bbs, err := CachedBlockstore(ctx, bs, opts)
	if err == nil {
		return bbs.(*bloomcache), nil
	}
	return nil, err
}

func TestPutManyAddsToBloom(t *testing.T) {
	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	cachedbs, err := testBloomCached(ctx, bs)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-cachedbs.rebuildChan:
	case <-ctx.Done():
		t.Fatalf("Timeout wating for rebuild: %d", cachedbs.bloom.ElementsAdded())
	}

	block1 := blocks.NewBlock([]byte("foo"))
	block2 := blocks.NewBlock([]byte("bar"))

	cachedbs.PutMany([]blocks.Block{block1})
	has, err := cachedbs.Has(block1.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if !has {
		t.Fatal("added block is reported missing")
	}

	has, err = cachedbs.Has(block2.Cid())
	if err != nil {
		t.Fatal(err)
	}
	if has {
		t.Fatal("not added block is reported to be in blockstore")
	}
}

func TestReturnsErrorWhenSizeNegative(t *testing.T) {
	bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
	_, err := bloomCached(context.Background(), bs, -1, 1)
	if err == nil {
		t.Fail()
	}
}
func TestHasIsBloomCached(t *testing.T) {
	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
	bs := NewBlockstore(syncds.MutexWrap(cd))

	for i := 0; i < 1000; i++ {
		bs.Put(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
	}
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	cachedbs, err := testBloomCached(ctx, bs)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-cachedbs.rebuildChan:
	case <-ctx.Done():
		t.Fatalf("Timeout wating for rebuild: %d", cachedbs.bloom.ElementsAdded())
	}

	cacheFails := 0
	cd.SetFunc(func() {
		cacheFails++
	})

	for i := 0; i < 1000; i++ {
		cachedbs.Has(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Cid())
	}

	if float64(cacheFails)/float64(1000) > float64(0.05) {
		t.Fatal("Bloom filter has cache miss rate of more than 5%")
	}

	cacheFails = 0
	block := blocks.NewBlock([]byte("newBlock"))

	cachedbs.PutMany([]blocks.Block{block})
	if cacheFails != 2 {
		t.Fatalf("expected two datastore hits: %d", cacheFails)
	}
	cachedbs.Put(block)
	if cacheFails != 3 {
		t.Fatalf("expected datastore hit: %d", cacheFails)
	}

	if has, err := cachedbs.Has(block.Cid()); !has || err != nil {
		t.Fatal("has gave wrong response")
	}

	bl, err := cachedbs.Get(block.Cid())
	if bl.String() != block.String() {
		t.Fatal("block data doesn't match")
	}

	if err != nil {
		t.Fatal("there should't be an error")
	}
}

type callbackDatastore struct {
	sync.Mutex
	f  func()
	ds ds.Datastore
}

func (c *callbackDatastore) SetFunc(f func()) {
	c.Lock()
	defer c.Unlock()
	c.f = f
}

func (c *callbackDatastore) CallF() {
	c.Lock()
	defer c.Unlock()
	c.f()
}

func (c *callbackDatastore) Put(key ds.Key, value interface{}) (err error) {
	c.CallF()
	return c.ds.Put(key, value)
}

func (c *callbackDatastore) Get(key ds.Key) (value interface{}, err error) {
	c.CallF()
	return c.ds.Get(key)
}

func (c *callbackDatastore) Has(key ds.Key) (exists bool, err error) {
	c.CallF()
	return c.ds.Has(key)
}

func (c *callbackDatastore) Delete(key ds.Key) (err error) {
	c.CallF()
	return c.ds.Delete(key)
}

func (c *callbackDatastore) Query(q dsq.Query) (dsq.Results, error) {
	c.CallF()
	return c.ds.Query(q)
}

func (c *callbackDatastore) Batch() (ds.Batch, error) {
	return ds.NewBasicBatch(c), nil
}
@@ -1,56 +0,0 @@
package blockstore

import (
	"errors"

	context "context"

	"gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
)

// CacheOpts wraps options for CachedBlockStore().
// Next to each option is it aproximate memory usage per unit
type CacheOpts struct {
	HasBloomFilterSize   int // 1 byte
	HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers
	HasARCCacheSize      int // 32 bytes
}

// DefaultCacheOpts returns a CacheOpts initialized with default values.
func DefaultCacheOpts() CacheOpts {
	return CacheOpts{
		HasBloomFilterSize:   512 << 10,
		HasBloomFilterHashes: 7,
		HasARCCacheSize:      64 << 10,
	}
}

// CachedBlockstore returns a blockstore wrapped in an ARCCache and
// then in a bloom filter cache, if the options indicate it.
func CachedBlockstore(
	ctx context.Context,
	bs Blockstore,
	opts CacheOpts) (cbs Blockstore, err error) {
	cbs = bs

	if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 ||
		opts.HasARCCacheSize < 0 {
		return nil, errors.New("all options for cache need to be greater than zero")
	}

	if opts.HasBloomFilterSize != 0 && opts.HasBloomFilterHashes == 0 {
		return nil, errors.New("bloom filter hash count can't be 0 when there is size set")
	}

	ctx = metrics.CtxSubScope(ctx, "bs.cache")

	if opts.HasARCCacheSize > 0 {
		cbs, err = newARCCachedBS(ctx, cbs, opts.HasARCCacheSize)
	}
	if opts.HasBloomFilterSize != 0 {
		// *8 because of bytes to bits conversion
		cbs, err = bloomCached(ctx, cbs, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes)
	}

	return cbs, err
}
@@ -1,38 +0,0 @@
package blockstore

import (
	"context"
	"testing"
)

func TestCachingOptsLessThanZero(t *testing.T) {
	opts := DefaultCacheOpts()
	opts.HasARCCacheSize = -1

	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
		t.Error("wrong ARC setting was not detected")
	}

	opts = DefaultCacheOpts()
	opts.HasBloomFilterSize = -1

	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
		t.Error("negative bloom size was not detected")
	}

	opts = DefaultCacheOpts()
	opts.HasBloomFilterHashes = -1

	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
		t.Error("negative hashes setting was not detected")
	}
}

func TestBloomHashesAtZero(t *testing.T) {
	opts := DefaultCacheOpts()
	opts.HasBloomFilterHashes = 0

	if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
		t.Error("zero hashes setting with positive size was not detected")
	}
}
@ -5,11 +5,11 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
|
||||
bs "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
"github.com/ipfs/go-ipfs/pin"
|
||||
|
||||
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
|
||||
bs "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
)
|
||||
|
||||
// RemovedBlock is used to respresent the result of removing a block.
|
@ -9,11 +9,11 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
exchange "github.com/ipfs/go-ipfs/exchange"
|
||||
bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
|
||||
|
||||
logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
)
|
||||
|
@ -3,13 +3,13 @@ package blockservice
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
butil "github.com/ipfs/go-ipfs/blocks/blocksutil"
|
||||
offline "github.com/ipfs/go-ipfs/exchange/offline"
|
||||
"gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
|
||||
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
|
||||
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
)
|
||||
|
||||
func TestWriteThroughWorks(t *testing.T) {
|
||||
|
@ -7,15 +7,15 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
. "github.com/ipfs/go-ipfs/blockservice"
|
||||
offline "github.com/ipfs/go-ipfs/exchange/offline"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
|
||||
u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
|
||||
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
|
||||
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
)
|
||||
|
||||
func newObject(data []byte) blocks.Block {
|
||||
|
@ -9,7 +9,6 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bserv "github.com/ipfs/go-ipfs/blockservice"
|
||||
offline "github.com/ipfs/go-ipfs/exchange/offline"
|
||||
filestore "github.com/ipfs/go-ipfs/filestore"
|
||||
@ -24,6 +23,7 @@ import (
|
||||
dsync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
|
||||
metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
|
||||
goprocessctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
|
||||
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
pstore "gx/ipfs/QmXauCuJzmzapetmC6W4TuDJLL1yFFrVzSHoWv8YdbmnxH/go-libp2p-peerstore"
|
||||
peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer"
|
||||
ci "gx/ipfs/QmaPbCnUMBohSGo3KnxEa2bHqyJVVeEEcwtqJAYxerieBo/go-libp2p-crypto"
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
blockservice "github.com/ipfs/go-ipfs/blockservice"
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
"github.com/ipfs/go-ipfs/core/coreunix"
|
||||
@ -17,11 +16,12 @@ import (
|
||||
mfs "github.com/ipfs/go-ipfs/mfs"
|
||||
ft "github.com/ipfs/go-ipfs/unixfs"
|
||||
|
||||
"gx/ipfs/QmZ9hww8R3FKrDRCYPxhN13m6XgjPDpaSvdUfisPvERzXz/go-ipfs-cmds"
|
||||
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
cmds "gx/ipfs/QmZ9hww8R3FKrDRCYPxhN13m6XgjPDpaSvdUfisPvERzXz/go-ipfs-cmds"
|
||||
mh "gx/ipfs/QmZyZDi491cCNTLfAhwcaDii2Kg4pwKRkhqQzURGDvY6ua/go-multihash"
|
||||
"gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit"
|
||||
"gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit/files"
|
||||
"gx/ipfs/QmeWjRodbcZFKe5tMN7poEx3izym6osrLSnTLf9UjJZBbs/pb"
|
||||
cmdkit "gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit"
|
||||
files "gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit/files"
|
||||
pb "gx/ipfs/QmeWjRodbcZFKe5tMN7poEx3izym6osrLSnTLf9UjJZBbs/pb"
|
||||
)
|
||||
|
||||
// ErrDepthLimitExceeded indicates that the max depth has been exceded.
|
||||
|
@ -8,7 +8,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
util "github.com/ipfs/go-ipfs/blocks/blockstore/util"
|
||||
util "github.com/ipfs/go-ipfs/blocks/blockstoreutil"
|
||||
e "github.com/ipfs/go-ipfs/core/commands/e"
|
||||
|
||||
"gx/ipfs/QmZ9hww8R3FKrDRCYPxhN13m6XgjPDpaSvdUfisPvERzXz/go-ipfs-cmds"
|
||||
|
@ -9,15 +9,15 @@ import (
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
oldcmds "github.com/ipfs/go-ipfs/commands"
|
||||
lgc "github.com/ipfs/go-ipfs/commands/legacy"
|
||||
e "github.com/ipfs/go-ipfs/core/commands/e"
|
||||
corerepo "github.com/ipfs/go-ipfs/core/corerepo"
|
||||
config "github.com/ipfs/go-ipfs/repo/config"
|
||||
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
|
||||
lockfile "github.com/ipfs/go-ipfs/repo/fsrepo/lock"
|
||||
|
||||
lgc "github.com/ipfs/go-ipfs/commands/legacy"
|
||||
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
cmds "gx/ipfs/QmZ9hww8R3FKrDRCYPxhN13m6XgjPDpaSvdUfisPvERzXz/go-ipfs-cmds"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
cmdkit "gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit"
|
||||
|
@ -21,7 +21,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bserv "github.com/ipfs/go-ipfs/blockservice"
|
||||
exchange "github.com/ipfs/go-ipfs/exchange"
|
||||
bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
|
||||
@ -55,6 +54,7 @@ import (
|
||||
floodsub "gx/ipfs/QmSFihvoND3eDaAYRCeLgLPt62yCPgMZs1NSZmKFEtJQQw/go-libp2p-floodsub"
|
||||
mamask "gx/ipfs/QmSMZwvs3n4GBikZ7hKzT17c3bk65FmyZo2JqtJ16swqCv/multiaddr-filter"
|
||||
swarm "gx/ipfs/QmSwZMWwFZSUpe5muU2xgTUwppH24KfMwdPXiwbEp2c6G5/go-libp2p-swarm"
|
||||
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
routing "gx/ipfs/QmTiWLZ6Fo5j4KcTVutZJ5KWRRJrbxzmxA4td8NfEdrPh7/go-libp2p-routing"
|
||||
dht "gx/ipfs/QmVSep2WwKcXxMonPASsAJ3nZVjfVMKgMcaSigxKnUWpJv/go-libp2p-kad-dht"
|
||||
circuit "gx/ipfs/QmVTnHzuyECV9JzbXXfZRj1pKtgknp1esamUb2EH33mJkA/go-libp2p-circuit"
|
||||
|
@ -8,7 +8,7 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
util "github.com/ipfs/go-ipfs/blocks/blockstore/util"
|
||||
util "github.com/ipfs/go-ipfs/blocks/blockstoreutil"
|
||||
coreiface "github.com/ipfs/go-ipfs/core/coreapi/interface"
|
||||
caopts "github.com/ipfs/go-ipfs/core/coreapi/interface/options"
|
||||
|
||||
|
@ -9,8 +9,6 @@ import (
|
||||
gopath "path"
|
||||
"strconv"
|
||||
|
||||
bs "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bserv "github.com/ipfs/go-ipfs/blockservice"
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
"github.com/ipfs/go-ipfs/exchange/offline"
|
||||
@ -21,12 +19,13 @@ import (
|
||||
mfs "github.com/ipfs/go-ipfs/mfs"
|
||||
"github.com/ipfs/go-ipfs/pin"
|
||||
unixfs "github.com/ipfs/go-ipfs/unixfs"
|
||||
posinfo "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"
|
||||
|
||||
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
|
||||
syncds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
|
||||
logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
|
||||
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
chunker "gx/ipfs/QmWo8jYc19ppG7YoTsrr2kEtLRbARTJho5oNXFTR6B7Peq/go-ipfs-chunker"
|
||||
posinfo "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
files "gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit/files"
|
||||
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
|
||||
@ -107,7 +106,7 @@ type Adder struct {
|
||||
Chunker string
|
||||
root ipld.Node
|
||||
mroot *mfs.Root
|
||||
unlocker bs.Unlocker
|
||||
unlocker bstore.Unlocker
|
||||
tempRoot *cid.Cid
|
||||
Prefix *cid.Prefix
|
||||
liveNodes uint64
|
||||
|
@ -10,7 +10,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
"github.com/ipfs/go-ipfs/blockservice"
|
||||
"github.com/ipfs/go-ipfs/core"
|
||||
dag "github.com/ipfs/go-ipfs/merkledag"
|
||||
@ -18,11 +17,12 @@ import (
|
||||
"github.com/ipfs/go-ipfs/repo"
|
||||
"github.com/ipfs/go-ipfs/repo/config"
|
||||
ds2 "github.com/ipfs/go-ipfs/thirdparty/datastore2"
|
||||
pi "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"
|
||||
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
pi "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
"gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit/files"
|
||||
"gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
files "gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit/files"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
)
|
||||
|
||||
const testPeerID = "QmTFauExutTsy4XP6JbMFcw2Wa9645HJt2bTqL6qYDCKfe"
|
||||
|
@ -6,7 +6,6 @@ import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bserv "github.com/ipfs/go-ipfs/blockservice"
|
||||
core "github.com/ipfs/go-ipfs/core"
|
||||
offline "github.com/ipfs/go-ipfs/exchange/offline"
|
||||
@ -18,6 +17,7 @@ import (
|
||||
u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
|
||||
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
|
||||
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
|
||||
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
chunker "gx/ipfs/QmWo8jYc19ppG7YoTsrr2kEtLRbARTJho5oNXFTR6B7Peq/go-ipfs-chunker"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
|
||||
|
@ -10,7 +10,6 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
exchange "github.com/ipfs/go-ipfs/exchange"
|
||||
decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision"
|
||||
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
|
||||
@ -23,6 +22,7 @@ import (
|
||||
metrics "gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
|
||||
process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
|
||||
procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
|
@ -8,12 +8,12 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
blocksutil "github.com/ipfs/go-ipfs/blocks/blocksutil"
|
||||
decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision"
|
||||
tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet"
|
||||
|
||||
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
tu "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil"
|
||||
travis "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil/ci/travis"
|
||||
p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil"
|
||||
|
@ -6,10 +6,11 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
|
||||
wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
|
||||
|
||||
logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
|
||||
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
)
|
||||
|
@ -9,10 +9,11 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
message "github.com/ipfs/go-ipfs/exchange/bitswap/message"
|
||||
|
||||
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
|
||||
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil"
|
||||
peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
|
@ -4,11 +4,11 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
|
||||
notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
|
||||
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
|
||||
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
|
||||
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
|
||||
)
|
||||
|
||||
type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error)
|
||||
|
@ -4,13 +4,13 @@ import (
"context"
"time"

blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet"
datastore2 "github.com/ipfs/go-ipfs/thirdparty/datastore2"

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil"
p2ptestutil "gx/ipfs/QmYVR3C8DWPHdHxvLtNFYfjsXgaRAdh6hPMNH3KiwCgu4o/go-libp2p-netutil"
peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer"

@ -5,11 +5,11 @@ package offline
import (
"context"

"github.com/ipfs/go-ipfs/blocks/blockstore"
exchange "github.com/ipfs/go-ipfs/exchange"
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"

blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
)

func Exchange(bs blockstore.Blockstore) exchange.Interface {
@ -4,14 +4,14 @@ import (
"context"
"testing"

"github.com/ipfs/go-ipfs/blocks/blockstore"
"github.com/ipfs/go-ipfs/blocks/blocksutil"
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"

u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
ds_sync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
)

func TestBlockReturnsErr(t *testing.T) {

@ -3,12 +3,12 @@ package reprovide
import (
"context"

blocks "github.com/ipfs/go-ipfs/blocks/blockstore"
merkledag "github.com/ipfs/go-ipfs/merkledag"
pin "github.com/ipfs/go-ipfs/pin"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"

blocks "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
)

// NewBlockstoreProvider returns key provider using bstore.AllKeysChan
@ -4,10 +4,9 @@ import (
"context"
"testing"

blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
testutil "gx/ipfs/QmVvkK7s5imCiq3JVbL3pGfnhcCnf3LrFJPF4GE2sAoGZf/go-testutil"
pstore "gx/ipfs/QmXauCuJzmzapetmC6W4TuDJLL1yFFrVzSHoWv8YdbmnxH/go-libp2p-peerstore"
mock "gx/ipfs/QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb/go-ipfs-routing/mock"

@ -10,10 +10,9 @@ package filestore
import (
"context"

"github.com/ipfs/go-ipfs/blocks/blockstore"

dsq "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/query"
logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
posinfo "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
blocks "gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
@ -7,11 +7,11 @@ import (
"math/rand"
"testing"

"github.com/ipfs/go-ipfs/blocks/blockstore"
dag "github.com/ipfs/go-ipfs/merkledag"
posinfo "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
posinfo "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
)

@ -7,13 +7,13 @@ import (
"os"
"path/filepath"

"github.com/ipfs/go-ipfs/blocks/blockstore"
pb "github.com/ipfs/go-ipfs/filestore/pb"

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
dsns "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/namespace"
dsq "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/query"
proto "gx/ipfs/QmT6n4mspWYEya864BhCUJEgyxiRfmiSY9ruQwTUNpRKaM/protobuf/proto"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
posinfo "gx/ipfs/Qmb3jLEFAQrqdVgWUajqEyuuDoavkSq1XQXz6tWdFWF995/go-ipfs-posinfo"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
dshelp "gx/ipfs/QmdQTPWduSeyveSxeCAte33M592isSW5Z979g81aJphrgn/go-ipfs-ds-help"
@ -4,11 +4,11 @@ import (
"fmt"
"sort"

"github.com/ipfs/go-ipfs/blocks/blockstore"
pb "github.com/ipfs/go-ipfs/filestore/pb"

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
dsq "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/query"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
dshelp "gx/ipfs/QmdQTPWduSeyveSxeCAte33M592isSW5Z979g81aJphrgn/go-ipfs-ds-help"
)

@ -1,13 +1,13 @@
package mdutils

import (
"github.com/ipfs/go-ipfs/blocks/blockstore"
bsrv "github.com/ipfs/go-ipfs/blockservice"
"github.com/ipfs/go-ipfs/exchange/offline"
dag "github.com/ipfs/go-ipfs/merkledag"

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
)
@ -4,7 +4,6 @@ import (
"context"
"errors"

bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
bserv "github.com/ipfs/go-ipfs/blockservice"
offline "github.com/ipfs/go-ipfs/exchange/offline"
dag "github.com/ipfs/go-ipfs/merkledag"

@ -12,6 +11,7 @@ import (

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
syncds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
)

@ -14,7 +14,6 @@ import (
"testing"
"time"

bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
bserv "github.com/ipfs/go-ipfs/blockservice"
offline "github.com/ipfs/go-ipfs/exchange/offline"
importer "github.com/ipfs/go-ipfs/importer"

@ -26,6 +25,7 @@ import (
u "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
chunker "gx/ipfs/QmWo8jYc19ppG7YoTsrr2kEtLRbARTJho5oNXFTR6B7Peq/go-ipfs-chunker"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
@ -557,6 +557,12 @@
"hash": "QmZRcGYvxdauCd7hHnMYLYqcZRaDjv24c7eUNyJojAcdBb",
"name": "go-ipfs-routing",
"version": "0.0.1"
},
{
"author": "hsanjuan",
"hash": "QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b",
"name": "go-ipfs-blockstore",
"version": "0.0.1"
}
],
"gxVersion": "0.10.0",
@ -6,7 +6,6 @@ import (
"errors"
"fmt"

bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
bserv "github.com/ipfs/go-ipfs/blockservice"
offline "github.com/ipfs/go-ipfs/exchange/offline"
dag "github.com/ipfs/go-ipfs/merkledag"

@ -14,6 +13,7 @@ import (

dstore "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
bstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
)

@ -5,14 +5,14 @@ import (
"testing"
"time"

"github.com/ipfs/go-ipfs/blocks/blockstore"
bs "github.com/ipfs/go-ipfs/blockservice"
"github.com/ipfs/go-ipfs/exchange/offline"
mdag "github.com/ipfs/go-ipfs/merkledag"

"gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
util "gx/ipfs/QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx/go-ipfs-util"
ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
dssync "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/sync"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
)
@ -5,13 +5,13 @@ import (
"encoding/binary"
"testing"

blockstore "github.com/ipfs/go-ipfs/blocks/blockstore"
bserv "github.com/ipfs/go-ipfs/blockservice"
offline "github.com/ipfs/go-ipfs/exchange/offline"
dag "github.com/ipfs/go-ipfs/merkledag"

ds "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore"
dsq "gx/ipfs/QmPpegoMqhAEqjncrzArm7KVWAkCm78rqL2DPuNjhPrshg/go-datastore/query"
blockstore "gx/ipfs/QmTVDM4LCSUMFNQzbDLL9zQwp8usE6QHymFdh3h8vL9v6b/go-ipfs-blockstore"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
)