
Extract bitswap to go-bitswap

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
This commit is contained in:
Jeromy
2018-07-27 14:47:32 -07:00
parent dfd19c470e
commit 39c5c47c94
37 changed files with 12 additions and 5865 deletions

View File

@ -53,9 +53,6 @@ include $(dir)/Rules.mk
dir := merkledag/pb
include $(dir)/Rules.mk
dir := exchange/bitswap/message/pb
include $(dir)/Rules.mk
dir := pin/internal/pb
include $(dir)/Rules.mk

View File

@ -2,8 +2,8 @@ package bstest
import (
. "github.com/ipfs/go-ipfs/blockservice"
bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet"
bitswap "gx/ipfs/QmSLYFS88MpPsszqWdhGSxvHyoTnmaU4A74SD6KGib6Z3m/go-bitswap"
tn "gx/ipfs/QmSLYFS88MpPsszqWdhGSxvHyoTnmaU4A74SD6KGib6Z3m/go-bitswap/testnet"
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock"

View File

@ -8,8 +8,8 @@ import (
oldcmds "github.com/ipfs/go-ipfs/commands"
lgc "github.com/ipfs/go-ipfs/commands/legacy"
e "github.com/ipfs/go-ipfs/core/commands/e"
bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision"
bitswap "gx/ipfs/QmSLYFS88MpPsszqWdhGSxvHyoTnmaU4A74SD6KGib6Z3m/go-bitswap"
decision "gx/ipfs/QmSLYFS88MpPsszqWdhGSxvHyoTnmaU4A74SD6KGib6Z3m/go-bitswap/decision"
cmds "gx/ipfs/QmNueRyPRQiV7PUEpnP4GgGLuK1rKQLaRW7sfPvUetYig1/go-ipfs-cmds"
"gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize"

View File

@ -21,8 +21,6 @@ import (
"time"
bserv "github.com/ipfs/go-ipfs/blockservice"
bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
rp "github.com/ipfs/go-ipfs/exchange/reprovide"
filestore "github.com/ipfs/go-ipfs/filestore"
mount "github.com/ipfs/go-ipfs/fuse/mount"
@ -36,6 +34,8 @@ import (
repo "github.com/ipfs/go-ipfs/repo"
config "github.com/ipfs/go-ipfs/repo/config"
ft "github.com/ipfs/go-ipfs/unixfs"
bitswap "gx/ipfs/QmSLYFS88MpPsszqWdhGSxvHyoTnmaU4A74SD6KGib6Z3m/go-bitswap"
bsnet "gx/ipfs/QmSLYFS88MpPsszqWdhGSxvHyoTnmaU4A74SD6KGib6Z3m/go-bitswap/network"
u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util"
rhelpers "gx/ipfs/QmQpvpeXa8rBfDmt3bdh2ckw2867vsYN1ozf79X7U5rij9/go-libp2p-routing-helpers"

View File

@ -1,37 +0,0 @@
# Bitswap
## Protocol
Bitswap is the data trading module for ipfs. It manages requesting and sending
blocks to and from other peers in the network. Bitswap has two main jobs: the
first is to acquire blocks requested by the client from the network, and the
second is to judiciously send blocks in its possession to other peers who want them.
Bitswap is a message-based protocol, as opposed to request-response. All messages
contain wantlists or blocks. Upon receiving a wantlist, a node should consider
sending out wanted blocks if it has them. Upon receiving blocks, the node
should send out a notification called a 'Cancel' signifying that it no longer
wants the block. At a protocol level, bitswap is very simple.
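As a small illustration, here is a sketch of that want/cancel flow using this
package's message API (the same calls the tests in this commit exercise); the
network plumbing is elided and `wantThenCancel` is an illustrative name:

```go
package example

import (
	bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
	blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
)

func wantThenCancel() {
	blk := blocks.NewBlock([]byte("some data"))

	// Ask peers for the block: a partial (non-full) message with one entry.
	want := bsmsg.New(false)
	want.AddEntry(blk.Cid(), 1) // priority 1

	// Once the block has been received, tell peers we no longer want it.
	cancel := bsmsg.New(false)
	cancel.Cancel(blk.Cid())
}
```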
## go-ipfs Implementation
Internally, when a message with a wantlist is received, it is sent to the
decision engine to be considered. Any block we possess that is wanted by
another peer has a task created for it in the peer request queue. The peer
request queue is a priority queue that sorts available tasks by some metric;
currently that metric is very simple and aims to fairly address the tasks
of each peer. More advanced decision logic will be implemented in the
future. Task workers pull tasks to be done off of the queue, retrieve the block
to be sent, and send it off. The number of task workers is limited by a constant
factor.
Client requests for new blocks are handled by the want manager: for every new
block (or set of blocks) wanted, the 'WantBlocks' method is invoked. The want
manager then ensures that connected peers are notified of the new blocks that we
want by sending the new entries to a message queue for each peer. The message
queue loops while there is work available and does the following: 1) ensure it
has a connection to its peer, 2) grab the message to be sent, and 3) send it.
If new messages are added while the loop is in steps 1 or 3, they are
combined into one message to avoid keeping an actual queue and sending multiple
messages. The same process occurs when the client receives a block and sends a
cancel message for it.
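A simplified sketch of that per-peer loop follows; the `msgQueue` type and its
`doWork` method are illustrative names standing in for the real implementation:

```go
package example

import "context"

// msgQueue is an illustrative stand-in for the real per-peer message queue.
type msgQueue struct {
	outgoingWork chan struct{} // signalled when new wantlist entries are queued
	done         chan struct{} // closed on shutdown
}

func (mq *msgQueue) runQueue(ctx context.Context) {
	for {
		select {
		case <-mq.outgoingWork:
			mq.doWork(ctx)
		case <-mq.done:
			return
		case <-ctx.Done():
			return
		}
	}
}

func (mq *msgQueue) doWork(ctx context.Context) {
	// 1) ensure a connection to the peer,
	// 2) take the pending message (entries added while connecting or sending
	//    have been combined into it),
	// 3) send it over the network.
}
```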

View File

@ -1,454 +0,0 @@
// package bitswap implements the IPFS exchange interface with the BitSwap
// bilateral exchange protocol.
package bitswap
import (
"context"
"errors"
"math"
"sync"
"sync/atomic"
"time"
decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications"
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
flags "gx/ipfs/QmRMGdC6HKdLsPDABL9aXPDidrpmEHzJqFWSvshkbn9Hj8/go-ipfs-flags"
process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore"
exchange "gx/ipfs/Qmc2faLf7URkHpsbfYM4EMbr8iSAcGAe8VPgVi64HVnwji/go-ipfs-exchange-interface"
logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface"
)
var log = logging.Logger("bitswap")
const (
// maxProvidersPerRequest specifies the maximum number of providers desired
// from the network. This value is specified because the network streams
// results.
// TODO: if a 'non-nice' strategy is implemented, consider increasing this value
maxProvidersPerRequest = 3
providerRequestTimeout = time.Second * 10
provideTimeout = time.Second * 15
sizeBatchRequestChan = 32
// kMaxPriority is the max priority as defined by the bitswap protocol
kMaxPriority = math.MaxInt32
)
var (
HasBlockBufferSize = 256
provideKeysBufferSize = 2048
provideWorkerMax = 512
// the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size
metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22}
)
func init() {
if flags.LowMemMode {
HasBlockBufferSize = 64
provideKeysBufferSize = 512
provideWorkerMax = 16
}
}
var rebroadcastDelay = delay.Fixed(time.Minute)
// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, network bsnet.BitSwapNetwork,
bstore blockstore.Blockstore) exchange.Interface {
// important to use provided parent context (since it may include important
// loggable data). It's probably not a good idea to allow bitswap to be
// coupled to the concerns of the ipfs daemon in this way.
//
// FIXME(btc) Now that bitswap manages itself using a process, it probably
// shouldn't accept a context anymore. Clients should probably use Close()
// exclusively. We should probably find another way to share logging data
ctx, cancelFunc := context.WithCancel(parent)
ctx = metrics.CtxSubScope(ctx, "bitswap")
dupHist := metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate"+
" data blocks received").Histogram(metricsBuckets)
allHist := metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all"+
" data blocks received").Histogram(metricsBuckets)
notif := notifications.New()
px := process.WithTeardown(func() error {
notif.Shutdown()
return nil
})
bs := &Bitswap{
blockstore: bstore,
notifications: notif,
engine: decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method
network: network,
findKeys: make(chan *blockRequest, sizeBatchRequestChan),
process: px,
newBlocks: make(chan *cid.Cid, HasBlockBufferSize),
provideKeys: make(chan *cid.Cid, provideKeysBufferSize),
wm: NewWantManager(ctx, network),
counters: new(counters),
dupMetric: dupHist,
allMetric: allHist,
}
go bs.wm.Run()
network.SetDelegate(bs)
// Start up bitswap's async worker routines
bs.startWorkers(px, ctx)
// bind the context and process.
// do it over here to avoid closing before all setup is done.
go func() {
<-px.Closing() // process closes first
cancelFunc()
}()
procctx.CloseAfterContext(px, ctx) // parent cancelled first
return bs
}
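// A minimal usage sketch for New above (error handling elided; network and
// bstore stand for an already-constructed BitSwapNetwork and Blockstore):
//
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
// exch := New(ctx, network, bstore) // registers itself as the network delegate
// defer exch.Close()
// _ = exch.HasBlock(blk) // announce a locally added block to peers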
// Bitswap instances implement the bitswap protocol.
type Bitswap struct {
// the want manager handles sending messages to peers in a way that
// won't block bitswap operation
wm *WantManager
// the engine is the bit of logic that decides who to send which blocks to
engine *decision.Engine
// network delivers messages on behalf of the session
network bsnet.BitSwapNetwork
// blockstore is the local database
// NB: ensure threadsafety
blockstore blockstore.Blockstore
// notifications engine for receiving new blocks and routing them to the
// appropriate user requests
notifications notifications.PubSub
// findKeys sends keys to a worker to find and connect to providers for them
findKeys chan *blockRequest
// newBlocks is a channel for newly added blocks to be provided to the
// network. blocks pushed down this channel get buffered and fed to the
// provideKeys channel later on to avoid too much network activity
newBlocks chan *cid.Cid
// provideKeys directly feeds provide workers
provideKeys chan *cid.Cid
process process.Process
// Counters for various statistics
counterLk sync.Mutex
counters *counters
// Metrics interface metrics
dupMetric metrics.Histogram
allMetric metrics.Histogram
// Sessions
sessions []*Session
sessLk sync.Mutex
sessID uint64
sessIDLk sync.Mutex
}
type counters struct {
blocksRecvd uint64
dupBlocksRecvd uint64
dupDataRecvd uint64
blocksSent uint64
dataSent uint64
dataRecvd uint64
messagesRecvd uint64
}
type blockRequest struct {
Cid *cid.Cid
Ctx context.Context
}
// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context.
func (bs *Bitswap) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) {
return getBlock(parent, k, bs.GetBlocks)
}
func (bs *Bitswap) WantlistForPeer(p peer.ID) []*cid.Cid {
var out []*cid.Cid
for _, e := range bs.engine.WantlistForPeer(p) {
out = append(out, e.Cid)
}
return out
}
func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt {
return bs.engine.LedgerForPeer(p)
}
// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (i.e. not one
// that lasts throughout the lifetime of the server)
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks.Block, error) {
if len(keys) == 0 {
out := make(chan blocks.Block)
close(out)
return out, nil
}
select {
case <-bs.process.Closing():
return nil, errors.New("bitswap is closed")
default:
}
promise := bs.notifications.Subscribe(ctx, keys...)
for _, k := range keys {
log.Event(ctx, "Bitswap.GetBlockRequest.Start", k)
}
mses := bs.getNextSessionID()
bs.wm.WantBlocks(ctx, keys, nil, mses)
// NB: Optimization. Assumes that providers of key[0] are likely to
// be able to provide for all keys. This currently holds true in almost
// every situation. Later, this assumption may no longer hold.
req := &blockRequest{
Cid: keys[0],
Ctx: ctx,
}
remaining := cid.NewSet()
for _, k := range keys {
remaining.Add(k)
}
out := make(chan blocks.Block)
go func() {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
defer close(out)
defer func() {
// can't just defer this call on its own, arguments are resolved *when* the defer is created
bs.CancelWants(remaining.Keys(), mses)
}()
for {
select {
case blk, ok := <-promise:
if !ok {
return
}
bs.CancelWants([]*cid.Cid{blk.Cid()}, mses)
remaining.Remove(blk.Cid())
select {
case out <- blk:
case <-ctx.Done():
return
}
case <-ctx.Done():
return
}
}
}()
select {
case bs.findKeys <- req:
return out, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
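// A usage sketch for GetBlocks above (keys is an already-populated []*cid.Cid
// and bs is a *Bitswap). Note the bounded deadline, per the doc comment:
//
// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
// defer cancel()
// ch, err := bs.GetBlocks(ctx, keys)
// if err != nil {
// return err
// }
// for blk := range ch {
// // the channel closes when ctx expires or all requested blocks have arrived
// _ = blk
// }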
func (bs *Bitswap) getNextSessionID() uint64 {
bs.sessIDLk.Lock()
defer bs.sessIDLk.Unlock()
bs.sessID++
return bs.sessID
}
// CancelWants removes the given keys from the wantlist
func (bs *Bitswap) CancelWants(cids []*cid.Cid, ses uint64) {
if len(cids) == 0 {
return
}
bs.wm.CancelWants(context.Background(), cids, nil, ses)
}
// HasBlock announces the existence of a block to this bitswap service. The
// service will potentially notify its peers.
func (bs *Bitswap) HasBlock(blk blocks.Block) error {
return bs.receiveBlockFrom(blk, "")
}
// TODO: Some of this stuff really only needs to be done when adding a block
// from the user, not when receiving it from the network.
// In case you run `git blame` on this comment, I'll save you some time: ask
// @whyrusleeping, I don't know the answers you seek.
func (bs *Bitswap) receiveBlockFrom(blk blocks.Block, from peer.ID) error {
select {
case <-bs.process.Closing():
return errors.New("bitswap is closed")
default:
}
err := bs.blockstore.Put(blk)
if err != nil {
log.Errorf("Error writing block to datastore: %s", err)
return err
}
// NOTE: There exists the possibility for a race condition here. If a user
// creates a node, then adds it to the dagservice while another goroutine
// is waiting on a GetBlock for that object, they will receive a reference
// to the same node. We should address this soon, but I'm not going to do
// it now as it requires more thought and isn't causing immediate problems.
bs.notifications.Publish(blk)
k := blk.Cid()
ks := []*cid.Cid{k}
for _, s := range bs.SessionsForBlock(k) {
s.receiveBlockFrom(from, blk)
bs.CancelWants(ks, s.id)
}
bs.engine.AddBlock(blk)
select {
case bs.newBlocks <- blk.Cid():
// send block off to be reprovided
case <-bs.process.Closing():
return bs.process.Close()
}
return nil
}
// SessionsForBlock returns a slice of all sessions that may be interested in the given cid
func (bs *Bitswap) SessionsForBlock(c *cid.Cid) []*Session {
bs.sessLk.Lock()
defer bs.sessLk.Unlock()
var out []*Session
for _, s := range bs.sessions {
if s.interestedIn(c) {
out = append(out, s)
}
}
return out
}
func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) {
atomic.AddUint64(&bs.counters.messagesRecvd, 1)
// This call records changes to wantlists, blocks received,
// and number of bytes transferred.
bs.engine.MessageReceived(p, incoming)
// TODO: this is bad, and could be easily abused.
// Should only track *useful* messages in ledger
iblocks := incoming.Blocks()
if len(iblocks) == 0 {
return
}
wg := sync.WaitGroup{}
for _, block := range iblocks {
wg.Add(1)
go func(b blocks.Block) { // TODO: this probably doesn't need to be a goroutine...
defer wg.Done()
bs.updateReceiveCounters(b)
log.Debugf("got block %s from %s", b, p)
if err := bs.receiveBlockFrom(b, p); err != nil {
log.Warningf("ReceiveMessage recvBlockFrom error: %s", err)
}
log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid())
}(block)
}
wg.Wait()
}
var ErrAlreadyHaveBlock = errors.New("already have block")
func (bs *Bitswap) updateReceiveCounters(b blocks.Block) {
blkLen := len(b.RawData())
has, err := bs.blockstore.Has(b.Cid())
if err != nil {
log.Infof("blockstore.Has error: %s", err)
return
}
bs.allMetric.Observe(float64(blkLen))
if has {
bs.dupMetric.Observe(float64(blkLen))
}
bs.counterLk.Lock()
defer bs.counterLk.Unlock()
c := bs.counters
c.blocksRecvd++
c.dataRecvd += uint64(blkLen)
if has {
c.dupBlocksRecvd++
c.dupDataRecvd += uint64(blkLen)
}
}
// Connected/Disconnected warns bitswap about peer connections
func (bs *Bitswap) PeerConnected(p peer.ID) {
bs.wm.Connected(p)
bs.engine.PeerConnected(p)
}
// Connected/Disconnected warns bitswap about peer connections
func (bs *Bitswap) PeerDisconnected(p peer.ID) {
bs.wm.Disconnected(p)
bs.engine.PeerDisconnected(p)
}
func (bs *Bitswap) ReceiveError(err error) {
log.Infof("Bitswap ReceiveError: %s", err)
// TODO log the network error
// TODO bubble the network error up to the parent context/error logger
}
func (bs *Bitswap) Close() error {
return bs.process.Close()
}
func (bs *Bitswap) GetWantlist() []*cid.Cid {
entries := bs.wm.wl.Entries()
out := make([]*cid.Cid, 0, len(entries))
for _, e := range entries {
out = append(out, e.Cid)
}
return out
}
func (bs *Bitswap) IsOnline() bool {
return true
}

View File

@ -1,674 +0,0 @@
package bitswap
import (
"bytes"
"context"
"fmt"
"sync"
"testing"
"time"
decision "github.com/ipfs/go-ipfs/exchange/bitswap/decision"
tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet"
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil"
blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore"
mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock"
tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
travis "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil/ci/travis"
p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil"
detectrace "gx/ipfs/Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF/go-detect-race"
)
// FIXME the tests are really sensitive to the network delay. fix them to work
// well under varying conditions
const kNetworkDelay = 0 * time.Millisecond
func getVirtualNetwork() tn.Network {
return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
}
func TestClose(t *testing.T) {
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
block := bgen.Next()
bitswap := sesgen.Next()
bitswap.Exchange.Close()
bitswap.Exchange.GetBlock(context.Background(), block.Cid())
}
func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this
rs := mockrouting.NewServer()
net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay))
g := NewTestSessionGenerator(net)
defer g.Close()
block := blocks.NewBlock([]byte("block"))
pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t)
rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network
solo := g.Next()
defer solo.Exchange.Close()
ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
defer cancel()
_, err := solo.Exchange.GetBlock(ctx, block.Cid())
if err != context.DeadlineExceeded {
t.Fatal("Expected DeadlineExceeded error")
}
}
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
block := blocks.NewBlock([]byte("block"))
g := NewTestSessionGenerator(net)
defer g.Close()
peers := g.Instances(2)
hasBlock := peers[0]
defer hasBlock.Exchange.Close()
if err := hasBlock.Exchange.HasBlock(block); err != nil {
t.Fatal(err)
}
wantsBlock := peers[1]
defer wantsBlock.Exchange.Close()
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
received, err := wantsBlock.Exchange.GetBlock(ctx, block.Cid())
if err != nil {
t.Log(err)
t.Fatal("Expected to succeed")
}
if !bytes.Equal(block.RawData(), received.RawData()) {
t.Fatal("Data doesn't match")
}
}
func TestLargeSwarm(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
numInstances := 100
numBlocks := 2
if detectrace.WithRace() {
// when running with the race detector, 500 instances launches
// well over 8k goroutines. This hits a race detector limit.
numInstances = 75
} else if travis.IsRunning() {
numInstances = 200
} else {
t.Parallel()
}
PerformDistributionTest(t, numInstances, numBlocks)
}
func TestLargeFile(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
if !travis.IsRunning() {
t.Parallel()
}
numInstances := 10
numBlocks := 100
PerformDistributionTest(t, numInstances, numBlocks)
}
func TestLargeFileNoRebroadcast(t *testing.T) {
rbd := rebroadcastDelay.Get()
rebroadcastDelay.Set(time.Hour * 24 * 365 * 10) // ten years should be long enough
if testing.Short() {
t.SkipNow()
}
numInstances := 10
numBlocks := 100
PerformDistributionTest(t, numInstances, numBlocks)
rebroadcastDelay.Set(rbd)
}
func TestLargeFileTwoPeers(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
numInstances := 2
numBlocks := 100
PerformDistributionTest(t, numInstances, numBlocks)
}
func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) {
ctx := context.Background()
if testing.Short() {
t.SkipNow()
}
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
instances := sg.Instances(numInstances)
blocks := bg.Blocks(numBlocks)
t.Log("Give the blocks to the first instance")
nump := len(instances) - 1
// assert we're properly connected
for _, inst := range instances {
peers := inst.Exchange.wm.ConnectedPeers()
for i := 0; i < 10 && len(peers) != nump; i++ {
time.Sleep(time.Millisecond * 50)
peers = inst.Exchange.wm.ConnectedPeers()
}
if len(peers) != nump {
t.Fatal("not enough peers connected to instance")
}
}
var blkeys []*cid.Cid
first := instances[0]
for _, b := range blocks {
blkeys = append(blkeys, b.Cid())
first.Exchange.HasBlock(b)
}
t.Log("Distribute!")
wg := sync.WaitGroup{}
errs := make(chan error)
for _, inst := range instances[1:] {
wg.Add(1)
go func(inst Instance) {
defer wg.Done()
outch, err := inst.Exchange.GetBlocks(ctx, blkeys)
if err != nil {
errs <- err
}
for range outch {
}
}(inst)
}
go func() {
wg.Wait()
close(errs)
}()
for err := range errs {
if err != nil {
t.Fatal(err)
}
}
t.Log("Verify!")
for _, inst := range instances {
for _, b := range blocks {
if _, err := inst.Blockstore().Get(b.Cid()); err != nil {
t.Fatal(err)
}
}
}
}
// TODO simplify this test. get to the _essence_!
func TestSendToWantingPeer(t *testing.T) {
if testing.Short() {
t.SkipNow()
}
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
prev := rebroadcastDelay.Set(time.Second / 2)
defer func() { rebroadcastDelay.Set(prev) }()
peers := sg.Instances(2)
peerA := peers[0]
peerB := peers[1]
t.Logf("Session %v\n", peerA.Peer)
t.Logf("Session %v\n", peerB.Peer)
waitTime := time.Second * 5
alpha := bg.Next()
// peerA requests and waits for block alpha
ctx, cancel := context.WithTimeout(context.Background(), waitTime)
defer cancel()
alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []*cid.Cid{alpha.Cid()})
if err != nil {
t.Fatal(err)
}
// peerB announces to the network that it has block alpha
err = peerB.Exchange.HasBlock(alpha)
if err != nil {
t.Fatal(err)
}
// At some point, peerA should get alpha (or timeout)
blkrecvd, ok := <-alphaPromise
if !ok {
t.Fatal("context timed out and broke promise channel!")
}
if !blkrecvd.Cid().Equals(alpha.Cid()) {
t.Fatal("Wrong block!")
}
}
func TestEmptyKey(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bs := sg.Instances(1)[0].Exchange
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
_, err := bs.GetBlock(ctx, nil)
if err != blockstore.ErrNotFound {
t.Error("empty str key should return ErrNotFound")
}
}
func assertStat(t *testing.T, st *Stat, sblks, rblks, sdata, rdata uint64) {
if sblks != st.BlocksSent {
t.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent)
}
if rblks != st.BlocksReceived {
t.Errorf("mismatch in blocks recvd: %d vs %d", rblks, st.BlocksReceived)
}
if sdata != st.DataSent {
t.Errorf("mismatch in data sent: %d vs %d", sdata, st.DataSent)
}
if rdata != st.DataReceived {
t.Errorf("mismatch in data recvd: %d vs %d", rdata, st.DataReceived)
}
}
func TestBasicBitswap(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
t.Log("Test a one node trying to get one block from another")
instances := sg.Instances(3)
blocks := bg.Blocks(1)
err := instances[0].Exchange.HasBlock(blocks[0])
if err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
if err != nil {
t.Fatal(err)
}
if err = tu.WaitFor(ctx, func() error {
if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 {
return fmt.Errorf("should have no items in other peers wantlist")
}
if len(instances[1].Exchange.GetWantlist()) != 0 {
return fmt.Errorf("shouldnt have anything in wantlist")
}
return nil
}); err != nil {
t.Fatal(err)
}
st0, err := instances[0].Exchange.Stat()
if err != nil {
t.Fatal(err)
}
st1, err := instances[1].Exchange.Stat()
if err != nil {
t.Fatal(err)
}
st2, err := instances[2].Exchange.Stat()
if err != nil {
t.Fatal(err)
}
t.Log("stat node 0")
assertStat(t, st0, 1, 0, uint64(len(blk.RawData())), 0)
t.Log("stat node 1")
assertStat(t, st1, 0, 1, 0, uint64(len(blk.RawData())))
t.Log("stat node 2")
assertStat(t, st2, 0, 0, 0, 0)
if !bytes.Equal(blk.RawData(), blocks[0].RawData()) {
t.Errorf("blocks aren't equal: expected %v, actual %v", blocks[0].RawData(), blk.RawData())
}
t.Log(blk)
for _, inst := range instances {
err := inst.Exchange.Close()
if err != nil {
t.Fatal(err)
}
}
}
func TestDoubleGet(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
t.Log("Test a one node trying to get one block from another")
instances := sg.Instances(2)
blocks := bg.Blocks(1)
// NOTE: A race condition can happen here where these GetBlocks requests go
// through before the peers even get connected. This is okay, bitswap
// *should* be able to handle this.
ctx1, cancel1 := context.WithCancel(context.Background())
blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []*cid.Cid{blocks[0].Cid()})
if err != nil {
t.Fatal(err)
}
ctx2, cancel2 := context.WithCancel(context.Background())
defer cancel2()
blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []*cid.Cid{blocks[0].Cid()})
if err != nil {
t.Fatal(err)
}
// ensure both requests make it into the wantlist at the same time
time.Sleep(time.Millisecond * 20)
cancel1()
_, ok := <-blkch1
if ok {
t.Fatal("expected channel to be closed")
}
err = instances[0].Exchange.HasBlock(blocks[0])
if err != nil {
t.Fatal(err)
}
select {
case blk, ok := <-blkch2:
if !ok {
t.Fatal("expected to get the block here")
}
t.Log(blk)
case <-time.After(time.Second * 5):
p1wl := instances[0].Exchange.WantlistForPeer(instances[1].Peer)
if len(p1wl) != 1 {
t.Logf("wantlist view didnt have 1 item (had %d)", len(p1wl))
} else if !p1wl[0].Equals(blocks[0].Cid()) {
t.Logf("had 1 item, it was wrong: %s %s", blocks[0].Cid(), p1wl[0])
} else {
t.Log("had correct wantlist, somehow")
}
t.Fatal("timed out waiting on block")
}
for _, inst := range instances {
err := inst.Exchange.Close()
if err != nil {
t.Fatal(err)
}
}
}
func TestWantlistCleanup(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
instances := sg.Instances(1)[0]
bswap := instances.Exchange
blocks := bg.Blocks(20)
var keys []*cid.Cid
for _, b := range blocks {
keys = append(keys, b.Cid())
}
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
defer cancel()
_, err := bswap.GetBlock(ctx, keys[0])
if err != context.DeadlineExceeded {
t.Fatal("shouldnt have fetched any blocks")
}
time.Sleep(time.Millisecond * 50)
if len(bswap.GetWantlist()) > 0 {
t.Fatal("should not have anyting in wantlist")
}
ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50)
defer cancel()
_, err = bswap.GetBlocks(ctx, keys[:10])
if err != nil {
t.Fatal(err)
}
<-ctx.Done()
time.Sleep(time.Millisecond * 50)
if len(bswap.GetWantlist()) > 0 {
t.Fatal("should not have anyting in wantlist")
}
_, err = bswap.GetBlocks(context.Background(), keys[:1])
if err != nil {
t.Fatal(err)
}
ctx, cancel = context.WithCancel(context.Background())
_, err = bswap.GetBlocks(ctx, keys[10:])
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Millisecond * 50)
if len(bswap.GetWantlist()) != 11 {
t.Fatal("should have 11 keys in wantlist")
}
cancel()
time.Sleep(time.Millisecond * 50)
if !(len(bswap.GetWantlist()) == 1 && bswap.GetWantlist()[0] == keys[0]) {
t.Fatal("should only have keys[0] in wantlist")
}
}
func assertLedgerMatch(ra, rb *decision.Receipt) error {
if ra.Sent != rb.Recv {
return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d sent vs %d recvd", ra.Sent, rb.Recv)
}
if ra.Recv != rb.Sent {
return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d recvd vs %d sent", ra.Recv, rb.Sent)
}
if ra.Exchanged != rb.Exchanged {
return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged)
}
return nil
}
func assertLedgerEqual(ra, rb *decision.Receipt) error {
if ra.Value != rb.Value {
return fmt.Errorf("mismatch in ledgers (value/debt ratio): %f vs %f ", ra.Value, rb.Value)
}
if ra.Sent != rb.Sent {
return fmt.Errorf("mismatch in ledgers (sent bytes): %d vs %d", ra.Sent, rb.Sent)
}
if ra.Recv != rb.Recv {
return fmt.Errorf("mismatch in ledgers (recvd bytes): %d vs %d", ra.Recv, rb.Recv)
}
if ra.Exchanged != rb.Exchanged {
return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged)
}
return nil
}
func newReceipt(sent, recv, exchanged uint64) *decision.Receipt {
return &decision.Receipt{
Peer: "test",
Value: float64(sent) / (1 + float64(recv)),
Sent: sent,
Recv: recv,
Exchanged: exchanged,
}
}
func TestBitswapLedgerOneWay(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
t.Log("Test ledgers match when one peer sends block to another")
instances := sg.Instances(2)
blocks := bg.Blocks(1)
err := instances[0].Exchange.HasBlock(blocks[0])
if err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
if err != nil {
t.Fatal(err)
}
ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer)
rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer)
// compare peer ledger receipts
err = assertLedgerMatch(ra, rb)
if err != nil {
t.Fatal(err)
}
// check that receipts have intended values
ratest := newReceipt(1, 0, 1)
err = assertLedgerEqual(ratest, ra)
if err != nil {
t.Fatal(err)
}
rbtest := newReceipt(0, 1, 1)
err = assertLedgerEqual(rbtest, rb)
if err != nil {
t.Fatal(err)
}
t.Log(blk)
for _, inst := range instances {
err := inst.Exchange.Close()
if err != nil {
t.Fatal(err)
}
}
}
func TestBitswapLedgerTwoWay(t *testing.T) {
net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
sg := NewTestSessionGenerator(net)
defer sg.Close()
bg := blocksutil.NewBlockGenerator()
t.Log("Test ledgers match when two peers send one block to each other")
instances := sg.Instances(2)
blocks := bg.Blocks(2)
err := instances[0].Exchange.HasBlock(blocks[0])
if err != nil {
t.Fatal(err)
}
err = instances[1].Exchange.HasBlock(blocks[1])
if err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
_, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid())
if err != nil {
t.Fatal(err)
}
ctx, cancel = context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
blk, err := instances[0].Exchange.GetBlock(ctx, blocks[1].Cid())
if err != nil {
t.Fatal(err)
}
ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer)
rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer)
// compare peer ledger receipts
err = assertLedgerMatch(ra, rb)
if err != nil {
t.Fatal(err)
}
// check that receipts have intended values
rtest := newReceipt(1, 1, 2)
err = assertLedgerEqual(rtest, ra)
if err != nil {
t.Fatal(err)
}
err = assertLedgerEqual(rtest, rb)
if err != nil {
t.Fatal(err)
}
t.Log(blk)
for _, inst := range instances {
err := inst.Exchange.Close()
if err != nil {
t.Fatal(err)
}
}
}

View File

@ -1,30 +0,0 @@
package decision
import (
"fmt"
"math"
"testing"
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
"gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
"gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
// FWIW: At the time of this commit, including a timestamp in task increases
// time cost of Push by 3%.
func BenchmarkTaskQueuePush(b *testing.B) {
q := newPRQ()
peers := []peer.ID{
testutil.RandPeerIDFatal(b),
testutil.RandPeerIDFatal(b),
testutil.RandPeerIDFatal(b),
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
c := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i))))
q.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32}, peers[i%len(peers)])
}
}

View File

@ -1,356 +0,0 @@
// package decision implements the decision engine for the bitswap service.
package decision
import (
"context"
"sync"
"time"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
bstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore"
logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
// TODO consider taking responsibility for other types of requests. For
// example, there could be a |cancelQueue| for all of the cancellation
// messages that need to go out. There could also be a |wantlistQueue| for
// the local peer's wantlists. Alternatively, these could all be bundled
// into a single, intelligent global queue that efficiently
// batches/combines and takes all of these into consideration.
//
// Right now, messages go onto the network for four reasons:
// 1. an initial `sendwantlist` message to a provider of the first key in a
// request
// 2. a periodic full sweep of `sendwantlist` messages to all providers
// 3. upon receipt of blocks, a `cancel` message to all peers
// 4. draining the priority queue of `blockrequests` from peers
//
// Presently, only `blockrequests` are handled by the decision engine.
// However, there is an opportunity to give it more responsibility! If the
// decision engine is given responsibility for all of the others, it can
// intelligently decide how to combine requests efficiently.
//
// Some examples of what would be possible:
//
// * when sending out the wantlists, include `cancel` requests
// * when handling `blockrequests`, include `sendwantlist` and `cancel` as
// appropriate
// * when handling `cancel`, if we recently received a wanted block from a
// peer, include a partial wantlist that contains a few other high priority
// blocks
//
// In a sense, if we treat the decision engine as a black box, it could do
// whatever it sees fit to produce desired outcomes (get wanted keys
// quickly, maintain good relationships with peers, etc).
var log = logging.Logger("engine")
const (
// outboxChanBuffer must be 0 to prevent stale messages from being sent
outboxChanBuffer = 0
)
// Envelope contains a message for a Peer
type Envelope struct {
// Peer is the intended recipient
Peer peer.ID
// Block is the payload
Block blocks.Block
// A callback to notify the decision queue that the task is complete
Sent func()
}
type Engine struct {
// peerRequestQueue is a priority queue of requests received from peers.
// Requests are popped from the queue, packaged up, and placed in the
// outbox.
peerRequestQueue *prq
// FIXME it's a bit odd for the client and the worker to both share memory
// (both modify the peerRequestQueue) and also to communicate over the
// workSignal channel. consider sending requests over the channel and
// allowing the worker to have exclusive access to the peerRequestQueue. In
// that case, no lock would be required.
workSignal chan struct{}
// outbox contains outgoing messages to peers. This is owned by the
// taskWorker goroutine
outbox chan (<-chan *Envelope)
bs bstore.Blockstore
lock sync.Mutex // protects the fields immediately below
// ledgerMap lists Ledgers by their Partner key.
ledgerMap map[peer.ID]*ledger
ticker *time.Ticker
}
func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine {
e := &Engine{
ledgerMap: make(map[peer.ID]*ledger),
bs: bs,
peerRequestQueue: newPRQ(),
outbox: make(chan (<-chan *Envelope), outboxChanBuffer),
workSignal: make(chan struct{}, 1),
ticker: time.NewTicker(time.Millisecond * 100),
}
go e.taskWorker(ctx)
return e
}
func (e *Engine) WantlistForPeer(p peer.ID) (out []*wl.Entry) {
partner := e.findOrCreate(p)
partner.lk.Lock()
defer partner.lk.Unlock()
return partner.wantList.SortedEntries()
}
func (e *Engine) LedgerForPeer(p peer.ID) *Receipt {
ledger := e.findOrCreate(p)
ledger.lk.Lock()
defer ledger.lk.Unlock()
return &Receipt{
Peer: ledger.Partner.String(),
Value: ledger.Accounting.Value(),
Sent: ledger.Accounting.BytesSent,
Recv: ledger.Accounting.BytesRecv,
Exchanged: ledger.ExchangeCount(),
}
}
func (e *Engine) taskWorker(ctx context.Context) {
defer close(e.outbox) // because taskWorker uses the channel exclusively
for {
oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking
select {
case <-ctx.Done():
return
case e.outbox <- oneTimeUse:
}
// receiver is ready for an outgoing envelope. let's prepare one. first,
// we must acquire a task from the PQ...
envelope, err := e.nextEnvelope(ctx)
if err != nil {
close(oneTimeUse)
return // ctx cancelled
}
oneTimeUse <- envelope // buffered. won't block
close(oneTimeUse)
}
}
// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
for {
nextTask := e.peerRequestQueue.Pop()
for nextTask == nil {
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-e.workSignal:
nextTask = e.peerRequestQueue.Pop()
case <-e.ticker.C:
e.peerRequestQueue.thawRound()
nextTask = e.peerRequestQueue.Pop()
}
}
// with a task in hand, we're ready to prepare the envelope...
block, err := e.bs.Get(nextTask.Entry.Cid)
if err != nil {
log.Errorf("tried to execute a task and errored fetching block: %s", err)
// If we don't have the block, don't hold that against the peer
// make sure to update that the task has been 'completed'
nextTask.Done()
continue
}
return &Envelope{
Peer: nextTask.Target,
Block: block,
Sent: func() {
nextTask.Done()
select {
case e.workSignal <- struct{}{}:
// work completing may mean that our queue will provide new
// work to be done.
default:
}
},
}, nil
}
}
// Outbox returns a channel of one-time use Envelope channels.
func (e *Engine) Outbox() <-chan (<-chan *Envelope) {
return e.outbox
}
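// A consumer sketch for Outbox, mirroring the task worker pattern in the
// tests below: each received value is a single-use channel yielding at most
// one envelope (it is closed without a value if the engine shuts down first).
//
// for next := range e.Outbox() {
// envelope, ok := <-next
// if !ok {
// continue
// }
// // send envelope.Block to envelope.Peer over the network, then:
// envelope.Sent()
// }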
// Peers returns a slice of peers with whom the local node has active sessions
func (e *Engine) Peers() []peer.ID {
e.lock.Lock()
defer e.lock.Unlock()
response := make([]peer.ID, 0, len(e.ledgerMap))
for _, ledger := range e.ledgerMap {
response = append(response, ledger.Partner)
}
return response
}
// MessageReceived performs book-keeping. Returns error if passed invalid
// arguments.
func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error {
if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 {
log.Debugf("received empty message from %s", p)
}
newWorkExists := false
defer func() {
if newWorkExists {
e.signalNewWork()
}
}()
l := e.findOrCreate(p)
l.lk.Lock()
defer l.lk.Unlock()
if m.Full() {
l.wantList = wl.New()
}
for _, entry := range m.Wantlist() {
if entry.Cancel {
log.Debugf("%s cancel %s", p, entry.Cid)
l.CancelWant(entry.Cid)
e.peerRequestQueue.Remove(entry.Cid, p)
} else {
log.Debugf("wants %s - %d", entry.Cid, entry.Priority)
l.Wants(entry.Cid, entry.Priority)
if exists, err := e.bs.Has(entry.Cid); err == nil && exists {
e.peerRequestQueue.Push(entry.Entry, p)
newWorkExists = true
}
}
}
for _, block := range m.Blocks() {
log.Debugf("got block %s %d bytes", block, len(block.RawData()))
l.ReceivedBytes(len(block.RawData()))
}
return nil
}
func (e *Engine) addBlock(block blocks.Block) {
work := false
for _, l := range e.ledgerMap {
l.lk.Lock()
if entry, ok := l.WantListContains(block.Cid()); ok {
e.peerRequestQueue.Push(entry, l.Partner)
work = true
}
l.lk.Unlock()
}
if work {
e.signalNewWork()
}
}
func (e *Engine) AddBlock(block blocks.Block) {
e.lock.Lock()
defer e.lock.Unlock()
e.addBlock(block)
}
// TODO add contents of m.WantList() to my local wantlist? NB: could introduce
// race conditions where I send a message, but MessageSent gets handled after
// MessageReceived. The information in the local wantlist could become
// inconsistent. Would need to ensure that Sends and acknowledgement of the
// send happen atomically
func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error {
l := e.findOrCreate(p)
l.lk.Lock()
defer l.lk.Unlock()
for _, block := range m.Blocks() {
l.SentBytes(len(block.RawData()))
l.wantList.Remove(block.Cid())
e.peerRequestQueue.Remove(block.Cid(), p)
}
return nil
}
func (e *Engine) PeerConnected(p peer.ID) {
e.lock.Lock()
defer e.lock.Unlock()
l, ok := e.ledgerMap[p]
if !ok {
l = newLedger(p)
e.ledgerMap[p] = l
}
l.lk.Lock()
defer l.lk.Unlock()
l.ref++
}
func (e *Engine) PeerDisconnected(p peer.ID) {
e.lock.Lock()
defer e.lock.Unlock()
l, ok := e.ledgerMap[p]
if !ok {
return
}
l.lk.Lock()
defer l.lk.Unlock()
l.ref--
if l.ref <= 0 {
delete(e.ledgerMap, p)
}
}
func (e *Engine) numBytesSentTo(p peer.ID) uint64 {
// NB not threadsafe
return e.findOrCreate(p).Accounting.BytesSent
}
func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 {
// NB not threadsafe
return e.findOrCreate(p).Accounting.BytesRecv
}
// findOrCreate lazily instantiates a ledger
func (e *Engine) findOrCreate(p peer.ID) *ledger {
e.lock.Lock()
defer e.lock.Unlock()
l, ok := e.ledgerMap[p]
if !ok {
l = newLedger(p)
e.ledgerMap[p] = l
}
return l
}
func (e *Engine) signalNewWork() {
// Signal task generation to restart (if stopped!)
select {
case e.workSignal <- struct{}{}:
default:
}
}

View File

@ -1,215 +0,0 @@
package decision
import (
"context"
"errors"
"fmt"
"math"
"strings"
"sync"
"testing"
message "github.com/ipfs/go-ipfs/exchange/bitswap/message"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore"
testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore"
dssync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync"
)
type peerAndEngine struct {
Peer peer.ID
Engine *Engine
}
func newEngine(ctx context.Context, idStr string) peerAndEngine {
return peerAndEngine{
Peer: peer.ID(idStr),
//Strategy: New(true),
Engine: NewEngine(ctx,
blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))),
}
}
func TestConsistentAccounting(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sender := newEngine(ctx, "Ernie")
receiver := newEngine(ctx, "Bert")
// Send messages from Ernie to Bert
for i := 0; i < 1000; i++ {
m := message.New(false)
content := []string{"this", "is", "message", "i"}
m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " "))))
sender.Engine.MessageSent(receiver.Peer, m)
receiver.Engine.MessageReceived(sender.Peer, m)
}
// Ensure sender records the change
if sender.Engine.numBytesSentTo(receiver.Peer) == 0 {
t.Fatal("Sent bytes were not recorded")
}
// Ensure sender and receiver have the same values
if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) {
t.Fatal("Inconsistent book-keeping. Strategies don't agree")
}
// Ensure sender didn't record receiving anything. And that the receiver
// didn't record sending anything
if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 {
t.Fatal("Bert didn't send bytes to Ernie")
}
}
func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sanfrancisco := newEngine(ctx, "sf")
seattle := newEngine(ctx, "sea")
m := message.New(true)
sanfrancisco.Engine.MessageSent(seattle.Peer, m)
seattle.Engine.MessageReceived(sanfrancisco.Peer, m)
if seattle.Peer == sanfrancisco.Peer {
t.Fatal("Sanity Check: Peers have same Key!")
}
if !peerIsPartner(seattle.Peer, sanfrancisco.Engine) {
t.Fatal("Peer wasn't added as a Partner")
}
if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) {
t.Fatal("Peer wasn't added as a Partner")
}
seattle.Engine.PeerDisconnected(sanfrancisco.Peer)
if peerIsPartner(sanfrancisco.Peer, seattle.Engine) {
t.Fatal("expected peer to be removed")
}
}
func peerIsPartner(p peer.ID, e *Engine) bool {
for _, partner := range e.Peers() {
if partner == p {
return true
}
}
return false
}
func TestOutboxClosedWhenEngineClosed(t *testing.T) {
t.SkipNow() // TODO implement *Engine.Close
e := NewEngine(context.Background(), blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())))
var wg sync.WaitGroup
wg.Add(1)
go func() {
for nextEnvelope := range e.Outbox() {
<-nextEnvelope
}
wg.Done()
}()
// e.Close()
wg.Wait()
if _, ok := <-e.Outbox(); ok {
t.Fatal("channel should be closed")
}
}
func TestPartnerWantsThenCancels(t *testing.T) {
numRounds := 10
if testing.Short() {
numRounds = 1
}
alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
vowels := strings.Split("aeiou", "")
type testCase [][]string
testcases := []testCase{
{
alphabet, vowels,
},
{
alphabet, stringsComplement(alphabet, vowels),
},
}
bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
for _, letter := range alphabet {
block := blocks.NewBlock([]byte(letter))
if err := bs.Put(block); err != nil {
t.Fatal(err)
}
}
for i := 0; i < numRounds; i++ {
for _, testcase := range testcases {
set := testcase[0]
cancels := testcase[1]
keeps := stringsComplement(set, cancels)
e := NewEngine(context.Background(), bs)
partner := testutil.RandPeerIDFatal(t)
partnerWants(e, set, partner)
partnerCancels(e, cancels, partner)
if err := checkHandledInOrder(t, e, keeps); err != nil {
t.Logf("run #%d of %d", i, numRounds)
t.Fatal(err)
}
}
}
}
func partnerWants(e *Engine, keys []string, partner peer.ID) {
add := message.New(false)
for i, letter := range keys {
block := blocks.NewBlock([]byte(letter))
add.AddEntry(block.Cid(), math.MaxInt32-i)
}
e.MessageReceived(partner, add)
}
func partnerCancels(e *Engine, keys []string, partner peer.ID) {
cancels := message.New(false)
for _, k := range keys {
block := blocks.NewBlock([]byte(k))
cancels.Cancel(block.Cid())
}
e.MessageReceived(partner, cancels)
}
func checkHandledInOrder(t *testing.T, e *Engine, keys []string) error {
for _, k := range keys {
next := <-e.Outbox()
envelope := <-next
received := envelope.Block
expected := blocks.NewBlock([]byte(k))
if !received.Cid().Equals(expected.Cid()) {
return errors.New(fmt.Sprintln("received", string(received.RawData()), "expected", string(expected.RawData())))
}
}
return nil
}
func stringsComplement(set, subset []string) []string {
m := make(map[string]struct{})
for _, letter := range subset {
m[letter] = struct{}{}
}
var complement []string
for _, letter := range set {
if _, exists := m[letter]; !exists {
complement = append(complement, letter)
}
}
return complement
}

View File

@ -1,94 +0,0 @@
package decision
import (
"sync"
"time"
wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
func newLedger(p peer.ID) *ledger {
return &ledger{
wantList: wl.New(),
Partner: p,
sentToPeer: make(map[string]time.Time),
}
}
// ledger stores the data exchange relationship between two peers.
// NOT threadsafe
type ledger struct {
// Partner is the remote Peer.
Partner peer.ID
// Accounting tracks bytes sent and received.
Accounting debtRatio
// lastExchange is the time of the last data exchange.
lastExchange time.Time
// exchangeCount is the number of exchanges with this peer
exchangeCount uint64
// wantList is a (bounded, small) set of keys that Partner desires.
wantList *wl.Wantlist
// sentToPeer is a set of keys to ensure we don't send duplicate blocks
// to a given peer
sentToPeer map[string]time.Time
// ref is the reference count for this ledger; it's used to ensure we
// don't drop the reference to this ledger in multi-connection scenarios
ref int
lk sync.Mutex
}
type Receipt struct {
Peer string
Value float64
Sent uint64
Recv uint64
Exchanged uint64
}
type debtRatio struct {
BytesSent uint64
BytesRecv uint64
}
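// Value returns the debt ratio of bytes sent to bytes received. For example,
// a partner we have sent 2048 bytes to and received 1023 bytes from has a
// value of 2048/(1023+1) = 2.0; the +1 guards against division by zero when
// nothing has been received yet.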
func (dr *debtRatio) Value() float64 {
return float64(dr.BytesSent) / float64(dr.BytesRecv+1)
}
func (l *ledger) SentBytes(n int) {
l.exchangeCount++
l.lastExchange = time.Now()
l.Accounting.BytesSent += uint64(n)
}
func (l *ledger) ReceivedBytes(n int) {
l.exchangeCount++
l.lastExchange = time.Now()
l.Accounting.BytesRecv += uint64(n)
}
func (l *ledger) Wants(k *cid.Cid, priority int) {
log.Debugf("peer %s wants %s", l.Partner, k)
l.wantList.Add(k, priority)
}
func (l *ledger) CancelWant(k *cid.Cid) {
l.wantList.Remove(k)
}
func (l *ledger) WantListContains(k *cid.Cid) (*wl.Entry, bool) {
return l.wantList.Contains(k)
}
func (l *ledger) ExchangeCount() uint64 {
return l.exchangeCount
}

View File

@ -1,310 +0,0 @@
package decision
import (
"sync"
"time"
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
pq "gx/ipfs/QmZUbTDJ39JpvtFCSubiWeUTQRvMA1tVE5RZCJrY4oeAsC/go-ipfs-pq"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
type peerRequestQueue interface {
// Pop returns the next peerRequestTask. Returns nil if the peerRequestQueue is empty.
Pop() *peerRequestTask
Push(entry *wantlist.Entry, to peer.ID)
Remove(k *cid.Cid, p peer.ID)
// NB: cannot simply expose taskQueue.Len because trashed elements
// may exist. These trashed elements should not contribute to the count.
}
func newPRQ() *prq {
return &prq{
taskMap: make(map[string]*peerRequestTask),
partners: make(map[peer.ID]*activePartner),
frozen: make(map[peer.ID]*activePartner),
pQueue: pq.New(partnerCompare),
}
}
// verify interface implementation
var _ peerRequestQueue = &prq{}
// TODO: at some point, the strategy needs to plug in here
// to help decide how to sort tasks (on add) and how to select
// tasks (on getnext). For now, we are assuming a dumb/nice strategy.
type prq struct {
lock sync.Mutex
pQueue pq.PQ
taskMap map[string]*peerRequestTask
partners map[peer.ID]*activePartner
frozen map[peer.ID]*activePartner
}
// Push currently adds a new peerRequestTask to the end of the list
func (tl *prq) Push(entry *wantlist.Entry, to peer.ID) {
tl.lock.Lock()
defer tl.lock.Unlock()
partner, ok := tl.partners[to]
if !ok {
partner = newActivePartner()
tl.pQueue.Push(partner)
tl.partners[to] = partner
}
partner.activelk.Lock()
defer partner.activelk.Unlock()
if partner.activeBlocks.Has(entry.Cid) {
return
}
if task, ok := tl.taskMap[taskKey(to, entry.Cid)]; ok {
task.Entry.Priority = entry.Priority
partner.taskQueue.Update(task.index)
return
}
task := &peerRequestTask{
Entry: entry,
Target: to,
created: time.Now(),
Done: func() {
tl.lock.Lock()
partner.TaskDone(entry.Cid)
tl.pQueue.Update(partner.Index())
tl.lock.Unlock()
},
}
partner.taskQueue.Push(task)
tl.taskMap[task.Key()] = task
partner.requests++
tl.pQueue.Update(partner.Index())
}
// Pop 'pops' the next task to be performed. Returns nil if no task exists.
func (tl *prq) Pop() *peerRequestTask {
tl.lock.Lock()
defer tl.lock.Unlock()
if tl.pQueue.Len() == 0 {
return nil
}
partner := tl.pQueue.Pop().(*activePartner)
var out *peerRequestTask
for partner.taskQueue.Len() > 0 && partner.freezeVal == 0 {
out = partner.taskQueue.Pop().(*peerRequestTask)
delete(tl.taskMap, out.Key())
if out.trash {
out = nil
continue // discarding tasks that have been removed
}
partner.StartTask(out.Entry.Cid)
partner.requests--
break // and return |out|
}
tl.pQueue.Push(partner)
return out
}
// Remove removes a task from the queue
func (tl *prq) Remove(k *cid.Cid, p peer.ID) {
tl.lock.Lock()
t, ok := tl.taskMap[taskKey(p, k)]
if ok {
// remove the task "lazily"
// simply mark it as trash, so it'll be dropped when popped off the
// queue.
t.trash = true
// having canceled a block, we now account for that in the given partner
partner := tl.partners[p]
partner.requests--
// we now also 'freeze' that partner. If they sent us a cancel for a
// block we were about to send them, we should wait a short period of time
// to make sure we receive any other in-flight cancels before sending
// them a block they already potentially have
if partner.freezeVal == 0 {
tl.frozen[p] = partner
}
partner.freezeVal++
tl.pQueue.Update(partner.index)
}
tl.lock.Unlock()
}
func (tl *prq) fullThaw() {
tl.lock.Lock()
defer tl.lock.Unlock()
for id, partner := range tl.frozen {
partner.freezeVal = 0
delete(tl.frozen, id)
tl.pQueue.Update(partner.index)
}
}
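// thawRound partially thaws each frozen partner, reducing its freezeVal by
// roughly half (rounding the decrement up): e.g. a freezeVal of 5 becomes 2,
// then 1, then 0 on successive rounds, at which point the partner leaves the
// frozen set.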
func (tl *prq) thawRound() {
tl.lock.Lock()
defer tl.lock.Unlock()
for id, partner := range tl.frozen {
partner.freezeVal -= (partner.freezeVal + 1) / 2
if partner.freezeVal <= 0 {
delete(tl.frozen, id)
}
tl.pQueue.Update(partner.index)
}
}
type peerRequestTask struct {
Entry *wantlist.Entry
Target peer.ID
// A callback to signal that this task has been completed
Done func()
// trash is a book-keeping field
trash bool
// created marks the time that the task was added to the queue
created time.Time
index int // book-keeping field used by the pq container
}
// Key uniquely identifies a task.
func (t *peerRequestTask) Key() string {
return taskKey(t.Target, t.Entry.Cid)
}
// Index implements pq.Elem
func (t *peerRequestTask) Index() int {
return t.index
}
// SetIndex implements pq.Elem
func (t *peerRequestTask) SetIndex(i int) {
t.index = i
}
// taskKey returns a key that uniquely identifies a task.
func taskKey(p peer.ID, k *cid.Cid) string {
return string(p) + k.KeyString()
}
// FIFO is a basic task comparator that returns tasks in the order created.
var FIFO = func(a, b *peerRequestTask) bool {
return a.created.Before(b.created)
}
// V1 respects the target peer's wantlist priority. For tasks involving
// different peers, the oldest task is prioritized.
var V1 = func(a, b *peerRequestTask) bool {
if a.Target == b.Target {
return a.Entry.Priority > b.Entry.Priority
}
return FIFO(a, b)
}
func wrapCmp(f func(a, b *peerRequestTask) bool) func(a, b pq.Elem) bool {
return func(a, b pq.Elem) bool {
return f(a.(*peerRequestTask), b.(*peerRequestTask))
}
}
type activePartner struct {
// active is the number of blocks this peer is currently being sent;
// it must be locked around as it will be updated externally
activelk sync.Mutex
active int
activeBlocks *cid.Set
// requests is the number of blocks this peer is currently requesting
// requests need not be locked as it will only be modified under
// the peerRequestQueue's locks
requests int
// for the PQ interface
index int
freezeVal int
// priority queue of tasks belonging to this peer
taskQueue pq.PQ
}
func newActivePartner() *activePartner {
return &activePartner{
taskQueue: pq.New(wrapCmp(V1)),
activeBlocks: cid.NewSet(),
}
}
// partnerCompare implements pq.ElemComparator
// returns true if peer 'a' has higher priority than peer 'b'
func partnerCompare(a, b pq.Elem) bool {
pa := a.(*activePartner)
pb := b.(*activePartner)
// having no blocks in their wantlist means lowest priority
// having both of these checks ensures stability of the sort
if pa.requests == 0 {
return false
}
if pb.requests == 0 {
return true
}
if pa.freezeVal > pb.freezeVal {
return false
}
if pa.freezeVal < pb.freezeVal {
return true
}
if pa.active == pb.active {
// sorting by taskQueue.Len() aids in cleaning out trash entries faster
// if we sorted instead by requests, one peer could potentially build up
// a huge number of cancelled entries in the queue resulting in a memory leak
return pa.taskQueue.Len() > pb.taskQueue.Len()
}
return pa.active < pb.active
}
// StartTask signals that a task was started for this partner
func (p *activePartner) StartTask(k *cid.Cid) {
p.activelk.Lock()
p.activeBlocks.Add(k)
p.active++
p.activelk.Unlock()
}
// TaskDone signals that a task was completed for this partner
func (p *activePartner) TaskDone(k *cid.Cid) {
p.activelk.Lock()
p.activeBlocks.Remove(k)
p.active--
if p.active < 0 {
panic("more tasks finished than started!")
}
p.activelk.Unlock()
}
// Index implements pq.Elem
func (p *activePartner) Index() int {
return p.index
}
// SetIndex implements pq.Elem
func (p *activePartner) SetIndex(i int) {
p.index = i
}
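To make the freeze/thaw arithmetic concrete, here is a small editorial sketch (not part of the original tree) of the decay thawRound applies. Each round subtracts (freezeVal+1)/2, so a frozen partner's counter roughly halves per round and always reaches zero after finitely many rounds:

package main

import "fmt"

func main() {
	// Mirrors thawRound's decay step: freezeVal -= (freezeVal + 1) / 2
	freezeVal := 5
	for round := 1; freezeVal > 0; round++ {
		freezeVal -= (freezeVal + 1) / 2
		fmt.Printf("after round %d: freezeVal = %d\n", round, freezeVal)
	}
	// Prints 2, 1, 0: a partner frozen at 5 thaws completely in three rounds.
}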

View File

@ -1,128 +0,0 @@
package decision
import (
"fmt"
"math"
"math/rand"
"sort"
"strings"
"testing"
"github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
"gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
)
func TestPushPop(t *testing.T) {
prq := newPRQ()
partner := testutil.RandPeerIDFatal(t)
alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "")
vowels := strings.Split("aeiou", "")
consonants := func() []string {
var out []string
for _, letter := range alphabet {
skip := false
for _, vowel := range vowels {
if letter == vowel {
skip = true
}
}
if !skip {
out = append(out, letter)
}
}
return out
}()
sort.Strings(alphabet)
sort.Strings(vowels)
sort.Strings(consonants)
// add a bunch of blocks. cancel some. drain the queue. the queue should only have the kept entries
for _, index := range rand.Perm(len(alphabet)) { // add blocks for all letters
letter := alphabet[index]
t.Log(partner.String())
c := cid.NewCidV0(u.Hash([]byte(letter)))
prq.Push(&wantlist.Entry{Cid: c, Priority: math.MaxInt32 - index}, partner)
}
for _, consonant := range consonants {
c := cid.NewCidV0(u.Hash([]byte(consonant)))
prq.Remove(c, partner)
}
prq.fullThaw()
var out []string
for {
received := prq.Pop()
if received == nil {
break
}
out = append(out, received.Entry.Cid.String())
}
// Entries popped should already be in correct order
for i, expected := range vowels {
exp := cid.NewCidV0(u.Hash([]byte(expected))).String()
if out[i] != exp {
t.Fatal("received", out[i], "expected", expected)
}
}
}
// This test checks that peers won't starve out other peers.
func TestPeerRepeats(t *testing.T) {
prq := newPRQ()
a := testutil.RandPeerIDFatal(t)
b := testutil.RandPeerIDFatal(t)
c := testutil.RandPeerIDFatal(t)
d := testutil.RandPeerIDFatal(t)
// Have each push some blocks
for i := 0; i < 5; i++ {
elcid := cid.NewCidV0(u.Hash([]byte(fmt.Sprint(i))))
prq.Push(&wantlist.Entry{Cid: elcid}, a)
prq.Push(&wantlist.Entry{Cid: elcid}, b)
prq.Push(&wantlist.Entry{Cid: elcid}, c)
prq.Push(&wantlist.Entry{Cid: elcid}, d)
}
// now, pop off four entries, there should be one from each
var targets []string
var tasks []*peerRequestTask
for i := 0; i < 4; i++ {
t := prq.Pop()
targets = append(targets, t.Target.Pretty())
tasks = append(tasks, t)
}
expected := []string{a.Pretty(), b.Pretty(), c.Pretty(), d.Pretty()}
sort.Strings(expected)
sort.Strings(targets)
t.Log(targets)
t.Log(expected)
for i, s := range targets {
if expected[i] != s {
t.Fatal("unexpected peer", s, expected[i])
}
}
// Now, if one of the tasks gets finished, the next task off the queue should
// be for the same peer
for blockI := 0; blockI < 4; blockI++ {
for i := 0; i < 4; i++ {
// it's okay to mark the same task done multiple times here (JUST FOR TESTING)
tasks[i].Done()
ntask := prq.Pop()
if ntask.Target != tasks[i].Target {
t.Fatal("Expected task from peer with lowest active count")
}
}
}
}

View File

@ -1,100 +0,0 @@
package bitswap
import (
"context"
"errors"
notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore"
)
type getBlocksFunc func(context.Context, []*cid.Cid) (<-chan blocks.Block, error)
func getBlock(p context.Context, k *cid.Cid, gb getBlocksFunc) (blocks.Block, error) {
if k == nil {
log.Error("nil cid in GetBlock")
return nil, blockstore.ErrNotFound
}
// Any async work initiated by this function must end when this function
// returns. To ensure this, derive a new context. Note that it is okay to
// listen on parent in this scope, but NOT okay to pass |parent| to
// functions called by this one. Otherwise those functions won't return
// when this context's cancel func is executed. This is difficult to
// enforce. May this comment keep you safe.
ctx, cancel := context.WithCancel(p)
defer cancel()
promise, err := gb(ctx, []*cid.Cid{k})
if err != nil {
return nil, err
}
select {
case block, ok := <-promise:
if !ok {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
return nil, errors.New("promise channel was closed")
}
}
return block, nil
case <-p.Done():
return nil, p.Err()
}
}
type wantFunc func(context.Context, []*cid.Cid)
func getBlocksImpl(ctx context.Context, keys []*cid.Cid, notif notifications.PubSub, want wantFunc, cwants func([]*cid.Cid)) (<-chan blocks.Block, error) {
if len(keys) == 0 {
out := make(chan blocks.Block)
close(out)
return out, nil
}
remaining := cid.NewSet()
promise := notif.Subscribe(ctx, keys...)
for _, k := range keys {
log.Event(ctx, "Bitswap.GetBlockRequest.Start", k)
remaining.Add(k)
}
want(ctx, keys)
out := make(chan blocks.Block)
go handleIncoming(ctx, remaining, promise, out, cwants)
return out, nil
}
func handleIncoming(ctx context.Context, remaining *cid.Set, in <-chan blocks.Block, out chan blocks.Block, cfun func([]*cid.Cid)) {
ctx, cancel := context.WithCancel(ctx)
defer func() {
cancel()
close(out)
// can't just defer this call on its own, arguments are resolved *when* the defer is created
cfun(remaining.Keys())
}()
for {
select {
case blk, ok := <-in:
if !ok {
return
}
remaining.Remove(blk.Cid())
select {
case out <- blk:
case <-ctx.Done():
return
}
case <-ctx.Done():
return
}
}
}

View File

@ -1,249 +0,0 @@
package message
import (
"fmt"
"io"
pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb"
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
)
// TODO move message.go into the bitswap package
// TODO move bs/msg/internal/pb to bs/internal/pb and rename pb package to bitswap_pb
type BitSwapMessage interface {
// Wantlist returns a slice of unique keys that represent data wanted by
// the sender.
Wantlist() []Entry
// Blocks returns a slice of unique blocks
Blocks() []blocks.Block
// AddEntry adds an entry to the Wantlist.
AddEntry(key *cid.Cid, priority int)
Cancel(key *cid.Cid)
Empty() bool
// A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set
Full() bool
AddBlock(blocks.Block)
Exportable
Loggable() map[string]interface{}
}
type Exportable interface {
ToProtoV0() *pb.Message
ToProtoV1() *pb.Message
ToNetV0(w io.Writer) error
ToNetV1(w io.Writer) error
}
type impl struct {
full bool
wantlist map[string]*Entry
blocks map[string]blocks.Block
}
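// New creates a BitSwapMessage. If full is true, the wantlist it carries is
// the sender's complete wantlist rather than an incremental update.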
func New(full bool) BitSwapMessage {
return newMsg(full)
}
func newMsg(full bool) *impl {
return &impl{
blocks: make(map[string]blocks.Block),
wantlist: make(map[string]*Entry),
full: full,
}
}
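// Entry is a wantlist entry in a bitswap message; Cancel indicates that the
// sender no longer wants the block.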
type Entry struct {
*wantlist.Entry
Cancel bool
}
func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) {
m := newMsg(pbm.GetWantlist().GetFull())
for _, e := range pbm.GetWantlist().GetEntries() {
c, err := cid.Cast([]byte(e.GetBlock()))
if err != nil {
return nil, fmt.Errorf("incorrectly formatted cid in wantlist: %s", err)
}
m.addEntry(c, int(e.GetPriority()), e.GetCancel())
}
// deprecated
for _, d := range pbm.GetBlocks() {
// CIDv0, sha256, protobuf only
b := blocks.NewBlock(d)
m.AddBlock(b)
}
// bitswap 1.1.0: blocks arrive in the payload field, each with a CID prefix
for _, b := range pbm.GetPayload() {
pref, err := cid.PrefixFromBytes(b.GetPrefix())
if err != nil {
return nil, err
}
c, err := pref.Sum(b.GetData())
if err != nil {
return nil, err
}
blk, err := blocks.NewBlockWithCid(b.GetData(), c)
if err != nil {
return nil, err
}
m.AddBlock(blk)
}
return m, nil
}
func (m *impl) Full() bool {
return m.full
}
func (m *impl) Empty() bool {
return len(m.blocks) == 0 && len(m.wantlist) == 0
}
func (m *impl) Wantlist() []Entry {
out := make([]Entry, 0, len(m.wantlist))
for _, e := range m.wantlist {
out = append(out, *e)
}
return out
}
func (m *impl) Blocks() []blocks.Block {
bs := make([]blocks.Block, 0, len(m.blocks))
for _, block := range m.blocks {
bs = append(bs, block)
}
return bs
}
func (m *impl) Cancel(k *cid.Cid) {
delete(m.wantlist, k.KeyString())
m.addEntry(k, 0, true)
}
func (m *impl) AddEntry(k *cid.Cid, priority int) {
m.addEntry(k, priority, false)
}
func (m *impl) addEntry(c *cid.Cid, priority int, cancel bool) {
k := c.KeyString()
e, exists := m.wantlist[k]
if exists {
e.Priority = priority
e.Cancel = cancel
} else {
m.wantlist[k] = &Entry{
Entry: &wantlist.Entry{
Cid: c,
Priority: priority,
},
Cancel: cancel,
}
}
}
func (m *impl) AddBlock(b blocks.Block) {
m.blocks[b.Cid().KeyString()] = b
}
func FromNet(r io.Reader) (BitSwapMessage, error) {
pbr := ggio.NewDelimitedReader(r, inet.MessageSizeMax)
return FromPBReader(pbr)
}
func FromPBReader(pbr ggio.Reader) (BitSwapMessage, error) {
pb := new(pb.Message)
if err := pbr.ReadMsg(pb); err != nil {
return nil, err
}
return newMessageFromProto(*pb)
}
func (m *impl) ToProtoV0() *pb.Message {
pbm := new(pb.Message)
pbm.Wantlist = new(pb.Message_Wantlist)
pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist))
for _, e := range m.wantlist {
pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{
Block: proto.String(e.Cid.KeyString()),
Priority: proto.Int32(int32(e.Priority)),
Cancel: proto.Bool(e.Cancel),
})
}
pbm.Wantlist.Full = proto.Bool(m.full)
blocks := m.Blocks()
pbm.Blocks = make([][]byte, 0, len(blocks))
for _, b := range blocks {
pbm.Blocks = append(pbm.Blocks, b.RawData())
}
return pbm
}
func (m *impl) ToProtoV1() *pb.Message {
pbm := new(pb.Message)
pbm.Wantlist = new(pb.Message_Wantlist)
pbm.Wantlist.Entries = make([]*pb.Message_Wantlist_Entry, 0, len(m.wantlist))
for _, e := range m.wantlist {
pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, &pb.Message_Wantlist_Entry{
Block: proto.String(e.Cid.KeyString()),
Priority: proto.Int32(int32(e.Priority)),
Cancel: proto.Bool(e.Cancel),
})
}
pbm.Wantlist.Full = proto.Bool(m.full)
blocks := m.Blocks()
pbm.Payload = make([]*pb.Message_Block, 0, len(blocks))
for _, b := range blocks {
blk := &pb.Message_Block{
Data: b.RawData(),
Prefix: b.Cid().Prefix().Bytes(),
}
pbm.Payload = append(pbm.Payload, blk)
}
return pbm
}
func (m *impl) ToNetV0(w io.Writer) error {
pbw := ggio.NewDelimitedWriter(w)
return pbw.WriteMsg(m.ToProtoV0())
}
func (m *impl) ToNetV1(w io.Writer) error {
pbw := ggio.NewDelimitedWriter(w)
return pbw.WriteMsg(m.ToProtoV1())
}
func (m *impl) Loggable() map[string]interface{} {
blocks := make([]string, 0, len(m.blocks))
for _, v := range m.blocks {
blocks = append(blocks, v.Cid().String())
}
return map[string]interface{}{
"blocks": blocks,
"wants": m.Wantlist(),
}
}

View File

@ -1,200 +0,0 @@
package message
import (
"bytes"
"testing"
pb "github.com/ipfs/go-ipfs/exchange/bitswap/message/pb"
u "gx/ipfs/QmPdKqUcHGFdeSpvjVoaTRPPstGif9GBZb5Q56RVw9o69A/go-ipfs-util"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
)
func mkFakeCid(s string) *cid.Cid {
return cid.NewCidV0(u.Hash([]byte(s)))
}
func TestAppendWanted(t *testing.T) {
str := mkFakeCid("foo")
m := New(true)
m.AddEntry(str, 1)
if !wantlistContains(m.ToProtoV0().GetWantlist(), str) {
t.Fail()
}
}
func TestNewMessageFromProto(t *testing.T) {
str := mkFakeCid("a_key")
protoMessage := new(pb.Message)
protoMessage.Wantlist = new(pb.Message_Wantlist)
protoMessage.Wantlist.Entries = []*pb.Message_Wantlist_Entry{
{Block: proto.String(str.KeyString())},
}
if !wantlistContains(protoMessage.Wantlist, str) {
t.Fail()
}
m, err := newMessageFromProto(*protoMessage)
if err != nil {
t.Fatal(err)
}
if !wantlistContains(m.ToProtoV0().GetWantlist(), str) {
t.Fail()
}
}
func TestAppendBlock(t *testing.T) {
strs := make([]string, 0, 2)
strs = append(strs, "Celeritas")
strs = append(strs, "Incendia")
m := New(true)
for _, str := range strs {
block := blocks.NewBlock([]byte(str))
m.AddBlock(block)
}
// assert strings are in proto message
for _, blockbytes := range m.ToProtoV0().GetBlocks() {
s := bytes.NewBuffer(blockbytes).String()
if !contains(strs, s) {
t.Fail()
}
}
}
func TestWantlist(t *testing.T) {
keystrs := []*cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")}
m := New(true)
for _, s := range keystrs {
m.AddEntry(s, 1)
}
exported := m.Wantlist()
for _, k := range exported {
present := false
for _, s := range keystrs {
if s.Equals(k.Cid) {
present = true
}
}
if !present {
t.Logf("%v isn't in original list", k.Cid)
t.Fail()
}
}
}
func TestCopyProtoByValue(t *testing.T) {
str := mkFakeCid("foo")
m := New(true)
protoBeforeAppend := m.ToProtoV0()
m.AddEntry(str, 1)
if wantlistContains(protoBeforeAppend.GetWantlist(), str) {
t.Fail()
}
}
func TestToNetFromNetPreservesWantList(t *testing.T) {
original := New(true)
original.AddEntry(mkFakeCid("M"), 1)
original.AddEntry(mkFakeCid("B"), 1)
original.AddEntry(mkFakeCid("D"), 1)
original.AddEntry(mkFakeCid("T"), 1)
original.AddEntry(mkFakeCid("F"), 1)
buf := new(bytes.Buffer)
if err := original.ToNetV1(buf); err != nil {
t.Fatal(err)
}
copied, err := FromNet(buf)
if err != nil {
t.Fatal(err)
}
if !copied.Full() {
t.Fatal("fullness attribute got dropped on marshal")
}
keys := make(map[string]bool)
for _, k := range copied.Wantlist() {
keys[k.Cid.KeyString()] = true
}
for _, k := range original.Wantlist() {
if _, ok := keys[k.Cid.KeyString()]; !ok {
t.Fatalf("Key Missing: \"%v\"", k)
}
}
}
func TestToAndFromNetMessage(t *testing.T) {
original := New(true)
original.AddBlock(blocks.NewBlock([]byte("W")))
original.AddBlock(blocks.NewBlock([]byte("E")))
original.AddBlock(blocks.NewBlock([]byte("F")))
original.AddBlock(blocks.NewBlock([]byte("M")))
buf := new(bytes.Buffer)
if err := original.ToNetV1(buf); err != nil {
t.Fatal(err)
}
m2, err := FromNet(buf)
if err != nil {
t.Fatal(err)
}
keys := make(map[string]bool)
for _, b := range m2.Blocks() {
keys[b.Cid().KeyString()] = true
}
for _, b := range original.Blocks() {
if _, ok := keys[b.Cid().KeyString()]; !ok {
t.Fail()
}
}
}
func wantlistContains(wantlist *pb.Message_Wantlist, c *cid.Cid) bool {
for _, e := range wantlist.GetEntries() {
if e.GetBlock() == c.KeyString() {
return true
}
}
return false
}
func contains(strs []string, x string) bool {
for _, s := range strs {
if s == x {
return true
}
}
return false
}
func TestDuplicates(t *testing.T) {
b := blocks.NewBlock([]byte("foo"))
msg := New(true)
msg.AddEntry(b.Cid(), 1)
msg.AddEntry(b.Cid(), 1)
if len(msg.Wantlist()) != 1 {
t.Fatal("Duplicate in BitSwapMessage")
}
msg.AddBlock(b)
msg.AddBlock(b)
if len(msg.Blocks()) != 1 {
t.Fatal("Duplicate in BitSwapMessage")
}
}

View File

@ -1,8 +0,0 @@
# TODO(brian): add proto tasks
all: message.pb.go
message.pb.go: message.proto
protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. $<
clean:
rm message.pb.go

View File

@ -1,8 +0,0 @@
include mk/header.mk
PB_$(d) = $(wildcard $(d)/*.proto)
TGTS_$(d) = $(PB_$(d):.proto=.pb.go)
#DEPS_GO += $(TGTS_$(d))
include mk/footer.mk

View File

@ -1,142 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: message.proto
// DO NOT EDIT!
/*
Package bitswap_message_pb is a generated protocol buffer package.
It is generated from these files:
message.proto
It has these top-level messages:
Message
*/
package bitswap_message_pb
import proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type Message struct {
Wantlist *Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist" json:"wantlist,omitempty"`
Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"`
Payload []*Message_Block `protobuf:"bytes,3,rep,name=payload" json:"payload,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (m *Message) GetWantlist() *Message_Wantlist {
if m != nil {
return m.Wantlist
}
return nil
}
func (m *Message) GetBlocks() [][]byte {
if m != nil {
return m.Blocks
}
return nil
}
func (m *Message) GetPayload() []*Message_Block {
if m != nil {
return m.Payload
}
return nil
}
type Message_Wantlist struct {
Entries []*Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
Full *bool `protobuf:"varint,2,opt,name=full" json:"full,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} }
func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) }
func (*Message_Wantlist) ProtoMessage() {}
func (m *Message_Wantlist) GetEntries() []*Message_Wantlist_Entry {
if m != nil {
return m.Entries
}
return nil
}
func (m *Message_Wantlist) GetFull() bool {
if m != nil && m.Full != nil {
return *m.Full
}
return false
}
type Message_Wantlist_Entry struct {
Block *string `protobuf:"bytes,1,opt,name=block" json:"block,omitempty"`
Priority *int32 `protobuf:"varint,2,opt,name=priority" json:"priority,omitempty"`
Cancel *bool `protobuf:"varint,3,opt,name=cancel" json:"cancel,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} }
func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) }
func (*Message_Wantlist_Entry) ProtoMessage() {}
func (m *Message_Wantlist_Entry) GetBlock() string {
if m != nil && m.Block != nil {
return *m.Block
}
return ""
}
func (m *Message_Wantlist_Entry) GetPriority() int32 {
if m != nil && m.Priority != nil {
return *m.Priority
}
return 0
}
func (m *Message_Wantlist_Entry) GetCancel() bool {
if m != nil && m.Cancel != nil {
return *m.Cancel
}
return false
}
type Message_Block struct {
Prefix []byte `protobuf:"bytes,1,opt,name=prefix" json:"prefix,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Message_Block) Reset() { *m = Message_Block{} }
func (m *Message_Block) String() string { return proto.CompactTextString(m) }
func (*Message_Block) ProtoMessage() {}
func (m *Message_Block) GetPrefix() []byte {
if m != nil {
return m.Prefix
}
return nil
}
func (m *Message_Block) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
func init() {
proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message")
proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist")
proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry")
proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block")
}

View File

@ -1,25 +0,0 @@
package bitswap.message.pb;
message Message {
message Wantlist {
message Entry {
optional string block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0)
optional int32 priority = 2; // the priority (normalized). defaults to 1
optional bool cancel = 3; // whether this revokes an entry
}
repeated Entry entries = 1; // a list of wantlist entries
optional bool full = 2; // whether this is the full wantlist. defaults to false
}
message Block {
optional bytes prefix = 1; // CID prefix (cid version, multicodec, and multihash type + length)
optional bytes data = 2;
}
optional Wantlist wantlist = 1;
repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0
repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0
}
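A note on the prefix field above: a receiver never trusts a sender-supplied CID; it re-derives the CID by hashing the received data under the received prefix, exactly as newMessageFromProto does earlier in this diff. A minimal sketch, assuming the go-cid API imported by the deleted message.go (cid.PrefixFromBytes and Prefix.Sum); the helper name is hypothetical:

// cidForPayload re-derives a block's CID from a bitswap 1.1.0 payload entry.
// The prefix encodes the cid version, codec, and multihash type/length;
// Sum hashes the data under that prefix to produce the full CID.
func cidForPayload(prefix, data []byte) (*cid.Cid, error) {
	pref, err := cid.PrefixFromBytes(prefix)
	if err != nil {
		return nil, err
	}
	return pref.Sum(data)
}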

View File

@ -1,70 +0,0 @@
package network
import (
"context"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
var (
// These two are equivalent, legacy
ProtocolBitswapOne protocol.ID = "/ipfs/bitswap/1.0.0"
ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap"
ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.1.0"
)
// BitSwapNetwork provides network connectivity for BitSwap sessions
type BitSwapNetwork interface {
// SendMessage sends a BitSwap message to a peer.
SendMessage(
context.Context,
peer.ID,
bsmsg.BitSwapMessage) error
// SetDelegate registers the Receiver to handle messages received from the
// network.
SetDelegate(Receiver)
ConnectTo(context.Context, peer.ID) error
NewMessageSender(context.Context, peer.ID) (MessageSender, error)
ConnectionManager() ifconnmgr.ConnManager
Routing
}
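// MessageSender sends a sequence of bitswap messages to a single peer,
// typically reusing one stream.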
type MessageSender interface {
SendMsg(context.Context, bsmsg.BitSwapMessage) error
Close() error
Reset() error
}
// Implement Receiver to receive messages from the BitSwapNetwork
type Receiver interface {
ReceiveMessage(
ctx context.Context,
sender peer.ID,
incoming bsmsg.BitSwapMessage)
ReceiveError(error)
// Connected/Disconnected warns bitswap about peer connections
PeerConnected(peer.ID)
PeerDisconnected(peer.ID)
}
type Routing interface {
// FindProvidersAsync returns a channel of providers for the given key
FindProvidersAsync(context.Context, *cid.Cid, int) <-chan peer.ID
// Provide provides the key to the network
Provide(context.Context, *cid.Cid) error
}
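Consumers wire themselves into a BitSwapNetwork through SetDelegate. As an editorial sketch (the type name here is hypothetical; bitswap's real delegate is the Bitswap type in the parent package), a delegate only needs the four Receiver callbacks:

// noopReceiver is a hypothetical, do-nothing Receiver used for illustration.
type noopReceiver struct{}

func (r *noopReceiver) ReceiveMessage(ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) {
	// A real delegate would hand the wantlist to the decision engine and
	// publish any received blocks to waiting sessions.
}

func (r *noopReceiver) ReceiveError(err error)     {}
func (r *noopReceiver) PeerConnected(p peer.ID)    {}
func (r *noopReceiver) PeerDisconnected(p peer.ID) {}

// usage: net.SetDelegate(&noopReceiver{})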

View File

@ -1,230 +0,0 @@
package network
import (
"context"
"fmt"
"io"
"time"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
inet "gx/ipfs/QmPjvxTpVH8qJyQDnxnsxF9kv9jezKD1kozz1hs3fCGsNh/go-libp2p-net"
ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
ma "gx/ipfs/QmYmsdtJ3HsodkePE3eU3TsCaP2YvPZJ4LoXnNkDE5Tpt7/go-multiaddr"
routing "gx/ipfs/QmZ383TySJVeZWzGnWui6pRcKyYZk9VkKTuW7tmKRWk5au/go-libp2p-routing"
ggio "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/io"
pstore "gx/ipfs/QmZR2XWVVBCtbgBWnQhWk2xcQfaR3W8faQPriAiaaj7rsr/go-libp2p-peerstore"
host "gx/ipfs/Qmb8T6YBBsjYsVGfrihQLfCJveczZnneSBqBKkYEBWDjge/go-libp2p-host"
logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
var log = logging.Logger("bitswap_network")
var sendMessageTimeout = time.Minute * 10
// NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host
func NewFromIpfsHost(host host.Host, r routing.ContentRouting) BitSwapNetwork {
bitswapNetwork := impl{
host: host,
routing: r,
}
host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream)
host.SetStreamHandler(ProtocolBitswapOne, bitswapNetwork.handleNewStream)
host.SetStreamHandler(ProtocolBitswapNoVers, bitswapNetwork.handleNewStream)
host.Network().Notify((*netNotifiee)(&bitswapNetwork))
// TODO: StopNotify.
return &bitswapNetwork
}
// impl transforms the ipfs network interface, which sends and receives
// NetMessage objects, into the bitswap network interface.
type impl struct {
host host.Host
routing routing.ContentRouting
// inbound messages from the network are forwarded to the receiver
receiver Receiver
}
type streamMessageSender struct {
s inet.Stream
}
func (s *streamMessageSender) Close() error {
return inet.FullClose(s.s)
}
func (s *streamMessageSender) Reset() error {
return s.s.Reset()
}
func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error {
return msgToStream(ctx, s.s, msg)
}
func msgToStream(ctx context.Context, s inet.Stream, msg bsmsg.BitSwapMessage) error {
deadline := time.Now().Add(sendMessageTimeout)
if dl, ok := ctx.Deadline(); ok {
deadline = dl
}
if err := s.SetWriteDeadline(deadline); err != nil {
log.Warningf("error setting deadline: %s", err)
}
switch s.Protocol() {
case ProtocolBitswap:
if err := msg.ToNetV1(s); err != nil {
log.Debugf("error: %s", err)
return err
}
case ProtocolBitswapOne, ProtocolBitswapNoVers:
if err := msg.ToNetV0(s); err != nil {
log.Debugf("error: %s", err)
return err
}
default:
return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol())
}
if err := s.SetWriteDeadline(time.Time{}); err != nil {
log.Warningf("error resetting deadline: %s", err)
}
return nil
}
func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID) (MessageSender, error) {
s, err := bsnet.newStreamToPeer(ctx, p)
if err != nil {
return nil, err
}
return &streamMessageSender{s: s}, nil
}
func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (inet.Stream, error) {
return bsnet.host.NewStream(ctx, p, ProtocolBitswap, ProtocolBitswapOne, ProtocolBitswapNoVers)
}
func (bsnet *impl) SendMessage(
ctx context.Context,
p peer.ID,
outgoing bsmsg.BitSwapMessage) error {
s, err := bsnet.newStreamToPeer(ctx, p)
if err != nil {
return err
}
if err = msgToStream(ctx, s, outgoing); err != nil {
s.Reset()
return err
}
// TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine.
go inet.AwaitEOF(s)
return s.Close()
}
func (bsnet *impl) SetDelegate(r Receiver) {
bsnet.receiver = r
}
func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error {
return bsnet.host.Connect(ctx, pstore.PeerInfo{ID: p})
}
// FindProvidersAsync returns a channel of providers for the given key
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID {
// Since routing queries are expensive, give bitswap the peers to which we
// have open connections. Note that this may cause issues if bitswap starts
// precisely tracking which peers provide certain keys. This optimization
// would be misleading. In the long run, this may not be the most
// appropriate place for this optimization, but it won't cause any harm in
// the short term.
connectedPeers := bsnet.host.Network().Peers()
out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers
for _, id := range connectedPeers {
if id == bsnet.host.ID() {
continue // ignore self as provider
}
out <- id
}
go func() {
defer close(out)
providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
for info := range providers {
if info.ID == bsnet.host.ID() {
continue // ignore self as provider
}
bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, pstore.TempAddrTTL)
select {
case <-ctx.Done():
return
case out <- info.ID:
}
}
}()
return out
}
// Provide provides the key to the network
func (bsnet *impl) Provide(ctx context.Context, k *cid.Cid) error {
return bsnet.routing.Provide(ctx, k, true)
}
// handleNewStream receives a new stream from the network.
func (bsnet *impl) handleNewStream(s inet.Stream) {
defer s.Close()
if bsnet.receiver == nil {
s.Reset()
return
}
reader := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
for {
received, err := bsmsg.FromPBReader(reader)
if err != nil {
if err != io.EOF {
s.Reset()
go bsnet.receiver.ReceiveError(err)
log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err)
}
return
}
p := s.Conn().RemotePeer()
ctx := context.Background()
log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer())
bsnet.receiver.ReceiveMessage(ctx, p, received)
}
}
func (bsnet *impl) ConnectionManager() ifconnmgr.ConnManager {
return bsnet.host.ConnManager()
}
type netNotifiee impl
func (nn *netNotifiee) impl() *impl {
return (*impl)(nn)
}
func (nn *netNotifiee) Connected(n inet.Network, v inet.Conn) {
nn.impl().receiver.PeerConnected(v.RemotePeer())
}
func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) {
nn.impl().receiver.PeerDisconnected(v.RemotePeer())
}
func (nn *netNotifiee) OpenedStream(n inet.Network, v inet.Stream) {}
func (nn *netNotifiee) ClosedStream(n inet.Network, v inet.Stream) {}
func (nn *netNotifiee) Listen(n inet.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(n inet.Network, a ma.Multiaddr) {}

View File

@ -1,130 +0,0 @@
package notifications
import (
"context"
"sync"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
pubsub "gx/ipfs/QmdbxjQWogRCHRaxhhGnYdT1oQJzL9GdqSKzCdqWr85AP2/pubsub"
)
const bufferSize = 16
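// PubSub is a small pubsub interface for publishing blocks and waiting to
// receive them by CID.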
type PubSub interface {
Publish(block blocks.Block)
Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.Block
Shutdown()
}
func New() PubSub {
return &impl{
wrapped: *pubsub.New(bufferSize),
cancel: make(chan struct{}),
}
}
type impl struct {
wrapped pubsub.PubSub
// These two fields make up a shutdown "lock".
// We need them as calling, e.g., `Unsubscribe` after calling `Shutdown`
// blocks forever and fixing this in pubsub would be rather invasive.
cancel chan struct{}
wg sync.WaitGroup
}
func (ps *impl) Publish(block blocks.Block) {
ps.wg.Add(1)
defer ps.wg.Done()
select {
case <-ps.cancel:
// Already shutdown, bail.
return
default:
}
ps.wrapped.Pub(block, block.Cid().KeyString())
}
// Not safe to call more than once.
func (ps *impl) Shutdown() {
// Interrupt in-progress subscriptions.
close(ps.cancel)
// Wait for them to finish.
ps.wg.Wait()
// shutdown the pubsub.
ps.wrapped.Shutdown()
}
// Subscribe returns a channel of blocks for the given |keys|. |blockChannel|
// is closed if the |ctx| times out or is cancelled, or after sending len(keys)
// blocks.
func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.Block {
blocksCh := make(chan blocks.Block, len(keys))
valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking
if len(keys) == 0 {
close(blocksCh)
return blocksCh
}
// prevent shutdown
ps.wg.Add(1)
// check if shutdown *after* preventing shutdowns.
select {
case <-ps.cancel:
// abort, allow shutdown to continue.
ps.wg.Done()
close(blocksCh)
return blocksCh
default:
}
ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...)
go func() {
defer func() {
ps.wrapped.Unsub(valuesCh)
close(blocksCh)
// Unblock shutdown.
ps.wg.Done()
}()
for {
select {
case <-ps.cancel:
return
case <-ctx.Done():
return
case val, ok := <-valuesCh:
if !ok {
return
}
block, ok := val.(blocks.Block)
if !ok {
return
}
select {
case <-ps.cancel:
return
case <-ctx.Done():
return
case blocksCh <- block: // continue
}
}
}
}()
return blocksCh
}
func toStrings(keys []*cid.Cid) []string {
strs := make([]string, 0, len(keys))
for _, key := range keys {
strs = append(strs, key.KeyString())
}
return strs
}
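A usage sketch for the wrapper above (editorial, not from the original tree; the import paths match the deleted files): subscribe before publishing, then read from the returned channel, which closes once every requested key has been delivered or the context ends.

package main

import (
	"context"
	"fmt"

	notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications"
	blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
)

func main() {
	n := notifications.New()
	defer n.Shutdown()

	blk := blocks.NewBlock([]byte("hello"))
	ch := n.Subscribe(context.Background(), blk.Cid()) // subscribe first
	n.Publish(blk)

	got := <-ch // one block per requested key, then the channel closes
	fmt.Println(got.Cid().Equals(blk.Cid())) // true
}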

View File

@ -1,187 +0,0 @@
package notifications
import (
"bytes"
"context"
"testing"
"time"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil"
)
func TestDuplicates(t *testing.T) {
b1 := blocks.NewBlock([]byte("1"))
b2 := blocks.NewBlock([]byte("2"))
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background(), b1.Cid(), b2.Cid())
n.Publish(b1)
blockRecvd, ok := <-ch
if !ok {
t.Fail()
}
assertBlocksEqual(t, b1, blockRecvd)
n.Publish(b1) // ignored duplicate
n.Publish(b2)
blockRecvd, ok = <-ch
if !ok {
t.Fail()
}
assertBlocksEqual(t, b2, blockRecvd)
}
func TestPublishSubscribe(t *testing.T) {
blockSent := blocks.NewBlock([]byte("Greetings from The Interval"))
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background(), blockSent.Cid())
n.Publish(blockSent)
blockRecvd, ok := <-ch
if !ok {
t.Fail()
}
assertBlocksEqual(t, blockRecvd, blockSent)
}
func TestSubscribeMany(t *testing.T) {
e1 := blocks.NewBlock([]byte("1"))
e2 := blocks.NewBlock([]byte("2"))
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background(), e1.Cid(), e2.Cid())
n.Publish(e1)
r1, ok := <-ch
if !ok {
t.Fatal("didn't receive first expected block")
}
assertBlocksEqual(t, e1, r1)
n.Publish(e2)
r2, ok := <-ch
if !ok {
t.Fatal("didn't receive second expected block")
}
assertBlocksEqual(t, e2, r2)
}
// TestDuplicateSubscribe tests a scenario where a given block
// would be requested twice at the same time.
func TestDuplicateSubscribe(t *testing.T) {
e1 := blocks.NewBlock([]byte("1"))
n := New()
defer n.Shutdown()
ch1 := n.Subscribe(context.Background(), e1.Cid())
ch2 := n.Subscribe(context.Background(), e1.Cid())
n.Publish(e1)
r1, ok := <-ch1
if !ok {
t.Fatal("didn't receive first expected block")
}
assertBlocksEqual(t, e1, r1)
r2, ok := <-ch2
if !ok {
t.Fatal("didn't receive second expected block")
}
assertBlocksEqual(t, e1, r2)
}
func TestShutdownBeforeUnsubscribe(t *testing.T) {
e1 := blocks.NewBlock([]byte("1"))
n := New()
ctx, cancel := context.WithCancel(context.Background())
ch := n.Subscribe(ctx, e1.Cid())
n.Shutdown()
cancel()
select {
case _, ok := <-ch:
if ok {
t.Fatal("channel should have been closed")
}
default:
t.Fatal("channel should have been closed")
}
}
func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) {
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background()) // no keys provided
if _, ok := <-ch; ok {
t.Fatal("should be closed if no keys provided")
}
}
func TestCarryOnWhenDeadlineExpires(t *testing.T) {
impossibleDeadline := time.Nanosecond
fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline)
defer cancel()
n := New()
defer n.Shutdown()
block := blocks.NewBlock([]byte("A Missed Connection"))
blockChannel := n.Subscribe(fastExpiringCtx, block.Cid())
assertBlockChannelNil(t, blockChannel)
}
func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {
g := blocksutil.NewBlockGenerator()
ctx, cancel := context.WithCancel(context.Background())
n := New()
defer n.Shutdown()
t.Log("generate a large number of blocks. exceed default buffer")
bs := g.Blocks(1000)
ks := func() []*cid.Cid {
var keys []*cid.Cid
for _, b := range bs {
keys = append(keys, b.Cid())
}
return keys
}()
_ = n.Subscribe(ctx, ks...) // ignore received channel
t.Log("cancel context before any blocks published")
cancel()
for _, b := range bs {
n.Publish(b)
}
t.Log("publishing the large number of blocks to the ignored channel must not deadlock")
}
func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) {
_, ok := <-blockChannel
if ok {
t.Fail()
}
}
func assertBlocksEqual(t *testing.T, a, b blocks.Block) {
if !bytes.Equal(a.RawData(), b.RawData()) {
t.Fatal("blocks aren't equal")
}
if a.Cid() != b.Cid() {
t.Fatal("block keys aren't equal")
}
}

View File

@ -1,364 +0,0 @@
package bitswap
import (
"context"
"fmt"
"time"
notifications "github.com/ipfs/go-ipfs/exchange/bitswap/notifications"
loggables "gx/ipfs/QmRPkGkHLB72caXgdDYnoaWigXNWx95BcYDKV1n3KTEpaG/go-libp2p-loggables"
lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
const activeWantsLimit = 16
// Session holds state for an individual bitswap transfer operation.
// This allows bitswap to make smarter decisions about who to send wantlist
// info to, and who to request blocks from
type Session struct {
ctx context.Context
tofetch *cidQueue
activePeers map[peer.ID]struct{}
activePeersArr []peer.ID
bs *Bitswap
incoming chan blkRecv
newReqs chan []*cid.Cid
cancelKeys chan []*cid.Cid
interestReqs chan interestReq
interest *lru.Cache
liveWants map[string]time.Time
tick *time.Timer
baseTickDelay time.Duration
latTotal time.Duration
fetchcnt int
notif notifications.PubSub
uuid logging.Loggable
id uint64
tag string
}
// NewSession creates a new bitswap session whose lifetime is bounded by the
// given context
func (bs *Bitswap) NewSession(ctx context.Context) *Session {
s := &Session{
activePeers: make(map[peer.ID]struct{}),
liveWants: make(map[string]time.Time),
newReqs: make(chan []*cid.Cid),
cancelKeys: make(chan []*cid.Cid),
tofetch: newCidQueue(),
interestReqs: make(chan interestReq),
ctx: ctx,
bs: bs,
incoming: make(chan blkRecv),
notif: notifications.New(),
uuid: loggables.Uuid("GetBlockRequest"),
baseTickDelay: time.Millisecond * 500,
id: bs.getNextSessionID(),
}
s.tag = fmt.Sprint("bs-ses-", s.id)
cache, _ := lru.New(2048)
s.interest = cache
bs.sessLk.Lock()
bs.sessions = append(bs.sessions, s)
bs.sessLk.Unlock()
go s.run(ctx)
return s
}
func (bs *Bitswap) removeSession(s *Session) {
s.notif.Shutdown()
live := make([]*cid.Cid, 0, len(s.liveWants))
for c := range s.liveWants {
cs, _ := cid.Cast([]byte(c))
live = append(live, cs)
}
bs.CancelWants(live, s.id)
bs.sessLk.Lock()
defer bs.sessLk.Unlock()
for i := 0; i < len(bs.sessions); i++ {
if bs.sessions[i] == s {
bs.sessions[i] = bs.sessions[len(bs.sessions)-1]
bs.sessions = bs.sessions[:len(bs.sessions)-1]
return
}
}
}
type blkRecv struct {
from peer.ID
blk blocks.Block
}
func (s *Session) receiveBlockFrom(from peer.ID, blk blocks.Block) {
select {
case s.incoming <- blkRecv{from: from, blk: blk}:
case <-s.ctx.Done():
}
}
type interestReq struct {
c *cid.Cid
resp chan bool
}
// TODO: PERF: this is using a channel to guard a map access against race
// conditions. This is definitely much slower than a mutex, though it's unclear
// if it will actually induce any noticeable slowness. This is implemented this
// way to avoid adding a more complex set of mutexes around the liveWants map.
// Note that in the average case (where this session *is* interested in the
// block we received) this function will not be called, as the cid will likely
// still be in the interest cache.
func (s *Session) isLiveWant(c *cid.Cid) bool {
resp := make(chan bool, 1)
select {
case s.interestReqs <- interestReq{
c: c,
resp: resp,
}:
case <-s.ctx.Done():
return false
}
select {
case want := <-resp:
return want
case <-s.ctx.Done():
return false
}
}
func (s *Session) interestedIn(c *cid.Cid) bool {
return s.interest.Contains(c.KeyString()) || s.isLiveWant(c)
}
const provSearchDelay = time.Second * 10
func (s *Session) addActivePeer(p peer.ID) {
if _, ok := s.activePeers[p]; !ok {
s.activePeers[p] = struct{}{}
s.activePeersArr = append(s.activePeersArr, p)
cmgr := s.bs.network.ConnectionManager()
cmgr.TagPeer(p, s.tag, 10)
}
}
func (s *Session) resetTick() {
if s.latTotal == 0 {
s.tick.Reset(provSearchDelay)
} else {
avLat := s.latTotal / time.Duration(s.fetchcnt)
s.tick.Reset(s.baseTickDelay + (3 * avLat))
}
}
func (s *Session) run(ctx context.Context) {
s.tick = time.NewTimer(provSearchDelay)
newpeers := make(chan peer.ID, 16)
for {
select {
case blk := <-s.incoming:
s.tick.Stop()
if blk.from != "" {
s.addActivePeer(blk.from)
}
s.receiveBlock(ctx, blk.blk)
s.resetTick()
case keys := <-s.newReqs:
for _, k := range keys {
s.interest.Add(k.KeyString(), nil)
}
if len(s.liveWants) < activeWantsLimit {
toadd := activeWantsLimit - len(s.liveWants)
if toadd > len(keys) {
toadd = len(keys)
}
now := keys[:toadd]
keys = keys[toadd:]
s.wantBlocks(ctx, now)
}
for _, k := range keys {
s.tofetch.Push(k)
}
case keys := <-s.cancelKeys:
s.cancel(keys)
case <-s.tick.C:
live := make([]*cid.Cid, 0, len(s.liveWants))
now := time.Now()
for c := range s.liveWants {
cs, _ := cid.Cast([]byte(c))
live = append(live, cs)
s.liveWants[c] = now
}
// Broadcast these keys to everyone we're connected to
s.bs.wm.WantBlocks(ctx, live, nil, s.id)
if len(live) > 0 {
go func(k *cid.Cid) {
// TODO: have a task queue setup for this to:
// - rate limit
// - manage timeouts
// - ensure two 'findprovs' calls for the same block don't run concurrently
// - share peers between sessions based on interest set
for p := range s.bs.network.FindProvidersAsync(ctx, k, 10) {
newpeers <- p
}
}(live[0])
}
s.resetTick()
case p := <-newpeers:
s.addActivePeer(p)
case lwchk := <-s.interestReqs:
lwchk.resp <- s.cidIsWanted(lwchk.c)
case <-ctx.Done():
s.tick.Stop()
s.bs.removeSession(s)
cmgr := s.bs.network.ConnectionManager()
for _, p := range s.activePeersArr {
cmgr.UntagPeer(p, s.tag)
}
return
}
}
}
func (s *Session) cidIsWanted(c *cid.Cid) bool {
_, ok := s.liveWants[c.KeyString()]
if !ok {
ok = s.tofetch.Has(c)
}
return ok
}
func (s *Session) receiveBlock(ctx context.Context, blk blocks.Block) {
c := blk.Cid()
if s.cidIsWanted(c) {
ks := c.KeyString()
tval, ok := s.liveWants[ks]
if ok {
s.latTotal += time.Since(tval)
delete(s.liveWants, ks)
} else {
s.tofetch.Remove(c)
}
s.fetchcnt++
s.notif.Publish(blk)
if next := s.tofetch.Pop(); next != nil {
s.wantBlocks(ctx, []*cid.Cid{next})
}
}
}
func (s *Session) wantBlocks(ctx context.Context, ks []*cid.Cid) {
now := time.Now()
for _, c := range ks {
s.liveWants[c.KeyString()] = now
}
s.bs.wm.WantBlocks(ctx, ks, s.activePeersArr, s.id)
}
func (s *Session) cancel(keys []*cid.Cid) {
for _, c := range keys {
s.tofetch.Remove(c)
}
}
func (s *Session) cancelWants(keys []*cid.Cid) {
select {
case s.cancelKeys <- keys:
case <-s.ctx.Done():
}
}
func (s *Session) fetch(ctx context.Context, keys []*cid.Cid) {
select {
case s.newReqs <- keys:
case <-ctx.Done():
case <-s.ctx.Done():
}
}
// GetBlocks fetches a set of blocks within the context of this session and
// returns a channel that found blocks will be returned on. No order is
// guaranteed on the returned blocks.
func (s *Session) GetBlocks(ctx context.Context, keys []*cid.Cid) (<-chan blocks.Block, error) {
ctx = logging.ContextWithLoggable(ctx, s.uuid)
return getBlocksImpl(ctx, keys, s.notif, s.fetch, s.cancelWants)
}
// GetBlock fetches a single block
func (s *Session) GetBlock(parent context.Context, k *cid.Cid) (blocks.Block, error) {
return getBlock(parent, k, s.GetBlocks)
}
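// cidQueue is a FIFO queue of CIDs with lazy removal: Remove only drops the
// key from the membership set, and Pop skips entries that are no longer
// members.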
type cidQueue struct {
elems []*cid.Cid
eset *cid.Set
}
func newCidQueue() *cidQueue {
return &cidQueue{eset: cid.NewSet()}
}
func (cq *cidQueue) Pop() *cid.Cid {
for {
if len(cq.elems) == 0 {
return nil
}
out := cq.elems[0]
cq.elems = cq.elems[1:]
if cq.eset.Has(out) {
cq.eset.Remove(out)
return out
}
}
}
func (cq *cidQueue) Push(c *cid.Cid) {
if cq.eset.Visit(c) {
cq.elems = append(cq.elems, c)
}
}
func (cq *cidQueue) Remove(c *cid.Cid) {
cq.eset.Remove(c)
}
func (cq *cidQueue) Has(c *cid.Cid) bool {
return cq.eset.Has(c)
}
func (cq *cidQueue) Len() int {
return cq.eset.Len()
}
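Putting the session API together, here is an editorial sketch of fetching a single block through a short-lived session (the helper name is hypothetical; bs is an existing *Bitswap):

import (
	"context"

	bitswap "github.com/ipfs/go-ipfs/exchange/bitswap"
	blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
	cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
)

// fetchOne retrieves one block through its own session (illustrative helper).
func fetchOne(ctx context.Context, bs *bitswap.Bitswap, c *cid.Cid) (blocks.Block, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // ending the context tears the session down and cancels its live wants
	ses := bs.NewSession(ctx)
	return ses.GetBlock(ctx, c)
}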

View File

@ -1,325 +0,0 @@
package bitswap
import (
"context"
"fmt"
"testing"
"time"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
blocksutil "gx/ipfs/QmYqPGpZ9Yemr55xus9DiEztkns6Jti5XJ7hC94JbvkdqZ/go-ipfs-blocksutil"
tu "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
)
func TestBasicSessions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
block := bgen.Next()
inst := sesgen.Instances(2)
a := inst[0]
b := inst[1]
if err := b.Blockstore().Put(block); err != nil {
t.Fatal(err)
}
sesa := a.Exchange.NewSession(ctx)
blkout, err := sesa.GetBlock(ctx, block.Cid())
if err != nil {
t.Fatal(err)
}
if !blkout.Cid().Equals(block.Cid()) {
t.Fatal("got wrong block")
}
}
func assertBlockLists(got, exp []blocks.Block) error {
if len(got) != len(exp) {
return fmt.Errorf("got wrong number of blocks, %d != %d", len(got), len(exp))
}
h := cid.NewSet()
for _, b := range got {
h.Add(b.Cid())
}
for _, b := range exp {
if !h.Has(b.Cid()) {
return fmt.Errorf("didnt have: %s", b.Cid())
}
}
return nil
}
func TestSessionBetweenPeers(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
inst := sesgen.Instances(10)
blks := bgen.Blocks(101)
if err := inst[0].Blockstore().PutMany(blks); err != nil {
t.Fatal(err)
}
var cids []*cid.Cid
for _, blk := range blks {
cids = append(cids, blk.Cid())
}
ses := inst[1].Exchange.NewSession(ctx)
if _, err := ses.GetBlock(ctx, cids[0]); err != nil {
t.Fatal(err)
}
blks = blks[1:]
cids = cids[1:]
for i := 0; i < 10; i++ {
ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10])
if err != nil {
t.Fatal(err)
}
var got []blocks.Block
for b := range ch {
got = append(got, b)
}
if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil {
t.Fatal(err)
}
}
for _, is := range inst[2:] {
if is.Exchange.counters.messagesRecvd > 2 {
t.Fatal("uninvolved nodes should only receive two messages", is.Exchange.counters.messagesRecvd)
}
}
}
func TestSessionSplitFetch(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
inst := sesgen.Instances(11)
blks := bgen.Blocks(100)
for i := 0; i < 10; i++ {
if err := inst[i].Blockstore().PutMany(blks[i*10 : (i+1)*10]); err != nil {
t.Fatal(err)
}
}
var cids []*cid.Cid
for _, blk := range blks {
cids = append(cids, blk.Cid())
}
ses := inst[10].Exchange.NewSession(ctx)
ses.baseTickDelay = time.Millisecond * 10
for i := 0; i < 10; i++ {
ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10])
if err != nil {
t.Fatal(err)
}
var got []blocks.Block
for b := range ch {
got = append(got, b)
}
if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil {
t.Fatal(err)
}
}
}
func TestInterestCacheOverflow(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
blks := bgen.Blocks(2049)
inst := sesgen.Instances(2)
a := inst[0]
b := inst[1]
ses := a.Exchange.NewSession(ctx)
zeroch, err := ses.GetBlocks(ctx, []*cid.Cid{blks[0].Cid()})
if err != nil {
t.Fatal(err)
}
var restcids []*cid.Cid
for _, blk := range blks[1:] {
restcids = append(restcids, blk.Cid())
}
restch, err := ses.GetBlocks(ctx, restcids)
if err != nil {
t.Fatal(err)
}
// wait to ensure that all the above cids were added to the session's cache
time.Sleep(time.Millisecond * 50)
if err := b.Exchange.HasBlock(blks[0]); err != nil {
t.Fatal(err)
}
select {
case blk, ok := <-zeroch:
if ok && blk.Cid().Equals(blks[0].Cid()) {
// success!
} else {
t.Fatal("failed to get the block")
}
case <-restch:
t.Fatal("should not get anything on restch")
case <-time.After(time.Second * 5):
t.Fatal("timed out waiting for block")
}
}
func TestPutAfterSessionCacheEvict(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
blks := bgen.Blocks(2500)
inst := sesgen.Instances(1)
a := inst[0]
ses := a.Exchange.NewSession(ctx)
var allcids []*cid.Cid
for _, blk := range blks[1:] {
allcids = append(allcids, blk.Cid())
}
blkch, err := ses.GetBlocks(ctx, allcids)
if err != nil {
t.Fatal(err)
}
// wait to ensure that all the above cids were added to the session's cache
time.Sleep(time.Millisecond * 50)
if err := a.Exchange.HasBlock(blks[17]); err != nil {
t.Fatal(err)
}
select {
case <-blkch:
case <-time.After(time.Millisecond * 50):
t.Fatal("timed out waiting for block")
}
}
func TestMultipleSessions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
blk := bgen.Blocks(1)[0]
inst := sesgen.Instances(2)
a := inst[0]
b := inst[1]
ctx1, cancel1 := context.WithCancel(ctx)
ses := a.Exchange.NewSession(ctx1)
blkch, err := ses.GetBlocks(ctx, []*cid.Cid{blk.Cid()})
if err != nil {
t.Fatal(err)
}
cancel1()
ses2 := a.Exchange.NewSession(ctx)
blkch2, err := ses2.GetBlocks(ctx, []*cid.Cid{blk.Cid()})
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Millisecond * 10)
if err := b.Exchange.HasBlock(blk); err != nil {
t.Fatal(err)
}
select {
case <-blkch2:
case <-time.After(time.Second * 20):
t.Fatal("bad juju")
}
_ = blkch
}
func TestWantlistClearsOnCancel(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
vnet := getVirtualNetwork()
sesgen := NewTestSessionGenerator(vnet)
defer sesgen.Close()
bgen := blocksutil.NewBlockGenerator()
blks := bgen.Blocks(10)
var cids []*cid.Cid
for _, blk := range blks {
cids = append(cids, blk.Cid())
}
inst := sesgen.Instances(1)
a := inst[0]
ctx1, cancel1 := context.WithCancel(ctx)
ses := a.Exchange.NewSession(ctx1)
_, err := ses.GetBlocks(ctx, cids)
if err != nil {
t.Fatal(err)
}
cancel1()
if err := tu.WaitFor(ctx, func() error {
if len(a.Exchange.GetWantlist()) > 0 {
return fmt.Errorf("expected empty wantlist")
}
return nil
}); err != nil {
t.Fatal(err)
}
}

View File

@ -1,44 +0,0 @@
package bitswap
import (
"sort"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
)
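// Stat is a snapshot of bitswap's wantlist, connected peers, and transfer
// counters.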
type Stat struct {
ProvideBufLen int
Wantlist []*cid.Cid
Peers []string
BlocksReceived uint64
DataReceived uint64
BlocksSent uint64
DataSent uint64
DupBlksReceived uint64
DupDataReceived uint64
}
func (bs *Bitswap) Stat() (*Stat, error) {
st := new(Stat)
st.ProvideBufLen = len(bs.newBlocks)
st.Wantlist = bs.GetWantlist()
bs.counterLk.Lock()
c := bs.counters
st.BlocksReceived = c.blocksRecvd
st.DupBlksReceived = c.dupBlocksRecvd
st.DupDataReceived = c.dupDataRecvd
st.BlocksSent = c.blocksSent
st.DataSent = c.dataSent
st.DataReceived = c.dataRecvd
bs.counterLk.Unlock()
peers := bs.engine.Peers()
st.Peers = make([]string, 0, len(peers))
for _, p := range peers {
st.Peers = append(st.Peers, p.Pretty())
}
sort.Strings(st.Peers)
return st, nil
}

View File

@ -1,13 +0,0 @@
package bitswap
import (
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
"gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
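// Network abstracts a test network that can hand out bitswap network
// adapters for generated identities.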
type Network interface {
Adapter(testutil.Identity) bsnet.BitSwapNetwork
HasPeer(peer.ID) bool
}

View File

@ -1,98 +0,0 @@
package bitswap
import (
"context"
"sync"
"testing"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
blocks "gx/ipfs/QmVzK524a2VWLqyvtBeiHKsUAWYgeAk4DBeZoY7vpNPNRx/go-block-format"
mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock"
testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
func TestSendMessageAsyncButWaitForResponse(t *testing.T) {
net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
responderPeer := testutil.RandIdentityOrFatal(t)
waiter := net.Adapter(testutil.RandIdentityOrFatal(t))
responder := net.Adapter(responderPeer)
var wg sync.WaitGroup
wg.Add(1)
expectedStr := "received async"
responder.SetDelegate(lambda(func(
ctx context.Context,
fromWaiter peer.ID,
msgFromWaiter bsmsg.BitSwapMessage) {
msgToWaiter := bsmsg.New(true)
msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr)))
waiter.SendMessage(ctx, fromWaiter, msgToWaiter)
}))
waiter.SetDelegate(lambda(func(
ctx context.Context,
fromResponder peer.ID,
msgFromResponder bsmsg.BitSwapMessage) {
// TODO assert that this came from the correct peer and that the message contents are as expected
ok := false
for _, b := range msgFromResponder.Blocks() {
if string(b.RawData()) == expectedStr {
wg.Done()
ok = true
}
}
if !ok {
t.Fatal("Message not received from the responder")
}
}))
messageSentAsync := bsmsg.New(true)
messageSentAsync.AddBlock(blocks.NewBlock([]byte("data")))
errSending := waiter.SendMessage(
context.Background(), responderPeer.ID(), messageSentAsync)
if errSending != nil {
t.Fatal(errSending)
}
wg.Wait() // until waiter delegate function is executed
}
type receiverFunc func(ctx context.Context, p peer.ID,
incoming bsmsg.BitSwapMessage)
// lambda returns a Receiver instance given a receiver function
func lambda(f receiverFunc) bsnet.Receiver {
return &lambdaImpl{
f: f,
}
}
type lambdaImpl struct {
f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage)
}
func (lam *lambdaImpl) ReceiveMessage(ctx context.Context,
p peer.ID, incoming bsmsg.BitSwapMessage) {
lam.f(ctx, p, incoming)
}
func (lam *lambdaImpl) ReceiveError(err error) {
// TODO log error
}
func (lam *lambdaImpl) PeerConnected(p peer.ID) {
// TODO
}
func (lam *lambdaImpl) PeerDisconnected(peer.ID) {
// TODO
}

View File

@ -1,42 +0,0 @@
package bitswap
import (
"context"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
mockpeernet "gx/ipfs/QmY51bqSM5XgxQZqsBrQcRkKTnCb8EKpJpR9K6Qax7Njco/go-libp2p/p2p/net/mock"
mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock"
testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore"
)
type peernet struct {
mockpeernet.Mocknet
routingserver mockrouting.Server
}
func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Server) (Network, error) {
return &peernet{net, rs}, nil
}
func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork {
client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address())
if err != nil {
panic(err.Error())
}
routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
return bsnet.NewFromIpfsHost(client, routing)
}
func (pn *peernet) HasPeer(p peer.ID) bool {
for _, member := range pn.Mocknet.Peers() {
if p == member {
return true
}
}
return false
}
var _ Network = (*peernet)(nil)

View File

@ -1,253 +0,0 @@
package bitswap
import (
"context"
"errors"
"sync"
"time"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
ifconnmgr "gx/ipfs/QmXuucFcuvAWYAJfhHV2h4BYreHEAsLSsiquosiXeuduTN/go-libp2p-interface-connmgr"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
routing "gx/ipfs/QmZ383TySJVeZWzGnWui6pRcKyYZk9VkKTuW7tmKRWk5au/go-libp2p-routing"
mockrouting "gx/ipfs/QmbFRJeEmEU16y3BmKKaD4a9fm5oHsEAMHe2vSB1UnfLMi/go-ipfs-routing/mock"
logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log"
testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
var log = logging.Logger("bstestnet")
func VirtualNetwork(rs mockrouting.Server, d delay.D) Network {
return &network{
clients: make(map[peer.ID]*receiverQueue),
delay: d,
routingserver: rs,
conns: make(map[string]struct{}),
}
}
type network struct {
mu sync.Mutex
clients map[peer.ID]*receiverQueue
routingserver mockrouting.Server
delay delay.D
conns map[string]struct{}
}
type message struct {
from peer.ID
msg bsmsg.BitSwapMessage
shouldSend time.Time
}
// receiverQueue queues up a set of messages to be sent, and sends them *in
// order*, respecting each message's delay as much as in-order delivery
// allows
type receiverQueue struct {
receiver bsnet.Receiver
queue []*message
active bool
lk sync.Mutex
}
func (n *network) Adapter(p testutil.Identity) bsnet.BitSwapNetwork {
n.mu.Lock()
defer n.mu.Unlock()
client := &networkClient{
local: p.ID(),
network: n,
routing: n.routingserver.Client(p),
}
n.clients[p.ID()] = &receiverQueue{receiver: client}
return client
}
func (n *network) HasPeer(p peer.ID) bool {
n.mu.Lock()
defer n.mu.Unlock()
_, found := n.clients[p]
return found
}
// TODO should this be completely asynchronous?
// TODO what does the network layer do with errors received from services?
func (n *network) SendMessage(
ctx context.Context,
from peer.ID,
to peer.ID,
mes bsmsg.BitSwapMessage) error {
n.mu.Lock()
defer n.mu.Unlock()
receiver, ok := n.clients[to]
if !ok {
return errors.New("cannot locate peer on network")
}
// nb: deliberately drop the context here, since a context wouldn't actually
// be passed over the network in a real scenario
msg := &message{
from: from,
msg: mes,
shouldSend: time.Now().Add(n.delay.Get()),
}
receiver.enqueue(msg)
return nil
}
func (n *network) deliver(
r bsnet.Receiver, from peer.ID, message bsmsg.BitSwapMessage) error {
if message == nil || from == "" {
return errors.New("invalid input")
}
n.delay.Wait()
r.ReceiveMessage(context.TODO(), from, message)
return nil
}
type networkClient struct {
local peer.ID
bsnet.Receiver
network *network
routing routing.IpfsRouting
}
func (nc *networkClient) SendMessage(
ctx context.Context,
to peer.ID,
message bsmsg.BitSwapMessage) error {
return nc.network.SendMessage(ctx, nc.local, to, message)
}
// FindProvidersAsync returns a channel of providers for the given key
func (nc *networkClient) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID {
// NB: this function duplicates the PeerInfo -> ID transformation in the
// bitswap network adapter. Not to worry. This network client will be
// deprecated once the ipfsnet.Mock is added. The code below is only
// temporary.
out := make(chan peer.ID)
go func() {
defer close(out)
providers := nc.routing.FindProvidersAsync(ctx, k, max)
for info := range providers {
select {
case <-ctx.Done():
case out <- info.ID:
}
}
}()
return out
}
func (nc *networkClient) ConnectionManager() ifconnmgr.ConnManager {
return &ifconnmgr.NullConnMgr{}
}
type messagePasser struct {
net *network
target peer.ID
local peer.ID
ctx context.Context
}
func (mp *messagePasser) SendMsg(ctx context.Context, m bsmsg.BitSwapMessage) error {
return mp.net.SendMessage(ctx, mp.local, mp.target, m)
}
func (mp *messagePasser) Close() error {
return nil
}
func (mp *messagePasser) Reset() error {
return nil
}
func (n *networkClient) NewMessageSender(ctx context.Context, p peer.ID) (bsnet.MessageSender, error) {
return &messagePasser{
net: n.network,
target: p,
local: n.local,
ctx: ctx,
}, nil
}
// Provide provides the key to the network
func (nc *networkClient) Provide(ctx context.Context, k *cid.Cid) error {
return nc.routing.Provide(ctx, k, true)
}
func (nc *networkClient) SetDelegate(r bsnet.Receiver) {
nc.Receiver = r
}
func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error {
nc.network.mu.Lock()
otherClient, ok := nc.network.clients[p]
if !ok {
nc.network.mu.Unlock()
return errors.New("no such peer in network")
}
tag := tagForPeers(nc.local, p)
if _, ok := nc.network.conns[tag]; ok {
nc.network.mu.Unlock()
log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)")
return nil
}
nc.network.conns[tag] = struct{}{}
nc.network.mu.Unlock()
// TODO: add handling for disconnects
otherClient.receiver.PeerConnected(nc.local)
nc.Receiver.PeerConnected(p)
return nil
}
func (rq *receiverQueue) enqueue(m *message) {
rq.lk.Lock()
defer rq.lk.Unlock()
rq.queue = append(rq.queue, m)
if !rq.active {
rq.active = true
go rq.process()
}
}
func (rq *receiverQueue) process() {
for {
rq.lk.Lock()
if len(rq.queue) == 0 {
rq.active = false
rq.lk.Unlock()
return
}
m := rq.queue[0]
rq.queue = rq.queue[1:]
rq.lk.Unlock()
time.Sleep(time.Until(m.shouldSend))
rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg)
}
}
func tagForPeers(a, b peer.ID) string {
if a < b {
return string(a + b)
}
return string(b + a)
}
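
The receiverQueue logic above is worth isolating: each message is stamped with a shouldSend deadline at enqueue time, and a single drain goroutine sleeps until that deadline before delivering, so per-message delays are honored only as far as strict ordering allows. A self-contained, stdlib-only sketch of the same technique:

package main

import (
	"fmt"
	"sync"
	"time"
)

type message struct {
	payload    string
	shouldSend time.Time
}

// queue delivers messages strictly in enqueue order, sleeping until each
// message's deadline: a later message can be held up by an earlier one,
// but can never overtake it.
type queue struct {
	lk      sync.Mutex
	items   []message
	active  bool
	deliver func(string)
}

func (q *queue) enqueue(payload string, d time.Duration) {
	q.lk.Lock()
	defer q.lk.Unlock()
	q.items = append(q.items, message{payload, time.Now().Add(d)})
	if !q.active {
		q.active = true
		go q.process()
	}
}

func (q *queue) process() {
	for {
		q.lk.Lock()
		if len(q.items) == 0 {
			q.active = false
			q.lk.Unlock()
			return
		}
		m := q.items[0]
		q.items = q.items[1:]
		q.lk.Unlock()
		time.Sleep(time.Until(m.shouldSend))
		q.deliver(m.payload)
	}
}

func main() {
	q := &queue{deliver: func(s string) { fmt.Println(s) }}
	q.enqueue("first", 50*time.Millisecond)  // longer delay...
	q.enqueue("second", 10*time.Millisecond) // ...still delivered second
	time.Sleep(100 * time.Millisecond)
}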

View File

@ -1,110 +0,0 @@
package bitswap
import (
"context"
"time"
tn "github.com/ipfs/go-ipfs/exchange/bitswap/testnet"
delay "gx/ipfs/QmRJVNatYJwTAHgdSM1Xef9QVQ1Ch3XHdmcrykjP5Y4soL/go-ipfs-delay"
blockstore "gx/ipfs/QmadMhXJLHMFjpRmh85XjpmVDkEtQpNYEZNRpWRvYVLrvb/go-ipfs-blockstore"
testutil "gx/ipfs/QmcW4FGAt24fdK1jBgWQn3yP4R9ZLyWQqjozv9QK7epRhL/go-testutil"
p2ptestutil "gx/ipfs/QmcxUtMB5sJrXR3znSvkrDd2ghvwGM8rLRqwJiPUdgQwat/go-libp2p-netutil"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
ds "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore"
delayed "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/delayed"
ds_sync "gx/ipfs/QmeiCcJfDW1GJnWUArudsv5rQsihpi4oyddPhdqo3CfX6i/go-datastore/sync"
)
// WARNING: this uses RandTestBogusIdentity; DO NOT USE outside of tests!
func NewTestSessionGenerator(
net tn.Network) SessionGenerator {
ctx, cancel := context.WithCancel(context.Background())
return SessionGenerator{
net: net,
seq: 0,
ctx: ctx, // TODO take ctx as param to Next, Instances
cancel: cancel,
}
}
// TODO move this SessionGenerator to the core package and export it as the core generator
type SessionGenerator struct {
seq int
net tn.Network
ctx context.Context
cancel context.CancelFunc
}
func (g *SessionGenerator) Close() error {
g.cancel()
return nil // for Closer interface
}
func (g *SessionGenerator) Next() Instance {
g.seq++
p, err := p2ptestutil.RandTestBogusIdentity()
if err != nil {
panic("FIXME") // TODO change signature
}
return MkSession(g.ctx, g.net, p)
}
func (g *SessionGenerator) Instances(n int) []Instance {
var instances []Instance
for j := 0; j < n; j++ {
inst := g.Next()
instances = append(instances, inst)
}
for i, inst := range instances {
for j := i + 1; j < len(instances); j++ {
oinst := instances[j]
inst.Exchange.network.ConnectTo(context.Background(), oinst.Peer)
}
}
return instances
}
type Instance struct {
Peer peer.ID
Exchange *Bitswap
blockstore blockstore.Blockstore
blockstoreDelay delay.D
}
func (i *Instance) Blockstore() blockstore.Blockstore {
return i.blockstore
}
func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration {
return i.blockstoreDelay.Set(t)
}
// MkSession creates a test bitswap session.
//
// NB: It's easy to make mistakes by providing the same peer ID to two
// different sessions. To safeguard, use the SessionGenerator to generate
// sessions. It's just a much better idea.
func MkSession(ctx context.Context, net tn.Network, p testutil.Identity) Instance {
bsdelay := delay.Fixed(0)
adapter := net.Adapter(p)
dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay))
bstore, err := blockstore.CachedBlockstore(ctx,
blockstore.NewBlockstore(dstore), // dstore is already mutex-wrapped above
blockstore.DefaultCacheOpts())
if err != nil {
panic(err.Error()) // FIXME perhaps change signature and return error.
}
bs := New(ctx, adapter, bstore).(*Bitswap)
return Instance{
Peer: p.ID(),
Exchange: bs,
blockstore: bstore,
blockstoreDelay: bsdelay,
}
}
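
Taken together with the virtual testnet above, these helpers form a compact harness. A hedged sketch of how a test might wire two instances together (mockrouting.NewServer is an assumption from the broader test suite; it is not defined in this file):

// Sketch only: assumes the imports above plus
// mockrouting (gx .../go-ipfs-routing/mock).
func TestTwoInstances(t *testing.T) {
	vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	g := NewTestSessionGenerator(vnet)
	defer g.Close()

	insts := g.Instances(2) // two bitswap instances, connected to each other
	insts[0].SetBlockstoreLatency(10 * time.Millisecond)
	// insts[0].Exchange and insts[1].Exchange can now trade blocks.
}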

View File

@ -1,203 +0,0 @@
// Package wantlist implements an object for bitswap that contains the keys
// that a given peer wants.
package wantlist
import (
"sort"
"sync"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
)
type ThreadSafe struct {
lk sync.RWMutex
set map[string]*Entry
}
// Wantlist is not threadsafe.
type Wantlist struct {
set map[string]*Entry
}
type Entry struct {
Cid *cid.Cid
Priority int
SesTrk map[uint64]struct{}
}
// NewRefEntry creates a new reference tracked wantlist entry
func NewRefEntry(c *cid.Cid, p int) *Entry {
return &Entry{
Cid: c,
Priority: p,
SesTrk: make(map[uint64]struct{}),
}
}
type entrySlice []*Entry
func (es entrySlice) Len() int { return len(es) }
func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority }
func NewThreadSafe() *ThreadSafe {
return &ThreadSafe{
set: make(map[string]*Entry),
}
}
func New() *Wantlist {
return &Wantlist{
set: make(map[string]*Entry),
}
}
// Add adds the given cid to the wantlist with the specified priority, governed
// by the session ID 'ses'. If a cid is added under multiple session IDs, then
// it must be removed by each of those sessions before it is no longer 'in the
// wantlist'. Calls to Add are idempotent given the same arguments; subsequent
// calls with different priority values will not update the priority.
// TODO: think through priority changes here
// Add returns true only if the cid was not in the wantlist before this call;
// if it was already tracked (even by a different session), Add returns false.
func (w *ThreadSafe) Add(c *cid.Cid, priority int, ses uint64) bool {
w.lk.Lock()
defer w.lk.Unlock()
k := c.KeyString()
if e, ok := w.set[k]; ok {
e.SesTrk[ses] = struct{}{}
return false
}
w.set[k] = &Entry{
Cid: c,
Priority: priority,
SesTrk: map[uint64]struct{}{ses: struct{}{}},
}
return true
}
// AddEntry adds given Entry to the wantlist. For more information see Add method.
func (w *ThreadSafe) AddEntry(e *Entry, ses uint64) bool {
w.lk.Lock()
defer w.lk.Unlock()
k := e.Cid.KeyString()
if ex, ok := w.set[k]; ok {
ex.SesTrk[ses] = struct{}{}
return false
}
w.set[k] = e
e.SesTrk[ses] = struct{}{}
return true
}
// Remove removes the given cid from being tracked by the given session.
// 'true' is returned if this call to Remove removed the final session ID
// tracking the cid (i.e. true is returned iff this call caused the value of
// 'Contains(c)' to change from true to false).
func (w *ThreadSafe) Remove(c *cid.Cid, ses uint64) bool {
w.lk.Lock()
defer w.lk.Unlock()
k := c.KeyString()
e, ok := w.set[k]
if !ok {
return false
}
delete(e.SesTrk, ses)
if len(e.SesTrk) == 0 {
delete(w.set, k)
return true
}
return false
}
// Contains returns true if the given cid is in the wantlist tracked by one or
// more sessions
func (w *ThreadSafe) Contains(k *cid.Cid) (*Entry, bool) {
w.lk.RLock()
defer w.lk.RUnlock()
e, ok := w.set[k.KeyString()]
return e, ok
}
func (w *ThreadSafe) Entries() []*Entry {
w.lk.RLock()
defer w.lk.RUnlock()
es := make([]*Entry, 0, len(w.set))
for _, e := range w.set {
es = append(es, e)
}
return es
}
func (w *ThreadSafe) SortedEntries() []*Entry {
es := w.Entries()
sort.Sort(entrySlice(es))
return es
}
func (w *ThreadSafe) Len() int {
w.lk.RLock()
defer w.lk.RUnlock()
return len(w.set)
}
func (w *Wantlist) Len() int {
return len(w.set)
}
func (w *Wantlist) Add(c *cid.Cid, priority int) bool {
k := c.KeyString()
if _, ok := w.set[k]; ok {
return false
}
w.set[k] = &Entry{
Cid: c,
Priority: priority,
}
return true
}
func (w *Wantlist) AddEntry(e *Entry) bool {
k := e.Cid.KeyString()
if _, ok := w.set[k]; ok {
return false
}
w.set[k] = e
return true
}
func (w *Wantlist) Remove(c *cid.Cid) bool {
k := c.KeyString()
_, ok := w.set[k]
if !ok {
return false
}
delete(w.set, k)
return true
}
func (w *Wantlist) Contains(k *cid.Cid) (*Entry, bool) {
e, ok := w.set[k.KeyString()]
return e, ok
}
func (w *Wantlist) Entries() []*Entry {
es := make([]*Entry, 0, len(w.set))
for _, e := range w.set {
es = append(es, e)
}
return es
}
func (w *Wantlist) SortedEntries() []*Entry {
es := w.Entries()
sort.Sort(entrySlice(es))
return es
}
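
Note the comparator above: entrySlice.Less uses '>', so SortedEntries returns wants in descending priority order, highest first. A quick illustration (c1 and c2 are assumed, previously decoded cids):

// Illustration only: c1 and c2 are hypothetical *cid.Cid values.
wl := New()
wl.Add(c1, 1)  // low priority
wl.Add(c2, 10) // high priority
for _, e := range wl.SortedEntries() {
	fmt.Println(e.Priority) // prints 10, then 1
}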

View File

@ -1,104 +0,0 @@
package wantlist
import (
"testing"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
)
var testcids []*cid.Cid
func init() {
strs := []string{
"QmQL8LqkEgYXaDHdNYCG2mmpow7Sp8Z8Kt3QS688vyBeC7",
"QmcBDsdjgSXU7BP4A4V8LJCXENE5xVwnhrhRGVTJr9YCVj",
"QmQakgd2wDxc3uUF4orGdEm28zUT9Mmimp5pyPG2SFS9Gj",
}
for _, s := range strs {
c, err := cid.Decode(s)
if err != nil {
panic(err)
}
testcids = append(testcids, c)
}
}
type wli interface {
Contains(*cid.Cid) (*Entry, bool)
}
func assertHasCid(t *testing.T, w wli, c *cid.Cid) {
e, ok := w.Contains(c)
if !ok {
t.Fatal("expected to have ", c)
}
if !e.Cid.Equals(c) {
t.Fatal("returned entry had wrong cid value")
}
}
func assertNotHasCid(t *testing.T, w wli, c *cid.Cid) {
_, ok := w.Contains(c)
if ok {
t.Fatal("expected not to have ", c)
}
}
func TestBasicWantlist(t *testing.T) {
wl := New()
if !wl.Add(testcids[0], 5) {
t.Fatal("expected true")
}
assertHasCid(t, wl, testcids[0])
if !wl.Add(testcids[1], 4) {
t.Fatal("expected true")
}
assertHasCid(t, wl, testcids[0])
assertHasCid(t, wl, testcids[1])
if wl.Len() != 2 {
t.Fatal("should have had two items")
}
if wl.Add(testcids[1], 4) {
t.Fatal("add shouldnt report success on second add")
}
assertHasCid(t, wl, testcids[0])
assertHasCid(t, wl, testcids[1])
if wl.Len() != 2 {
t.Fatal("should have had two items")
}
if !wl.Remove(testcids[0]) {
t.Fatal("should have gotten true")
}
assertHasCid(t, wl, testcids[1])
if _, has := wl.Contains(testcids[0]); has {
t.Fatal("shouldnt have this cid")
}
}
func TestSesRefWantlist(t *testing.T) {
wl := NewThreadSafe()
if !wl.Add(testcids[0], 5, 1) {
t.Fatal("should have added")
}
assertHasCid(t, wl, testcids[0])
if wl.Remove(testcids[0], 2) {
t.Fatal("shouldnt have removed")
}
assertHasCid(t, wl, testcids[0])
if wl.Add(testcids[0], 5, 1) {
t.Fatal("shouldnt have added")
}
assertHasCid(t, wl, testcids[0])
if !wl.Remove(testcids[0], 1) {
t.Fatal("should have removed")
}
assertNotHasCid(t, wl, testcids[0])
}

View File

@ -1,400 +0,0 @@
package bitswap
import (
"context"
"sync"
"time"
engine "github.com/ipfs/go-ipfs/exchange/bitswap/decision"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network"
wantlist "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface"
)
type WantManager struct {
// sync channels for Run loop
incoming chan *wantSet
connectEvent chan peerStatus // notification channel for peers connecting/disconnecting
peerReqs chan chan []peer.ID // channel to request connected peers on
// synchronized by Run loop, only touch inside there
peers map[peer.ID]*msgQueue
wl *wantlist.ThreadSafe
bcwl *wantlist.ThreadSafe
network bsnet.BitSwapNetwork
ctx context.Context
cancel func()
wantlistGauge metrics.Gauge
sentHistogram metrics.Histogram
}
type peerStatus struct {
connect bool
peer peer.ID
}
func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantManager {
ctx, cancel := context.WithCancel(ctx)
wantlistGauge := metrics.NewCtx(ctx, "wantlist_total",
"Number of items in wantlist.").Gauge()
sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+
" this bitswap").Histogram(metricsBuckets)
return &WantManager{
incoming: make(chan *wantSet, 10),
connectEvent: make(chan peerStatus, 10),
peerReqs: make(chan chan []peer.ID),
peers: make(map[peer.ID]*msgQueue),
wl: wantlist.NewThreadSafe(),
bcwl: wantlist.NewThreadSafe(),
network: network,
ctx: ctx,
cancel: cancel,
wantlistGauge: wantlistGauge,
sentHistogram: sentHistogram,
}
}
type msgQueue struct {
p peer.ID
outlk sync.Mutex
out bsmsg.BitSwapMessage
network bsnet.BitSwapNetwork
wl *wantlist.ThreadSafe
sender bsnet.MessageSender
refcnt int
work chan struct{}
done chan struct{}
}
// WantBlocks adds the given cids to the wantlist, tracked by the given session
func (pm *WantManager) WantBlocks(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) {
log.Infof("want blocks: %s", ks)
pm.addEntries(ctx, ks, peers, false, ses)
}
// CancelWants removes the given cids from the wantlist, tracked by the given session
func (pm *WantManager) CancelWants(ctx context.Context, ks []*cid.Cid, peers []peer.ID, ses uint64) {
pm.addEntries(context.Background(), ks, peers, true, ses)
}
type wantSet struct {
entries []*bsmsg.Entry
targets []peer.ID
from uint64
}
func (pm *WantManager) addEntries(ctx context.Context, ks []*cid.Cid, targets []peer.ID, cancel bool, ses uint64) {
entries := make([]*bsmsg.Entry, 0, len(ks))
for i, k := range ks {
entries = append(entries, &bsmsg.Entry{
Cancel: cancel,
Entry: wantlist.NewRefEntry(k, kMaxPriority-i),
})
}
select {
case pm.incoming <- &wantSet{entries: entries, targets: targets, from: ses}:
case <-pm.ctx.Done():
case <-ctx.Done():
}
}
func (pm *WantManager) ConnectedPeers() []peer.ID {
resp := make(chan []peer.ID)
pm.peerReqs <- resp
return <-resp
}
func (pm *WantManager) SendBlock(ctx context.Context, env *engine.Envelope) {
// Blocks need to be sent synchronously to maintain proper backpressure
// throughout the network stack
defer env.Sent()
pm.sentHistogram.Observe(float64(len(env.Block.RawData())))
msg := bsmsg.New(false)
msg.AddBlock(env.Block)
log.Infof("Sending block %s to %s", env.Block, env.Peer)
err := pm.network.SendMessage(ctx, env.Peer, msg)
if err != nil {
log.Infof("sendblock error: %s", err)
}
}
func (pm *WantManager) startPeerHandler(p peer.ID) *msgQueue {
mq, ok := pm.peers[p]
if ok {
mq.refcnt++
return nil
}
mq = pm.newMsgQueue(p)
// new peer, we will want to give them our full wantlist
fullwantlist := bsmsg.New(true)
for _, e := range pm.bcwl.Entries() {
for k := range e.SesTrk {
mq.wl.AddEntry(e, k)
}
fullwantlist.AddEntry(e.Cid, e.Priority)
}
mq.out = fullwantlist
mq.work <- struct{}{}
pm.peers[p] = mq
go mq.runQueue(pm.ctx)
return mq
}
func (pm *WantManager) stopPeerHandler(p peer.ID) {
pq, ok := pm.peers[p]
if !ok {
// TODO: log error?
return
}
pq.refcnt--
if pq.refcnt > 0 {
return
}
close(pq.done)
delete(pm.peers, p)
}
func (mq *msgQueue) runQueue(ctx context.Context) {
for {
select {
case <-mq.work: // there is work to be done
mq.doWork(ctx)
case <-mq.done:
if mq.sender != nil {
mq.sender.Close()
}
return
case <-ctx.Done():
if mq.sender != nil {
mq.sender.Reset()
}
return
}
}
}
func (mq *msgQueue) doWork(ctx context.Context) {
// grab outgoing message
mq.outlk.Lock()
wlm := mq.out
if wlm == nil || wlm.Empty() {
mq.outlk.Unlock()
return
}
mq.out = nil
mq.outlk.Unlock()
// NB: only open a stream if we actually have data to send
if mq.sender == nil {
err := mq.openSender(ctx)
if err != nil {
log.Infof("cant open message sender to peer %s: %s", mq.p, err)
// TODO: cant connect, what now?
return
}
}
// send wantlist updates
for { // try to send this message until we fail.
err := mq.sender.SendMsg(ctx, wlm)
if err == nil {
return
}
log.Infof("bitswap send error: %s", err)
mq.sender.Reset()
mq.sender = nil
select {
case <-mq.done:
return
case <-ctx.Done():
return
case <-time.After(time.Millisecond * 100):
// wait 100ms in case disconnect notifications are still propagating
log.Warning("SendMsg errored but neither 'done' nor context.Done() were set")
}
err = mq.openSender(ctx)
if err != nil {
log.Infof("couldnt open sender again after SendMsg(%s) failed: %s", mq.p, err)
// TODO(why): what do we do now?
// I think the *right* answer is to probably put the message we're
// trying to send back, and then return to waiting for new work or
// a disconnect.
return
}
// TODO: Is this the same instance for the remote peer?
// If it's not, we should resend our entire wantlist to them
/*
if mq.sender.InstanceID() != mq.lastSeenInstanceID {
wlm = mq.getFullWantlistMessage()
}
*/
}
}
func (mq *msgQueue) openSender(ctx context.Context) error {
// allow ten minutes for connections; this includes looking them up in the
// DHT, dialing them, and handshaking
conctx, cancel := context.WithTimeout(ctx, time.Minute*10)
defer cancel()
err := mq.network.ConnectTo(conctx, mq.p)
if err != nil {
return err
}
nsender, err := mq.network.NewMessageSender(ctx, mq.p)
if err != nil {
return err
}
mq.sender = nsender
return nil
}
func (pm *WantManager) Connected(p peer.ID) {
select {
case pm.connectEvent <- peerStatus{peer: p, connect: true}:
case <-pm.ctx.Done():
}
}
func (pm *WantManager) Disconnected(p peer.ID) {
select {
case pm.connectEvent <- peerStatus{peer: p, connect: false}:
case <-pm.ctx.Done():
}
}
// TODO: use goprocess here once I trust it
func (pm *WantManager) Run() {
// NOTE: Do not open any streams or connections from anywhere in this
// event loop. Really, just don't do anything likely to block.
for {
select {
case ws := <-pm.incoming:
// is this a broadcast or not?
brdc := len(ws.targets) == 0
// add changes to our wantlist
for _, e := range ws.entries {
if e.Cancel {
if brdc {
pm.bcwl.Remove(e.Cid, ws.from)
}
if pm.wl.Remove(e.Cid, ws.from) {
pm.wantlistGauge.Dec()
}
} else {
if brdc {
pm.bcwl.AddEntry(e.Entry, ws.from)
}
if pm.wl.AddEntry(e.Entry, ws.from) {
pm.wantlistGauge.Inc()
}
}
}
// broadcast those wantlist changes
if len(ws.targets) == 0 {
for _, p := range pm.peers {
p.addMessage(ws.entries, ws.from)
}
} else {
for _, t := range ws.targets {
p, ok := pm.peers[t]
if !ok {
log.Infof("tried sending wantlist change to non-partner peer: %s", t)
continue
}
p.addMessage(ws.entries, ws.from)
}
}
case p := <-pm.connectEvent:
if p.connect {
pm.startPeerHandler(p.peer)
} else {
pm.stopPeerHandler(p.peer)
}
case req := <-pm.peerReqs:
peers := make([]peer.ID, 0, len(pm.peers))
for p := range pm.peers {
peers = append(peers, p)
}
req <- peers
case <-pm.ctx.Done():
return
}
}
}
func (wm *WantManager) newMsgQueue(p peer.ID) *msgQueue {
return &msgQueue{
done: make(chan struct{}),
work: make(chan struct{}, 1),
wl: wantlist.NewThreadSafe(),
network: wm.network,
p: p,
refcnt: 1,
}
}
func (mq *msgQueue) addMessage(entries []*bsmsg.Entry, ses uint64) {
var work bool
mq.outlk.Lock()
defer func() {
mq.outlk.Unlock()
if !work {
return
}
select {
case mq.work <- struct{}{}:
default:
}
}()
// if we have no message held, allocate a new one
if mq.out == nil {
mq.out = bsmsg.New(false)
}
// TODO: add a msg.Combine(...) method
// otherwise, combine the one we are holding with the
// one passed in
for _, e := range entries {
if e.Cancel {
if mq.wl.Remove(e.Cid, ses) {
work = true
mq.out.Cancel(e.Cid)
}
} else {
if mq.wl.Add(e.Cid, e.Priority, ses) {
work = true
mq.out.AddEntry(e.Cid, e.Priority)
}
}
}
}
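
addMessage relies on a common Go coalescing idiom: mutate the pending message under the lock, then do a non-blocking send into the size-one work channel, so any number of updates while the queue is busy collapses into a single wakeup, and the worker always sees the latest combined state. The idiom reduced to a self-contained sketch:

package main

import (
	"fmt"
	"sync"
	"time"
)

type worker struct {
	lk      sync.Mutex
	pending []string
	work    chan struct{} // buffered, size 1: at most one outstanding wakeup
}

// add records an item and signals the worker without ever blocking.
func (w *worker) add(item string) {
	w.lk.Lock()
	w.pending = append(w.pending, item)
	w.lk.Unlock()
	select {
	case w.work <- struct{}{}:
	default: // a wakeup is already queued; coalesce
	}
}

func (w *worker) run() {
	for range w.work {
		w.lk.Lock()
		batch := w.pending
		w.pending = nil
		w.lk.Unlock()
		fmt.Println("processing batch:", batch)
	}
}

func main() {
	w := &worker{work: make(chan struct{}, 1)}
	go w.run()
	w.add("a")
	w.add("b") // likely coalesced into the same batch as "a"
	time.Sleep(50 * time.Millisecond)
}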

View File

@ -1,253 +0,0 @@
package bitswap
import (
"context"
"math/rand"
"sync"
"time"
bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
procctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
cid "gx/ipfs/QmYVNvtQkeZ6AKSwDrjQTs432QtL6umrrK41EBq3cu7iSP/go-cid"
logging "gx/ipfs/QmcVVHfdyv15GVPk7NrxdWjh2hLVccXnoD8j2tyQShiXJb/go-log"
peer "gx/ipfs/QmdVrMn1LhB4ybb8hMVaMLXnA8XRSewMnK6YqXKXoTcRvN/go-libp2p-peer"
)
var TaskWorkerCount = 8
func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) {
// Start up a worker to handle block requests this node is making
px.Go(func(px process.Process) {
bs.providerQueryManager(ctx)
})
// Start up workers to handle requests from other nodes for the data on this node
for i := 0; i < TaskWorkerCount; i++ {
i := i
px.Go(func(px process.Process) {
bs.taskWorker(ctx, i)
})
}
// Start up a worker to manage periodically resending our wantlist out to peers
px.Go(func(px process.Process) {
bs.rebroadcastWorker(ctx)
})
// Start up a worker to manage sending out provides messages
px.Go(func(px process.Process) {
bs.provideCollector(ctx)
})
// Spawn a pool of workers to announce (provide) newly received blocks;
// consider increasing provideWorkerMax if providing blocks bottlenecks
// file transfers
px.Go(bs.provideWorker)
}
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
idmap := logging.LoggableMap{"ID": id}
defer log.Debug("bitswap task worker shutting down...")
for {
log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
select {
case nextEnvelope := <-bs.engine.Outbox():
select {
case envelope, ok := <-nextEnvelope:
if !ok {
continue
}
log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableF(func() map[string]interface{} {
return logging.LoggableMap{
"ID": id,
"Target": envelope.Peer.Pretty(),
"Block": envelope.Block.Cid().String(),
}
}))
// update the BS ledger to reflect sent message
// TODO: Should only track *useful* messages in ledger
outgoing := bsmsg.New(false)
outgoing.AddBlock(envelope.Block)
bs.engine.MessageSent(envelope.Peer, outgoing)
bs.wm.SendBlock(ctx, envelope)
bs.counterLk.Lock()
bs.counters.blocksSent++
bs.counters.dataSent += uint64(len(envelope.Block.RawData()))
bs.counterLk.Unlock()
case <-ctx.Done():
return
}
case <-ctx.Done():
return
}
}
}
func (bs *Bitswap) provideWorker(px process.Process) {
limit := make(chan struct{}, provideWorkerMax)
limitedGoProvide := func(k *cid.Cid, wid int) {
defer func() {
// replace token when done
<-limit
}()
ev := logging.LoggableMap{"ID": wid}
ctx := procctx.OnClosingContext(px) // derive ctx from px
defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, k).Done()
ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
defer cancel()
if err := bs.network.Provide(ctx, k); err != nil {
log.Warning(err)
}
}
// Worker spawner: reads from bs.provideKeys until it closes, spawning a
// rate-limited number of workers to handle each key.
for wid := 2; ; wid++ {
ev := logging.LoggableMap{"ID": 1}
log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev)
select {
case <-px.Closing():
return
case k, ok := <-bs.provideKeys:
if !ok {
log.Debug("provideKeys channel closed")
return
}
select {
case <-px.Closing():
return
case limit <- struct{}{}:
go limitedGoProvide(k, wid)
}
}
}
}
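
provideWorker bounds concurrency with a channel-as-semaphore: acquiring a slot is a send into a buffered channel of capacity provideWorkerMax, and each worker releases its slot by receiving on exit. The essentials, as a self-contained sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxWorkers = 3
	limit := make(chan struct{}, maxWorkers) // semaphore with maxWorkers slots

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		limit <- struct{}{} // acquire a slot; blocks while maxWorkers are running
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { <-limit }() // release the slot when done
			fmt.Println("worker", id)
		}(i)
	}
	wg.Wait()
}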
func (bs *Bitswap) provideCollector(ctx context.Context) {
defer close(bs.provideKeys)
var toProvide []*cid.Cid
var nextKey *cid.Cid
var keysOut chan *cid.Cid
for {
select {
case blkey, ok := <-bs.newBlocks:
if !ok {
log.Debug("newBlocks channel closed")
return
}
if keysOut == nil {
nextKey = blkey
keysOut = bs.provideKeys
} else {
toProvide = append(toProvide, blkey)
}
case keysOut <- nextKey:
if len(toProvide) > 0 {
nextKey = toProvide[0]
toProvide = toProvide[1:]
} else {
keysOut = nil
}
case <-ctx.Done():
return
}
}
}
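
provideCollector is an instance of the nil-channel idiom: a send on a nil channel blocks forever, so setting keysOut to nil disables the output case whenever nothing is pending, while surplus keys queue up in toProvide. The same unbounded-buffer pattern in isolation, as a self-contained sketch:

package main

import "fmt"

// bufferedPipe forwards values from in to out without ever blocking the
// sender, queueing internally; it closes out once in is closed and drained.
func bufferedPipe(in <-chan int, out chan<- int) {
	defer close(out)
	var queue []int
	var next int
	var send chan<- int // nil disables the send case below
	for {
		select {
		case v, ok := <-in:
			if !ok {
				// input closed: flush anything still pending, in order
				if send != nil {
					out <- next
				}
				for _, q := range queue {
					out <- q
				}
				return
			}
			if send == nil {
				next, send = v, out // enable the send case
			} else {
				queue = append(queue, v)
			}
		case send <- next:
			if len(queue) > 0 {
				next, queue = queue[0], queue[1:]
			} else {
				send = nil // nothing left; disable sending again
			}
		}
	}
}

func main() {
	in := make(chan int)
	out := make(chan int)
	go bufferedPipe(in, out)
	go func() {
		for i := 1; i <= 5; i++ {
			in <- i
		}
		close(in)
	}()
	for v := range out {
		fmt.Println(v)
	}
}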
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
ctx, cancel := context.WithCancel(parent)
defer cancel()
broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
defer broadcastSignal.Stop()
tick := time.NewTicker(10 * time.Second)
defer tick.Stop()
for {
log.Event(ctx, "Bitswap.Rebroadcast.idle")
select {
case <-tick.C:
n := bs.wm.wl.Len()
if n > 0 {
log.Debug(n, " keys in bitswap wantlist")
}
case <-broadcastSignal.C: // resend unfulfilled wantlist keys
log.Event(ctx, "Bitswap.Rebroadcast.active")
entries := bs.wm.wl.Entries()
if len(entries) == 0 {
continue
}
// TODO: come up with a better strategy for determining when to search
// for new providers for blocks.
i := rand.Intn(len(entries))
bs.findKeys <- &blockRequest{
Cid: entries[i].Cid,
Ctx: ctx,
}
case <-parent.Done():
return
}
}
}
func (bs *Bitswap) providerQueryManager(ctx context.Context) {
var activeLk sync.Mutex
kset := cid.NewSet()
for {
select {
case e := <-bs.findKeys:
select { // make sure it's not already cancelled
case <-e.Ctx.Done():
continue
default:
}
activeLk.Lock()
if kset.Has(e.Cid) {
activeLk.Unlock()
continue
}
kset.Add(e.Cid)
activeLk.Unlock()
go func(e *blockRequest) {
child, cancel := context.WithTimeout(e.Ctx, providerRequestTimeout)
defer cancel()
providers := bs.network.FindProvidersAsync(child, e.Cid, maxProvidersPerRequest)
wg := &sync.WaitGroup{}
for p := range providers {
wg.Add(1)
go func(p peer.ID) {
defer wg.Done()
err := bs.network.ConnectTo(child, p)
if err != nil {
log.Debug("failed to connect to provider %s: %s", p, err)
}
}(p)
}
wg.Wait()
activeLk.Lock()
kset.Remove(e.Cid)
activeLk.Unlock()
}(e)
case <-ctx.Done():
return
}
}
}

View File

@ -545,6 +545,12 @@
"hash": "Qmdue1XShFNi3mpizGx9NR9hyNEj6U2wEW93yGhKqKCFGN",
"name": "go-ipns",
"version": "0.1.4"
},
{
"author": "why",
"hash": "QmSLYFS88MpPsszqWdhGSxvHyoTnmaU4A74SD6KGib6Z3m",
"name": "go-bitswap",
"version": "1.0.0"
}
],
"gxVersion": "0.10.0",