Mirror of https://github.com/ipfs/kubo.git

Remove chunk channels

License: MIT
Signed-off-by: rht <rhtbot@gmail.com>
Author:    rht <rhtbot@gmail.com>
Date:      2015-11-15 18:50:05 +07:00
Committer: Jeromy
Commit:    b4a3854151 (parent a961b1f7ac)

5 changed files with 23 additions and 53 deletions
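
In short: chunk.Chan previously adapted a chunk.Splitter into a ([]byte channel, error channel) pair that DagBuilderHelper and the tests consumed; after this commit the consumers pull chunks straight from the splitter with NextBytes(). Below is a minimal, self-contained sketch of the two consumption styles; the Splitter interface, sizeSplitter, and chanAdapter are simplified stand-ins for illustration, not the actual go-ipfs types.

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // Splitter is a stand-in for go-ipfs's chunk.Splitter: NextBytes
    // returns the next chunk of input, and io.EOF once exhausted.
    type Splitter interface {
        NextBytes() ([]byte, error)
    }

    // sizeSplitter mimics chunk.NewSizeSplitter: fixed-size chunks from a reader.
    type sizeSplitter struct {
        r    io.Reader
        size int
    }

    func (s *sizeSplitter) NextBytes() ([]byte, error) {
        buf := make([]byte, s.size)
        n, err := io.ReadFull(s.r, buf)
        switch {
        case n == 0:
            return nil, io.EOF // input exhausted
        case err == io.ErrUnexpectedEOF:
            err = nil // a short final chunk is not an error
        }
        return buf[:n], err
    }

    // chanAdapter shows the old style this commit removes: wrap the
    // splitter in a goroutine that feeds a channel (cf. chunk.Chan).
    func chanAdapter(spl Splitter) (<-chan []byte, <-chan error) {
        out := make(chan []byte)
        errs := make(chan error, 1)
        go func() {
            defer close(out)
            for {
                blk, err := spl.NextBytes()
                if err != nil {
                    if err != io.EOF {
                        errs <- err
                    }
                    return
                }
                out <- blk
            }
        }()
        return out, errs
    }

    func main() {
        data := make([]byte, 10)

        // Old style: consume via channels until the block channel closes.
        blkch, _ := chanAdapter(&sizeSplitter{r: bytes.NewReader(data), size: 4})
        for blk := range blkch {
            fmt.Println("chan:", len(blk)) // 4, 4, 2
        }

        // New style: pull directly, as DagBuilderHelper now does.
        spl := &sizeSplitter{r: bytes.NewReader(data), size: 4}
        for {
            blk, err := spl.NextBytes()
            if err == io.EOF {
                break
            } else if err != nil {
                panic(err)
            }
            fmt.Println("pull:", len(blk)) // 4, 4, 2
        }
    }

The upshot of the change is that end-of-input becomes a plain return value instead of a channel close, and the per-import goroutine and channel handoff disappear.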


@@ -22,15 +22,12 @@ import (
 // TODO: extract these tests and more as a generic layout test suite
 func buildTestDag(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
-    // Start the splitter
-    blkch, errs := chunk.Chan(spl)
-
     dbp := h.DagBuilderParams{
         Dagserv:  ds,
         Maxlinks: h.DefaultLinksPerBlock,
     }
 
-    return BalancedLayout(dbp.New(blkch, errs))
+    return BalancedLayout(dbp.New(spl))
 }
 
 func getTestDag(t *testing.T, ds dag.DAGService, size int64, blksize int64) (*dag.Node, []byte) {


@@ -1,6 +1,7 @@
 package helpers
 
 import (
+    "github.com/ipfs/go-ipfs/importer/chunk"
     dag "github.com/ipfs/go-ipfs/merkledag"
 )
@@ -8,8 +9,7 @@ import (
 // efficiently create unixfs dag trees
 type DagBuilderHelper struct {
     dserv    dag.DAGService
-    in       <-chan []byte
-    errs     <-chan error
+    spl      chunk.Splitter
     recvdErr error
     nextData []byte // the next item to return.
     maxlinks int
@@ -24,45 +24,35 @@ type DagBuilderParams struct {
     Dagserv dag.DAGService
 }
 
-// Generate a new DagBuilderHelper from the given params, using 'in' as a
-// data source
-func (dbp *DagBuilderParams) New(in <-chan []byte, errs <-chan error) *DagBuilderHelper {
+// Generate a new DagBuilderHelper from the given params, which data source comes
+// from chunks object
+func (dbp *DagBuilderParams) New(spl chunk.Splitter) *DagBuilderHelper {
     return &DagBuilderHelper{
         dserv:    dbp.Dagserv,
-        in:       in,
-        errs:     errs,
+        spl:      spl,
         maxlinks: dbp.Maxlinks,
         batch:    dbp.Dagserv.Batch(),
     }
 }
 
-// prepareNext consumes the next item from the channel and puts it
+// prepareNext consumes the next item from the splitter and puts it
 // in the nextData field. it is idempotent-- if nextData is full
 // it will do nothing.
-//
-// i realized that building the dag becomes _a lot_ easier if we can
-// "peek" the "are done yet?" (i.e. not consume it from the channel)
 func (db *DagBuilderHelper) prepareNext() {
-    if db.in == nil {
-        // if our input is nil, there is "nothing to do". we're done.
-        // as if there was no data at all. (a sort of zero-value)
-        return
-    }
-
-    // if we already have data waiting to be consumed, we're ready.
+    // if we already have data waiting to be consumed, we're ready
     if db.nextData != nil {
         return
     }
 
-    // if it's closed, nextData will be correctly set to nil, signaling
-    // that we're done consuming from the channel.
-    db.nextData = <-db.in
+    // TODO: handle err (which wasn't handled either when the splitter was channeled)
+    db.nextData, _ = db.spl.NextBytes()
 }
 
 // Done returns whether or not we're done consuming the incoming data.
 func (db *DagBuilderHelper) Done() bool {
     // ensure we have an accurate perspective on data
     // as `done` this may be called before `next`.
+    //db.prepareNext() // idempotent
     db.prepareNext() // idempotent
     return db.nextData == nil
 }
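
The Done/prepareNext pair above implements a one-chunk lookahead ("peek"): callers can ask whether input is exhausted without consuming a chunk. A minimal sketch of that pattern, reusing the hypothetical Splitter interface from the sketch near the top (the real DagBuilderHelper carries more state, and per the TODO in this diff, the NextBytes error is still discarded):

    // peeker sketches DagBuilderHelper's lookahead (hypothetical type).
    type peeker struct {
        spl  Splitter // the Splitter interface sketched earlier
        next []byte   // buffered chunk; nil means nothing buffered
    }

    // prepare buffers the next chunk if none is waiting; idempotent.
    func (p *peeker) prepare() {
        if p.next != nil {
            return
        }
        p.next, _ = p.spl.NextBytes() // error dropped, as in the commit's TODO
    }

    // Done reports whether input is exhausted, without consuming a chunk.
    func (p *peeker) Done() bool {
        p.prepare()
        return p.next == nil
    }

    // Next hands out the buffered chunk and clears the buffer.
    func (p *peeker) Next() []byte {
        p.prepare()
        d := p.next
        p.next = nil
        return d
    }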


@@ -39,25 +39,19 @@ func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.Node, error) {
 }
 
 func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
-    // Start the splitter
-    blkch, errch := chunk.Chan(spl)
-
     dbp := h.DagBuilderParams{
         Dagserv:  ds,
         Maxlinks: h.DefaultLinksPerBlock,
     }
 
-    return bal.BalancedLayout(dbp.New(blkch, errch))
+    return bal.BalancedLayout(dbp.New(spl))
 }
 
 func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
-    // Start the splitter
-    blkch, errch := chunk.Chan(spl)
-
     dbp := h.DagBuilderParams{
         Dagserv:  ds,
         Maxlinks: h.DefaultLinksPerBlock,
     }
 
-    return trickle.TrickleLayout(dbp.New(blkch, errch))
+    return trickle.TrickleLayout(dbp.New(spl))
 }
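
With the adapter gone, a caller hands the splitter straight to the layout builders. A hypothetical wrapper showing the new call shape, using only identifiers that appear in this diff (assumes an existing dag.DAGService; the 4096-byte chunk size mirrors expandSparse further down, not the importer's default):

    // importSketch shows the new call shape (hypothetical helper; error
    // handling is left to the caller).
    func importSketch(ds dag.DAGService, data []byte) (*dag.Node, error) {
        spl := chunk.NewSizeSplitter(bytes.NewReader(data), 4096)
        return BuildDagFromReader(ds, spl)
    }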


@@ -21,15 +21,12 @@ import (
 )
 
 func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.Node, error) {
-    // Start the splitter
-    blkch, errs := chunk.Chan(spl)
-
     dbp := h.DagBuilderParams{
         Dagserv:  ds,
         Maxlinks: h.DefaultLinksPerBlock,
     }
 
-    nd, err := TrickleLayout(dbp.New(blkch, errs))
+    nd, err := TrickleLayout(dbp.New(spl))
     if err != nil {
         return nil, err
     }
@@ -441,10 +438,9 @@ func TestAppend(t *testing.T) {
     }
 
     r := bytes.NewReader(should[nbytes/2:])
-    blks, errs := chunk.Chan(chunk.NewSizeSplitter(r, 500))
 
     ctx := context.Background()
-    nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
+    nnode, err := TrickleAppend(ctx, nd, dbp.New(chunk.NewSizeSplitter(r, 500)))
     if err != nil {
         t.Fatal(err)
     }
@@ -494,9 +490,8 @@ func TestMultipleAppends(t *testing.T) {
     ctx := context.Background()
     for i := 0; i < len(should); i++ {
-        blks, errs := chunk.Chan(spl(bytes.NewReader(should[i : i+1])))
-        nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
+        nnode, err := TrickleAppend(ctx, nd, dbp.New(spl(bytes.NewReader(should[i:i+1]))))
         if err != nil {
             t.Fatal(err)
         }
@@ -538,17 +533,13 @@ func TestAppendSingleBytesToEmpty(t *testing.T) {
     spl := chunk.SizeSplitterGen(500)
 
-    blks, errs := chunk.Chan(spl(bytes.NewReader(data[:1])))
-
     ctx := context.Background()
-    nnode, err := TrickleAppend(ctx, nd, dbp.New(blks, errs))
+    nnode, err := TrickleAppend(ctx, nd, dbp.New(spl(bytes.NewReader(data[:1]))))
     if err != nil {
         t.Fatal(err)
     }
 
-    blks, errs = chunk.Chan(spl(bytes.NewReader(data[1:])))
-    nnode, err = TrickleAppend(ctx, nnode, dbp.New(blks, errs))
+    nnode, err = TrickleAppend(ctx, nnode, dbp.New(spl(bytes.NewReader(data[1:]))))
     if err != nil {
         t.Fatal(err)
     }


@@ -103,8 +103,7 @@ func (zr zeroReader) Read(b []byte) (int, error) {
 func (dm *DagModifier) expandSparse(size int64) error {
     r := io.LimitReader(zeroReader{}, size)
     spl := chunk.NewSizeSplitter(r, 4096)
-    blks, errs := chunk.Chan(spl)
-    nnode, err := dm.appendData(dm.curNode, blks, errs)
+    nnode, err := dm.appendData(dm.curNode, spl)
     if err != nil {
         return err
     }
@@ -191,8 +190,7 @@ func (dm *DagModifier) Sync() error {
     // need to write past end of current dag
     if !done {
-        blks, errs := chunk.Chan(dm.splitter(dm.wrBuf))
-        nd, err = dm.appendData(dm.curNode, blks, errs)
+        nd, err = dm.appendData(dm.curNode, dm.splitter(dm.wrBuf))
         if err != nil {
             return err
         }
@@ -286,13 +284,13 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
 }
 
 // appendData appends the blocks from the given chan to the end of this dag
-func (dm *DagModifier) appendData(node *mdag.Node, blks <-chan []byte, errs <-chan error) (*mdag.Node, error) {
+func (dm *DagModifier) appendData(node *mdag.Node, spl chunk.Splitter) (*mdag.Node, error) {
     dbp := &help.DagBuilderParams{
         Dagserv:  dm.dagserv,
         Maxlinks: help.DefaultLinksPerBlock,
     }
 
-    return trickle.TrickleAppend(dm.ctx, node, dbp.New(blks, errs))
+    return trickle.TrickleAppend(dm.ctx, node, dbp.New(spl))
 }
 
 // Read data from this dag starting at the current offset