
implement mark and sweep GC

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
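
The message above only names the change; the sketch below illustrates the general mark-and-sweep shape over a blockstore-like store. All types and helper names here are simplified assumptions for illustration, not the exact go-ipfs API.

package gcsketch

// Key and Blockstore are simplified stand-ins for the go-ipfs types; only
// what the sketch needs is declared here.
type Key string

type Blockstore interface {
	AllKeys() ([]Key, error) // enumerate every stored block (assumed helper)
	DeleteBlock(Key) error   // remove a single block
}

// mark walks outward from the root keys and records everything reachable.
// childLinks is an assumed helper that returns the keys a block links to.
func mark(roots []Key, childLinks func(Key) []Key) map[Key]bool {
	reachable := make(map[Key]bool)
	stack := append([]Key(nil), roots...)
	for len(stack) > 0 {
		k := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if reachable[k] {
			continue
		}
		reachable[k] = true
		stack = append(stack, childLinks(k)...)
	}
	return reachable
}

// sweep deletes every stored block that the mark phase did not reach.
func sweep(bs Blockstore, reachable map[Key]bool) error {
	keys, err := bs.AllKeys()
	if err != nil {
		return err
	}
	for _, k := range keys {
		if !reachable[k] {
			if err := bs.DeleteBlock(k); err != nil {
				return err
			}
		}
	}
	return nil
}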

don't GC blocks used by pinner

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
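
To honor the "don't GC blocks used by pinner" rule, the pinned keys become the roots of the mark phase. A rough sketch continuing the one above (the Pinner interface here is an assumption; the real pinner's method set may differ):

package gcsketch

// Pinner is a simplified stand-in for the go-ipfs pinner: direct pins,
// recursive pins, and indirect pins (children of recursive pins).
type Pinner interface {
	DirectKeys() []Key
	RecursiveKeys() []Key
	IndirectKeys() []Key
}

// pinnedRoots gathers every pinned key so the mark phase treats them as
// roots; everything reachable from a pin survives, everything else is swept.
func pinnedRoots(p Pinner) []Key {
	var roots []Key
	roots = append(roots, p.DirectKeys()...)
	roots = append(roots, p.RecursiveKeys()...)
	roots = append(roots, p.IndirectKeys()...)
	return roots
}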

comment GC algo

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>

add lock to blockstore to prevent GC from eating wanted blocks

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
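
A minimal sketch of the kind of lock this commit describes: adders and pinners take the shared side of an RWMutex, the collector takes the exclusive side, so a GC pass cannot delete blocks that are being added or pinned at that moment. Type and method names are assumptions for illustration, not the exact go-ipfs API.

package gcsketch

import "sync"

// GCLocker sketches the lock added to the blockstore.
type GCLocker struct {
	mu sync.RWMutex
}

// PinLock is held for the duration of an add/pin; the returned func releases it.
func (l *GCLocker) PinLock() func() {
	l.mu.RLock()
	return l.mu.RUnlock
}

// GCLock is held for the duration of a GC pass; the returned func releases it.
func (l *GCLocker) GCLock() func() {
	l.mu.Lock()
	return l.mu.Unlock
}

In this sketch a caller would write defer locker.PinLock()() around an add, and the collector would wrap a sweep in defer locker.GCLock()().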

improve FetchGraph

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
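
For context, a FetchGraph-style helper pulls the entire graph under a root into local storage. The sketch below is an illustrative traversal, not the actual FetchGraph implementation; getLinks is an assumed callback that fetches a node and returns the keys it links to.

package gcsketch

// fetchGraph resolves every link reachable from root, breadth-first,
// skipping keys it has already visited.
func fetchGraph(root Key, getLinks func(Key) ([]Key, error)) error {
	seen := make(map[Key]bool)
	queue := []Key{root}
	for len(queue) > 0 {
		k := queue[0]
		queue = queue[1:]
		if seen[k] {
			continue
		}
		seen[k] = true
		children, err := getLinks(k)
		if err != nil {
			return err
		}
		queue = append(queue, children...)
	}
	return nil
}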

separate interfaces for blockstore and GCBlockstore

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
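
The split can be pictured as below: a plain Blockstore for storage, and a GCBlockstore that additionally exposes the locking the collector and pinner coordinate on, so consumers that only need storage take the narrower interface. Method sets here are simplified assumptions, not the exact go-ipfs signatures.

package storesketch

type Key string

type Block interface {
	Key() Key
}

// Blockstore is the plain storage interface most consumers need.
type Blockstore interface {
	Has(Key) (bool, error)
	Get(Key) (Block, error)
	Put(Block) error
	DeleteBlock(Key) error
}

// GCBlockstore adds the GC/pin locking on top of plain storage; only the
// pinner and the collector need this wider interface.
type GCBlockstore interface {
	Blockstore
	PinLock() func() // held while blocks are being added or pinned
	GCLock() func()  // held while a GC pass runs
}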

reintroduce indirect pinning, add enumerateChildren dag method

License: MIT
Signed-off-by: Jeromy <jeromyj@gmail.com>
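
The sketch below shows the shape of an enumerate-children traversal and how indirect pinning can sit on top of it: the pinner records each visited child as indirectly pinned. Types and the function signature are pared-down assumptions, not the exact merkledag API.

package dagsketch

import "context"

type Key string

// Node and DAGService are simplified stand-ins for the merkledag types.
type Node struct {
	Links []Key
}

type DAGService interface {
	Get(ctx context.Context, k Key) (*Node, error)
}

// enumerateChildren walks every descendant of root, calling visit for each
// child key; visit returns false to stop descending into a subtree (for
// example when the key has already been seen or pinned).
func enumerateChildren(ctx context.Context, ds DAGService, root *Node, visit func(Key) bool) error {
	for _, k := range root.Links {
		if !visit(k) {
			continue
		}
		child, err := ds.Get(ctx, k)
		if err != nil {
			return err
		}
		if err := enumerateChildren(ctx, ds, child, visit); err != nil {
			return err
		}
	}
	return nil
}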
This commit is contained in:
Jeromy
2015-06-23 16:01:32 -07:00
parent 27f34b4311
commit b12ee40aba
18 changed files with 200 additions and 313 deletions

View File

@@ -2,30 +2,18 @@ package helpers
import (
dag "github.com/ipfs/go-ipfs/merkledag"
"github.com/ipfs/go-ipfs/pin"
)
// NodeCB is callback function for dag generation
// the `last` flag signifies whether or not this is the last
// (top-most root) node being added. useful for things like
// only pinning the first node recursively.
type NodeCB func(node *dag.Node, last bool) error
var nilFunc NodeCB = func(_ *dag.Node, _ bool) error { return nil }
// DagBuilderHelper wraps together a bunch of objects needed to
// efficiently create unixfs dag trees
type DagBuilderHelper struct {
dserv dag.DAGService
mp pin.Pinner
in <-chan []byte
errs <-chan error
recvdErr error
nextData []byte // the next item to return.
maxlinks int
ncb NodeCB
batch *dag.Batch
batch *dag.Batch
}
type DagBuilderParams struct {
@@ -34,25 +22,16 @@ type DagBuilderParams struct {
// DAGService to write blocks to (required)
Dagserv dag.DAGService
// Callback for each block added
NodeCB NodeCB
}
// Generate a new DagBuilderHelper from the given params, using 'in' as a
// data source
func (dbp *DagBuilderParams) New(in <-chan []byte, errs <-chan error) *DagBuilderHelper {
ncb := dbp.NodeCB
if ncb == nil {
ncb = nilFunc
}
return &DagBuilderHelper{
dserv: dbp.Dagserv,
in: in,
errs: errs,
maxlinks: dbp.Maxlinks,
ncb: ncb,
batch: dbp.Dagserv.Batch(),
}
}
@@ -106,7 +85,6 @@ func (db *DagBuilderHelper) GetDagServ() dag.DAGService {
// FillNodeLayer will add datanodes as children to the given node until
// at most db.indirSize nodes are added
//
// warning: **children** pinned indirectly, but input node IS NOT pinned.
func (db *DagBuilderHelper) FillNodeLayer(node *UnixfsNode) error {
// while we have room AND we're not done
@@ -150,12 +128,6 @@ func (db *DagBuilderHelper) Add(node *UnixfsNode) (*dag.Node, error) {
return nil, err
}
// node callback
err = db.ncb(dn, true)
if err != nil {
return nil, err
}
return dn, nil
}
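
The hunks above drop the pinner and the NodeCB callback from DagBuilderHelper, so a caller now constructs the builder from just a DAG service and a fan-out. A minimal sketch of the post-change construction, assuming ds is an existing dag.DAGService and blkch/errch come from chunk.Chan:

dbp := h.DagBuilderParams{
	Dagserv:  ds,                    // DAG service to write blocks to
	Maxlinks: h.DefaultLinksPerBlock, // fan-out per node
}
db := dbp.New(blkch, errch) // no NodeCB: pinning now happens after import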

View File

@@ -4,10 +4,8 @@ import (
"fmt"
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
key "github.com/ipfs/go-ipfs/blocks/key"
chunk "github.com/ipfs/go-ipfs/importer/chunk"
dag "github.com/ipfs/go-ipfs/merkledag"
"github.com/ipfs/go-ipfs/pin"
ft "github.com/ipfs/go-ipfs/unixfs"
)
@@ -108,21 +106,11 @@ func (n *UnixfsNode) AddChild(child *UnixfsNode, db *DagBuilderHelper) error {
return err
}
// Pin the child node indirectly
err = db.ncb(childnode, false)
if err != nil {
return err
}
return nil
}
// Removes the child node at the given index
func (n *UnixfsNode) RemoveChild(index int, dbh *DagBuilderHelper) {
k := key.Key(n.node.Links[index].Hash)
if dbh.mp != nil {
dbh.mp.RemovePinWithMode(k, pin.Indirect)
}
n.ufmt.RemoveBlockSize(index)
n.node.Links = append(n.node.Links[:index], n.node.Links[index+1:]...)
}

View File

@@ -12,7 +12,6 @@ import (
h "github.com/ipfs/go-ipfs/importer/helpers"
trickle "github.com/ipfs/go-ipfs/importer/trickle"
dag "github.com/ipfs/go-ipfs/merkledag"
"github.com/ipfs/go-ipfs/pin"
logging "github.com/ipfs/go-ipfs/vendor/QmQg1J6vikuXF9oDvm4wpdeAUvvkVEKW1EYDw9HhTMnP2b/go-log"
)
@@ -20,7 +19,7 @@ var log = logging.Logger("importer")
// Builds a DAG from the given file, writing created blocks to disk as they are
// created
func BuildDagFromFile(fpath string, ds dag.DAGService, mp pin.Pinner) (*dag.Node, error) {
func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.Node, error) {
stat, err := os.Lstat(fpath)
if err != nil {
return nil, err
@@ -36,60 +35,29 @@ func BuildDagFromFile(fpath string, ds dag.DAGService, mp pin.Pinner) (*dag.Node
}
defer f.Close()
return BuildDagFromReader(ds, chunk.NewSizeSplitter(f, chunk.DefaultBlockSize), BasicPinnerCB(mp))
return BuildDagFromReader(ds, chunk.NewSizeSplitter(f, chunk.DefaultBlockSize))
}
func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter, ncb h.NodeCB) (*dag.Node, error) {
func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
// Start the splitter
blkch, errch := chunk.Chan(spl)
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
NodeCB: ncb,
}
return bal.BalancedLayout(dbp.New(blkch, errch))
}
func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter, ncb h.NodeCB) (*dag.Node, error) {
func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
// Start the splitter
blkch, errch := chunk.Chan(spl)
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
NodeCB: ncb,
}
return trickle.TrickleLayout(dbp.New(blkch, errch))
}
func BasicPinnerCB(p pin.Pinner) h.NodeCB {
return func(n *dag.Node, last bool) error {
k, err := n.Key()
if err != nil {
return err
}
if last {
p.PinWithMode(k, pin.Recursive)
return p.Flush()
} else {
p.PinWithMode(k, pin.Indirect)
return nil
}
}
}
func PinIndirectCB(p pin.Pinner) h.NodeCB {
return func(n *dag.Node, last bool) error {
k, err := n.Key()
if err != nil {
return err
}
p.PinWithMode(k, pin.Indirect)
return nil
}
}
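
With the pinner-callback plumbing gone, the importer entry points take only a DAG service and a splitter; callers that want the result pinned do it themselves afterwards. A rough sketch of the new call pattern (ds, r and pinner are assumed to already exist; the pinning calls mirror the removed BasicPinnerCB above):

// Build the DAG with the callback-free signature introduced in this diff,
// then pin the returned root explicitly.
nd, err := BuildDagFromReader(ds, chunk.DefaultSplitter(r))
if err != nil {
	return err
}
k, err := nd.Key()
if err != nil {
	return err
}
pinner.PinWithMode(k, pin.Recursive) // record the root as a recursive pin
if err := pinner.Flush(); err != nil { // persist the pin set
	return err
}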

View File

@@ -17,7 +17,7 @@ import (
func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) {
ds := mdtest.Mock()
r := io.LimitReader(u.NewTimeSeededRand(), size)
nd, err := BuildDagFromReader(ds, chunk.NewSizeSplitter(r, blksize), nil)
nd, err := BuildDagFromReader(ds, chunk.NewSizeSplitter(r, blksize))
if err != nil {
t.Fatal(err)
}
@@ -27,7 +27,7 @@ func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAG
func getTrickleDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) {
ds := mdtest.Mock()
r := io.LimitReader(u.NewTimeSeededRand(), size)
nd, err := BuildTrickleDagFromReader(ds, chunk.NewSizeSplitter(r, blksize), nil)
nd, err := BuildTrickleDagFromReader(ds, chunk.NewSizeSplitter(r, blksize))
if err != nil {
t.Fatal(err)
}
@@ -40,7 +40,7 @@ func TestBalancedDag(t *testing.T) {
u.NewTimeSeededRand().Read(buf)
r := bytes.NewReader(buf)
nd, err := BuildDagFromReader(ds, chunk.DefaultSplitter(r), nil)
nd, err := BuildDagFromReader(ds, chunk.DefaultSplitter(r))
if err != nil {
t.Fatal(err)
}