1
0
mirror of https://github.com/ipfs/kubo.git synced 2025-09-10 05:52:20 +08:00

race fix: pinner loads with a threadsafe datastore

All the datastores used by pinners and the like should be
mutex-wrapped. One issue with changing them all from
ds.Datastore to ds.ThreadSafeDatastore is that we wrap the
incoming ds.ThreadSafeDatastore with other datastores, which
do not implement that interface. Re-wrapping causes double
locking (which may be acceptable, but is wasteful) — any ideas?
This commit is contained in:
Juan Batiz-Benet
2015-01-11 21:31:19 -08:00
parent 4af5d85fac
commit 16690d4af2
4 changed files with 9 additions and 9 deletions

View File

@@ -137,8 +137,8 @@ type dagservAndPinner struct {
 }

 func getDagservAndPinner(t *testing.T) dagservAndPinner {
-	db := ds.NewMapDatastore()
-	bs := bstore.NewBlockstore(dssync.MutexWrap(db))
+	db := dssync.MutexWrap(ds.NewMapDatastore())
+	bs := bstore.NewBlockstore(db)
 	blockserv, err := bserv.New(bs, offline.Exchange(bs))
 	if err != nil {
 		t.Fatal(err)

View File

@@ -28,8 +28,8 @@ type dagservAndPinner struct {
 }

 func getDagservAndPinner(t *testing.T) dagservAndPinner {
-	db := ds.NewMapDatastore()
-	bs := bstore.NewBlockstore(dssync.MutexWrap(db))
+	db := dssync.MutexWrap(ds.NewMapDatastore())
+	bs := bstore.NewBlockstore(db)
 	blockserv, err := bserv.New(bs, offline.Exchange(bs))
 	if err != nil {
 		t.Fatal(err)

View File

@@ -53,11 +53,11 @@ type pinner struct {
 	directPin set.BlockSet
 	indirPin  *indirectPin
 	dserv     mdag.DAGService
-	dstore    ds.Datastore
+	dstore    ds.ThreadSafeDatastore
 }

 // NewPinner creates a new pinner using the given datastore as a backend
-func NewPinner(dstore ds.Datastore, serv mdag.DAGService) Pinner {
+func NewPinner(dstore ds.ThreadSafeDatastore, serv mdag.DAGService) Pinner {

 	// Load set from given datastore...
 	rcds := nsds.Wrap(dstore, recursePinDatastoreKey)
@@ -176,7 +176,7 @@ func (p *pinner) IsPinned(key util.Key) bool {
 }

 // LoadPinner loads a pinner and its keysets from the given datastore
-func LoadPinner(d ds.Datastore, dserv mdag.DAGService) (Pinner, error) {
+func LoadPinner(d ds.ThreadSafeDatastore, dserv mdag.DAGService) (Pinner, error) {
 	p := new(pinner)

 	{ // load recursive set

View File

@@ -21,8 +21,8 @@ func randNode() (*mdag.Node, util.Key) {
 }

 func TestPinnerBasic(t *testing.T) {
-	dstore := ds.NewMapDatastore()
-	bstore := blockstore.NewBlockstore(dssync.MutexWrap(dstore))
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
 	bserv, err := bs.New(bstore, offline.Exchange(bstore))
 	if err != nil {
 		t.Fatal(err)