race fix: pinner loads with a threadsafe datastore
All the datastores used by pinners and the like should be mutex wrapped. One issue with changing them from ds.Datastore to ds.ThreadSafeDatastore is that we wrap the incoming ds.ThreadSafeDatastore with other datastores which do not implement the interface, so re-wrapping is needed and causes double locking (which may be OK, but...). Any ideas?
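A minimal sketch of the double-locking concern described above, assuming the go-datastore packages as vendored at the time (the import paths, the sketch function, and the "/recursive" key are illustrative, not taken from the commit):

import (
	ds "github.com/jbenet/go-datastore"
	nsds "github.com/jbenet/go-datastore/namespace"
	dssync "github.com/jbenet/go-datastore/sync"
)

func sketch() {
	// Wrap once: MutexWrap yields a ds.ThreadSafeDatastore.
	var base ds.ThreadSafeDatastore = dssync.MutexWrap(ds.NewMapDatastore())

	// Namespacing the store (as the pinner does internally) returns a plain
	// ds.Datastore; the ThreadSafeDatastore marker is lost even though every
	// call still goes through base's mutex.
	var rcds ds.Datastore = nsds.Wrap(base, ds.NewKey("/recursive"))

	// Anything that insists on ds.ThreadSafeDatastore now forces a second
	// MutexWrap, so each operation takes two locks -- the "double locking"
	// mentioned above.
	var rewrapped ds.ThreadSafeDatastore = dssync.MutexWrap(rcds)
	_ = rewrapped
}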
@@ -137,8 +137,8 @@ type dagservAndPinner struct {
 }
 
 func getDagservAndPinner(t *testing.T) dagservAndPinner {
-	db := ds.NewMapDatastore()
-	bs := bstore.NewBlockstore(dssync.MutexWrap(db))
+	db := dssync.MutexWrap(ds.NewMapDatastore())
+	bs := bstore.NewBlockstore(db)
 	blockserv, err := bserv.New(bs, offline.Exchange(bs))
 	if err != nil {
 		t.Fatal(err)
@@ -28,8 +28,8 @@ type dagservAndPinner struct {
 }
 
 func getDagservAndPinner(t *testing.T) dagservAndPinner {
-	db := ds.NewMapDatastore()
-	bs := bstore.NewBlockstore(dssync.MutexWrap(db))
+	db := dssync.MutexWrap(ds.NewMapDatastore())
+	bs := bstore.NewBlockstore(db)
 	blockserv, err := bserv.New(bs, offline.Exchange(bs))
 	if err != nil {
 		t.Fatal(err)
@@ -53,11 +53,11 @@ type pinner struct {
 	directPin set.BlockSet
 	indirPin  *indirectPin
 	dserv     mdag.DAGService
-	dstore    ds.Datastore
+	dstore    ds.ThreadSafeDatastore
 }
 
 // NewPinner creates a new pinner using the given datastore as a backend
-func NewPinner(dstore ds.Datastore, serv mdag.DAGService) Pinner {
+func NewPinner(dstore ds.ThreadSafeDatastore, serv mdag.DAGService) Pinner {
 
 	// Load set from given datastore...
 	rcds := nsds.Wrap(dstore, recursePinDatastoreKey)
@@ -176,7 +176,7 @@ func (p *pinner) IsPinned(key util.Key) bool {
 }
 
 // LoadPinner loads a pinner and its keysets from the given datastore
-func LoadPinner(d ds.Datastore, dserv mdag.DAGService) (Pinner, error) {
+func LoadPinner(d ds.ThreadSafeDatastore, dserv mdag.DAGService) (Pinner, error) {
 	p := new(pinner)
 
 	{ // load recursive set
@@ -21,8 +21,8 @@ func randNode() (*mdag.Node, util.Key) {
 }
 
 func TestPinnerBasic(t *testing.T) {
-	dstore := ds.NewMapDatastore()
-	bstore := blockstore.NewBlockstore(dssync.MutexWrap(dstore))
+	dstore := dssync.MutexWrap(ds.NewMapDatastore())
+	bstore := blockstore.NewBlockstore(dstore)
 	bserv, err := bs.New(bstore, offline.Exchange(bstore))
 	if err != nil {
 		t.Fatal(err)
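Taken together, construction after this change looks roughly like the test setup in the hunks above; a rough sketch, where the DAG-service constructor name and the pin package alias are assumptions rather than part of the commit:

	dstore := dssync.MutexWrap(ds.NewMapDatastore()) // satisfies ds.ThreadSafeDatastore
	bstore := blockstore.NewBlockstore(dstore)       // blockstore no longer re-wraps the store
	bserv, err := bs.New(bstore, offline.Exchange(bstore))
	if err != nil {
		t.Fatal(err)
	}
	dserv := mdag.NewDAGService(bserv) // constructor name assumed; any mdag.DAGService works
	p := pin.NewPinner(dstore, dserv)  // NewPinner now takes the ds.ThreadSafeDatastore directly
	_ = p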