Vendor newer github.com/jbenet/go-datastore
Godeps/Godeps.json (generated, 2 lines changed)

@@ -153,7 +153,7 @@
 		},
 		{
 			"ImportPath": "github.com/jbenet/go-datastore",
-			"Rev": "2525cae416316b9cf2eb66ec8d4792f567436efa"
+			"Rev": "751a1b4ad40b27c3f0993ba5e2bcf22ad941991b"
 		},
 		{
 			"ImportPath": "github.com/jbenet/go-detect-race",
Godeps/_workspace/src/github.com/jbenet/go-datastore/Godeps/Godeps.json (generated, vendored, 8 lines changed)

@@ -14,6 +14,14 @@
 		{
 			"ImportPath": "github.com/codahale/blake2",
 			"Rev": "3fa823583afba430e8fc7cdbcc670dbf90bfacc4"
 		},
+		{
+			"ImportPath": "github.com/codahale/hdrhistogram",
+			"Rev": "5fd85ec0b4e2dd5d4158d257d943f2e586d86b62"
+		},
+		{
+			"ImportPath": "github.com/codahale/metrics",
+			"Rev": "7d3beb1b480077e77c08a6f6c65ea969f6e91420"
+		},
 		{
 			"ImportPath": "github.com/hashicorp/golang-lru",
 			"Rev": "4dfff096c4973178c8f35cf6dd1a732a0a139370"
Godeps/_workspace/src/github.com/jbenet/go-datastore/measure/measure.go (new file, generated, vendored, 170 lines)

@@ -0,0 +1,170 @@
// Package measure provides a Datastore wrapper that records metrics
// using github.com/codahale/metrics.
package measure

import (
	"time"

	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/codahale/metrics"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/query"
)

// Histogram measurements exceeding these limits are dropped. TODO
// maybe it would be better to cap the value? Should we keep track of
// drops?
const (
	maxLatency = int64(1 * time.Second)
	maxSize    = int64(1 << 32)
)

type DatastoreCloser interface {
	datastore.Datastore
	Close() error
}

// New wraps the datastore, providing metrics on the operations. The
// metrics are registered with names starting with prefix and a dot.
//
// If prefix is not unique, New will panic. Call Close to release the
// prefix.
func New(prefix string, ds datastore.Datastore) DatastoreCloser {
	m := &measure{
		backend: ds,

		putNum:     metrics.Counter(prefix + ".Put.num"),
		putErr:     metrics.Counter(prefix + ".Put.err"),
		putLatency: metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3),
		putSize:    metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3),

		getNum:     metrics.Counter(prefix + ".Get.num"),
		getErr:     metrics.Counter(prefix + ".Get.err"),
		getLatency: metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3),
		getSize:    metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3),

		hasNum:     metrics.Counter(prefix + ".Has.num"),
		hasErr:     metrics.Counter(prefix + ".Has.err"),
		hasLatency: metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3),

		deleteNum:     metrics.Counter(prefix + ".Delete.num"),
		deleteErr:     metrics.Counter(prefix + ".Delete.err"),
		deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3),

		queryNum:     metrics.Counter(prefix + ".Query.num"),
		queryErr:     metrics.Counter(prefix + ".Query.err"),
		queryLatency: metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3),
	}
	return m
}

type measure struct {
	backend datastore.Datastore

	putNum     metrics.Counter
	putErr     metrics.Counter
	putLatency *metrics.Histogram
	putSize    *metrics.Histogram

	getNum     metrics.Counter
	getErr     metrics.Counter
	getLatency *metrics.Histogram
	getSize    *metrics.Histogram

	hasNum     metrics.Counter
	hasErr     metrics.Counter
	hasLatency *metrics.Histogram

	deleteNum     metrics.Counter
	deleteErr     metrics.Counter
	deleteLatency *metrics.Histogram

	queryNum     metrics.Counter
	queryErr     metrics.Counter
	queryLatency *metrics.Histogram
}

var _ datastore.Datastore = (*measure)(nil)
var _ DatastoreCloser = (*measure)(nil)

func recordLatency(h *metrics.Histogram, start time.Time) {
	elapsed := time.Now().Sub(start) / time.Microsecond
	_ = h.RecordValue(int64(elapsed))
}

func (m *measure) Put(key datastore.Key, value interface{}) error {
	defer recordLatency(m.putLatency, time.Now())
	m.putNum.Add()
	if b, ok := value.([]byte); ok {
		_ = m.putSize.RecordValue(int64(len(b)))
	}
	err := m.backend.Put(key, value)
	if err != nil {
		m.putErr.Add()
	}
	return err
}

func (m *measure) Get(key datastore.Key) (value interface{}, err error) {
	defer recordLatency(m.getLatency, time.Now())
	m.getNum.Add()
	value, err = m.backend.Get(key)
	if err != nil {
		m.getErr.Add()
	} else {
		if b, ok := value.([]byte); ok {
			_ = m.getSize.RecordValue(int64(len(b)))
		}
	}
	return value, err
}

func (m *measure) Has(key datastore.Key) (exists bool, err error) {
	defer recordLatency(m.hasLatency, time.Now())
	m.hasNum.Add()
	exists, err = m.backend.Has(key)
	if err != nil {
		m.hasErr.Add()
	}
	return exists, err
}

func (m *measure) Delete(key datastore.Key) error {
	defer recordLatency(m.deleteLatency, time.Now())
	m.deleteNum.Add()
	err := m.backend.Delete(key)
	if err != nil {
		m.deleteErr.Add()
	}
	return err
}

func (m *measure) Query(q query.Query) (query.Results, error) {
	defer recordLatency(m.queryLatency, time.Now())
	m.queryNum.Add()
	res, err := m.backend.Query(q)
	if err != nil {
		m.queryErr.Add()
	}
	return res, err
}

func (m *measure) Close() error {
	m.putNum.Remove()
	m.putErr.Remove()
	m.putLatency.Remove()
	m.putSize.Remove()
	m.getNum.Remove()
	m.getErr.Remove()
	m.getLatency.Remove()
	m.getSize.Remove()
	m.hasNum.Remove()
	m.hasErr.Remove()
	m.hasLatency.Remove()
	m.deleteNum.Remove()
	m.deleteErr.Remove()
	m.deleteLatency.Remove()
	m.queryNum.Remove()
	m.queryErr.Remove()
	m.queryLatency.Remove()
	return nil
}
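
For reference, a minimal usage sketch of the wrapper introduced above (illustrative only, not part of the diff). It assumes the upstream import paths github.com/jbenet/go-datastore and github.com/jbenet/go-datastore/measure, and uses the package's in-memory MapDatastore purely as a stand-in backend. measure.New registers counters and histograms under the given prefix, and Close releases those names so the prefix can be reused.

package main

import (
	"fmt"

	datastore "github.com/jbenet/go-datastore"
	"github.com/jbenet/go-datastore/measure"
)

func main() {
	// Wrap an in-memory datastore. Metrics are registered as
	// "example.ds.Put.num", "example.ds.Put.latency", "example.ds.Get.size",
	// and so on, per the naming scheme in New above.
	ds := measure.New("example.ds", datastore.NewMapDatastore())
	defer ds.Close() // releases the metric names held by this prefix

	key := datastore.NewKey("/hello")
	if err := ds.Put(key, []byte("world")); err != nil {
		fmt.Println("put failed:", err)
		return
	}

	value, err := ds.Get(key)
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Printf("%s\n", value.([]byte))
}

Every call goes straight through to the wrapped backend; the wrapper only counts calls and errors and records latency (and, for Put/Get of []byte values, payload size) in the histograms.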