proc: Improve performance of loadMap on very large sparse maps

Users can create sparse maps in two ways, either by:
a) adding lots of entries to a map and then deleting most of them, or
b) using the make(mapType, N) expression with a very large N

When this happens reading the resulting map will be very slow
because loadMap needs to scan many buckets for each entry it finds.

Technically this is not a bug, the user just created a map that's
very sparse and therefore very slow to read. However it's very
annoying to have the debugger hang for several seconds when trying
to read the local variables just because one of them (which you
might not even be interested in) happens to be a very sparse map.

There is an easy mitigation to this problem: not reading any
additional buckets once we know that we have already read all
entries of the map, or as many entries as we need to fulfill the
MaxArrayValues parameter.

Unfortunately this is mostly useless, a VLSM (Very Large Sparse Map)
with a single entry will still be slow to access, because the single
entry in the map could easily end up in the last bucket.

The obvious solution to this problem is to set a limit to the
number of buckets we read when loading a map. However there is no
good way to set this limit.
If we hardcode it there will be no way to print maps that are beyond
whatever limit we pick.
We could let users (or clients) specify it but the meaning of such
knob would be arcane and they would have no way of picking a good
value (because there is no objectively good value for it).

The solution used in this commit is to set an arbitrary limit on
the number of buckets we read but only when loadMap is invoked
through API calls ListLocalVars and ListFunctionArgs. In this way
`ListLocalVars` and `ListFunctionArgs` (which are often invoked
automatically by GUI clients) remain fast even in presence of a
VLSM, but the contents of the VLSM can still be inspected using
`EvalVariable`.
This commit is contained in:
aarzilli
2018-10-29 12:22:03 +01:00
committed by Derek Parker
parent 51c342c6b7
commit 89c8da65b6
11 changed files with 66 additions and 21 deletions

View File

@ -358,11 +358,11 @@ mainSearch:
} }
scope := proc.FrameToScope(p.BinInfo(), p.CurrentThread(), nil, *mainFrame) scope := proc.FrameToScope(p.BinInfo(), p.CurrentThread(), nil, *mainFrame)
v1, err := scope.EvalVariable("t", proc.LoadConfig{true, 1, 64, 64, -1}) v1, err := scope.EvalVariable("t", proc.LoadConfig{true, 1, 64, 64, -1, 0})
assertNoError(err, t, "EvalVariable(t)") assertNoError(err, t, "EvalVariable(t)")
assertNoError(v1.Unreadable, t, "unreadable variable 't'") assertNoError(v1.Unreadable, t, "unreadable variable 't'")
t.Logf("t = %#v\n", v1) t.Logf("t = %#v\n", v1)
v2, err := scope.EvalVariable("s", proc.LoadConfig{true, 1, 64, 64, -1}) v2, err := scope.EvalVariable("s", proc.LoadConfig{true, 1, 64, 64, -1, 0})
assertNoError(err, t, "EvalVariable(s)") assertNoError(err, t, "EvalVariable(s)")
assertNoError(v2.Unreadable, t, "unreadable variable 's'") assertNoError(v2.Unreadable, t, "unreadable variable 's'")
t.Logf("s = %#v\n", v2) t.Logf("s = %#v\n", v2)

View File

@ -218,7 +218,7 @@ func funcCallEvalExpr(p Process, expr string) (fn *Function, closureAddr uint64,
if fnvar.Kind != reflect.Func { if fnvar.Kind != reflect.Func {
return nil, 0, nil, fmt.Errorf("expression %q is not a function", exprToString(callexpr.Fun)) return nil, 0, nil, fmt.Errorf("expression %q is not a function", exprToString(callexpr.Fun))
} }
fnvar.loadValue(LoadConfig{false, 0, 0, 0, 0}) fnvar.loadValue(LoadConfig{false, 0, 0, 0, 0, 0})
if fnvar.Unreadable != nil { if fnvar.Unreadable != nil {
return nil, 0, nil, fnvar.Unreadable return nil, 0, nil, fnvar.Unreadable
} }

View File

@ -88,7 +88,7 @@ func resolveTypeOff(bi *BinaryInfo, typeAddr uintptr, off uintptr, mem MemoryRea
if err != nil { if err != nil {
return nil, err return nil, err
} }
v.loadValue(LoadConfig{false, 1, 0, 0, -1}) v.loadValue(LoadConfig{false, 1, 0, 0, -1, 0})
addr, _ := constant.Int64Val(v.Value) addr, _ := constant.Int64Val(v.Value)
return v.newVariable(v.Name, uintptr(addr), rtyp, mem), nil return v.newVariable(v.Name, uintptr(addr), rtyp, mem), nil
} }

View File

@ -28,7 +28,7 @@ import (
protest "github.com/derekparker/delve/pkg/proc/test" protest "github.com/derekparker/delve/pkg/proc/test"
) )
var normalLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1} var normalLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1, 0}
var testBackend, buildMode string var testBackend, buildMode string
func init() { func init() {
@ -2656,7 +2656,7 @@ func BenchmarkTrace(b *testing.B) {
assertNoError(proc.Continue(p), b, "Continue()") assertNoError(proc.Continue(p), b, "Continue()")
s, err := proc.GoroutineScope(p.CurrentThread()) s, err := proc.GoroutineScope(p.CurrentThread())
assertNoError(err, b, "Scope()") assertNoError(err, b, "Scope()")
_, err = s.FunctionArguments(proc.LoadConfig{false, 0, 64, 0, 3}) _, err = s.FunctionArguments(proc.LoadConfig{false, 0, 64, 0, 3, 0})
assertNoError(err, b, "FunctionArguments()") assertNoError(err, b, "FunctionArguments()")
} }
b.StopTimer() b.StopTimer()

View File

@ -645,7 +645,7 @@ func (g *G) readDefers(frames []Stackframe) {
} }
func (d *Defer) load() { func (d *Defer) load() {
d.variable.loadValue(LoadConfig{false, 1, 0, 0, -1}) d.variable.loadValue(LoadConfig{false, 1, 0, 0, -1, 0})
if d.variable.Unreadable != nil { if d.variable.Unreadable != nil {
d.Unreadable = d.variable.Unreadable d.Unreadable = d.variable.Unreadable
return return

View File

@ -578,7 +578,7 @@ func runtimeTypeToDIE(_type *Variable, dataAddr uintptr) (typ godwarf.Type, kind
if typestring == nil || typestring.Addr == 0 || typestring.Kind != reflect.String { if typestring == nil || typestring.Addr == 0 || typestring.Kind != reflect.String {
return nil, 0, fmt.Errorf("invalid interface type") return nil, 0, fmt.Errorf("invalid interface type")
} }
typestring.loadValue(LoadConfig{false, 0, 512, 0, 0}) typestring.loadValue(LoadConfig{false, 0, 512, 0, 0, 0})
if typestring.Unreadable != nil { if typestring.Unreadable != nil {
return nil, 0, fmt.Errorf("invalid interface type: %v", typestring.Unreadable) return nil, 0, fmt.Errorf("invalid interface type: %v", typestring.Unreadable)
} }
@ -880,7 +880,7 @@ func nameOfInterfaceRuntimeType(_type *Variable, kind, tflag int64) (string, err
buf.WriteString("interface {") buf.WriteString("interface {")
methods, _ := _type.structMember(interfacetypeFieldMhdr) methods, _ := _type.structMember(interfacetypeFieldMhdr)
methods.loadArrayValues(0, LoadConfig{false, 1, 0, 4096, -1}) methods.loadArrayValues(0, LoadConfig{false, 1, 0, 4096, -1, 0})
if methods.Unreadable != nil { if methods.Unreadable != nil {
return "", nil return "", nil
} }
@ -941,7 +941,7 @@ func nameOfStructRuntimeType(_type *Variable, kind, tflag int64) (string, error)
buf.WriteString("struct {") buf.WriteString("struct {")
fields, _ := _type.structMember("fields") fields, _ := _type.structMember("fields")
fields.loadArrayValues(0, LoadConfig{false, 2, 0, 4096, -1}) fields.loadArrayValues(0, LoadConfig{false, 2, 0, 4096, -1, 0})
if fields.Unreadable != nil { if fields.Unreadable != nil {
return "", fields.Unreadable return "", fields.Unreadable
} }

View File

@ -32,6 +32,8 @@ const (
hashMinTopHash = 4 // used by map reading code, indicates minimum value of tophash that isn't empty or evacuated hashMinTopHash = 4 // used by map reading code, indicates minimum value of tophash that isn't empty or evacuated
maxFramePrefetchSize = 1 * 1024 * 1024 // Maximum prefetch size for a stack frame maxFramePrefetchSize = 1 * 1024 * 1024 // Maximum prefetch size for a stack frame
maxMapBucketsFactor = 100 // Maximum numbers of map buckets to read for every requested map entry when loading variables through (*EvalScope).LocalVariables and (*EvalScope).FunctionArguments.
) )
type floatSpecial uint8 type floatSpecial uint8
@ -122,10 +124,39 @@ type LoadConfig struct {
MaxArrayValues int MaxArrayValues int
// MaxStructFields is the maximum number of fields read from a struct, -1 will read all fields. // MaxStructFields is the maximum number of fields read from a struct, -1 will read all fields.
MaxStructFields int MaxStructFields int
// MaxMapBuckets is the maximum number of map buckets to read before giving up.
// A value of 0 will read as many buckets as necessary until the entire map
// is read or MaxArrayValues is reached.
//
// Loading a map is an operation that issues O(num_buckets) operations.
// Normally the number of buckets is proportional to the number of elements
// in the map, since the runtime tries to keep the load factor of maps
// between 40% and 80%.
//
// It is possible, however, to create very sparse maps either by:
// a) adding lots of entries to a map and then deleting most of them, or
// b) using the make(mapType, N) expression with a very large N
//
// When this happens delve will have to scan many empty buckets to find the
// few entries in the map.
// MaxMapBuckets can be set to avoid annoying slowdowns while reading
// very sparse maps.
//
// Since there is no good way for a user of delve to specify the value of
// MaxMapBuckets, this field is not actually exposed through the API.
// Instead (*EvalScope).LocalVariables and (*EvalScope).FunctionArguments
// set this field automatically to MaxArrayValues * maxMapBucketsFactor.
// Every other invocation uses the default value of 0, obtaining the old behavior.
// In practice this means that debuggers using the ListLocalVars or
// ListFunctionArgs API will not experience a massive slowdown when a very
// sparse map is in scope, but evaluating a single variable will still work
// correctly, even if the variable in question is a very sparse map.
MaxMapBuckets int
} }
var loadSingleValue = LoadConfig{false, 0, 64, 0, 0} var loadSingleValue = LoadConfig{false, 0, 64, 0, 0, 0}
var loadFullValue = LoadConfig{true, 1, 64, 64, -1} var loadFullValue = LoadConfig{true, 1, 64, 64, -1, 0}
// G status, from: src/runtime/runtime2.go // G status, from: src/runtime/runtime2.go
const ( const (
@ -441,7 +472,7 @@ func (v *Variable) parseG() (*G, error) {
} }
v = v.maybeDereference() v = v.maybeDereference()
} }
v.loadValue(LoadConfig{false, 2, 64, 0, -1}) v.loadValue(LoadConfig{false, 2, 64, 0, -1, 0})
if v.Unreadable != nil { if v.Unreadable != nil {
return nil, v.Unreadable return nil, v.Unreadable
} }
@ -591,7 +622,7 @@ func (g *G) stkbar() ([]savedLR, error) {
if g.stkbarVar == nil { // stack barriers were removed in Go 1.9 if g.stkbarVar == nil { // stack barriers were removed in Go 1.9
return nil, nil return nil, nil
} }
g.stkbarVar.loadValue(LoadConfig{false, 1, 0, int(g.stkbarVar.Len), 3}) g.stkbarVar.loadValue(LoadConfig{false, 1, 0, int(g.stkbarVar.Len), 3, 0})
if g.stkbarVar.Unreadable != nil { if g.stkbarVar.Unreadable != nil {
return nil, fmt.Errorf("unreadable stkbar: %v", g.stkbarVar.Unreadable) return nil, fmt.Errorf("unreadable stkbar: %v", g.stkbarVar.Unreadable)
} }
@ -658,6 +689,7 @@ func (scope *EvalScope) LocalVariables(cfg LoadConfig) ([]*Variable, error) {
vars = filterVariables(vars, func(v *Variable) bool { vars = filterVariables(vars, func(v *Variable) bool {
return (v.Flags & (VariableArgument | VariableReturnArgument)) == 0 return (v.Flags & (VariableArgument | VariableReturnArgument)) == 0
}) })
cfg.MaxMapBuckets = maxMapBucketsFactor * cfg.MaxArrayValues
loadValues(vars, cfg) loadValues(vars, cfg)
return vars, nil return vars, nil
} }
@ -671,6 +703,7 @@ func (scope *EvalScope) FunctionArguments(cfg LoadConfig) ([]*Variable, error) {
vars = filterVariables(vars, func(v *Variable) bool { vars = filterVariables(vars, func(v *Variable) bool {
return (v.Flags & (VariableArgument | VariableReturnArgument)) != 0 return (v.Flags & (VariableArgument | VariableReturnArgument)) != 0
}) })
cfg.MaxMapBuckets = maxMapBucketsFactor * cfg.MaxArrayValues
loadValues(vars, cfg) loadValues(vars, cfg)
return vars, nil return vars, nil
} }
@ -1569,6 +1602,11 @@ func (v *Variable) loadMap(recurseLevel int, cfg LoadConfig) {
if it == nil { if it == nil {
return return
} }
it.maxNumBuckets = uint64(cfg.MaxMapBuckets)
if v.Len == 0 || int64(v.mapSkip) >= v.Len || cfg.MaxArrayValues == 0 {
return
}
for skip := 0; skip < v.mapSkip; skip++ { for skip := 0; skip < v.mapSkip; skip++ {
if ok := it.next(); !ok { if ok := it.next(); !ok {
@ -1580,9 +1618,6 @@ func (v *Variable) loadMap(recurseLevel int, cfg LoadConfig) {
count := 0 count := 0
errcount := 0 errcount := 0
for it.next() { for it.next() {
if count >= cfg.MaxArrayValues {
break
}
key := it.key() key := it.key()
var val *Variable var val *Variable
if it.values.fieldType.Size() > 0 { if it.values.fieldType.Size() > 0 {
@ -1601,6 +1636,9 @@ func (v *Variable) loadMap(recurseLevel int, cfg LoadConfig) {
if errcount > maxErrCount { if errcount > maxErrCount {
break break
} }
if count >= cfg.MaxArrayValues || int64(count) >= v.Len {
break
}
} }
} }
@ -1618,6 +1656,8 @@ type mapIterator struct {
values *Variable values *Variable
overflow *Variable overflow *Variable
maxNumBuckets uint64 // maximum number of buckets to scan
idx int64 idx int64
} }
@ -1683,6 +1723,10 @@ func (it *mapIterator) nextBucket() bool {
} else { } else {
it.b = nil it.b = nil
if it.maxNumBuckets > 0 && it.bidx >= it.maxNumBuckets {
return false
}
for it.bidx < it.numbuckets { for it.bidx < it.numbuckets {
it.b = it.buckets.clone() it.b = it.buckets.clone()
it.b.Addr += uintptr(uint64(it.buckets.DwarfType.Size()) * it.bidx) it.b.Addr += uintptr(uint64(it.buckets.DwarfType.Size()) * it.bidx)

View File

@ -283,6 +283,7 @@ func LoadConfigToProc(cfg *LoadConfig) *proc.LoadConfig {
cfg.MaxStringLen, cfg.MaxStringLen,
cfg.MaxArrayValues, cfg.MaxArrayValues,
cfg.MaxStructFields, cfg.MaxStructFields,
0, // MaxMapBuckets is set internally by pkg/proc, read its documentation for an explanation.
} }
} }

View File

@ -265,7 +265,7 @@ func (loc *AddrLocationSpec) Find(d *Debugger, scope *proc.EvalScope, locStr str
} }
return []api.Location{{PC: uint64(addr)}}, nil return []api.Location{{PC: uint64(addr)}}, nil
} else { } else {
v, err := scope.EvalExpression(loc.AddrExpr, proc.LoadConfig{true, 0, 0, 0, 0}) v, err := scope.EvalExpression(loc.AddrExpr, proc.LoadConfig{true, 0, 0, 0, 0, 0})
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -10,7 +10,7 @@ import (
"github.com/derekparker/delve/service/debugger" "github.com/derekparker/delve/service/debugger"
) )
var defaultLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1} var defaultLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1, 0}
type RPCServer struct { type RPCServer struct {
// config is all the information necessary to start the debugger and server. // config is all the information necessary to start the debugger and server.

View File

@ -17,8 +17,8 @@ import (
protest "github.com/derekparker/delve/pkg/proc/test" protest "github.com/derekparker/delve/pkg/proc/test"
) )
var pnormalLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1} var pnormalLoadConfig = proc.LoadConfig{true, 1, 64, 64, -1, 0}
var pshortLoadConfig = proc.LoadConfig{false, 0, 64, 0, 3} var pshortLoadConfig = proc.LoadConfig{false, 0, 64, 0, 3, 0}
type varTest struct { type varTest struct {
name string name string