Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5

Bumps [go.etcd.io/bbolt](https://github.com/etcd-io/bbolt) from 1.3.4 to 1.3.5.
- [Release notes](https://github.com/etcd-io/bbolt/releases)
- [Commits](https://github.com/etcd-io/bbolt/compare/v1.3.4...v1.3.5)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Author: dependabot-preview[bot]
Date: 2020-06-18 09:07:30 +00:00
Committed by: Daniel J Walsh
Parent: 6472b44c34
Commit: 16dbc160c5
9 changed files with 112 additions and 110 deletions

go.mod: 2 changed lines

@@ -57,7 +57,7 @@ require (
 	github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
 	github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
 	github.com/vishvananda/netlink v1.1.0
-	go.etcd.io/bbolt v1.3.4
+	go.etcd.io/bbolt v1.3.5
 	golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
 	golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7
 	golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a

go.sum: 2 changed lines

@@ -475,6 +475,8 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
 go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=

vendor/go.etcd.io/bbolt/README.md (generated, vendored): 11 changed lines

@@ -152,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start
 a transaction for each one or use locking to ensure only one goroutine accesses
 a transaction at a time. Creating transaction from the `DB` is thread safe.
 
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
+Transactions should not depend on one another and generally shouldn't be opened
+simultaneously in the same goroutine. This can cause a deadlock as the read-write
+transaction needs to periodically re-map the data file but it cannot do so while
+any read-only transaction is open. Even a nested read-only transaction can cause
+a deadlock, as the child transaction can block the parent transaction from releasing
+its resources.
 
 #### Read-write transactions
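For illustration only (not part of this commit), here is a minimal sketch of the nesting pattern the updated README warns against. It assumes a bbolt database on disk and uses only the public View/Update API; if the inner read-write transaction ever needs to remap the data file, it must wait for every open reader, including the enclosing read-only transaction, so the call can hang.

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Anti-pattern: a read-write transaction nested inside a read-only
	// transaction in the same goroutine. The writer may need to remap the
	// data file and waits for all open readers, so this can deadlock.
	err = db.View(func(ro *bolt.Tx) error {
		return db.Update(func(rw *bolt.Tx) error {
			_, err := rw.CreateBucketIfNotExists([]byte("bucket"))
			return err
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}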

vendor/go.etcd.io/bbolt/freelist.go (generated, vendored): 57 changed lines

@@ -2,7 +2,6 @@ package bbolt
 
 import (
 	"fmt"
-	"reflect"
 	"sort"
 	"unsafe"
 )
@@ -94,24 +93,8 @@ func (f *freelist) pending_count() int {
 	return count
 }
 
-// copyallunsafe copies a list of all free ids and all pending ids in one sorted list.
+// copyall copies a list of all free ids and all pending ids in one sorted list.
 // f.count returns the minimum length required for dst.
-func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer
-	m := make(pgids, 0, f.pending_count())
-	for _, txp := range f.pending {
-		m = append(m, txp.ids...)
-	}
-	sort.Sort(m)
-	fpgids := f.getFreePageIDs()
-	sz := len(fpgids) + len(m)
-	dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
-		Data: uintptr(dstptr),
-		Len:  sz,
-		Cap:  sz,
-	}))
-	mergepgids(dst, fpgids, m)
-}
-
 func (f *freelist) copyall(dst []pgid) {
 	m := make(pgids, 0, f.pending_count())
 	for _, txp := range f.pending {
@@ -284,21 +267,23 @@ func (f *freelist) read(p *page) {
 	}
 	// If the page.count is at the max uint16 value (64k) then it's considered
 	// an overflow and the size of the freelist is stored as the first element.
-	var idx, count uintptr = 0, uintptr(p.count)
+	var idx, count = 0, int(p.count)
 	if count == 0xFFFF {
 		idx = 1
-		count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))))
+		c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+		count = int(c)
+		if count < 0 {
+			panic(fmt.Sprintf("leading element count %d overflows int", c))
+		}
 	}
 
 	// Copy the list of page ids from the freelist.
 	if count == 0 {
 		f.ids = nil
 	} else {
-		ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
-			Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)),
-			Len:  int(count),
-			Cap:  int(count),
-		}))
+		var ids []pgid
+		data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
+		unsafeSlice(unsafe.Pointer(&ids), data, count)
 
 		// copy the ids, so we don't modify on the freelist page directly
 		idsCopy := make([]pgid, count)
@@ -331,16 +316,22 @@ func (f *freelist) write(p *page) error {
 	// The page.count can only hold up to 64k elements so if we overflow that
 	// number then we handle it by putting the size in the first element.
-	lenids := f.count()
-	if lenids == 0 {
-		p.count = uint16(lenids)
-	} else if lenids < 0xFFFF {
-		p.count = uint16(lenids)
-		f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
+	l := f.count()
+	if l == 0 {
+		p.count = uint16(l)
+	} else if l < 0xFFFF {
+		p.count = uint16(l)
+		var ids []pgid
+		data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+		unsafeSlice(unsafe.Pointer(&ids), data, l)
+		f.copyall(ids)
 	} else {
 		p.count = 0xFFFF
-		*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids)
-		f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0))))
+		var ids []pgid
+		data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+		unsafeSlice(unsafe.Pointer(&ids), data, l+1)
+		ids[0] = pgid(l)
+		f.copyall(ids[1:])
 	}
 
 	return nil
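As context for the freelist read/write hunks above, this is a simplified stand-alone model of the length encoding they implement (hypothetical types, not bbolt's API): page.count is a uint16, so when the list holds 0xFFFF or more ids the count field is set to 0xFFFF and the real length is stored as the first element of the id array.

package main

import "fmt"

// fakePage is an illustrative stand-in for a freelist page: a uint16 count
// plus the array of ids that follows the page header.
type fakePage struct {
	count uint16
	data  []uint64
}

func encode(ids []uint64) fakePage {
	if len(ids) < 0xFFFF {
		return fakePage{count: uint16(len(ids)), data: ids}
	}
	data := make([]uint64, 0, len(ids)+1)
	data = append(data, uint64(len(ids))) // leading element carries the real length
	data = append(data, ids...)
	return fakePage{count: 0xFFFF, data: data}
}

func decode(p fakePage) []uint64 {
	if p.count != 0xFFFF {
		return p.data[:p.count]
	}
	n := p.data[0]
	return p.data[1 : 1+n]
}

func main() {
	small := encode(make([]uint64, 10))
	big := encode(make([]uint64, 70000))
	fmt.Println(small.count, len(decode(small))) // 10 10
	fmt.Println(big.count, len(decode(big)))     // 65535 70000
}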

vendor/go.etcd.io/bbolt/node.go (generated, vendored): 25 changed lines

@@ -3,7 +3,6 @@ package bbolt
 
 import (
 	"bytes"
 	"fmt"
-	"reflect"
 	"sort"
 	"unsafe"
 )
@@ -208,36 +207,32 @@ func (n *node) write(p *page) {
 	}
 
 	// Loop over each item and write it to the page.
-	bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
+	// off tracks the offset into p of the start of the next data.
+	off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
 	for i, item := range n.inodes {
 		_assert(len(item.key) > 0, "write: zero-length inode key")
 
+		// Create a slice to write into of needed size and advance
+		// byte pointer for next iteration.
+		sz := len(item.key) + len(item.value)
+		b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+		off += uintptr(sz)
+
 		// Write the page element.
 		if n.isLeaf {
 			elem := p.leafPageElement(uint16(i))
-			elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
+			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
 			elem.flags = item.flags
 			elem.ksize = uint32(len(item.key))
 			elem.vsize = uint32(len(item.value))
 		} else {
 			elem := p.branchPageElement(uint16(i))
-			elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
+			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
 			elem.ksize = uint32(len(item.key))
 			elem.pgid = item.pgid
 			_assert(elem.pgid != p.id, "write: circular dependency occurred")
 		}
 
-		// Create a slice to write into of needed size and advance
-		// byte pointer for next iteration.
-		klen, vlen := len(item.key), len(item.value)
-		sz := klen + vlen
-		b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-			Data: bp,
-			Len:  sz,
-			Cap:  sz,
-		}))
-		bp += uintptr(sz)
-
 		// Write data for the element to the end of the page.
 		l := copy(b, item.key)
 		copy(b[l:], item.value)

vendor/go.etcd.io/bbolt/page.go (generated, vendored): 57 changed lines

@@ -3,7 +3,6 @@ package bbolt
 
 import (
 	"fmt"
 	"os"
-	"reflect"
 	"sort"
 	"unsafe"
 )
@@ -51,13 +50,13 @@ func (p *page) typ() string {
 
 // meta returns a pointer to the metadata section of the page.
 func (p *page) meta() *meta {
-	return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
+	return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
 }
 
 // leafPageElement retrieves the leaf node by index
 func (p *page) leafPageElement(index uint16) *leafPageElement {
-	off := uintptr(index) * unsafe.Sizeof(leafPageElement{})
-	return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
+	return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		leafPageElementSize, int(index)))
 }
 
 // leafPageElements retrieves a list of leaf nodes.
@@ -65,17 +64,16 @@ func (p *page) leafPageElements() []leafPageElement {
 	if p.count == 0 {
 		return nil
 	}
-	return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{
-		Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
-		Len:  int(p.count),
-		Cap:  int(p.count),
-	}))
+	var elems []leafPageElement
+	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
+	return elems
 }
 
 // branchPageElement retrieves the branch node by index
 func (p *page) branchPageElement(index uint16) *branchPageElement {
-	off := uintptr(index) * unsafe.Sizeof(branchPageElement{})
-	return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
+	return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+		unsafe.Sizeof(branchPageElement{}), int(index)))
 }
 
 // branchPageElements retrieves a list of branch nodes.
@@ -83,20 +81,15 @@ func (p *page) branchPageElements() []branchPageElement {
 	if p.count == 0 {
 		return nil
 	}
-	return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{
-		Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
-		Len:  int(p.count),
-		Cap:  int(p.count),
-	}))
+	var elems []branchPageElement
+	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
+	return elems
 }
 
 // dump writes n bytes of the page to STDERR as hex output.
 func (p *page) hexdump(n int) {
-	buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-		Data: uintptr(unsafe.Pointer(p)),
-		Len:  n,
-		Cap:  n,
-	}))
+	buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
 	fmt.Fprintf(os.Stderr, "%x\n", buf)
 }
 
@@ -115,11 +108,7 @@ type branchPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *branchPageElement) key() []byte {
-	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
-		Len:  int(n.ksize),
-		Cap:  int(n.ksize),
-	}))
+	return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
 }
 
 // leafPageElement represents a node on a leaf page.
@@ -132,20 +121,16 @@ type leafPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *leafPageElement) key() []byte {
-	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
-		Len:  int(n.ksize),
-		Cap:  int(n.ksize),
-	}))
+	i := int(n.pos)
+	j := i + int(n.ksize)
+	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
 }
 
 // value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
-	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize),
-		Len:  int(n.vsize),
-		Cap:  int(n.vsize),
-	}))
+	i := int(n.pos) + int(n.ksize)
+	j := i + int(n.vsize)
+	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
 }
 
 // PageInfo represents human readable information about a page.
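The node.go and page.go hunks above all rely on one layout rule: a page is a header, then a packed array of fixed-size element headers, then the variable-length key/value bytes, and each element's pos field is the offset from that element header to its own key. A purely illustrative plain-Go model of that arithmetic (made-up element layout, byte-slice indexing instead of unsafe pointers):

package main

import (
	"encoding/binary"
	"fmt"
)

// Each fake element header stores pos, ksize, vsize as uint32 (flags omitted).
const elemSize = 12

// buildPage packs key/value pairs the way a leaf page does: element headers
// first, then the key/value bytes, with pos measured from each header.
func buildPage(pairs [][2][]byte) []byte {
	page := make([]byte, elemSize*len(pairs))
	for i, kv := range pairs {
		k, v := kv[0], kv[1]
		hdr := page[i*elemSize:]
		pos := len(page) - i*elemSize // data for this element starts at the current end
		binary.LittleEndian.PutUint32(hdr[0:], uint32(pos))
		binary.LittleEndian.PutUint32(hdr[4:], uint32(len(k)))
		binary.LittleEndian.PutUint32(hdr[8:], uint32(len(v)))
		page = append(page, k...)
		page = append(page, v...)
	}
	return page
}

// key mirrors leafPageElement.key(): the key begins pos bytes past the header.
func key(page []byte, i int) []byte {
	hdr := page[i*elemSize:]
	pos := binary.LittleEndian.Uint32(hdr[0:])
	ksize := binary.LittleEndian.Uint32(hdr[4:])
	start := i*elemSize + int(pos)
	return page[start : start+int(ksize)]
}

// value mirrors leafPageElement.value(): the value follows the key.
func value(page []byte, i int) []byte {
	hdr := page[i*elemSize:]
	pos := binary.LittleEndian.Uint32(hdr[0:])
	ksize := binary.LittleEndian.Uint32(hdr[4:])
	vsize := binary.LittleEndian.Uint32(hdr[8:])
	start := i*elemSize + int(pos) + int(ksize)
	return page[start : start+int(vsize)]
}

func main() {
	page := buildPage([][2][]byte{
		{[]byte("foo"), []byte("bar")},
		{[]byte("hello"), []byte("world")},
	})
	fmt.Printf("%s=%s %s=%s\n", key(page, 0), value(page, 0), key(page, 1), value(page, 1))
	// Output: foo=bar hello=world
}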

vendor/go.etcd.io/bbolt/tx.go (generated, vendored): 27 changed lines

@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"reflect"
 	"sort"
 	"strings"
 	"time"
@@ -524,24 +523,18 @@ func (tx *Tx) write() error {
 	// Write pages to disk in order.
 	for _, p := range pages {
-		size := (int(p.overflow) + 1) * tx.db.pageSize
+		rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
 		offset := int64(p.id) * int64(tx.db.pageSize)
+		var written uintptr
 
 		// Write out page in "max allocation" sized chunks.
-		ptr := uintptr(unsafe.Pointer(p))
 		for {
-			// Limit our write to our max allocation size.
-			sz := size
+			sz := rem
 			if sz > maxAllocSize-1 {
 				sz = maxAllocSize - 1
 			}
+			buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
 
-			// Write chunk to disk.
-			buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-				Data: ptr,
-				Len:  sz,
-				Cap:  sz,
-			}))
 			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
 				return err
 			}
@@ -550,14 +543,14 @@ func (tx *Tx) write() error {
 			tx.stats.Write++
 
 			// Exit inner for loop if we've written all the chunks.
-			size -= sz
-			if size == 0 {
+			rem -= sz
+			if rem == 0 {
 				break
 			}
 
 			// Otherwise move offset forward and move pointer to next chunk.
 			offset += int64(sz)
-			ptr += uintptr(sz)
+			written += uintptr(sz)
 		}
 	}
 
@@ -576,11 +569,7 @@ func (tx *Tx) write() error {
 			continue
 		}
 
-		buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-			Data: uintptr(unsafe.Pointer(p)),
-			Len:  tx.db.pageSize,
-			Cap:  tx.db.pageSize,
-		}))
+		buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
 
 		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
 		for i := range buf {
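The tx.write hunk above replaces raw pointer arithmetic with two counters, rem and written, to push a multi-page buffer to disk in pieces no larger than the maximum allocation size. A small stand-alone sketch of the same bookkeeping, written against an ordinary temp file with a made-up chunkLimit constant (illustrative names, not bbolt's API):

package main

import (
	"fmt"
	"log"
	"os"
)

const chunkLimit = 1 << 10 // stand-in for bbolt's maxAllocSize - 1

// writeChunked mirrors the rem/written bookkeeping in tx.write: it writes buf
// at offset in pieces of at most chunkLimit bytes, advancing offset as it goes.
func writeChunked(f *os.File, buf []byte, offset int64) error {
	rem := uint64(len(buf))
	var written uint64
	for {
		sz := rem
		if sz > chunkLimit {
			sz = chunkLimit
		}
		if _, err := f.WriteAt(buf[written:written+sz], offset); err != nil {
			return err
		}
		rem -= sz
		if rem == 0 {
			return nil
		}
		offset += int64(sz)
		written += sz
	}
}

func main() {
	f, err := os.CreateTemp("", "chunked-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := writeChunked(f, make([]byte, 5000), 0); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote 5000 bytes in", (5000+chunkLimit-1)/chunkLimit, "chunks")
}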

vendor/go.etcd.io/bbolt/unsafe.go (new file, generated, vendored): 39 added lines

@@ -0,0 +1,39 @@
+package bbolt
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(base) + offset)
+}
+
+func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
+}
+
+func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
+	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
+	//
+	// This memory is not allocated from C, but it is unmanaged by Go's
+	// garbage collector and should behave similarly, and the compiler
+	// should produce similar code. Note that this conversion allows a
+	// subslice to begin after the base address, with an optional offset,
+	// while the URL above does not cover this case and only slices from
+	// index 0. However, the wiki never says that the address must be to
+	// the beginning of a C allocation (or even that malloc was used at
+	// all), so this is believed to be correct.
+	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
+}
+
+// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
+// the slice parameter. This helper should be used over other direct
+// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
+// from reflect.SliceHeader to a Go slice type.
+func unsafeSlice(slice, data unsafe.Pointer, len int) {
+	s := (*reflect.SliceHeader)(slice)
+	s.Data = uintptr(data)
+	s.Cap = len
+	s.Len = len
+}
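To make the new helpers concrete, here is a small stand-alone program that copies unsafeAdd, unsafeByteSlice, and unsafeSlice from the file above (with maxAllocSize set to an arbitrary stand-in value) and applies them to an ordinary Go array: one call views part of the array's bytes without copying, the other rebuilds a typed slice header over the same memory.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// Arbitrary stand-in for bbolt's maxAllocSize constant.
const maxAllocSize = 0x7FFFFFFF

func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
}

func unsafeSlice(slice, data unsafe.Pointer, len int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = len
	s.Len = len
}

func main() {
	vals := [4]uint16{0x11, 0x22, 0x33, 0x44}

	// View bytes 2..6 of the array's backing memory without copying.
	b := unsafeByteSlice(unsafe.Pointer(&vals[0]), 0, 2, 6)
	fmt.Println(len(b), cap(b)) // 4 4

	// Build a []uint16 header that points at the same backing array.
	var u []uint16
	unsafeSlice(unsafe.Pointer(&u), unsafe.Pointer(&vals[0]), len(vals))
	fmt.Println(u) // [17 34 51 68]
}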

vendor/modules.txt (vendored): 2 changed lines

@@ -548,7 +548,7 @@ github.com/xeipuuv/gojsonpointer
 github.com/xeipuuv/gojsonreference
 # github.com/xeipuuv/gojsonschema v1.2.0
 github.com/xeipuuv/gojsonschema
-# go.etcd.io/bbolt v1.3.4
+# go.etcd.io/bbolt v1.3.5
 go.etcd.io/bbolt
 # go.opencensus.io v0.22.0
 go.opencensus.io