Bump github.com/containers/storage from 1.24.4 to 1.24.5

Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.24.4 to 1.24.5.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.24.4...v1.24.5)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
dependabot-preview[bot] authored on 2021-01-11 10:23:37 +00:00, committed by Daniel J Walsh
parent 41613bdb96, commit 63ecd1215a
13 changed files with 96 additions and 45 deletions

vendor/github.com/klauspost/compress/zstd/decoder.go

@@ -85,6 +85,10 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
 	d.current.output = make(chan decodeOutput, d.o.concurrent)
 	d.current.flushed = true
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+	}
 	// Transfer option dicts.
 	d.dicts = make(map[uint32]dict, len(d.o.dicts))
 	for _, dc := range d.o.dicts {
@@ -111,7 +115,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
 // When the stream is done, io.EOF will be returned.
 func (d *Decoder) Read(p []byte) (int, error) {
 	if d.stream == nil {
-		return 0, errors.New("no input has been initialized")
+		return 0, ErrDecoderNilInput
 	}
 	var n int
 	for {
@@ -152,12 +156,20 @@ func (d *Decoder) Read(p []byte) (int, error) {
 // Reset will reset the decoder the supplied stream after the current has finished processing.
 // Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no other operations than Reset or DecodeAll or Close
+// should be used.
 func (d *Decoder) Reset(r io.Reader) error {
 	if d.current.err == ErrDecoderClosed {
 		return d.current.err
 	}
+	d.drainOutput()
 	if r == nil {
-		return errors.New("nil Reader sent as input")
+		d.current.err = ErrDecoderNilInput
+		d.current.flushed = true
+		return nil
 	}
 	if d.stream == nil {
@@ -166,8 +178,6 @@ func (d *Decoder) Reset(r io.Reader) error {
 		go d.startStreamDecoder(d.stream)
 	}
-	d.drainOutput()
 	// If bytes buffer and < 1MB, do sync decoding anyway.
 	if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
 		if debug {
if debug {
@@ -249,7 +259,7 @@ func (d *Decoder) drainOutput() {
 // Any error encountered during the write is also returned.
 func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
 	if d.stream == nil {
-		return 0, errors.New("no input has been initialized")
+		return 0, ErrDecoderNilInput
 	}
 	var n int64
 	for {
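Note on the decoder.go hunks above: Reset(nil) now records ErrDecoderNilInput and returns nil instead of erroring, and NewReader(nil) marks the same "no input attached" state, so one Decoder can be reused across streams while releasing its reference to the previous reader. A minimal usage sketch under that assumption (the payload.zst file name and the copy to stdout are illustrative, not part of this commit):

package main

import (
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Create a decoder with no input attached; per the new doc comment,
	// only Reset, DecodeAll, or Close should be used until a reader is set.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	f, err := os.Open("payload.zst") // illustrative input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := dec.Reset(f); err != nil { // attach the real stream
		log.Fatal(err)
	}
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		log.Fatal(err)
	}

	// Release the reference to f; per the change above, the decoder records
	// ErrDecoderNilInput and only Reset, DecodeAll, or Close should follow.
	_ = dec.Reset(nil)
}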

vendor/github.com/klauspost/compress/zstd/seqdec.go

@@ -181,11 +181,18 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 			return fmt.Errorf("output (%d) bigger than max block size", size)
 		}
 		if size > cap(s.out) {
-			// Not enough size, will be extremely rarely triggered,
+			// Not enough size, which can happen under high volume block streaming conditions
 			// but could be if destination slice is too small for sync operations.
-			// We add maxBlockSize to the capacity.
-			s.out = append(s.out, make([]byte, maxBlockSize)...)
-			s.out = s.out[:len(s.out)-maxBlockSize]
+			// over-allocating here can create a large amount of GC pressure so we try to keep
+			// it as contained as possible
+			used := len(s.out) - startSize
+			addBytes := 256 + ll + ml + used>>2
+			// Clamp to max block size.
+			if used+addBytes > maxBlockSize {
+				addBytes = maxBlockSize - used
+			}
+			s.out = append(s.out, make([]byte, addBytes)...)
+			s.out = s.out[:len(s.out)-addBytes]
 		}
 		if ml > maxMatchLen {
 			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)

vendor/github.com/klauspost/compress/zstd/zstd.go

@@ -73,6 +73,10 @@ var (
 	// ErrDecoderClosed will be returned if the Decoder was used after
 	// Close has been called.
 	ErrDecoderClosed = errors.New("decoder used after Close")
+
+	// ErrDecoderNilInput is returned when a nil Reader was provided
+	// and an operation other than Reset/DecodeAll/Close was attempted.
+	ErrDecoderNilInput = errors.New("nil input provided as reader")
 )

 func println(a ...interface{}) {
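Since ErrDecoderNilInput is an exported sentinel, callers can tell "no reader attached" apart from real decode failures. A small sketch of that check; dec, buf, and handle are illustrative names, and errors.Is is used so wrapped errors also match:

	n, err := dec.Read(buf)
	switch {
	case err == nil || err == io.EOF:
		handle(buf[:n]) // consume whatever was decoded
	case errors.Is(err, zstd.ErrDecoderNilInput):
		// No input attached yet: call dec.Reset(r) with a real reader and retry.
	default:
		log.Fatal(err)
	}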