build(deps): bump github.com/containers/storage from 1.15.0 to 1.15.2

Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.15.0 to 1.15.2.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/master/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.15.0...v1.15.2)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
dependabot-preview[bot] authored this change; Valentin Rothberg committed it on 2019-12-06 09:17:29 +00:00.
Parent 465e142bf2, commit 625a02a286.
39 changed files with 322 additions and 500 deletions.

View File

@@ -135,7 +135,6 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
func (w *huffmanBitWriter) reset(writer io.Writer) {
w.writer = writer
w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
w.bytes = [256]byte{}
w.lastHeader = 0
w.lastHuffMan = false
}
@@ -463,15 +462,12 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
case 16:
w.writeBits(int32(w.codegen[i]), 2)
i++
break
case 17:
w.writeBits(int32(w.codegen[i]), 3)
i++
break
case 18:
w.writeBits(int32(w.codegen[i]), 7)
i++
break
}
}
}
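
The break statements dropped in this hunk (and in the generateFixedLiteralEncoding hunk below) are redundant: Go switch cases do not fall through unless fallthrough is stated explicitly, so they can simply be deleted. A minimal sketch of that behaviour:

```go
package main

import "fmt"

func main() {
	// Go switch cases break implicitly; a trailing "break" is a no-op.
	switch code := 17; code {
	case 16:
		fmt.Println("write 2 extra bits")
	case 17:
		fmt.Println("write 3 extra bits") // runs and exits the switch; no fallthrough
	case 18:
		fmt.Println("write 7 extra bits")
	}
}
```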

View File

@@ -85,17 +85,14 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
// size 8, 000110000 .. 10111111
bits = ch + 48
size = 8
break
case ch < 256:
// size 9, 110010000 .. 111111111
bits = ch + 400 - 144
size = 9
break
case ch < 280:
// size 7, 0000000 .. 0010111
bits = ch - 256
size = 7
break
default:
// size 8, 11000000 .. 11000111
bits = ch + 192 - 280

View File

@@ -3,6 +3,7 @@ package flate
import (
"io"
"math"
"sync"
)
const (
@@ -49,11 +50,24 @@ func NewStatelessWriter(dst io.Writer) io.WriteCloser {
return &statelessWriter{dst: dst}
}
// bitWriterPool contains bit writers that can be reused.
var bitWriterPool = sync.Pool{
New: func() interface{} {
return newHuffmanBitWriter(nil)
},
}
// StatelessDeflate allows to compress directly to a Writer without retaining state.
// When returning everything will be flushed.
func StatelessDeflate(out io.Writer, in []byte, eof bool) error {
var dst tokens
bw := newHuffmanBitWriter(out)
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
defer func() {
// don't keep a reference to our output
bw.reset(nil)
bitWriterPool.Put(bw)
}()
if eof && len(in) == 0 {
// Just write an EOF block.
// Could be faster...
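
The hunk above swaps a per-call newHuffmanBitWriter allocation for a sync.Pool of reusable bit writers, and resets the pooled writer to nil on return so it does not pin the caller's output. A minimal caller-side sketch of the API shown here (the signature is taken from the hunk; the payload is just an example):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	// Compress one payload without keeping any encoder state; eof=true
	// finishes the deflate stream in the same call.
	if err := flate.StatelessDeflate(&buf, []byte("hello, stateless deflate"), true); err != nil {
		panic(err)
	}
	fmt.Println("compressed bytes:", buf.Len())
}
```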

View File

@@ -15,8 +15,7 @@ type dTable struct {
// single-symbols decoding
type dEntrySingle struct {
byte uint8
nBits uint8
entry uint16
}
// double-symbols decoding
@@ -76,14 +75,15 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
}
// collect weight stats
var rankStats [tableLogMax + 1]uint32
var rankStats [16]uint32
weightTotal := uint32(0)
for _, v := range s.huffWeight[:s.symbolLen] {
if v > tableLogMax {
return s, nil, errors.New("corrupt input: weight too large")
}
rankStats[v]++
weightTotal += (1 << (v & 15)) >> 1
v2 := v & 15
rankStats[v2]++
weightTotal += (1 << v2) >> 1
}
if weightTotal == 0 {
return s, nil, errors.New("corrupt input: weights zero")
@@ -134,15 +134,17 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
if len(s.dt.single) != tSize {
s.dt.single = make([]dEntrySingle, tSize)
}
for n, w := range s.huffWeight[:s.symbolLen] {
if w == 0 {
continue
}
length := (uint32(1) << w) >> 1
d := dEntrySingle{
byte: uint8(n),
nBits: s.actualTableLog + 1 - w,
entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8),
}
for u := rankStats[w]; u < rankStats[w]+length; u++ {
s.dt.single[u] = d
single := s.dt.single[rankStats[w] : rankStats[w]+length]
for i := range single {
single[i] = d
}
rankStats[w] += length
}
@@ -167,12 +169,12 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) {
decode := func() byte {
val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
v := s.dt.single[val]
br.bitsRead += v.nBits
return v.byte
br.bitsRead += uint8(v.entry)
return uint8(v.entry >> 8)
}
hasDec := func(v dEntrySingle) byte {
br.bitsRead += v.nBits
return v.byte
br.bitsRead += uint8(v.entry)
return uint8(v.entry >> 8)
}
// Avoid bounds check by always having full sized table.
@@ -269,8 +271,8 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
decode := func(br *bitReader) byte {
val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */
v := single[val&tlMask]
br.bitsRead += v.nBits
return v.byte
br.bitsRead += uint8(v.entry)
return uint8(v.entry >> 8)
}
// Use temp table to avoid bound checks/append penalty.
@@ -283,20 +285,67 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) {
bigloop:
for {
for i := range br {
if br[i].off < 4 {
br := &br[i]
if br.off < 4 {
break bigloop
}
br[i].fillFast()
br.fillFast()
}
tmp[off] = decode(&br[0])
tmp[off+bufoff] = decode(&br[1])
tmp[off+bufoff*2] = decode(&br[2])
tmp[off+bufoff*3] = decode(&br[3])
tmp[off+1] = decode(&br[0])
tmp[off+1+bufoff] = decode(&br[1])
tmp[off+1+bufoff*2] = decode(&br[2])
tmp[off+1+bufoff*3] = decode(&br[3])
{
const stream = 0
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
{
const stream = 1
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
{
const stream = 2
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
{
const stream = 3
val := br[stream].peekBitsFast(s.actualTableLog)
v := single[val&tlMask]
br[stream].bitsRead += uint8(v.entry)
val2 := br[stream].peekBitsFast(s.actualTableLog)
v2 := single[val2&tlMask]
tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8)
tmp[off+bufoff*stream] = uint8(v.entry >> 8)
br[stream].bitsRead += uint8(v2.entry)
}
off += 2
if off == bufoff {
if bufoff > dstEvery {
return nil, errors.New("corruption detected: stream overrun 1")
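
Two things change in this loop: &br[i] is taken once so later field accesses go through a local pointer instead of repeated indexing, and the per-stream decode is unrolled to emit two symbols per stream per iteration. A trivial sketch of the pointer-hoisting idiom (types and names here are illustrative, not the library's):

```go
package main

import "fmt"

type bitReader struct{ off, bitsRead int }

func main() {
	readers := [4]bitReader{{off: 8}, {off: 8}, {off: 8}, {off: 8}}
	for i := range readers {
		br := &readers[i] // index once; subsequent accesses reuse the pointer
		if br.off < 4 {
			break
		}
		br.bitsRead += 2
	}
	fmt.Println(readers)
}
```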
@@ -367,7 +416,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
broken++
if enc.nBits == 0 {
for _, dec := range dt {
if dec.byte == byte(sym) {
if uint8(dec.entry>>8) == byte(sym) {
fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
errs++
break
@@ -383,12 +432,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
top := enc.val << ub
// decoder looks at top bits.
dec := dt[top]
if dec.nBits != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, dec.nBits)
if uint8(dec.entry) != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
errs++
}
if dec.byte != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, dec.byte)
if uint8(dec.entry>>8) != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
errs++
}
if errs > 0 {
@@ -399,12 +448,12 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
for i := uint16(0); i < (1 << ub); i++ {
vval := top | i
dec := dt[vval]
if dec.nBits != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, dec.nBits)
if uint8(dec.entry) != enc.nBits {
fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
errs++
}
if dec.byte != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, dec.byte)
if uint8(dec.entry>>8) != uint8(sym) {
fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
errs++
}
if errs > 20 {
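
The decode-table rework above collapses dEntrySingle from two uint8 fields into one uint16 entry: the bit count lives in the low byte and the decoded symbol in the high byte, which is what the uint8(v.entry) and uint8(v.entry >> 8) expressions unpack. A minimal sketch of that packing (packedEntry, pack, and the accessors are illustrative names, not the library's):

```go
package main

import "fmt"

// packedEntry mirrors the layout used above: bit count in the low byte,
// decoded symbol in the high byte of a single uint16.
type packedEntry uint16

func pack(nBits, symbol uint8) packedEntry {
	return packedEntry(uint16(nBits) | uint16(symbol)<<8)
}

func (e packedEntry) nBits() uint8  { return uint8(e) }
func (e packedEntry) symbol() uint8 { return uint8(e >> 8) }

func main() {
	e := pack(5, 'A')
	fmt.Printf("nBits=%d symbol=%c\n", e.nBits(), e.symbol()) // nBits=5 symbol=A
}
```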

View File

@@ -26,8 +26,12 @@ Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd
### Status:
BETA - there may still be subtle bugs, but a wide variety of content has been tested.
There may still be implementation specific stuff in regards to error handling that could lead to edge cases.
STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively
used by several projects. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
There may still be specific combinations of data types/size/settings that could lead to edge cases,
so as always, testing is recommended.
For now, a high speed (fastest) and medium-fast (default) compressor has been implemented.
@@ -251,8 +255,12 @@ The converter `s` can be reused to avoid allocations, even after errors.
## Decompressor
STATUS: Release Candidate - there may still be subtle bugs, but a wide variety of content has been tested.
Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
or run it past its limits with ANY input provided.
### Usage

View File

@@ -89,6 +89,7 @@ type blockDec struct {
sequenceBuf []seq
tmp [4]byte
err error
decWG sync.WaitGroup
}
func (b *blockDec) String() string {
@@ -105,6 +106,7 @@ func newBlockDec(lowMem bool) *blockDec {
input: make(chan struct{}, 1),
history: make(chan *history, 1),
}
b.decWG.Add(1)
go b.startDecoder()
return &b
}
@@ -183,11 +185,13 @@ func (b *blockDec) Close() {
close(b.input)
close(b.history)
close(b.result)
b.decWG.Wait()
}
// decodeAsync will prepare decoding the block when it receives input.
// This will separate output and history.
func (b *blockDec) startDecoder() {
defer b.decWG.Done()
for range b.input {
//println("blockDec: Got block input")
switch b.Type {
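
The new decWG WaitGroup gives Close a way to block until the background startDecoder goroutine has drained its channels and exited, instead of merely closing them and returning. A generic sketch of that shutdown pattern (worker and its fields are hypothetical names, not the library's):

```go
package main

import (
	"fmt"
	"sync"
)

// worker stands in for blockDec: it owns a goroutine that drains an input
// channel, and Close must not return before that goroutine has exited.
type worker struct {
	input chan int
	wg    sync.WaitGroup
}

func newWorker() *worker {
	w := &worker{input: make(chan int, 1)}
	w.wg.Add(1) // account for the goroutine before starting it
	go w.run()
	return w
}

func (w *worker) run() {
	defer w.wg.Done()
	for v := range w.input {
		fmt.Println("processed", v)
	}
}

func (w *worker) Close() {
	close(w.input) // lets the range loop in run() terminate
	w.wg.Wait()    // block until the goroutine has fully exited
}

func main() {
	w := newWorker()
	w.input <- 42
	w.Close()
}
```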

View File

@@ -300,13 +300,13 @@ func (b *blockEnc) encodeRaw(a []byte) {
}
// encodeLits can be used if the block is only litLen.
func (b *blockEnc) encodeLits() error {
func (b *blockEnc) encodeLits(raw bool) error {
var bh blockHeader
bh.setLast(b.last)
bh.setSize(uint32(len(b.literals)))
// Don't compress extremely small blocks
if len(b.literals) < 32 {
if len(b.literals) < 32 || raw {
if debug {
println("Adding RAW block, length", len(b.literals))
}
@@ -438,9 +438,9 @@ func fuzzFseEncoder(data []byte) int {
}
// encode will encode the block and put the output in b.output.
func (b *blockEnc) encode() error {
func (b *blockEnc) encode(raw bool) error {
if len(b.sequences) == 0 {
return b.encodeLits()
return b.encodeLits(raw)
}
// We want some difference
if len(b.literals) > (b.size - (b.size >> 5)) {
@@ -458,10 +458,10 @@ func (b *blockEnc) encode() error {
reUsed, single bool
err error
)
if len(b.literals) >= 1024 {
if len(b.literals) >= 1024 && !raw {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
} else if len(b.literals) > 32 {
} else if len(b.literals) > 32 && !raw {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)

View File

@@ -262,7 +262,7 @@ func (e *Encoder) nextBlock(final bool) error {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode()
err = blk.encode(e.o.noEntropy)
}
switch err {
case errIncompressible:
@@ -473,7 +473,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
err = blk.encode()
err = blk.encode(e.o.noEntropy)
}
switch err {

View File

@@ -20,6 +20,7 @@ type encoderOptions struct {
windowSize int
level EncoderLevel
fullZero bool
noEntropy bool
}
func (o *encoderOptions) setDefault() {
@@ -202,6 +203,16 @@ func WithZeroFrames(b bool) EOption {
}
}
// WithNoEntropyCompression will always skip entropy compression of literals.
// This can be useful if content has matches, but unlikely to benefit from entropy
// compression. Usually the slight speed improvement is not worth enabling this.
func WithNoEntropyCompression(b bool) EOption {
return func(o *encoderOptions) error {
o.noEntropy = b
return nil
}
}
// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
// If this flag is set, data must be regenerated within a single continuous memory segment.
// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
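
The new option is threaded through as blk.encode(e.o.noEntropy) in the encoder hunks above, so literals are emitted raw even when matches are still found. A minimal usage sketch (payload and printed size are illustrative):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Skip entropy coding of literals; match-finding still runs, but literals
	// are stored raw. A nil writer is fine when only EncodeAll is used.
	enc, err := zstd.NewWriter(nil, zstd.WithNoEntropyCompression(true))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	compressed := enc.EncodeAll([]byte("some repetitive payload payload payload"), nil)
	fmt.Println("compressed size:", len(compressed))
}
```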

View File

@@ -478,9 +478,10 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
}
err = d.checkCRC()
}
}
d.history.b = saved

View File

@@ -111,7 +111,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
// Add empty last block
r.block.reset(nil)
r.block.last = true
err := r.block.encodeLits()
err := r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -178,7 +178,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
err = r.block.encode()
err = r.block.encode(false)
switch err {
case errIncompressible:
r.block.popOffsets()
@@ -188,7 +188,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
println("snappy.Decode:", err)
return written, err
}
err = r.block.encodeLits()
err = r.block.encodeLits(false)
if err != nil {
return written, err
}
@@ -235,7 +235,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
r.err = ErrSnappyCorrupt
return written, r.err
}
err := r.block.encodeLits()
err := r.block.encodeLits(false)
if err != nil {
return written, err
}