Mirror of https://github.com/containers/podman.git, synced 2025-12-02 02:58:03 +08:00

Vendor in containers/common@main

Signed-off-by: Ashley Cui <acui@redhat.com>
2 changes: vendor/github.com/klauspost/compress/flate/fast_encoder.go (generated, vendored)
@@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
 // matchlenLong will return the match length between offsets and t in src.
 // It is assumed that s > t, that t >=0 and s < len(src).
 func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
-	if debugDecode {
+	if debugDeflate {
 		if t >= s {
 			panic(fmt.Sprint("t >=s:", t, s))
 		}
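The guard above belongs to matchlenLong, which compares the two byte ranges eight bytes at a time. As a standalone illustration of that technique (a sketch, not the vendored function itself):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns how many leading bytes a and b share. XOR-ing 8-byte
// words makes the first differing byte show up as the lowest non-zero
// byte; TrailingZeros64/8 converts that position to a byte count.
func matchLen(a, b []byte) int {
	var n int
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	// Tail: compare the remaining bytes one at a time.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("gopher gopher!"), []byte("gopher gophers"))) // 13
}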
166 changes: vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go (generated, vendored)
@@ -8,6 +8,7 @@ import (
 	"encoding/binary"
 	"fmt"
 	"io"
+	"math"
 )
 
 const (
@@ -24,6 +25,10 @@ const (
 	codegenCodeCount = 19
 	badCode          = 255
 
+	// maxPredefinedTokens is the maximum number of tokens
+	// where we check if fixed size is smaller.
+	maxPredefinedTokens = 250
+
 	// bufferFlushSize indicates the buffer size
 	// after which bytes are flushed to the writer.
 	// Should preferably be a multiple of 6, since
@@ -36,8 +41,11 @@ const (
 	bufferSize = bufferFlushSize + 8
 )
 
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
 // The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = [32]int8{
+var lengthExtraBits = [32]uint8{
 	/* 257 */ 0, 0, 0,
 	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
 	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
@@ -51,6 +59,9 @@ var lengthBase = [32]uint8{
 	64, 80, 96, 112, 128, 160, 192, 224, 255,
 }
 
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
 // offset code word extra bits.
 var offsetExtraBits = [32]int8{
 	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
@@ -78,10 +89,10 @@ func init() {
 
 	for i := range offsetCombined[:] {
 		// Don't use extended window values...
-		if offsetBase[i] > 0x006000 {
+		if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
 			continue
 		}
-		offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i])
+		offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
 	}
 }
 
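The init() change repacks offsetCombined so the extra-bit count lives in the low byte and the offset base in the bits above it, replacing the old base-in-low-16 layout. A minimal sketch of the new pack/unpack, with illustrative values (the entry shown is hypothetical, not a specific table index):

package main

import "fmt"

// pack stores the number of extra bits in the low 8 bits and the offset
// base in the remaining upper bits, matching the new
// offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8).
func pack(extraBits uint8, base uint32) uint32 {
	return uint32(extraBits) | base<<8
}

func main() {
	comb := pack(4, 48)  // hypothetical entry: 4 extra bits, base 48
	base := comb >> 8    // recover the base, as writeTokens now does
	nbits := uint8(comb) // recover the extra-bit count from the low byte
	fmt.Println(base, nbits) // 48 4
}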
@@ -97,7 +108,7 @@ type huffmanBitWriter struct {
 	// Data waiting to be written is bytes[0:nbytes]
 	// and then the low nbits of bits.
 	bits            uint64
-	nbits           uint16
+	nbits           uint8
 	nbytes          uint8
 	lastHuffMan     bool
 	literalEncoding *huffmanEncoder
@@ -215,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) {
 	_, w.err = w.writer.Write(b)
 }
 
-func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
 	w.bits |= uint64(b) << (w.nbits & 63)
 	w.nbits += nb
 	if w.nbits >= 48 {
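writeBits now takes its bit count as uint8, which is why nbits shrank in the struct above. The pattern it implements, also inlined throughout writeTokens below, accumulates codes in a 64-bit register and flushes six whole bytes whenever 48 or more bits are pending; a simplified standalone sketch:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type bitWriter struct {
	bits  uint64
	nbits uint8
	out   bytes.Buffer
}

// writeBits appends the low nb bits of b. Flushing at 48 pending bits
// leaves 16 bits of headroom, so any following code of up to 16 bits
// still fits in the 64-bit register before the next flush.
func (w *bitWriter) writeBits(b uint64, nb uint8) {
	w.bits |= b << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 {
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], w.bits)
		w.out.Write(buf[:6]) // emit 6 whole bytes
		w.bits >>= 48
		w.nbits -= 48
	}
}

func main() {
	var w bitWriter
	for i := 0; i < 10; i++ {
		w.writeBits(0x155, 9) // ten 9-bit codes = 90 bits
	}
	fmt.Println(w.out.Len(), w.nbits) // 6 bytes flushed, 42 bits pending
}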
@@ -571,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
 	// Fixed Huffman baseline.
 	var literalEncoding = fixedLiteralEncoding
 	var offsetEncoding = fixedOffsetEncoding
-	var size = w.fixedSize(extraBits)
+	var size = math.MaxInt32
+	if tokens.n < maxPredefinedTokens {
+		size = w.fixedSize(extraBits)
+	}
 
 	// Dynamic Huffman?
 	var numCodegens int
@@ -672,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
 		size = reuseSize
 	}
 
-	if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
-		// Check if we get a reasonable size decrease.
-		if storable && ssize <= size {
-			w.writeStoredHeader(len(input), eof)
-			w.writeBytes(input)
-			return
-		}
-		w.writeFixedHeader(eof)
-		if !sync {
-			tokens.AddEOB()
-		}
-		w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
-		return
-	}
+	if tokens.n < maxPredefinedTokens {
+		if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+			// Check if we get a reasonable size decrease.
+			if storable && ssize <= size {
+				w.writeStoredHeader(len(input), eof)
+				w.writeBytes(input)
+				return
+			}
+			w.writeFixedHeader(eof)
+			if !sync {
+				tokens.AddEOB()
+			}
+			w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+			return
+		}
+	}
 	// Check if we get a reasonable size decrease.
 	if storable && ssize <= size {
@@ -717,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
 	size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
 
 	// Store predefined, if we don't get a reasonable improvement.
-	if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
-		// Store bytes, if we don't get an improvement.
-		if storable && ssize <= preSize {
-			w.writeStoredHeader(len(input), eof)
-			w.writeBytes(input)
-			return
-		}
-		w.writeFixedHeader(eof)
-		if !sync {
-			tokens.AddEOB()
-		}
-		w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
-		return
-	}
+	if tokens.n < maxPredefinedTokens {
+		if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+			// Store bytes, if we don't get an improvement.
+			if storable && ssize <= preSize {
+				w.writeStoredHeader(len(input), eof)
+				w.writeBytes(input)
+				return
+			}
+			w.writeFixedHeader(eof)
+			if !sync {
+				tokens.AddEOB()
+			}
+			w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+			return
+		}
+	}
 
 	if storable && ssize <= size {
@@ -833,9 +851,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
 
 	for _, t := range tokens {
-		if t < matchType {
+		if t < 256 {
 			//w.writeCode(lits[t.literal()])
-			c := lits[t.literal()]
+			c := lits[t]
 			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
@@ -858,12 +876,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 
 		// Write the length
 		length := t.length()
-		lengthCode := lengthCode(length)
+		lengthCode := lengthCode(length) & 31
 		if false {
-			w.writeCode(lengths[lengthCode&31])
+			w.writeCode(lengths[lengthCode])
 		} else {
 			// inlined
-			c := lengths[lengthCode&31]
+			c := lengths[lengthCode]
 			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
@@ -883,10 +901,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 			}
 		}
 
-		extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
-		if extraLengthBits > 0 {
+		if lengthCode >= lengthExtraBitsMinCode {
+			extraLengthBits := lengthExtraBits[lengthCode]
 			//w.writeBits(extraLength, extraLengthBits)
-			extraLength := int32(length - lengthBase[lengthCode&31])
+			extraLength := int32(length - lengthBase[lengthCode])
 			bits |= uint64(extraLength) << (nbits & 63)
 			nbits += extraLengthBits
 			if nbits >= 48 {
@@ -907,10 +925,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		}
 		// Write the offset
 		offset := t.offset()
-		offsetCode := offset >> 16
-		offset &= matchOffsetOnlyMask
+		offsetCode := (offset >> 16) & 31
 		if false {
-			w.writeCode(offs[offsetCode&31])
+			w.writeCode(offs[offsetCode])
 		} else {
 			// inlined
 			c := offs[offsetCode]
@@ -932,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 				}
 			}
 		}
-		offsetComb := offsetCombined[offsetCode]
-		if offsetComb > 1<<16 {
+
+		if offsetCode >= offsetExtraBitsMinCode {
+			offsetComb := offsetCombined[offsetCode]
 			//w.writeBits(extraOffset, extraOffsetBits)
-			bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
-			nbits += uint16(offsetComb >> 16)
+			bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+			nbits += uint8(offsetComb)
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
 				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -1002,6 +1020,29 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	// https://stackoverflow.com/a/25454430
 	const guessHeaderSizeBits = 70 * 8
 	histogram(input, w.literalFreq[:numLiterals], fill)
+	ssize, storable := w.storedSize(input)
+	if storable && len(input) > 1024 {
+		// Quick check for incompressible content.
+		abs := float64(0)
+		avg := float64(len(input)) / 256
+		max := float64(len(input) * 2)
+		for _, v := range w.literalFreq[:256] {
+			diff := float64(v) - avg
+			abs += diff * diff
+			if abs > max {
+				break
+			}
+		}
+		if abs < max {
+			if debugDeflate {
+				fmt.Println("stored", abs, "<", max)
+			}
+			// No chance we can compress this...
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			return
+		}
+	}
 	w.literalFreq[endBlockMarker] = 1
 	w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
 	if fill {
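The added block performs a cheap incompressibility test: if the byte histogram's sum of squared deviations from a flat distribution stays below len(input)*2, the input is emitted as stored bytes instead of being Huffman-coded. The same heuristic in isolation (a sketch, not the vendored method):

package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// looksIncompressible reports whether the byte histogram of p is close
// to uniform: a sum of squared deviations from the per-byte average
// under len(p)*2 is taken as "no chance we can compress this".
func looksIncompressible(p []byte) bool {
	var freq [256]int
	for _, b := range p {
		freq[b]++
	}
	avg := float64(len(p)) / 256
	max := float64(len(p) * 2)
	var abs float64
	for _, v := range freq {
		d := float64(v) - avg
		abs += d * d
		if abs > max {
			return false // clearly skewed histogram; worth compressing
		}
	}
	return true
}

func main() {
	random := make([]byte, 4096)
	rand.Read(random)
	fmt.Println(looksIncompressible(random))                         // true
	fmt.Println(looksIncompressible(bytes.Repeat([]byte{'a'}, 4096))) // false
}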
@@ -1019,8 +1060,10 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	estBits += estBits >> w.logNewTablePenalty
 
 	// Store bytes, if we don't get a reasonable improvement.
-	ssize, storable := w.storedSize(input)
 	if storable && ssize <= estBits {
+		if debugDeflate {
+			fmt.Println("stored,", ssize, "<=", estBits)
+		}
 		w.writeStoredHeader(len(input), eof)
 		w.writeBytes(input)
 		return
@@ -1031,7 +1074,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 
 	if estBits < reuseSize {
 		if debugDeflate {
-			//fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+			fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
 		}
 		// We owe an EOB
 		w.writeCode(w.literalEncoding.codes[endBlockMarker])
@@ -1065,6 +1108,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
 	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
 
+	if debugDeflate {
+		count -= int(nbytes)*8 + int(nbits)
+	}
 	// Unroll, write 3 codes/loop.
 	// Fastest number of unrolls.
 	for len(input) > 3 {
@@ -1074,13 +1120,16 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
 			bits >>= (n * 8) & 63
 			nbits -= n * 8
-			nbytes += uint8(n)
+			nbytes += n
 		}
 		if nbytes >= bufferFlushSize {
 			if w.err != nil {
 				nbytes = 0
 				return
 			}
+			if debugDeflate {
+				count += int(nbytes) * 8
+			}
 			_, w.err = w.writer.Write(w.bytes[:nbytes])
 			nbytes = 0
 		}
@@ -1096,13 +1145,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 
 	// Remaining...
 	for _, t := range input {
-		// Bitwriting inlined, ~30% speedup
-		c := encoding[t]
-		bits |= uint64(c.code) << (nbits & 63)
-		nbits += c.len
-		if debugDeflate {
-			count += int(c.len)
-		}
 		if nbits >= 48 {
 			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
 			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -1114,17 +1156,33 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 				nbytes = 0
 				return
 			}
+			if debugDeflate {
+				count += int(nbytes) * 8
+			}
 			_, w.err = w.writer.Write(w.bytes[:nbytes])
 			nbytes = 0
 		}
+		// Bitwriting inlined, ~30% speedup
+		c := encoding[t]
+		bits |= uint64(c.code) << (nbits & 63)
+		nbits += c.len
+		if debugDeflate {
+			count += int(c.len)
+		}
 	}
 	// Restore...
 	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
 
 	if debugDeflate {
-		fmt.Println("wrote", count/8, "bytes")
+		nb := count + int(nbytes)*8 + int(nbits)
+		fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
 	}
+	// Flush if needed to have space.
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
 
 	if eof || sync {
 		w.writeCode(w.literalEncoding.codes[endBlockMarker])
 		w.lastHeader = 0
40 changes: vendor/github.com/klauspost/compress/flate/huffman_code.go (generated, vendored)
@@ -17,7 +17,8 @@ const (
 
 // hcode is a huffman code with a bit code and bit length.
 type hcode struct {
-	code, len uint16
+	code uint16
+	len  uint8
 }
 
 type huffmanEncoder struct {
@@ -56,7 +57,7 @@ type levelInfo struct {
 }
 
 // set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint16) {
+func (h *hcode) set(code uint16, length uint8) {
 	h.len = length
 	h.code = code
 }
@@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
 	var ch uint16
 	for ch = 0; ch < literalCount; ch++ {
 		var bits uint16
-		var size uint16
+		var size uint8
 		switch {
 		case ch < 144:
 			// size 8, 000110000 .. 10111111
@@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
 			bits = ch + 192 - 280
 			size = 8
 		}
-		codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+		codes[ch] = hcode{code: reverseBits(bits, size), len: size}
 	}
 	return h
 }
@@ -187,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
 	// of the level j ancestor.
 	var leafCounts [maxBitsLimit][maxBitsLimit]int32
 
+	// Descending to only have 1 bounds check.
+	l2f := int32(list[2].freq)
+	l1f := int32(list[1].freq)
+	l0f := int32(list[0].freq) + int32(list[1].freq)
+
 	for level := int32(1); level <= maxBits; level++ {
 		// For every level, the first two items are the first two characters.
 		// We initialize the levels as if we had already figured this out.
 		levels[level] = levelInfo{
 			level:        level,
-			lastFreq:     int32(list[1].freq),
-			nextCharFreq: int32(list[2].freq),
-			nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
+			lastFreq:     l1f,
+			nextCharFreq: l2f,
+			nextPairFreq: l0f,
 		}
 		leafCounts[level][level] = 2
 		if level == 1 {
@@ -205,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
 	// We need a total of 2*n - 2 items at top level and have already generated 2.
 	levels[maxBits].needed = 2*n - 4
 
-	level := maxBits
-	for {
+	level := uint32(maxBits)
+	for level < 16 {
 		l := &levels[level]
 		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
 			// We've run out of both leafs and pairs.
@@ -238,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
 			// more values in the level below
 			l.lastFreq = l.nextPairFreq
 			// Take leaf counts from the lower level, except counts[level] remains the same.
-			copy(leafCounts[level][:level], leafCounts[level-1][:level])
+			if true {
+				save := leafCounts[level][level]
+				leafCounts[level] = leafCounts[level-1]
+				leafCounts[level][level] = save
+			} else {
+				copy(leafCounts[level][:level], leafCounts[level-1][:level])
+			}
 			levels[l.level-1].needed = 2
 		}
 
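The new branch swaps a partial copy for a whole-array assignment; in Go, assigning one array to another copies every element by value, so only the counts[level] slot needs an explicit save and restore. A minimal illustration:

package main

import "fmt"

func main() {
	var a, b [4]int32
	b = [4]int32{10, 20, 30, 40}

	// Whole-array assignment copies all elements by value...
	save := a[2] // ...so save the slot that must keep its old value,
	a = b
	a[2] = save // ...and restore it afterwards.

	fmt.Println(a) // [10 20 0 40]
}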
@@ -296,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
 
 		sortByLiteral(chunk)
 		for _, node := range chunk {
-			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)}
 			code++
 		}
 		list = list[0 : len(list)-int(bits)]
@@ -309,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
 // maxBits  The maximum number of bits to use for any literal.
 func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
 	list := h.freqcache[:len(freq)+1]
+	codes := h.codes[:len(freq)]
 	// Number of non-zero literals
 	count := 0
 	// Set list to be the set of all non-zero literals and their frequencies
@@ -317,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
 			list[count] = literalNode{uint16(i), f}
 			count++
 		} else {
-			list[count] = literalNode{}
-			h.codes[i].len = 0
+			codes[i].len = 0
 		}
 	}
-	list[len(freq)] = literalNode{}
+	list[count] = literalNode{}
 
 	list = list[:count]
 	if count <= 2 {
222 changes: vendor/github.com/klauspost/compress/flate/inflate.go (generated, vendored)
@@ -36,6 +36,13 @@ type lengthExtra struct {
 
 var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
 
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
 // Initialize the fixedHuffmanDecoder only once upon first use.
 var fixedOnce sync.Once
 var fixedHuffmanDecoder huffmanDecoder
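bitMask32 is a lookup table of (1<<n)-1 masks, letting the decoder extract the low n bits of its bit buffer without recomputing the shift each time. For illustration:

package main

import "fmt"

var bitMask32 = [32]uint32{
	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
} // up to 32 bits

func main() {
	b := uint32(0b1011_0110)
	n := uint(5)
	// The table lookup and the computed mask give the same low-n-bit extract.
	fmt.Println(b&bitMask32[n] == b&(1<<n-1)) // true
	fmt.Println(b & bitMask32[n])             // 22 (0b10110)
}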
@@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error {
 	return nil
 }
 
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBlockGeneric() {
-	const (
-		stateInit = iota // Zero value must be stateInit
-		stateDict
-	)
-
-	switch f.stepState {
-	case stateInit:
-		goto readLiteral
-	case stateDict:
-		goto copyHistory
-	}
-
-readLiteral:
-	// Read literal and/or (length, distance) according to RFC section 3.2.3.
-	{
-		var v int
-		{
-			// Inlined v, err := f.huffSym(f.hl)
-			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
-			// with single element, huffSym must error on these two edge cases. In both
-			// cases, the chunks slice will be 0 for the invalid sequence, leading it
-			// satisfy the n == 0 check below.
-			n := uint(f.hl.maxRead)
-			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-			// but is smart enough to keep local variables in registers, so use nb and b,
-			// inline call to moreBits and reassign b,nb back to f on return.
-			nb, b := f.nb, f.b
-			for {
-				for nb < n {
-					c, err := f.r.ReadByte()
-					if err != nil {
-						f.b = b
-						f.nb = nb
-						f.err = noEOF(err)
-						return
-					}
-					f.roffset++
-					b |= uint32(c) << (nb & regSizeMaskUint32)
-					nb += 8
-				}
-				chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
-				n = uint(chunk & huffmanCountMask)
-				if n > huffmanChunkBits {
-					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
-					n = uint(chunk & huffmanCountMask)
-				}
-				if n <= nb {
-					if n == 0 {
-						f.b = b
-						f.nb = nb
-						if debugDecode {
-							fmt.Println("huffsym: n==0")
-						}
-						f.err = CorruptInputError(f.roffset)
-						return
-					}
-					f.b = b >> (n & regSizeMaskUint32)
-					f.nb = nb - n
-					v = int(chunk >> huffmanValueShift)
-					break
-				}
-			}
-		}
-
-		var n uint // number of bits extra
-		var length int
-		var err error
-		switch {
-		case v < 256:
-			f.dict.writeByte(byte(v))
-			if f.dict.availWrite() == 0 {
-				f.toRead = f.dict.readFlush()
-				f.step = (*decompressor).huffmanBlockGeneric
-				f.stepState = stateInit
-				return
-			}
-			goto readLiteral
-		case v == 256:
-			f.finishBlock()
-			return
-		// otherwise, reference to older data
-		case v < 265:
-			length = v - (257 - 3)
-			n = 0
-		case v < 269:
-			length = v*2 - (265*2 - 11)
-			n = 1
-		case v < 273:
-			length = v*4 - (269*4 - 19)
-			n = 2
-		case v < 277:
-			length = v*8 - (273*8 - 35)
-			n = 3
-		case v < 281:
-			length = v*16 - (277*16 - 67)
-			n = 4
-		case v < 285:
-			length = v*32 - (281*32 - 131)
-			n = 5
-		case v < maxNumLit:
-			length = 258
-			n = 0
-		default:
-			if debugDecode {
-				fmt.Println(v, ">= maxNumLit")
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-		if n > 0 {
-			for f.nb < n {
-				if err = f.moreBits(); err != nil {
-					if debugDecode {
-						fmt.Println("morebits n>0:", err)
-					}
-					f.err = err
-					return
-				}
-			}
-			length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
-			f.b >>= n & regSizeMaskUint32
-			f.nb -= n
-		}
-
-		var dist uint32
-		if f.hd == nil {
-			for f.nb < 5 {
-				if err = f.moreBits(); err != nil {
-					if debugDecode {
-						fmt.Println("morebits f.nb<5:", err)
-					}
-					f.err = err
-					return
-				}
-			}
-			dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
-			f.b >>= 5
-			f.nb -= 5
-		} else {
-			sym, err := f.huffSym(f.hd)
-			if err != nil {
-				if debugDecode {
-					fmt.Println("huffsym:", err)
-				}
-				f.err = err
-				return
-			}
-			dist = uint32(sym)
-		}
-
-		switch {
-		case dist < 4:
-			dist++
-		case dist < maxNumDist:
-			nb := uint(dist-2) >> 1
-			// have 1 bit in bottom of dist, need nb more.
-			extra := (dist & 1) << (nb & regSizeMaskUint32)
-			for f.nb < nb {
-				if err = f.moreBits(); err != nil {
-					if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-			}
-			extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
-			f.b >>= nb & regSizeMaskUint32
-			f.nb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-		default:
-			if debugDecode {
-				fmt.Println("dist too big:", dist, maxNumDist)
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		// No check on length; encoding can be prescient.
-		if dist > uint32(f.dict.histSize()) {
-			if debugDecode {
-				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		f.copyLen, f.copyDist = length, int(dist)
-		goto copyHistory
-	}
-
-copyHistory:
-	// Perform a backwards copy according to RFC section 3.2.3.
-	{
-		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
-		if cnt == 0 {
-			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
-		}
-		f.copyLen -= cnt
-
-		if f.dict.availWrite() == 0 || f.copyLen > 0 {
-			f.toRead = f.dict.readFlush()
-			f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
-			f.stepState = stateDict
-			return
-		}
-		goto readLiteral
-	}
-}
-
 // Copy a single uncompressed data block from input to output.
 func (f *decompressor) dataBlock() {
 	// Uncompressed.
659 changes: vendor/github.com/klauspost/compress/flate/inflate_gen.go (generated, vendored)

File diff suppressed because it is too large.
56 changes: vendor/github.com/klauspost/compress/flate/level1.go (generated, vendored)
@@ -1,6 +1,10 @@
 package flate
 
-import "fmt"
+import (
+	"encoding/binary"
+	"fmt"
+	"math/bits"
+)
 
 // fastGen maintains the table for matches,
 // and the previous byte block for level 2.
@@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 
 			// Extend the 4-byte match as long as possible.
 			t := candidate.offset - e.cur
-			l := e.matchlenLong(s+4, t+4, src) + 4
+			var l = int32(4)
+			if false {
+				l = e.matchlenLong(s+4, t+4, src) + 4
+			} else {
+				// inlined:
+				a := src[s+4:]
+				b := src[t+4:]
+				for len(a) >= 8 {
+					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+						l += int32(bits.TrailingZeros64(diff) >> 3)
+						break
+					}
+					l += 8
+					a = a[8:]
+					b = b[8:]
+				}
+				if len(a) < 8 {
+					b = b[:len(a)]
+					for i := range a {
+						if a[i] != b[i] {
+							break
+						}
+						l++
+					}
+				}
+			}
 
 			// Extend backwards
 			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
@@ -129,7 +158,28 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 			}
 
 			// Save the match found
-			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			if false {
+				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			} else {
+				// Inlined...
+				xoffset := uint32(s - t - baseMatchOffset)
+				xlength := l
+				oc := offsetCode(xoffset)
+				xoffset |= oc << 16
+				for xlength > 0 {
+					xl := xlength
+					if xl > 258 {
+						// We need to have at least baseMatchLength left over for next loop.
+						xl = 258 - baseMatchLength
+					}
+					xlength -= xl
+					xl -= baseMatchLength
+					dst.extraHist[lengthCodes1[uint8(xl)]]++
+					dst.offHist[oc]++
+					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+					dst.n++
+				}
+			}
 			s += l
 			nextEmit = s
 			if nextS >= s {
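The inlined token-emission loop above splits a long match into DEFLATE-legal pieces: at most 258 bytes per token, while always leaving at least baseMatchLength bytes for the final piece. Its length arithmetic in isolation:

package main

import "fmt"

const baseMatchLength = 3 // the smallest match DEFLATE can encode

// splitMatch mirrors the loop above: emit at most 258-baseMatchLength
// per token so the remainder never drops below baseMatchLength.
func splitMatch(xlength int32) (chunks []int32) {
	for xlength > 0 {
		xl := xlength
		if xl > 258 {
			xl = 258 - baseMatchLength
		}
		xlength -= xl
		chunks = append(chunks, xl)
	}
	return chunks
}

func main() {
	fmt.Println(splitMatch(500)) // [255 245]
	fmt.Println(splitMatch(600)) // [255 255 90]
}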
45 changes: vendor/github.com/klauspost/compress/flate/level3.go (generated, vendored)
@@ -5,7 +5,7 @@ import "fmt"
 // fastEncL3
 type fastEncL3 struct {
 	fastGen
-	table [tableSize]tableEntryPrev
+	table [1 << 16]tableEntryPrev
 }
 
 // Encode uses a similar algorithm to level 2, will check up to two candidates.
@@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
 	const (
 		inputMargin            = 8 - 1
 		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		tableBits              = 16
+		tableSize              = 1 << tableBits
 	)
 
 	if debugDeflate && e.cur < 0 {
@@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
 		nextS := s
 		var candidate tableEntry
 		for {
-			nextHash := hash(cv)
+			nextHash := hash4u(cv, tableBits)
 			s = nextS
 			nextS = s + 1 + (s-nextEmit)>>skipLog
 			if nextS > sLimit {
@@ -156,7 +158,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
 		// Index first pair after match end.
 		if int(t+4) < len(src) && t > 0 {
 			cv := load3232(src, t)
-			nextHash := hash(cv)
+			nextHash := hash4u(cv, tableBits)
 			e.table[nextHash] = tableEntryPrev{
 				Prev: e.table[nextHash].Cur,
 				Cur:  tableEntry{offset: e.cur + t},
@@ -165,30 +167,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
 			goto emitRemainder
 		}
 
-		// We could immediately start working at s now, but to improve
-		// compression we first update the hash table at s-3 to s.
-		x := load6432(src, s-3)
-		prevHash := hash(uint32(x))
-		e.table[prevHash] = tableEntryPrev{
-			Prev: e.table[prevHash].Cur,
-			Cur:  tableEntry{offset: e.cur + s - 3},
+		// Store every 5th hash in-between.
+		for i := s - l + 2; i < s-5; i += 5 {
+			nextHash := hash4u(load3232(src, i), tableBits)
+			e.table[nextHash] = tableEntryPrev{
+				Prev: e.table[nextHash].Cur,
+				Cur:  tableEntry{offset: e.cur + i}}
 		}
-		x >>= 8
-		prevHash = hash(uint32(x))
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-2 to s.
+		x := load6432(src, s-2)
+		prevHash := hash4u(uint32(x), tableBits)
+
 		e.table[prevHash] = tableEntryPrev{
 			Prev: e.table[prevHash].Cur,
 			Cur:  tableEntry{offset: e.cur + s - 2},
 		}
 		x >>= 8
-		prevHash = hash(uint32(x))
+		prevHash = hash4u(uint32(x), tableBits)
+
 		e.table[prevHash] = tableEntryPrev{
 			Prev: e.table[prevHash].Cur,
 			Cur:  tableEntry{offset: e.cur + s - 1},
 		}
 		x >>= 8
-		currHash := hash(uint32(x))
+		currHash := hash4u(uint32(x), tableBits)
 		candidates := e.table[currHash]
 		cv = uint32(x)
 		e.table[currHash] = tableEntryPrev{
@@ -200,15 +203,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
 			candidate = candidates.Cur
 			minOffset := e.cur + s - (maxMatchOffset - 4)
 
-			if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
-				// We only check if value mismatches.
-				// Offset will always be invalid in other cases.
+			if candidate.offset > minOffset {
+				if cv == load3232(src, candidate.offset-e.cur) {
+					// Found a match...
+					continue
+				}
 				candidate = candidates.Prev
 				if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
-					offset := s - (candidate.offset - e.cur)
-					if offset <= maxMatchOffset {
-						continue
-					}
+					// Match at prev...
+					continue
 				}
 			}
 			cv = uint32(x >> 8)
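hash4u replaces the fixed-width hash with one parameterized by the table size. Its body is not part of this diff; a typical 4-byte multiplicative hash of that shape looks like the following sketch (the constant and exact formula are assumptions, not taken from this commit):

package main

import "fmt"

const prime4bytes = 2654435761 // Fibonacci-hashing constant, ~2^32/phi (illustrative)

// hash4u maps the 4 bytes packed in u into a table of size 1<<h by
// multiplying with a large odd constant and keeping the top h bits,
// which mixes entropy from all four input bytes into the index.
func hash4u(u uint32, h uint8) uint32 {
	return (u * prime4bytes) >> (32 - h)
}

func main() {
	const tableBits = 16 // matches the new [1 << 16]tableEntryPrev table
	fmt.Println(hash4u(0x74786574, tableBits)) // always < 1<<16
}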
17 changes: vendor/github.com/klauspost/compress/flate/token.go (generated, vendored)
@@ -13,11 +13,10 @@ import (
 )
 
 const (
-	// From top
-	// 2 bits:   type   0 = literal  1=EOF  2=Match   3=Unused
-	// 8 bits:   xlength = length - MIN_MATCH_LENGTH
-	// 5 bits    offsetcode
-	// 16 bits   xoffset = offset - MIN_OFFSET_SIZE, or literal
+	// bits 0-16   xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+	// bits 16-22  offsetcode - 5 bits
+	// bits 22-30  xlength = length - MIN_MATCH_LENGTH - 8 bits
+	// bits 30-32  type   0 = literal  1=EOF  2=Match   3=Unused - 2 bits
 	lengthShift = 22
 	offsetMask  = 1<<lengthShift - 1
 	typeMask    = 3 << 30
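The rewritten comment documents the token layout from the low bits up. A sketch of packing and unpacking a match token under that layout; lengthShift and offsetMask appear in the diff, while the matchType value here is an assumption for illustration:

package main

import "fmt"

// Layout per the comment above: offset in bits 0-16, offset code in
// bits 16-22, length in bits 22-30, token type in bits 30-32.
const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30 // assumed value; the real constant lives in token.go
)

func pack(xlength, offsetCode, xoffset uint32) uint32 {
	return matchType | xlength<<lengthShift | offsetCode<<16 | xoffset
}

func main() {
	t := pack(99, 13, 4711)
	off := t & offsetMask                // token.offset(): low 22 bits
	fmt.Println(uint8(t >> lengthShift)) // 99, mirrors token.length()
	fmt.Println(off>>16&31, off&0xFFFF)  // 13 4711
}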
@@ -276,7 +275,7 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
 	xoffset |= oCode << 16
 
 	t.extraHist[lengthCodes1[uint8(xlength)]]++
-	t.offHist[oCode]++
+	t.offHist[oCode&31]++
 	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
 	t.n++
 }
@@ -300,7 +299,7 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
 		xlength -= xl
 		xl -= baseMatchLength
 		t.extraHist[lengthCodes1[uint8(xl)]]++
-		t.offHist[oc]++
+		t.offHist[oc&31]++
 		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
 		t.n++
 	}
@@ -356,8 +355,8 @@ func (t token) offset() uint32 { return uint32(t) & offsetMask }
 
 func (t token) length() uint8 { return uint8(t >> lengthShift) }
 
-// The code is never more than 8 bits, but is returned as uint32 for convenience.
-func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
 
 // Returns the offset code corresponding to a specific offset
 func offsetCode(off uint32) uint32 {