Merge pull request #14211 from giuseppe/vendor-storage-image

vendor: update c/storage and c/image
This commit is contained in:
OpenShift Merge Robot
2022-05-12 07:20:58 -04:00
committed by GitHub
46 changed files with 991 additions and 1583 deletions

4
go.mod
View File

@@ -14,10 +14,10 @@ require (
github.com/containers/buildah v1.26.1 github.com/containers/buildah v1.26.1
github.com/containers/common v0.48.0 github.com/containers/common v0.48.0
github.com/containers/conmon v2.0.20+incompatible github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.21.1 github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
github.com/containers/psgo v1.7.2 github.com/containers/psgo v1.7.2
github.com/containers/storage v1.40.2 github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8
github.com/coreos/go-systemd/v22 v22.3.2 github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3 github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3 github.com/cyphar/filepath-securejoin v0.2.3

9
go.sum
View File

@@ -343,8 +343,9 @@ github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSs
github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0= github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q=
github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw= github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4 h1:9yDGjKniCxCIVJwdiUHGTjguGJUcntDtWLUIz+LhyzY=
github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4/go.mod h1:OsX9sFexyGF0FCNAjfcVFv3IwMqDyLyV/WQY/roLPcE=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU= github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -359,8 +360,9 @@ github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJp
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4= github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc= github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/containers/storage v1.40.2 h1:GUlHaGnrs1JOEwv6YEvkQdgYXOXZdU1Angy4wgWNgF8=
github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= github.com/containers/storage v1.40.2/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8 h1:4XdTbn3iVIr1+kN5srZND2G3/Q3hJiZSZZtKdL6r9jg=
github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8/go.mod h1:Pb0l5Sm/89kolX3o2KolKQ5cCHk5vPNpJrhNaLcdS5s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -831,8 +833,9 @@ github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ=
github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

View File

@@ -61,8 +61,8 @@ type certPath struct {
var ( var (
homeCertDir = filepath.FromSlash(".config/containers/certs.d") homeCertDir = filepath.FromSlash(".config/containers/certs.d")
perHostCertDirs = []certPath{ perHostCertDirs = []certPath{
{path: "/etc/containers/certs.d", absolute: true}, {path: etcDir + "/containers/certs.d", absolute: true},
{path: "/etc/docker/certs.d", absolute: true}, {path: etcDir + "/docker/certs.d", absolute: true},
} }
defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"

View File

@@ -9,6 +9,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
"regexp"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -343,12 +344,16 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
buffered := makeBufferedNetworkReader(body, 64, 16384) buffered := makeBufferedNetworkReader(body, 64, 16384)
defer buffered.Close() defer buffered.Close()
mr := multipart.NewReader(buffered, boundary) mr := multipart.NewReader(buffered, boundary)
parts := 0
for { for {
p, err := mr.NextPart() p, err := mr.NextPart()
if err != nil { if err != nil {
if err != io.EOF { if err != io.EOF {
errs <- err errs <- err
} }
if parts != len(chunks) {
errs <- errors.Errorf("invalid number of chunks returned by the server")
}
return return
} }
s := signalCloseReader{ s := signalCloseReader{
@@ -359,9 +364,34 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
// NextPart() cannot be called while the current part // NextPart() cannot be called while the current part
// is being read, so wait until it is closed // is being read, so wait until it is closed
<-s.closed <-s.closed
parts++
} }
} }
var multipartByteRangesRe = regexp.MustCompile("multipart/byteranges; boundary=([A-Za-z-0-9:]+)")
func parseMediaType(contentType string) (string, map[string]string, error) {
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
if err == mime.ErrInvalidMediaParameter {
// CloudFront returns an invalid MIME type, that contains an unquoted ":" in the boundary
// param, let's handle it here.
matches := multipartByteRangesRe.FindStringSubmatch(contentType)
if len(matches) == 2 {
mediaType = "multipart/byteranges"
params = map[string]string{
"boundary": matches[1],
}
err = nil
}
}
if err != nil {
return "", nil, err
}
}
return mediaType, params, err
}
// GetBlobAt returns a sequential channel of readers that contain data for the requested // GetBlobAt returns a sequential channel of readers that contain data for the requested
// blob chunks, and a channel that might get a single error value. // blob chunks, and a channel that might get a single error value.
// The specified chunks must be not overlapping and sorted by their offset. // The specified chunks must be not overlapping and sorted by their offset.
@@ -397,7 +427,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks) go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks)
return streams, errs, nil return streams, errs, nil
case http.StatusPartialContent: case http.StatusPartialContent:
mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type")) mediaType, params, err := parseMediaType(res.Header.Get("Content-Type"))
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

View File

@@ -25,7 +25,7 @@ var systemRegistriesDirPath = builtinRegistriesDirPath
// builtinRegistriesDirPath is the path to registries.d. // builtinRegistriesDirPath is the path to registries.d.
// DO NOT change this, instead see systemRegistriesDirPath above. // DO NOT change this, instead see systemRegistriesDirPath above.
const builtinRegistriesDirPath = "/etc/containers/registries.d" const builtinRegistriesDirPath = etcDir + "/containers/registries.d"
// userRegistriesDirPath is the path to the per user registries.d. // userRegistriesDirPath is the path to the per user registries.d.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d") var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d")

View File

@@ -0,0 +1,6 @@
//go:build !freebsd
// +build !freebsd
package docker
const etcDir = "/etc"

View File

@@ -0,0 +1,6 @@
//go:build freebsd
// +build freebsd
package docker
const etcDir = "/usr/local/etc"

View File

@@ -0,0 +1,12 @@
//go:build !freebsd
// +build !freebsd
package sysregistriesv2
// builtinRegistriesConfPath is the path to the registry configuration file.
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// builtinRegistriesConfDirPath is the path to the registry configuration directory.
// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"

View File

@@ -0,0 +1,12 @@
//go:build freebsd
// +build freebsd
package sysregistriesv2
// builtinRegistriesConfPath is the path to the registry configuration file.
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf"
// builtinRegistriesConfDirPath is the path to the registry configuration directory.
// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d"

View File

@@ -25,20 +25,12 @@ import (
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' // -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path'
var systemRegistriesConfPath = builtinRegistriesConfPath var systemRegistriesConfPath = builtinRegistriesConfPath
// builtinRegistriesConfPath is the path to the registry configuration file.
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// systemRegistriesConfDirPath is the path to the system-wide registry // systemRegistriesConfDirPath is the path to the system-wide registry
// configuration directory and is used to add/subtract potential registries for // configuration directory and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with // obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' // -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path'
var systemRegistriesConfDirPath = builtinRegistriesConfDirPath var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
// builtinRegistriesConfDirPath is the path to the registry configuration directory.
// DO NOT change this, instead see systemRegistriesConfDirectoryPath above.
const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
// AuthenticationFileHelper is a special key for credential helpers indicating // AuthenticationFileHelper is a special key for credential helpers indicating
// the usage of consulting containers-auth.json files instead of a credential // the usage of consulting containers-auth.json files instead of a credential
// helper. // helper.

View File

@@ -32,10 +32,6 @@ import (
// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path' // -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath var systemDefaultPolicyPath = builtinDefaultPolicyPath
// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json"
// userPolicyFile is the path to the per user policy path. // userPolicyFile is the path to the per user policy path.
var userPolicyFile = filepath.FromSlash(".config/containers/policy.json") var userPolicyFile = filepath.FromSlash(".config/containers/policy.json")

View File

@@ -0,0 +1,8 @@
//go:build !freebsd
// +build !freebsd
package signature
// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/etc/containers/policy.json"

View File

@@ -0,0 +1,8 @@
//go:build freebsd
// +build freebsd
package signature
// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
// DO NOT change this, instead see systemDefaultPolicyPath above.
const builtinDefaultPolicyPath = "/usr/local/etc/containers/policy.json"

View File

@@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner // VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 21 VersionMinor = 21
// VersionPatch is for backwards-compatible bug fixes // VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1 VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string. // VersionDev indicates development branch. Releases will be empty string.
VersionDev = "" VersionDev = "-dev"
) )
// Version is the specification version that the package types support. // Version is the specification version that the package types support.

View File

@@ -59,8 +59,8 @@ binary local-binary: containers-storage
local-gccgo: ## build using gccgo on the host local-gccgo: ## build using gccgo on the host
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage
local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd local-cross: ## cross build the binaries for arm, darwin, and freebsd
@for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \ @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \
os=`echo $${target} | cut -f1 -d/` ; \ os=`echo $${target} | cut -f1 -d/` ; \
arch=`echo $${target} | cut -f2 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \
suffix=$${os}.$${arch} ; \ suffix=$${os}.$${arch} ; \

View File

@@ -1 +1 @@
1.40.2 1.41.1-dev

View File

@@ -207,14 +207,18 @@ func checkSupportVolatile(home, runhome string) (bool, error) {
// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of a // checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of a
// idmapped lower layer. // idmapped lower layer.
func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
if os.Geteuid() != 0 {
return false, nil
}
feature := "idmapped-lower-dir" feature := "idmapped-lower-dir"
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
if err == nil { if err == nil {
if overlayCacheResult { if overlayCacheResult {
logrus.Debugf("Cached value indicated that overlay is supported") logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported")
return true, nil return true, nil
} }
logrus.Debugf("Cached value indicated that overlay is not supported") logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported")
return false, errors.New(overlayCacheText) return false, errors.New(overlayCacheText)
} }
supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home) supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home)

View File

@@ -12,7 +12,7 @@ require (
github.com/google/go-intervals v0.0.2 github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.15.2 github.com/klauspost/compress v1.15.4
github.com/klauspost/pgzip v1.2.5 github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12 github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible

View File

@@ -425,8 +425,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= github.com/klauspost/compress v1.15.4 h1:1kn4/7MepF/CHmYub99/nNX8az0IJjfSOU/jbnTVfqQ=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

View File

@@ -5,9 +5,7 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"net"
"os" "os"
"os/user"
"path/filepath" "path/filepath"
"sync" "sync"
@@ -17,13 +15,6 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
func init() {
// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
// environment not in the chroot from untrusted files.
_, _ = user.Lookup("storage")
_, _ = net.LookupHost("localhost")
}
// NewArchiver returns a new Archiver which uses chrootarchive.Untar // NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
archiver := archive.NewArchiver(idMappings) archiver := archive.NewArchiver(idMappings)

View File

@@ -3,7 +3,9 @@ package chrootarchive
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net"
"os" "os"
"os/user"
"path/filepath" "path/filepath"
"github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/mount"
@@ -23,6 +25,11 @@ func chroot(path string) (err error) {
return err return err
} }
// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
// environment not in the chroot from untrusted files.
_, _ = user.Lookup("storage")
_, _ = net.LookupHost("localhost")
// if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot // if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot
if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) {
return realChroot(path) return realChroot(path)

View File

@@ -918,6 +918,9 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
case p := <-streams: case p := <-streams:
part = p part = p
case err := <-errs: case err := <-errs:
if err == nil {
return errors.New("not enough data returned from the server")
}
return err return err
} }
if part == nil { if part == nil {
@@ -1081,12 +1084,18 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
var chunksToRequest []ImageSourceChunk var chunksToRequest []ImageSourceChunk
for _, c := range missingParts {
if c.OriginFile == nil && !c.Hole { calculateChunksToRequest := func() {
chunksToRequest = append(chunksToRequest, *c.SourceChunk) chunksToRequest = []ImageSourceChunk{}
for _, c := range missingParts {
if c.OriginFile == nil && !c.Hole {
chunksToRequest = append(chunksToRequest, *c.SourceChunk)
}
} }
} }
calculateChunksToRequest()
// There are some missing files. Prepare a multirange request for the missing chunks. // There are some missing files. Prepare a multirange request for the missing chunks.
var streams chan io.ReadCloser var streams chan io.ReadCloser
var err error var err error
@@ -1106,6 +1115,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingPart
// Merge more chunks to request // Merge more chunks to request
missingParts = mergeMissingChunks(missingParts, requested/2) missingParts = mergeMissingChunks(missingParts, requested/2)
calculateChunksToRequest()
continue continue
} }
return err return err
@@ -1575,6 +1585,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
wg.Wait() wg.Wait()
for _, res := range copyResults[:filesToWaitFor] { for _, res := range copyResults[:filesToWaitFor] {
r := &mergedEntries[res.index]
if res.err != nil { if res.err != nil {
return output, res.err return output, res.err
} }
@@ -1584,8 +1596,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
continue continue
} }
r := &mergedEntries[res.index]
missingPartsSize += r.Size missingPartsSize += r.Size
remainingSize := r.Size remainingSize := r.Size

View File

@@ -1,3 +1,6 @@
//go:build freebsd && cgo
// +build freebsd,cgo
package mount package mount
/* /*

View File

@@ -1,4 +1,6 @@
// +build !linux,!freebsd //go:build !linux && !(freebsd && cgo)
// +build !linux
// +build !freebsd !cgo
package mount package mount

View File

@@ -5,8 +5,8 @@
# files. # files.
# #
# Note: The storage.conf file overrides other storage.conf files based on this precedence: # Note: The storage.conf file overrides other storage.conf files based on this precedence:
# /usr/containers/storage.conf # /usr/local/share/containers/storage.conf
# /etc/containers/storage.conf # /usr/local/etc/containers/storage.conf
# $HOME/.config/containers/storage.conf # $HOME/.config/containers/storage.conf
# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) # $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set)
# See man 5 containers-storage.conf for more information # See man 5 containers-storage.conf for more information

View File

@@ -1195,6 +1195,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
if layer == nil { if layer == nil {
layer = cLayer layer = cLayer
parentLayer = cParentLayer parentLayer = cParentLayer
if store != rlstore {
// The layer is in another store, so we cannot
// create a mapped version of it to the image.
createMappedLayer = false
}
} }
} }
} }

View File

@@ -25,22 +25,6 @@ type TomlConfig struct {
} `toml:"storage"` } `toml:"storage"`
} }
const (
// these are default path for run and graph root for rootful users
// for rootless path is constructed via getRootlessStorageOpts
defaultRunRoot string = "/run/containers/storage"
defaultGraphRoot string = "/var/lib/containers/storage"
)
// defaultConfigFile path to the system wide storage.conf file
var (
defaultConfigFile = "/usr/share/containers/storage.conf"
defaultOverrideConfigFile = "/etc/containers/storage.conf"
defaultConfigFileSet = false
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
)
const ( const (
overlayDriver = "overlay" overlayDriver = "overlay"
overlay2 = "overlay2" overlay2 = "overlay2"

View File

@@ -0,0 +1,17 @@
package types
const (
// these are default path for run and graph root for rootful users
// for rootless path is constructed via getRootlessStorageOpts
defaultRunRoot string = "/run/containers/storage"
defaultGraphRoot string = "/var/lib/containers/storage"
)
// defaultConfigFile path to the system wide storage.conf file
var (
defaultConfigFile = "/usr/share/containers/storage.conf"
defaultOverrideConfigFile = "/etc/containers/storage.conf"
defaultConfigFileSet = false
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
)

View File

@@ -0,0 +1,17 @@
package types
const (
// these are default path for run and graph root for rootful users
// for rootless path is constructed via getRootlessStorageOpts
defaultRunRoot string = "/var/run/containers/storage"
defaultGraphRoot string = "/var/db/containers/storage"
)
// defaultConfigFile path to the system wide storage.conf file
var (
defaultConfigFile = "/usr/local/share/containers/storage.conf"
defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf"
defaultConfigFileSet = false
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
)

View File

@@ -0,0 +1,17 @@
package types
const (
// these are default path for run and graph root for rootful users
// for rootless path is constructed via getRootlessStorageOpts
defaultRunRoot string = "/run/containers/storage"
defaultGraphRoot string = "/var/lib/containers/storage"
)
// defaultConfigFile path to the system wide storage.conf file
var (
defaultConfigFile = "/usr/share/containers/storage.conf"
defaultOverrideConfigFile = "/etc/containers/storage.conf"
defaultConfigFileSet = false
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
)

View File

@@ -0,0 +1,17 @@
package types
const (
// these are default path for run and graph root for rootful users
// for rootless path is constructed via getRootlessStorageOpts
defaultRunRoot string = "/run/containers/storage"
defaultGraphRoot string = "/var/lib/containers/storage"
)
// defaultConfigFile path to the system wide storage.conf file
var (
defaultConfigFile = "/usr/share/containers/storage.conf"
defaultOverrideConfigFile = "/etc/containers/storage.conf"
defaultConfigFileSet = false
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
)

View File

@@ -17,6 +17,16 @@ This package provides various compression algorithms.
# changelog # changelog
* May 5, 2022 (v1.15.3)
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
* Apr 26, 2022 (v1.15.2)
* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
* Minimum version is Go 1.16, added CI test on 1.18.
* Mar 11, 2022 (v1.15.1) * Mar 11, 2022 (v1.15.1)
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)

View File

@@ -24,7 +24,7 @@ func (f *decompressor) huffmanBytesBuffer() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b, // but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return. // inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState { switch f.stepState {
case stateInit: case stateInit:
@@ -82,9 +82,9 @@ readLiteral:
var length int var length int
switch { switch {
case v < 256: case v < 256:
f.dict.writeByte(byte(v)) dict.writeByte(byte(v))
if f.dict.availWrite() == 0 { if dict.availWrite() == 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer f.step = (*decompressor).huffmanBytesBuffer
f.stepState = stateInit f.stepState = stateInit
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -227,10 +227,10 @@ readLiteral:
} }
// No check on length; encoding can be prescient. // No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) { if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
if debugDecode { if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) fmt.Println("dist > dict.histSize():", dist, dict.histSize())
} }
f.err = CorruptInputError(f.roffset) f.err = CorruptInputError(f.roffset)
return return
@ -243,14 +243,14 @@ readLiteral:
copyHistory: copyHistory:
// Perform a backwards copy according to RFC section 3.2.3. // Perform a backwards copy according to RFC section 3.2.3.
{ {
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 { if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen) cnt = dict.writeCopy(f.copyDist, f.copyLen)
} }
f.copyLen -= cnt f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 { if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
f.stepState = stateDict f.stepState = stateDict
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -275,7 +275,7 @@ func (f *decompressor) huffmanBytesReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b, // but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return. // inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState { switch f.stepState {
case stateInit: case stateInit:
@ -333,9 +333,9 @@ readLiteral:
var length int var length int
switch { switch {
case v < 256: case v < 256:
f.dict.writeByte(byte(v)) dict.writeByte(byte(v))
if f.dict.availWrite() == 0 { if dict.availWrite() == 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader f.step = (*decompressor).huffmanBytesReader
f.stepState = stateInit f.stepState = stateInit
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -478,10 +478,10 @@ readLiteral:
} }
// No check on length; encoding can be prescient. // No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) { if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
if debugDecode { if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) fmt.Println("dist > dict.histSize():", dist, dict.histSize())
} }
f.err = CorruptInputError(f.roffset) f.err = CorruptInputError(f.roffset)
return return
@ -494,14 +494,14 @@ readLiteral:
copyHistory: copyHistory:
// Perform a backwards copy according to RFC section 3.2.3. // Perform a backwards copy according to RFC section 3.2.3.
{ {
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 { if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen) cnt = dict.writeCopy(f.copyDist, f.copyLen)
} }
f.copyLen -= cnt f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 { if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader // We need to continue this work f.step = (*decompressor).huffmanBytesReader // We need to continue this work
f.stepState = stateDict f.stepState = stateDict
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -526,7 +526,7 @@ func (f *decompressor) huffmanBufioReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b, // but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return. // inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState { switch f.stepState {
case stateInit: case stateInit:
@ -584,9 +584,9 @@ readLiteral:
var length int var length int
switch { switch {
case v < 256: case v < 256:
f.dict.writeByte(byte(v)) dict.writeByte(byte(v))
if f.dict.availWrite() == 0 { if dict.availWrite() == 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader f.step = (*decompressor).huffmanBufioReader
f.stepState = stateInit f.stepState = stateInit
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -729,10 +729,10 @@ readLiteral:
} }
// No check on length; encoding can be prescient. // No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) { if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
if debugDecode { if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) fmt.Println("dist > dict.histSize():", dist, dict.histSize())
} }
f.err = CorruptInputError(f.roffset) f.err = CorruptInputError(f.roffset)
return return
@ -745,14 +745,14 @@ readLiteral:
copyHistory: copyHistory:
// Perform a backwards copy according to RFC section 3.2.3. // Perform a backwards copy according to RFC section 3.2.3.
{ {
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 { if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen) cnt = dict.writeCopy(f.copyDist, f.copyLen)
} }
f.copyLen -= cnt f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 { if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader // We need to continue this work f.step = (*decompressor).huffmanBufioReader // We need to continue this work
f.stepState = stateDict f.stepState = stateDict
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -777,7 +777,7 @@ func (f *decompressor) huffmanStringsReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b, // but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return. // inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState { switch f.stepState {
case stateInit: case stateInit:
@ -835,9 +835,9 @@ readLiteral:
var length int var length int
switch { switch {
case v < 256: case v < 256:
f.dict.writeByte(byte(v)) dict.writeByte(byte(v))
if f.dict.availWrite() == 0 { if dict.availWrite() == 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanStringsReader f.step = (*decompressor).huffmanStringsReader
f.stepState = stateInit f.stepState = stateInit
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -980,10 +980,10 @@ readLiteral:
} }
// No check on length; encoding can be prescient. // No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) { if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
if debugDecode { if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) fmt.Println("dist > dict.histSize():", dist, dict.histSize())
} }
f.err = CorruptInputError(f.roffset) f.err = CorruptInputError(f.roffset)
return return
@ -996,14 +996,14 @@ readLiteral:
copyHistory: copyHistory:
// Perform a backwards copy according to RFC section 3.2.3. // Perform a backwards copy according to RFC section 3.2.3.
{ {
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 { if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen) cnt = dict.writeCopy(f.copyDist, f.copyLen)
} }
f.copyLen -= cnt f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 { if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanStringsReader // We need to continue this work f.step = (*decompressor).huffmanStringsReader // We need to continue this work
f.stepState = stateDict f.stepState = stateDict
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -1028,7 +1028,7 @@ func (f *decompressor) huffmanGenericReader() {
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b, // but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return. // inline call to moreBits and reassign b,nb back to f on return.
fnb, fb := f.nb, f.b fnb, fb, dict := f.nb, f.b, &f.dict
switch f.stepState { switch f.stepState {
case stateInit: case stateInit:
@ -1086,9 +1086,9 @@ readLiteral:
var length int var length int
switch { switch {
case v < 256: case v < 256:
f.dict.writeByte(byte(v)) dict.writeByte(byte(v))
if f.dict.availWrite() == 0 { if dict.availWrite() == 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanGenericReader f.step = (*decompressor).huffmanGenericReader
f.stepState = stateInit f.stepState = stateInit
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
@ -1231,10 +1231,10 @@ readLiteral:
} }
// No check on length; encoding can be prescient. // No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) { if dist > uint32(dict.histSize()) {
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb
if debugDecode { if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) fmt.Println("dist > dict.histSize():", dist, dict.histSize())
} }
f.err = CorruptInputError(f.roffset) f.err = CorruptInputError(f.roffset)
return return
@ -1247,14 +1247,14 @@ readLiteral:
copyHistory: copyHistory:
// Perform a backwards copy according to RFC section 3.2.3. // Perform a backwards copy according to RFC section 3.2.3.
{ {
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 { if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen) cnt = dict.writeCopy(f.copyDist, f.copyLen)
} }
f.copyLen -= cnt f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 { if dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush() f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanGenericReader // We need to continue this work f.step = (*decompressor).huffmanGenericReader // We need to continue this work
f.stepState = stateDict f.stepState = stateDict
f.b, f.nb = fb, fnb f.b, f.nb = fb, fnb

View File

@ -1,5 +0,0 @@
package huff0
//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s

View File

@ -1,488 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -1,197 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -13,19 +13,30 @@ import (
// decompress4x_main_loop_x86 is an x86 assembler implementation // decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8. // of Decompress4X when tablelog > 8.
//go:noescape //go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, func decompress4x_main_loop_amd64(ctx *decompress4xContext)
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// decompress4x_8b_loop_x86 is an x86 assembler implementation // decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries // of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop. // per loop.
//go:noescape //go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// fallback8BitSize is the size where using Go version is faster. // fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800 const fallback8BitSize = 800
type decompress4xContext struct {
pbr0 *bitReaderShifted
pbr1 *bitReaderShifted
pbr2 *bitReaderShifted
pbr3 *bitReaderShifted
peekBits uint8
out *byte
dstEvery int
tbl *dEntrySingle
decoded int
limit *byte
}
// Decompress4X will decompress a 4X encoded stream. // Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly. // The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of // The *capacity* of the dst slice must match the destination size of
@ -42,6 +53,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if cap(dst) < fallback8BitSize && use8BitTables { if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src) return d.decompress4X8bit(dst, src)
} }
var br [4]bitReaderShifted var br [4]bitReaderShifted
// Decode "jump table" // Decode "jump table"
start := 6 start := 6
@ -71,70 +83,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
const tlMask = tlSize - 1 const tlMask = tlSize - 1
single := d.dt.single[:tlSize] single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int var decoded int
const debug = false if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
ctx := decompress4xContext{
// see: bitReaderShifted.peekBitsFast() pbr0: &br[0],
peekBits := uint8((64 - d.actualTableLog) & 63) pbr1: &br[1],
pbr2: &br[2],
// Decode 2 values from each decoder/loop. pbr3: &br[3],
const bufoff = 256 peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
for { out: &out[0],
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { dstEvery: dstEvery,
break tbl: &single[0],
limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
} }
if use8BitTables { if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) decompress4x_8b_main_loop_amd64(&ctx)
} else { } else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) decompress4x_main_loop_amd64(&ctx)
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
} }
if off != 0 { decoded = ctx.decoded
break out = out[decoded/4:]
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
} }
// Decode remaining. // Decode remaining.
@ -150,7 +120,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
for bitsLeft > 0 { for bitsLeft > 0 {
br.fill() br.fill()
if offset >= endsAt { if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4") return nil, errors.New("corruption detected: stream overrun 4")
} }
@ -164,7 +133,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
offset++ offset++
} }
if offset != endsAt { if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
} }
decoded += offset - dstEvery*i decoded += offset - dstEvery*i
@ -173,7 +141,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
return nil, err return nil, err
} }
} }
d.bufs.Put(buf)
if dstSize != decoded { if dstSize != decoded {
return nil, errors.New("corruption detected: short output block") return nil, errors.New("corruption detected: short output block")
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,195 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

View File

@ -439,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
} }
if len(next.b) > 0 { if !d.o.ignoreChecksum && len(next.b) > 0 {
n, err := d.current.crc.Write(next.b) n, err := d.current.crc.Write(next.b)
if err == nil { if err == nil {
if n != len(next.b) { if n != len(next.b) {
@ -451,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
got := d.current.crc.Sum64() got := d.current.crc.Sum64()
var tmp [4]byte var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got)) binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
} }
@ -535,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Update/Check CRC // Update/Check CRC
if d.frame.HasCheckSum { if d.frame.HasCheckSum {
d.frame.crc.Write(d.current.b) if !d.o.ignoreChecksum {
d.frame.crc.Write(d.current.b)
}
if d.current.d.Last { if d.current.d.Last {
d.current.err = d.frame.checkCRC() if !d.o.ignoreChecksum {
d.current.err = d.frame.checkCRC()
} else {
d.current.err = d.frame.consumeCRC()
}
if d.current.err != nil { if d.current.err != nil {
println("CRC error:", d.current.err) println("CRC error:", d.current.err)
return false return false

View File

@ -19,6 +19,7 @@ type decoderOptions struct {
maxDecodedSize uint64 maxDecodedSize uint64
maxWindowSize uint64 maxWindowSize uint64
dicts []dict dicts []dict
ignoreChecksum bool
} }
func (o *decoderOptions) setDefault() { func (o *decoderOptions) setDefault() {
@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil return nil
} }
} }
// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
o.ignoreChecksum = b
return nil
}
}

View File

@ -290,13 +290,6 @@ func (d *frameDec) checkCRC() error {
if !d.HasCheckSum { if !d.HasCheckSum {
return nil return nil
} }
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now // We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4) want, err := d.rawInput.readSmall(4)
@ -305,7 +298,19 @@ func (d *frameDec) checkCRC() error {
return err return err
} }
if !bytes.Equal(tmp[:], want) && !ignoreCRC { if d.o.ignoreChecksum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) {
if debugDecoder { if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want) println("CRC Check Failed:", tmp[:], "!=", want)
} }
@ -317,6 +322,19 @@ func (d *frameDec) checkCRC() error {
return nil return nil
} }
// consumeCRC reads the checksum data if the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
}
return nil
}
// runDecoder will create a sync decoder that will decode a block of data. // runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b saved := d.history.b
@ -373,13 +391,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch err = ErrFrameSizeMismatch
} else if d.HasCheckSum { } else if d.HasCheckSum {
var n int if d.o.ignoreChecksum {
n, err = d.crc.Write(dst[crcStart:]) err = d.consumeCRC()
if err == nil { } else {
if n != len(dst)-crcStart { var n int
err = io.ErrShortWrite n, err = d.crc.Write(dst[crcStart:])
} else { if err == nil {
err = d.checkCRC() if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
} }
} }
} }

View File

@ -1,11 +0,0 @@
//go:build ignorecrc
// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true

View File

@ -1,11 +0,0 @@
//go:build !ignorecrc
// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false

View File

@ -1326,30 +1326,30 @@ copy_match:
JA copy_overlapping_match JA copy_overlapping_match
// Copy non-overlapping match // Copy non-overlapping match
XORQ R12, R12 ADDQ R13, DI
MOVQ BX, R12
ADDQ R13, BX
copy_2: copy_2:
MOVUPS (R11)(R12*1), X0 MOVUPS (R11), X0
MOVUPS X0, (BX)(R12*1) MOVUPS X0, (R12)
ADDQ $0x10, R11
ADDQ $0x10, R12 ADDQ $0x10, R12
CMPQ R12, R13 SUBQ $0x10, R13
JB copy_2 JHI copy_2
ADDQ R13, BX
ADDQ R13, DI
JMP handle_loop JMP handle_loop
// Copy overlapping match // Copy overlapping match
copy_overlapping_match: copy_overlapping_match:
XORQ R12, R12 ADDQ R13, DI
copy_slow_3: copy_slow_3:
MOVB (R11)(R12*1), R14 MOVB (R11), R12
MOVB R14, (BX)(R12*1) MOVB R12, (BX)
INCQ R12 INCQ R11
CMPQ R12, R13 INCQ BX
JB copy_slow_3 DECQ R13
ADDQ R13, BX JNZ copy_slow_3
ADDQ R13, DI
handle_loop: handle_loop:
ADDQ $0x18, AX ADDQ $0x18, AX
@ -1826,30 +1826,30 @@ copy_match:
JA copy_overlapping_match JA copy_overlapping_match
// Copy non-overlapping match // Copy non-overlapping match
XORQ CX, CX ADDQ R13, R12
MOVQ R10, CX
ADDQ R13, R10
copy_2: copy_2:
MOVUPS (AX)(CX*1), X0 MOVUPS (AX), X0
MOVUPS X0, (R10)(CX*1) MOVUPS X0, (CX)
ADDQ $0x10, AX
ADDQ $0x10, CX ADDQ $0x10, CX
CMPQ CX, R13 SUBQ $0x10, R13
JB copy_2 JHI copy_2
ADDQ R13, R10
ADDQ R13, R12
JMP handle_loop JMP handle_loop
// Copy overlapping match // Copy overlapping match
copy_overlapping_match: copy_overlapping_match:
XORQ CX, CX ADDQ R13, R12
copy_slow_3: copy_slow_3:
MOVB (AX)(CX*1), R14 MOVB (AX), CL
MOVB R14, (R10)(CX*1) MOVB CL, (R10)
INCQ CX INCQ AX
CMPQ CX, R13 INCQ R10
JB copy_slow_3 DECQ R13
ADDQ R13, R10 JNZ copy_slow_3
ADDQ R13, R12
handle_loop: handle_loop:
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -2333,30 +2333,30 @@ copy_match:
JA copy_overlapping_match JA copy_overlapping_match
// Copy non-overlapping match // Copy non-overlapping match
XORQ R12, R12 ADDQ R13, R11
MOVQ R9, R12
ADDQ R13, R9
copy_2: copy_2:
MOVUPS (CX)(R12*1), X0 MOVUPS (CX), X0
MOVUPS X0, (R9)(R12*1) MOVUPS X0, (R12)
ADDQ $0x10, CX
ADDQ $0x10, R12 ADDQ $0x10, R12
CMPQ R12, R13 SUBQ $0x10, R13
JB copy_2 JHI copy_2
ADDQ R13, R9
ADDQ R13, R11
JMP handle_loop JMP handle_loop
// Copy overlapping match // Copy overlapping match
copy_overlapping_match: copy_overlapping_match:
XORQ R12, R12 ADDQ R13, R11
copy_slow_3: copy_slow_3:
MOVB (CX)(R12*1), R14 MOVB (CX), R12
MOVB R14, (R9)(R12*1) MOVB R12, (R9)
INCQ R12 INCQ CX
CMPQ R12, R13 INCQ R9
JB copy_slow_3 DECQ R13
ADDQ R13, R9 JNZ copy_slow_3
ADDQ R13, R11
handle_loop: handle_loop:
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -2862,6 +2862,7 @@ copy_match:
JA copy_overlapping_match JA copy_overlapping_match
// Copy non-overlapping match // Copy non-overlapping match
ADDQ R13, R12
XORQ CX, CX XORQ CX, CX
TESTQ $0x00000001, R13 TESTQ $0x00000001, R13
JZ copy_2_word JZ copy_2_word
@ -2900,21 +2901,19 @@ copy_2_test:
CMPQ CX, R13 CMPQ CX, R13
JB copy_2 JB copy_2
ADDQ R13, R10 ADDQ R13, R10
ADDQ R13, R12
JMP handle_loop JMP handle_loop
// Copy overlapping match // Copy overlapping match
copy_overlapping_match: copy_overlapping_match:
XORQ CX, CX ADDQ R13, R12
copy_slow_3: copy_slow_3:
MOVB (AX)(CX*1), R14 MOVB (AX), CL
MOVB R14, (R10)(CX*1) MOVB CL, (R10)
INCQ CX INCQ AX
CMPQ CX, R13 INCQ R10
JB copy_slow_3 DECQ R13
ADDQ R13, R10 JNZ copy_slow_3
ADDQ R13, R12
handle_loop: handle_loop:
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -3398,6 +3397,7 @@ copy_match:
JA copy_overlapping_match JA copy_overlapping_match
// Copy non-overlapping match // Copy non-overlapping match
ADDQ R13, R11
XORQ R12, R12 XORQ R12, R12
TESTQ $0x00000001, R13 TESTQ $0x00000001, R13
JZ copy_2_word JZ copy_2_word
@ -3436,21 +3436,19 @@ copy_2_test:
CMPQ R12, R13 CMPQ R12, R13
JB copy_2 JB copy_2
ADDQ R13, R9 ADDQ R13, R9
ADDQ R13, R11
JMP handle_loop JMP handle_loop
// Copy overlapping match // Copy overlapping match
copy_overlapping_match: copy_overlapping_match:
XORQ R12, R12 ADDQ R13, R11
copy_slow_3: copy_slow_3:
MOVB (CX)(R12*1), R14 MOVB (CX), R12
MOVB R14, (R9)(R12*1) MOVB R12, (R9)
INCQ R12 INCQ CX
CMPQ R12, R13 INCQ R9
JB copy_slow_3 DECQ R13
ADDQ R13, R9 JNZ copy_slow_3
ADDQ R13, R11
handle_loop: handle_loop:
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX

6
vendor/modules.txt vendored
View File

@ -155,7 +155,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible # github.com/containers/conmon v2.0.20+incompatible
## explicit ## explicit
github.com/containers/conmon/runner/config github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.21.1 # github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4
## explicit ## explicit
github.com/containers/image/v5/copy github.com/containers/image/v5/copy
github.com/containers/image/v5/directory github.com/containers/image/v5/directory
@ -235,7 +235,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process github.com/containers/psgo/internal/process
# github.com/containers/storage v1.40.2 # github.com/containers/storage v1.41.1-0.20220511210719-cacc3325a9c8
## explicit ## explicit
github.com/containers/storage github.com/containers/storage
github.com/containers/storage/drivers github.com/containers/storage/drivers
@ -450,7 +450,7 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12 # github.com/json-iterator/go v1.1.12
## explicit ## explicit
github.com/json-iterator/go github.com/json-iterator/go
# github.com/klauspost/compress v1.15.2 # github.com/klauspost/compress v1.15.4
github.com/klauspost/compress github.com/klauspost/compress
github.com/klauspost/compress/flate github.com/klauspost/compress/flate
github.com/klauspost/compress/fse github.com/klauspost/compress/fse