Mirror of https://github.com/containers/podman.git, synced 2025-06-19 00:06:43 +08:00
Merge pull request #9140 from containers/dependabot/go_modules/github.com/containers/image/v5-5.10.0
Bump github.com/containers/image/v5 from 5.9.0 to 5.10.0
2 go.mod

@@ -13,7 +13,7 @@ require (
 	github.com/containers/buildah v1.19.2
 	github.com/containers/common v0.33.1
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.9.0
+	github.com/containers/image/v5 v5.10.0
 	github.com/containers/psgo v1.5.2
 	github.com/containers/storage v1.24.5
 	github.com/coreos/go-systemd/v22 v22.1.0
9 go.sum

@@ -103,6 +103,8 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q=
 github.com/containers/image/v5 v5.9.0/go.mod h1:blOEFd/iFdeyh891ByhCVUc+xAcaI3gBegXECwz9UbQ=
+github.com/containers/image/v5 v5.10.0 h1:Dq4gZ2QfxKUZIfPCubgwp4WBs3ReW1+mckfsRwq+wsQ=
+github.com/containers/image/v5 v5.10.0/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
@@ -340,6 +342,8 @@ github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc=
 github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
+github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -571,6 +575,8 @@ github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/
 github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
 github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
 github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I=
+github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -579,6 +585,8 @@ github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02
 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
 github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
 github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
+github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg=
+github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
@@ -744,6 +752,7 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64=
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
84 vendor/github.com/containers/image/v5/copy/copy.go (generated, vendored)
@@ -14,6 +14,7 @@ import (
 
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/image"
+	internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/internal/pkg/platform"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/blobinfocache"
@@ -47,7 +48,7 @@ var (
 
 	// maxParallelDownloads is used to limit the maxmimum number of parallel
 	// downloads. Let's follow Firefox by limiting it to 6.
-	maxParallelDownloads = 6
+	maxParallelDownloads = uint(6)
 )
 
 // compressionBufferSize is the buffer size used to compress a blob
@@ -113,12 +114,13 @@ type copier struct {
 	progressOutput    io.Writer
 	progressInterval  time.Duration
 	progress          chan types.ProgressProperties
-	blobInfoCache     types.BlobInfoCache
+	blobInfoCache     internalblobinfocache.BlobInfoCache2
 	copyInParallel    bool
 	compressionFormat compression.Algorithm
 	compressionLevel  *int
 	ociDecryptConfig  *encconfig.DecryptConfig
 	ociEncryptConfig  *encconfig.EncryptConfig
+	maxParallelDownloads uint
 }
 
 // imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@@ -190,6 +192,8 @@ type Options struct {
 	// OciDecryptConfig contains the config that can be used to decrypt an image if it is
 	// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
 	OciDecryptConfig *encconfig.DecryptConfig
+	// MaxParallelDownloads indicates the maximum layers to pull at the same time. A reasonable default is used if this is left as 0.
+	MaxParallelDownloads uint
 }
 
 // validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
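The new MaxParallelDownloads knob is plumbed from copy.Options down to the per-layer semaphore shown further below. A minimal sketch of how a caller might set it; the image references and the accept-anything policy are placeholders for illustration, not taken from this PR:

```go
package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func pullWithLimit(ctx context.Context) error {
	// Hypothetical source and destination references.
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
	if err != nil {
		return err
	}
	destRef, err := alltransports.ParseImageName("containers-storage:alpine:latest")
	if err != nil {
		return err
	}
	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer policyContext.Destroy()

	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		// Leaving this 0 keeps the library default (6, per maxParallelDownloads above).
		MaxParallelDownloads: 3,
	})
	return err
}
```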
@@ -265,9 +269,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
 		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
 		// we might want to add a separate CommonCtx — or would that be too confusing?
-		blobInfoCache:    blobinfocache.DefaultCache(options.DestinationCtx),
+		blobInfoCache:    internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
 		ociDecryptConfig: options.OciDecryptConfig,
 		ociEncryptConfig: options.OciEncryptConfig,
+		maxParallelDownloads: options.MaxParallelDownloads,
 	}
 	// Default to using gzip compression unless specified otherwise.
 	if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
@@ -648,13 +653,19 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
 	// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
 	// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
 	// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
-	// So, try the preferred manifest MIME type. If the process succeeds, fine…
+	// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
+	// we're altering how they're compressed. If the process succeeds, fine…
 	manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
 	retManifestType = preferredManifestMIMEType
 	if err != nil {
 		logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
-		// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
-		if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 {
+		// … if it fails, and the failure is either because the manifest is rejected by the registry, or
+		// because we failed to create a manifest of the specified type because the specific manifest type
+		// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
+		// have other options available that could still succeed.
+		_, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
+		_, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
+		if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
 			// We don’t have other options.
 			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
 			// Don’t bother the user with MIME types if we have no choice.
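The fallback now treats two distinct error types as recoverable before retrying with another manifest MIME type. A self-contained sketch of the same detection pattern with github.com/pkg/errors; the two error types here are stand-ins for types.ManifestTypeRejectedError and manifest.ManifestLayerCompressionIncompatibilityError, not the real definitions:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Stand-in error types for illustration only.
type manifestRejectedError struct{ msg string }

func (e manifestRejectedError) Error() string { return e.msg }

type compressionIncompatibleError struct{ msg string }

func (e compressionIncompatibleError) Error() string { return e.msg }

func canFallBack(err error, otherCandidates int) bool {
	// errors.Cause unwraps errors.Wrap/Wrapf chains to the root error,
	// so the type assertions see the original failure.
	_, rejected := errors.Cause(err).(manifestRejectedError)
	_, incompatible := errors.Cause(err).(compressionIncompatibleError)
	// Only retry with another MIME type if the failure is one that switching
	// formats can fix, and there is another candidate format left to try.
	return (rejected || incompatible) && otherCandidates > 0
}

func main() {
	err := errors.Wrap(compressionIncompatibleError{"zstd not allowed in docker v2s2"}, "writing manifest")
	fmt.Println(canFallBack(err, 2)) // true
}
```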
@@ -809,7 +820,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 	// avoid malicious images causing troubles and to be nice to servers.
 	var copySemaphore *semaphore.Weighted
 	if ic.c.copyInParallel {
-		copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
+		max := ic.c.maxParallelDownloads
+		if max == 0 {
+			max = maxParallelDownloads
+		}
+		copySemaphore = semaphore.NewWeighted(int64(max))
 	} else {
 		copySemaphore = semaphore.NewWeighted(int64(1))
 	}
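The weighted semaphore is what actually enforces the limit: each layer copy acquires one unit before starting and releases it when done, so at most N copies run concurrently. A minimal standalone sketch of the same pattern with golang.org/x/sync/semaphore; the layer list and the per-layer work are invented for illustration:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func copyLayersBounded(ctx context.Context, layers []string, maxParallel uint) error {
	if maxParallel == 0 {
		maxParallel = 6 // fall back to a library-style default, as the diff does
	}
	sem := semaphore.NewWeighted(int64(maxParallel))
	var wg sync.WaitGroup
	for _, layer := range layers {
		// Block until a slot is free (or the context is cancelled).
		if err := sem.Acquire(ctx, 1); err != nil {
			return err
		}
		wg.Add(1)
		go func(l string) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("copying", l) // stand-in for the real per-layer copy
		}(layer)
	}
	wg.Wait()
	return nil
}

func main() {
	_ = copyLayersBounded(context.Background(), []string{"sha256:a", "sha256:b", "sha256:c"}, 2)
}
```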
@@ -896,7 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 	return nil
 }
 
-// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields)
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
 func layerDigestsDiffer(a, b []types.BlobInfo) bool {
 	if len(a) != len(b) {
 		return true
@@ -951,7 +966,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
 		instanceDigest = &manifestDigest
 	}
 	if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
-		return nil, "", errors.Wrap(err, "Error writing manifest")
+		return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man))
 	}
 	return man, manifestDigest, nil
 }
@@ -1049,7 +1064,7 @@ type diffIDResult struct {
 	err    error
 }
 
-// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress,
+// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
 // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
 func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
 	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
@@ -1058,6 +1073,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 
 	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
 	if !diffIDIsNeeded {
+		// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
+		// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
+		// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
+		// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
+		// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
+		// the ImageDestination interface lets us pass in.
 		reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
 		if err != nil {
 			return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
@@ -1115,7 +1136,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 
 // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
 // it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
-// perhaps compressing the stream if canCompress,
+// perhaps (de/re/)compressing the stream,
 // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
 func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
 	diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
@@ -1191,11 +1212,15 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
 
 // copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
 // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
-// perhaps compressing it if canCompress,
+// perhaps (de/re/)compressing it if canModifyBlob,
 // and returns a complete blobInfo of the copied blob.
 func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
 	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
 	canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) {
+	if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
+		canModifyBlob = false
+	}
+
 	// The copying happens through a pipeline of connected io.Readers.
 	// === Input: srcStream
 
@@ -1253,16 +1278,23 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		originalLayerReader = destStream
 	}
 
-	desiredCompressionFormat := c.compressionFormat
-
 	// === Deal with layer compression/decompression if necessary
 	var inputInfo types.BlobInfo
 	var compressionOperation types.LayerCompression
+	uploadCompressionFormat := &c.compressionFormat
+	srcCompressorName := internalblobinfocache.Uncompressed
+	if isCompressed {
+		srcCompressorName = compressionFormat.Name()
+	}
+	var uploadCompressorName string
 	if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
 		// PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
 		logrus.Debugf("Using original blob without modification for encrypted blob")
 		compressionOperation = types.PreserveOriginal
 		inputInfo = srcInfo
+		srcCompressorName = internalblobinfocache.UnknownCompression
+		uploadCompressorName = internalblobinfocache.UnknownCompression
+		uploadCompressionFormat = nil
 	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
 		logrus.Debugf("Compressing blob on the fly")
 		compressionOperation = types.Compress
@@ -1272,11 +1304,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
 		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
 		// we don’t care.
-		go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter
+		go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter
 		destStream = pipeReader
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
-	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() {
+		uploadCompressorName = uploadCompressionFormat.Name()
+	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() {
 		// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
 		// re-compressed using the desired format.
 		logrus.Debugf("Blob will be converted")
@@ -1291,11 +1324,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		pipeReader, pipeWriter := io.Pipe()
 		defer pipeReader.Close()
 
-		go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter
+		go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter
 
 		destStream = pipeReader
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
+		uploadCompressorName = uploadCompressionFormat.Name()
 	} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
 		logrus.Debugf("Blob will be decompressed")
 		compressionOperation = types.Decompress
@@ -1307,11 +1341,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		destStream = s
 		inputInfo.Digest = ""
 		inputInfo.Size = -1
+		uploadCompressorName = internalblobinfocache.Uncompressed
+		uploadCompressionFormat = nil
 	} else {
 		// PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
 		logrus.Debugf("Using original blob without modification")
 		compressionOperation = types.PreserveOriginal
 		inputInfo = srcInfo
+		uploadCompressorName = srcCompressorName
+		uploadCompressionFormat = nil
 	}
 
 	// Perform image encryption for valid mediatypes if ociEncryptConfig provided
|
|||||||
|
|
||||||
uploadedInfo.CompressionOperation = compressionOperation
|
uploadedInfo.CompressionOperation = compressionOperation
|
||||||
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
|
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
|
||||||
if canModifyBlob && !isConfig {
|
uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
|
||||||
uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat
|
|
||||||
}
|
|
||||||
if decrypted {
|
if decrypted {
|
||||||
uploadedInfo.CryptoOperation = types.Decrypt
|
uploadedInfo.CryptoOperation = types.Decrypt
|
||||||
} else if encrypted {
|
} else if encrypted {
|
||||||
@@ -1390,7 +1426,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		}
 	}
 
-	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer
+	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
 	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
 	// So, read everything from originalLayerReader, which will cause the rest to be
 	// sent there if we are not already at EOF.
@@ -1423,6 +1459,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 		default:
 			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
 		}
+		if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
+			c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
+		}
+		if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression {
+			c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
+		}
 	}
 	return uploadedInfo, nil
 }
7 vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored)
@@ -194,7 +196,9 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -210,7 +212,6 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
 		return false, types.BlobInfo{}, err
 	}
 	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
-
 }
 
 // PutManifest writes manifest to the destination.
@@ -251,7 +252,7 @@ func pathExists(path string) (bool, error) {
 	if err == nil {
 		return true, nil
 	}
-	if err != nil && os.IsNotExist(err) {
+	if os.IsNotExist(err) {
 		return false, nil
 	}
 	return false, err
24 vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored)
@@ -23,6 +23,7 @@ import (
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/pkg/tlsclientconfig"
 	"github.com/containers/image/v5/types"
+	"github.com/containers/image/v5/version"
 	"github.com/containers/storage/pkg/homedir"
 	clientLib "github.com/docker/distribution/registry/client"
 	"github.com/docker/go-connections/tlsconfig"
@@ -65,6 +66,8 @@ var (
 		{path: "/etc/containers/certs.d", absolute: true},
 		{path: "/etc/docker/certs.d", absolute: true},
 	}
+
+	defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
 )
 
 // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@@ -94,6 +97,7 @@ type dockerClient struct {
 	// The following members are set by newDockerClient and do not change afterwards.
 	sys       *types.SystemContext
 	registry  string
+	userAgent string
 
 	// tlsClientConfig is setup by newDockerClient and will be used and updated
 	// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
@@ -200,10 +204,8 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 			logrus.Debugf("error accessing certs directory due to permissions: %v", err)
 			continue
 		}
-		if err != nil {
 		return "", err
-		}
 	}
 	return fullCertDirPath, nil
 }
 
@@ -277,9 +279,15 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
 	}
 	tlsClientConfig.InsecureSkipVerify = skipVerify
 
+	userAgent := defaultUserAgent
+	if sys != nil && sys.DockerRegistryUserAgent != "" {
+		userAgent = sys.DockerRegistryUserAgent
+	}
+
 	return &dockerClient{
 		sys:             sys,
 		registry:        registry,
+		userAgent:       userAgent,
 		tlsClientConfig: tlsClientConfig,
 	}, nil
 }
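The client now resolves its User-Agent once at construction: a caller-supplied DockerRegistryUserAgent still wins, and defaultUserAgent is used otherwise, so the three request paths below can send the header unconditionally. A short sketch of overriding it from the caller side; the tool name is hypothetical:

```go
package main

import "github.com/containers/image/v5/types"

func systemContextWithUA() *types.SystemContext {
	return &types.SystemContext{
		// Overrides the new defaultUserAgent
		// ("containers/<version> (github.com/containers/image)").
		// If left empty, that default is sent instead of no header at all.
		DockerRegistryUserAgent: "my-tool/1.0", // hypothetical tool name
	}
}
```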
@@ -529,9 +537,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method,
 			req.Header.Add(n, hh)
 		}
 	}
-	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
-		req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
-	}
+	req.Header.Add("User-Agent", c.userAgent)
 	if auth == v2Auth {
 		if err := c.setupRequestAuth(req, extraScope); err != nil {
 			return nil, err
@@ -637,9 +643,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 	params.Add("client_id", "containers/image")
 
 	authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
-	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
-		authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
-	}
+	authReq.Header.Add("User-Agent", c.userAgent)
 	authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
 	res, err := c.client.Do(authReq)
@@ -692,9 +696,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 	if c.auth.Username != "" && c.auth.Password != "" {
 		authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
 	}
-	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
-		authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
-	}
+	authReq.Header.Add("User-Agent", c.userAgent)
 
 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
 	res, err := c.client.Do(authReq)
29 vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored)
@@ -15,6 +15,7 @@ import (
 	"strings"
 
 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/internal/uploadreader"
 	"github.com/containers/image/v5/manifest"
@@ -284,7 +285,9 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -299,17 +302,23 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
 	}
 	if exists {
 		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
+		return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
 	}
 
 	// Then try reusing blobs from other locations.
-	for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
+	bic := blobinfocache.FromBlobInfoCache(cache)
+	candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
+	for _, candidate := range candidates {
 		candidateRepo, err := parseBICLocationReference(candidate.Location)
 		if err != nil {
 			logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
 			continue
 		}
-		logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
+		if candidate.CompressorName != blobinfocache.Uncompressed {
+			logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
+		} else {
+			logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
+		}
 
 		// Sanity checks:
 		if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
@@ -351,8 +360,16 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
 				continue
 			}
 		}
-		cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
-		return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
+		bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
+
+		compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
+		if err != nil {
+			logrus.Debugf("... Failed: %v", err)
+			continue
+		}
+
+		return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
 	}
 
 	return false, types.BlobInfo{}, nil
5 vendor/github.com/containers/image/v5/docker/docker_image_src.go (generated, vendored)
@@ -64,6 +64,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 	}
 	attempts := []attempt{}
 	for _, pullSource := range pullSources {
+		if sys.DockerLogMirrorChoice {
+			logrus.Infof("Trying to access %q", pullSource.Reference)
+		} else {
+			logrus.Debugf("Trying to access %q", pullSource.Reference)
+		}
 		logrus.Debugf("Trying to access %q", pullSource.Reference)
 		s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
 		if err == nil {
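This introduces the DockerLogMirrorChoice flag on types.SystemContext: when set, the registry/mirror being tried is logged at Info rather than Debug. Note that, as the hunk shows, the old unconditional logrus.Debugf line is kept as context, so in this version the message is also emitted a second time at Debug level. A sketch of enabling the flag; the image reference is illustrative:

```go
package main

import (
	"context"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
)

func openSourceLoggingMirrors(ctx context.Context) error {
	ref, err := docker.ParseReference("//docker.io/library/alpine:latest")
	if err != nil {
		return err
	}
	sys := &types.SystemContext{
		DockerLogMirrorChoice: true, // log "Trying to access ..." at Info level
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return err
	}
	return src.Close()
}
```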
4 vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (generated, vendored)
@@ -159,7 +159,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
8 vendor/github.com/containers/image/v5/docker/lookaside.go (generated, vendored)
@@ -96,10 +96,16 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference,
 
 // registriesDirPath returns a path to registries.d
 func registriesDirPath(sys *types.SystemContext) string {
+	return registriesDirPathWithHomeDir(sys, homedir.Get())
+}
+
+// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
+// it exists only to allow testing it with an artificial home directory.
+func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
 	if sys != nil && sys.RegistriesDirPath != "" {
 		return sys.RegistriesDirPath
 	}
-	userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
+	userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
 	if _, err := os.Stat(userRegistriesDirPath); err == nil {
 		return userRegistriesDirPath
 	}
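Splitting registriesDirPath into a thin wrapper plus a ...WithHomeDir variant is a common Go seam for testing code that depends on ambient state. A generic sketch of the same pattern, with invented names (the real code injects homedir.Get()):

```go
package config

import "path/filepath"

// LoadPath returns the effective config path for the current user.
func LoadPath() string {
	return loadPathWithHomeDir(defaultHomeDir())
}

// loadPathWithHomeDir exists only so tests can inject an artificial
// home directory instead of manipulating the environment.
func loadPathWithHomeDir(homeDir string) string {
	return filepath.Join(homeDir, ".config", "mytool")
}

func defaultHomeDir() string {
	// Stand-in for homedir.Get(); resolution details omitted.
	return "/home/user"
}
```

A test can then call loadPathWithHomeDir(t.TempDir()) directly, with no global setup or teardown.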
21 vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go (generated, vendored)
@@ -52,5 +52,26 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
 		}
 		name = name[:lastSlash]
 	}
+
+	// Strip port number if any, before appending to res slice.
+	// Currently, the most compatible behavior is to return
+	// example.com:8443/ns, example.com:8443, *.com.
+	// If a port number is not specified, the expected behavior would be
+	// example.com/ns, example.com, *.com
+	portNumColon := strings.Index(name, ":")
+	if portNumColon != -1 {
+		name = name[:portNumColon]
+	}
+
+	// Append wildcarded domains to res slice
+	for {
+		firstDot := strings.Index(name, ".")
+		if firstDot == -1 {
+			break
+		}
+		name = name[firstDot+1:]
+
+		res = append(res, "*."+name)
+	}
 	return res
 }
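The added loop extends the namespace list with wildcard entries: after stripping any :port, it walks up the domain one label at a time, emitting "*.<suffix>" for each parent. A standalone sketch of just that step (the helper name is invented; the real function also emits the repository-path namespaces):

```go
package main

import (
	"fmt"
	"strings"
)

// wildcardNamespaces mirrors the wildcard-expansion step added in
// DockerReferenceNamespaces: strip any :port, then walk up the domain,
// appending "*.<suffix>" for each parent domain.
func wildcardNamespaces(host string) []string {
	if colon := strings.Index(host, ":"); colon != -1 {
		host = host[:colon]
	}
	var res []string
	for {
		dot := strings.Index(host, ".")
		if dot == -1 {
			break
		}
		host = host[dot+1:]
		res = append(res, "*."+host)
	}
	return res
}

func main() {
	// Prints [*.example.com *.com].
	fmt.Println(wildcardNamespaces("registry.example.com:8443"))
}
```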
4 vendor/github.com/containers/image/v5/docker/tarfile/dest.go (generated, vendored)
@@ -86,7 +86,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
3 vendor/github.com/containers/image/v5/image/docker_schema2.go (generated, vendored)
@@ -154,6 +154,9 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
 
 // UpdatedImage returns a types.Image modified according to options.
 // This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the CompressionOperation and CompressionAlgorithm specified in one or more
+// options.LayerInfos items is anything other than gzip.
 func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
 	copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
 		src: m.src,
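A caller can now probe whether a manifest format supports a requested compression change and fall back gracefully. A hedged sketch follows; tryZstd is an invented helper, and img is assumed to come from an already-open types.Image.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

// tryZstd attempts to rewrite an image's layers to zstd; schema2 manifests
// reject this with the new error type, so the caller can fall back.
func tryZstd(ctx context.Context, img types.Image) error {
	algo := compression.Zstd
	infos := img.LayerInfos()
	for i := range infos {
		infos[i].CompressionOperation = types.Compress
		infos[i].CompressionAlgorithm = &algo
	}
	_, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{LayerInfos: infos})
	var incompat manifest.ManifestLayerCompressionIncompatibilityError
	if errors.As(err, &incompat) {
		// e.g. retry with compression.Gzip, or convert the manifest to OCI first
		fmt.Println("falling back:", incompat)
		return nil
	}
	return err
}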
4 vendor/github.com/containers/image/v5/image/oci.go generated vendored
@@ -134,6 +134,10 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat
 
 // UpdatedImage returns a types.Image modified according to options.
 // This does not change the state of the original Image object.
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
+// if the combination of CompressionOperation and CompressionAlgorithm specified
+// in one or more options.LayerInfos items indicates that a layer is compressed using
+// an algorithm that is not allowed in OCI.
 func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
 	copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
 		src: m.src,
63 vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go generated vendored Normal file
@@ -0,0 +1,63 @@
+package blobinfocache
+
+import (
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original
+// object if it implements BlobInfoCache2, or a wrapper which discards compression information
+// if it only implements BlobInfoCache.
+func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 {
+	if bic2, ok := bic.(BlobInfoCache2); ok {
+		return bic2
+	}
+	return &v1OnlyBlobInfoCache{
+		BlobInfoCache: bic,
+	}
+}
+
+type v1OnlyBlobInfoCache struct {
+	types.BlobInfoCache
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+}
+
+func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
+	return nil
+}
+
+// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of
+// types.BICReplacementCandidate, dropping compression information.
+func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate {
+	candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates))
+	for _, c := range v2candidates {
+		candidates = append(candidates, types.BICReplacementCandidate{
+			Digest:   c.Digest,
+			Location: c.Location,
+		})
+	}
+	return candidates
+}
+
+// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
+// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
+// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
+// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
+// upon success.
+func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) {
+	switch compressorName {
+	case Uncompressed:
+		return types.Decompress, nil, nil
+	case UnknownCompression:
+		return types.PreserveOriginal, nil, nil
+	default:
+		algo, err := compression.AlgorithmByName(compressorName)
+		if err == nil {
+			return types.Compress, &algo, nil
+		}
+		return types.PreserveOriginal, nil, err
+	}
+}
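Because this new package sits under internal/, only containers/image's own transports can import it; external callers keep seeing the plain types.BlobInfoCache interface. A sketch of the intended call pattern inside such a transport — the tryReuse helper and its flow are illustrative:

package transportsketch

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// tryReuse upgrades the caller-supplied cache, asks for candidates with
// compression info, and translates the recorded compressor name into the
// BlobInfo fields that TryReusingBlob is now documented to return.
func tryReuse(cache types.BlobInfoCache, transport types.ImageTransport, scope types.BICTransportScope, d digest.Digest) (types.BlobInfo, bool, error) {
	cache2 := blobinfocache.FromBlobInfoCache(cache) // works with v1-only caches too
	for _, cand := range cache2.CandidateLocations2(transport, scope, d, true) {
		op, algo, err := blobinfocache.OperationAndAlgorithmForCompressor(cand.CompressorName)
		if err != nil {
			continue // unrecognized compressor name; skip this candidate
		}
		return types.BlobInfo{Digest: cand.Digest, CompressionOperation: op, CompressionAlgorithm: algo}, true, nil
	}
	return types.BlobInfo{}, false, nil
}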
45 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go generated vendored Normal file
@@ -0,0 +1,45 @@
+package blobinfocache
+
+import (
+	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
+)
+
+const (
+	// Uncompressed is the value we store in a blob info cache to indicate that we know that
+	// the blob in the corresponding location is not compressed.
+	Uncompressed = "uncompressed"
+	// UnknownCompression is the value we store in a blob info cache to indicate that we don't
+	// know if the blob in the corresponding location is compressed (and if so, how) or not.
+	UnknownCompression = "unknown"
+)
+
+// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind
+// of compression was applied to the blobs it keeps information about.
+type BlobInfoCache2 interface {
+	types.BlobInfoCache
+	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
+	// or Uncompressed or UnknownCompression.
+	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+	// digest just because some remote author claims so (e.g. because a manifest says so);
+	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+	// information in a manifest.
+	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
+	// that could possibly be reused within the specified (transport scope) (if they still
+	// exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+	// up variants of the blob which have the same uncompressed digest.
+	//
+	// The CompressorName fields in returned data must never be UnknownCompression.
+	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
+}
+
+// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
+type BICReplacementCandidate2 struct {
+	Digest         digest.Digest
+	CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
+	Location       types.BICLocationReference
+}
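The write side of the interface is deliberately narrow: per the WARNING above, a compressor should only be recorded after being verified locally. A sketch of that discipline; recordVerified and the wasGzip flag are invented for illustration:

package transportsketch

import (
	"github.com/containers/image/v5/internal/blobinfocache"
	digest "github.com/opencontainers/go-digest"
)

// recordVerified stores what we learned by actually inspecting blob bytes;
// "gzip" is the Name() of pkg/compression's Gzip algorithm.
func recordVerified(cache2 blobinfocache.BlobInfoCache2, d digest.Digest, wasGzip bool) {
	if wasGzip {
		cache2.RecordDigestCompressorName(d, "gzip")
	} else {
		cache2.RecordDigestCompressorName(d, blobinfocache.Uncompressed)
	}
}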
36 vendor/github.com/containers/image/v5/manifest/common.go generated vendored
@@ -5,7 +5,6 @@ import (
 	"github.com/containers/image/v5/pkg/compression"
 	"github.com/containers/image/v5/types"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
@@ -54,6 +53,12 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean
 
 // compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
 // to mean "no compression"), based on variantTable.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but it can't be combined with this
+// algorithm to produce an updated MIME type that complies with the standard that defines mimeType.
+// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
+// differ from it only in what type of compression has been applied, the returned error will not be
+// a ManifestLayerCompressionIncompatibilityError.
 func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
 	if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
 		return "", fmt.Errorf("cannot update unknown MIME type")
@@ -70,15 +75,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 					return res, nil
 				}
 				if name != mtsUncompressed {
-					return "", fmt.Errorf("%s compression is not supported", name)
+					return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
 				}
-				return "", errors.New("uncompressed variant is not supported")
+				return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
 			}
 			if name != mtsUncompressed {
-				return "", fmt.Errorf("unknown compression algorithm %s", name)
+				return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)}
 			}
 			// We can't very well say “the idea of no compression is unknown”
-			return "", errors.New("uncompressed variant is not supported")
+			return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
 		}
 	}
 }
@@ -90,6 +95,10 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 
 // updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
 // mimeType, based on variantTable. It may use updated.Digest for error messages.
+// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
+// that differ only in what type of compression is applied, but applying updated.CompressionOperation
+// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the
+// standard that defines mimeType.
 func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) {
 	// Note that manifests in containers-storage might be reporting the
 	// wrong media type since the original manifests are stored while layers
@@ -99,6 +108,12 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd
 	// {de}compressed.
 	switch updated.CompressionOperation {
 	case types.PreserveOriginal:
+		// Force a change to the media type if we're being told to use a particular compressor,
+		// since it might be different from the one associated with the media type. Otherwise,
+		// try to keep the original media type.
+		if updated.CompressionAlgorithm != nil {
+			return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+		}
 		// Keep the original media type.
 		return mimeType, nil
 
@@ -116,3 +131,14 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd
 		return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
 	}
 }
+
+// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm
+// could not be applied to a layer MIME type. A caller that receives this should either retry
+// the call with a different compression algorithm, or attempt to use a different manifest type.
+type ManifestLayerCompressionIncompatibilityError struct {
+	text string
+}
+
+func (m ManifestLayerCompressionIncompatibilityError) Error() string {
+	return m.text
+}
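The lookup these hunks extend is, at heart, a small table walk: find the variant set containing mimeType, then pick the entry for the requested compressor name, erroring with the new type when no entry fits. A toy stand-in follows; the table and error type are simplified, and only the control flow mirrors the vendored code.

package main

import "fmt"

type incompatError struct{ text string }

func (e incompatError) Error() string { return e.text }

// variantFor maps (mimeType, compressor name) to an updated MIME type, or an
// incompatError when the format defines variants but none matches.
func variantFor(variants map[string]string, mimeType, name string) (string, error) {
	if res, ok := variants[name]; ok {
		return res, nil
	}
	return "", incompatError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
}

func main() {
	// Real OCI layer media types; the map shape is a simplification.
	ociLayerVariants := map[string]string{
		"gzip": "application/vnd.oci.image.layer.v1.tar+gzip",
		"zstd": "application/vnd.oci.image.layer.v1.tar+zstd",
	}
	fmt.Println(variantFor(ociLayerVariants, "application/vnd.oci.image.layer.v1.tar", "zstd"))
	fmt.Println(variantFor(ociLayerVariants, "application/vnd.oci.image.layer.v1.tar", "bzip2"))
}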
2 vendor/github.com/containers/image/v5/manifest/docker_schema2.go generated vendored
@@ -226,6 +226,8 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
 }
 
 // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that would result in anything other than gzip compression.
 func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 	if len(m.LayersDescriptors) != len(layerInfos) {
 		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
2 vendor/github.com/containers/image/v5/manifest/oci.go generated vendored
@@ -108,6 +108,8 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
 }
 
 // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
+// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
+// CompressionAlgorithm that isn't supported by OCI.
 func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 	if len(m.Layers) != len(layerInfos) {
 		return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
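At the manifest level this becomes a single call, and OCI permits zstd layer media types, so the request that fails for schema2 can succeed here. A sketch — it starts from an empty OCI1 for brevity, where a real caller would parse an existing manifest:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
)

func main() {
	m := &manifest.OCI1{} // assumed: parsed via manifest.OCI1FromManifest in real code
	algo := compression.Zstd
	var blobs []types.BlobInfo
	for _, li := range m.LayerInfos() {
		bi := li.BlobInfo
		bi.CompressionOperation = types.Compress
		bi.CompressionAlgorithm = &algo
		blobs = append(blobs, bi)
	}
	if err := m.UpdateLayerInfos(blobs); err != nil {
		// Would be a manifest.ManifestLayerCompressionIncompatibilityError for
		// an algorithm OCI does not allow; zstd itself is permitted by the spec.
		fmt.Println("incompatible:", err)
	}
}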
4 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go generated vendored
@@ -103,7 +103,9 @@ func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Read
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
5 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go generated vendored
@@ -186,7 +186,9 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@@ -204,6 +206,7 @@ func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
 	if err != nil {
 		return false, types.BlobInfo{}, err
 	}
+
 	return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
 }
 
4 vendor/github.com/containers/image/v5/openshift/openshift.go generated vendored
@@ -410,7 +410,9 @@ func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reade
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
4 vendor/github.com/containers/image/v5/ostree/ostree_dest.go generated vendored
@@ -339,7 +339,9 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
81 vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go generated vendored
@@ -7,6 +7,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
@@ -22,6 +23,9 @@ var (
 
 	// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
 	uncompressedDigestBucket = []byte("uncompressedDigest")
+	// digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed
+	// It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
+	digestCompressorBucket = []byte("digestCompressor")
 	// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
 	// (as a set of key=digest, value="" pairs)
 	digestByUncompressedBucket = []byte("digestByUncompressed")
@@ -95,6 +99,9 @@ type cache struct {
 //
 // Most users should call blobinfocache.DefaultCache instead.
 func New(path string) types.BlobInfoCache {
+	return new2(path)
+}
+func new2(path string) *cache {
 	return &cache{path: path}
 }
 
@@ -220,6 +227,30 @@ func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
 	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
 
+// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
+// compressor, or is blobinfocache.Uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+	_ = bdc.update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
+		if err != nil {
+			return err
+		}
+		key := []byte(anyDigest.String())
+		if previousBytes := b.Get(key); previousBytes != nil {
+			if string(previousBytes) != compressorName {
+				logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
+			}
+		}
+		if compressorName == blobinfocache.UnknownCompression {
+			return b.Delete(key)
+		}
+		return b.Put(key, []byte(compressorName))
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
 // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
@@ -251,20 +282,33 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
 
-// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
-func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
-	b := scopeBucket.Bucket([]byte(digest.String()))
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
+	digestKey := []byte(digest.String())
+	b := scopeBucket.Bucket(digestKey)
 	if b == nil {
 		return candidates
 	}
+	compressorName := blobinfocache.UnknownCompression
+	if compressionBucket != nil {
+		// the bucket won't exist if the cache was created by a v1 implementation and
+		// hasn't yet been updated by a v2 implementation
+		if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
+			compressorName = string(compressorNameValue)
+		}
+	}
+	if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
+		return candidates
+	}
 	_ = b.ForEach(func(k, v []byte) error {
 		t := time.Time{}
 		if err := t.UnmarshalBinary(v); err != nil {
 			return err
 		}
 		candidates = append(candidates, prioritize.CandidateWithTime{
-			Candidate: types.BICReplacementCandidate{
+			Candidate: blobinfocache.BICReplacementCandidate2{
 				Digest:   digest,
+				CompressorName: compressorName,
 				Location: types.BICLocationReference{Opaque: string(k)},
 			},
 			LastSeen: t,
@@ -274,13 +318,17 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 	return candidates
 }
 
-// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
 // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
-func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+	return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+}
+
+func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
 	res := []prioritize.CandidateWithTime{}
 	var uncompressedDigestValue digest.Digest // = ""
 	if err := bdc.view(func(tx *bolt.Tx) error {
@@ -296,8 +344,11 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 		if scopeBucket == nil {
 			return nil
 		}
+		// compressionBucket won't have been created if previous writers never recorded info about compression,
+		// and we don't want to fail just because of that
+		compressionBucket := tx.Bucket(digestCompressorBucket)
 
-		res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
+		res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
 		if canSubstitute {
 			if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
 				b := tx.Bucket(digestByUncompressedBucket)
@@ -310,7 +361,7 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 							return err
 						}
 						if d != primaryDigest && d != uncompressedDigestValue {
-							res = bdc.appendReplacementCandidates(res, scopeBucket, d)
+							res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
 						}
 						return nil
 					}); err != nil {
@@ -319,14 +370,24 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
 				}
 			}
 			if uncompressedDigestValue != primaryDigest {
-				res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
+				res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
 			}
 		}
 		return nil
 	}); err != nil { // Including os.IsNotExist(err)
-		return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
+		return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
 	}
 
 	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
 }
+
+// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+}
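End to end, the bolt-backed cache keeps its public constructor while the v2 read/write paths stay internal. A runnable sketch of the unchanged v1 surface — the path and digest are placeholders, and compression data recorded through the internal v2 interface is simply dropped by CandidateLocations:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	cache := boltdb.New("/tmp/example-blobinfo.boltdb") // most callers should use blobinfocache.DefaultCache
	d := digest.FromString("example layer")             // placeholder digest
	scope := types.BICTransportScope{Opaque: "docker.io/library/example"}

	cache.RecordKnownLocation(docker.Transport, scope, d, types.BICLocationReference{Opaque: "layers/0"})
	for _, c := range cache.CandidateLocations(docker.Transport, scope, d, false) {
		fmt.Println(c.Digest, c.Location.Opaque) // compression info is not part of v1 results
	}
}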
vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go generated vendored
@@ -6,7 +6,7 @@ import (
 	"sort"
 	"time"
 
-	"github.com/containers/image/v5/types"
+	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/opencontainers/go-digest"
 )
 
@@ -17,7 +17,7 @@ const replacementAttempts = 5
 
 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
 type CandidateWithTime struct {
-	Candidate types.BICReplacementCandidate // The replacement candidate
+	Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
 	LastSeen  time.Time // Time the candidate was last known to exist (either read or written)
 }
 
@@ -79,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) {
 
 // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
 // number of entries to limit, only to make testing simpler.
-func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
 	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
 	// compare equal.
 	sort.Sort(&candidateSortState{
@@ -92,7 +92,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
 	if resLength > maxCandidates {
 		resLength = maxCandidates
 	}
-	res := make([]types.BICReplacementCandidate, resLength)
+	res := make([]blobinfocache.BICReplacementCandidate2, resLength)
 	for i := range res {
 		res[i] = cs[i].Candidate
 	}
@@ -105,6 +105,6 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
 //
 // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
 // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
-func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
 	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
 }
51 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go generated vendored
@@ -5,6 +5,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
@@ -25,6 +26,7 @@ type cache struct {
 	uncompressedDigests   map[digest.Digest]digest.Digest
 	digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
 	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
+	compressors           map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
 }
 
 // New returns a BlobInfoCache implementation which is in-memory only.
@@ -36,10 +38,15 @@ type cache struct {
 // Manual users of types.{ImageSource,ImageDestination} might also use
 // this instead of a persistent cache.
 func New() types.BlobInfoCache {
+	return new2()
+}
+
+func new2() *cache {
 	return &cache{
 		uncompressedDigests:   map[digest.Digest]digest.Digest{},
 		digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
 		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
+		compressors:           map[digest.Digest]string{},
 	}
 }
@@ -101,13 +108,33 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 	locationScope[location] = time.Now() // Possibly overwriting an older entry.
 }
 
+// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
+// algorithm, or uncompressed, or that we no longer know.
+func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
+	mem.mutex.Lock()
+	defer mem.mutex.Unlock()
+	if compressorName == blobinfocache.UnknownCompression {
+		delete(mem.compressors, blobDigest)
+		return
+	}
+	mem.compressors[blobDigest] = compressorName
+}
+
 // appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
 	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
 	for l, t := range locations {
+		compressorName, compressorKnown := mem.compressors[digest]
+		if !compressorKnown {
+			if requireCompressionInfo {
+				continue
+			}
+			compressorName = blobinfocache.UnknownCompression
+		}
 		candidates = append(candidates, prioritize.CandidateWithTime{
-			Candidate: types.BICReplacementCandidate{
+			Candidate: blobinfocache.BICReplacementCandidate2{
 				Digest:   digest,
+				CompressorName: compressorName,
 				Location: l,
 			},
 			LastSeen: t,
@@ -123,21 +150,35 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+	return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
+}
+
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
+// within the specified (transport scope) (if they still exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+// uncompressed digest.
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+	return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
+}
+
+func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
 	mem.mutex.Lock()
 	defer mem.mutex.Unlock()
 	res := []prioritize.CandidateWithTime{}
-	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
+	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
 	var uncompressedDigest digest.Digest // = ""
 	if canSubstitute {
 		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
 			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
 			for d := range otherDigests {
 				if d != primaryDigest && d != uncompressedDigest {
-					res = mem.appendReplacementCandidates(res, transport, scope, d)
+					res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
 				}
 			}
 			if uncompressedDigest != primaryDigest {
-				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
+				res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
 			}
 		}
 	}
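The in-memory variant mirrors the same split: externally it still looks like a plain types.BlobInfoCache, while the compressor map is only reachable through the internal v2 interface. A small sketch with placeholder digests:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache/memory"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New() // handy for tests and one-shot copies
	compressed := digest.FromString("compressed layer bytes")     // placeholder
	uncompressed := digest.FromString("uncompressed layer bytes") // placeholder

	// v1 calls work as before; only locally verified pairs should be recorded.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)
	fmt.Println(cache.UncompressedDigest(compressed) == uncompressed) // true
}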
3 vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go generated vendored
@@ -2,6 +2,7 @@
 package none
 
 import (
+	"github.com/containers/image/v5/internal/blobinfocache"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
 )
@@ -16,7 +17,7 @@ type noCache struct {
 // Manifest.Inspect, because configs only have one representation.
 // Any use of BlobInfoCache with blobs should usually use at least a
 // short-lived cache, ideally blobinfocache.DefaultCache.
-var NoCache types.BlobInfoCache = noCache{}
+var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{})
 
 // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
 // May return anyDigest if it is known to be uncompressed.
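Changing NoCache's declared type is source-compatible for outside callers, because BlobInfoCache2 embeds types.BlobInfoCache. A minimal sketch:

package main

import (
	"github.com/containers/image/v5/pkg/blobinfocache/none"
	"github.com/containers/image/v5/types"
)

// needsCache stands in for any API that takes the v1 interface.
func needsCache(c types.BlobInfoCache) {}

func main() {
	needsCache(none.NoCache) // compiles: the v2 interface embeds the v1 one
}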
36 vendor/github.com/containers/image/v5/pkg/docker/config/config.go generated vendored
@@ -86,7 +86,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
 	// Note: we need to read the auth files in the inverse order to prevent
 	// a priority inversion when writing to the map.
 	authConfigs := make(map[string]types.DockerAuthConfig)
-	paths := getAuthFilePaths(sys)
+	paths := getAuthFilePaths(sys, homedir.Get())
 	for i := len(paths) - 1; i >= 0; i-- {
 		path := paths[i]
 		// readJSONFile returns an empty map in case the path doesn't exist.
@@ -126,7 +126,9 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
 
 // getAuthFilePaths returns a slice of authPaths based on the system context
 // in the order they should be searched. Note that some paths may not exist.
-func getAuthFilePaths(sys *types.SystemContext) []authPath {
+// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden
+// by tests.
+func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
 	paths := []authPath{}
 	pathToAuth, lf, err := getPathToAuth(sys)
 	if err == nil {
@@ -139,7 +141,7 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
 	}
 	xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
 	if xdgCfgHome == "" {
-		xdgCfgHome = filepath.Join(homedir.Get(), ".config")
+		xdgCfgHome = filepath.Join(homeDir, ".config")
 	}
 	paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
 	if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
@ -148,11 +150,11 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
|
|||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
paths = append(paths,
|
paths = append(paths,
|
||||||
authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false},
|
authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
paths = append(paths,
|
paths = append(paths,
|
||||||
authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true},
|
authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
|
||||||
)
|
)
|
||||||
return paths
|
return paths
|
||||||
}
|
}
|
||||||
@@ -161,6 +163,12 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
 // file or .docker/config.json, including support for OAuth2 and IdentityToken.
 // If an entry is not found, an empty struct is returned.
 func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
+	return getCredentialsWithHomeDir(sys, registry, homedir.Get())
+}
+
+// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials,
+// it exists only to allow testing it with an artificial home directory.
+func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
 	if sys != nil && sys.DockerAuthConfig != nil {
 		logrus.Debug("Returning credentials from DockerAuthConfig")
 		return *sys.DockerAuthConfig, nil
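This hunk is the template for a refactor repeated throughout this bump: each exported function keeps its signature and delegates to an unexported variant that takes the home directory as a parameter, so tests can inject a temporary directory instead of the real one. A minimal sketch of the pattern with hypothetical names (LookupConfig, homeGetter are illustrative, not the library's code):

```go
package config

import "path/filepath"

// homeGetter stands in for homedir.Get.
func homeGetter() string { return "/home/user" }

// LookupConfig is the exported entry point; production callers
// always get the real home directory.
func LookupConfig(name string) string {
	return lookupConfigWithHomeDir(name, homeGetter())
}

// lookupConfigWithHomeDir is an internal implementation detail of
// LookupConfig; it exists only to allow testing with an artificial
// home directory.
func lookupConfigWithHomeDir(name, homeDir string) string {
	return filepath.Join(homeDir, ".config", name)
}
```

A test can then call lookupConfigWithHomeDir("auth.json", t.TempDir()) without touching the process environment.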
@@ -177,7 +185,7 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
 		}
 	}
 
-	for _, path := range getAuthFilePaths(sys) {
+	for _, path := range getAuthFilePaths(sys, homeDir) {
 		authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
 		if err != nil {
 			logrus.Debugf("Credentials not found")
@@ -203,7 +211,13 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
 // GetCredentials API. The new API should be used and this API is kept to
 // maintain backward compatibility.
 func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
-	auth, err := GetCredentials(sys, registry)
+	return getAuthenticationWithHomeDir(sys, registry, homedir.Get())
+}
+
+// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
+// it exists only to allow testing it with an artificial home directory.
+func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
+	auth, err := getCredentialsWithHomeDir(sys, registry, homeDir)
 	if err != nil {
 		return "", "", err
 	}
@@ -262,6 +276,12 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
 // getPathToAuth gets the path of the auth.json file used for reading and writing credentials
 // returns the path, and a bool specifies whether the file is in legacy format
 func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
+	return getPathToAuthWithOS(sys, runtime.GOOS)
+}
+
+// getPathToAuthWithOS is an internal implementation detail of getPathToAuth,
+// it exists only to allow testing it with an artificial runtime.GOOS.
+func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) {
 	if sys != nil {
 		if sys.AuthFilePath != "" {
 			return sys.AuthFilePath, false, nil
@@ -273,7 +293,7 @@ func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
 			return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
 		}
 	}
-	if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+	if goOS == "windows" || goOS == "darwin" {
 		return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil
 	}
 
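getPathToAuthWithOS applies the same injection idea to runtime.GOOS, making the platform-specific branches reachable from any host. A sketch of the kind of table-driven test this enables — assuming it lives in the same config package; it is not the project's actual test:

```go
package config

import "testing"

// TestGetPathToAuthWithOS exercises each OS branch without needing to
// run on that OS, thanks to the injected goOS parameter.
func TestGetPathToAuthWithOS(t *testing.T) {
	for _, goOS := range []string{"windows", "darwin", "linux"} {
		path, legacy, err := getPathToAuthWithOS(nil, goOS)
		if err != nil {
			t.Fatalf("%s: unexpected error: %v", goOS, err)
		}
		t.Logf("%s: path=%q legacy=%v", goOS, path, legacy)
	}
}
```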
vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go (14 changes; generated, vendored)
@@ -149,9 +149,9 @@ const (
 func (r *Resolved) Description() string {
 	switch r.rationale {
 	case rationaleAlias:
-		return fmt.Sprintf("Resolved short name %q to a recorded short-name alias (origin: %s)", r.userInput, r.originDescription)
+		return fmt.Sprintf("Resolved %q as an alias (%s)", r.userInput, r.originDescription)
 	case rationaleUSR:
-		return fmt.Sprintf("Completed short name %q with unqualified-search registries (origin: %s)", r.userInput, r.originDescription)
+		return fmt.Sprintf("Resolving %q using unqualified-search registries (%s)", r.userInput, r.originDescription)
 	case rationaleUserSelection, rationaleNone:
 		fallthrough
 	default:
@@ -240,14 +240,14 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
 
 	// Create a copy of the system context to make it usable beyond this
 	// function call.
-	var sys *types.SystemContext
 	if ctx != nil {
-		sys = &(*ctx)
+		copy := *ctx
+		ctx = &copy
 	}
 	resolved.systemContext = ctx
 
 	// Detect which mode we're running in.
-	mode, err := sysregistriesv2.GetShortNameMode(sys)
+	mode, err := sysregistriesv2.GetShortNameMode(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -276,7 +276,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
 	resolved.userInput = shortNameRepo
 
 	// If there's already an alias, use it.
-	namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(sys, shortNameRepo.String())
+	namedAlias, aliasOriginDescription, err := sysregistriesv2.ResolveShortNameAlias(ctx, shortNameRepo.String())
 	if err != nil {
 		return nil, err
 	}
@@ -307,7 +307,7 @@ func Resolve(ctx *types.SystemContext, name string) (*Resolved, error) {
 	resolved.rationale = rationaleUSR
 
 	// Query the registry for unqualified-search registries.
-	unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(sys)
+	unqualifiedSearchRegistries, usrConfig, err := sysregistriesv2.UnqualifiedSearchRegistriesWithOrigin(ctx)
 	if err != nil {
 		return nil, err
 	}
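The Resolve change fixes an aliasing bug: `&(*ctx)` dereferences and immediately re-takes the address, which yields the original pointer, so later writes through it would mutate the caller's SystemContext; copying into a local value first yields independent storage. A runnable demonstration of the difference (SystemContext here is a one-field stand-in):

```go
package main

import "fmt"

type SystemContext struct{ AuthFilePath string }

func main() {
	orig := &SystemContext{AuthFilePath: "/etc/auth.json"}

	alias := &(*orig) // same pointer: &(*p) simplifies to p
	alias.AuthFilePath = "/tmp/override.json"
	fmt.Println(orig.AuthFilePath) // "/tmp/override.json" — the caller was mutated

	orig.AuthFilePath = "/etc/auth.json"
	copy := *orig // value copy: independent storage
	ctx := &copy
	ctx.AuthFilePath = "/tmp/override.json"
	fmt.Println(orig.AuthFilePath) // "/etc/auth.json" — the caller is unaffected
}
```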
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (10 changes; generated, vendored)
@@ -405,9 +405,15 @@ type configWrapper struct {
 
 // newConfigWrapper returns a configWrapper for the specified SystemContext.
 func newConfigWrapper(ctx *types.SystemContext) configWrapper {
+	return newConfigWrapperWithHomeDir(ctx, homedir.Get())
+}
+
+// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper,
+// it exists only to allow testing it with an artificial home directory.
+func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper {
 	var wrapper configWrapper
-	userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
-	userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
+	userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile)
+	userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
 
 	// decide configPath using per-user path or system file
 	if ctx != nil && ctx.SystemRegistriesConfPath != "" {
vendor/github.com/containers/image/v5/signature/policy_config.go (8 changes; generated, vendored)
@@ -59,10 +59,16 @@ func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
 
 // defaultPolicyPath returns a path to the default policy of the system.
 func defaultPolicyPath(sys *types.SystemContext) string {
+	return defaultPolicyPathWithHomeDir(sys, homedir.Get())
+}
+
+// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
+// it exists only to allow testing it with an artificial home directory.
+func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
 	if sys != nil && sys.SignaturePolicyPath != "" {
 		return sys.SignaturePolicyPath
 	}
-	userPolicyFilePath := filepath.Join(homedir.Get(), userPolicyFile)
+	userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
 	if _, err := os.Stat(userPolicyFilePath); err == nil {
 		return userPolicyFilePath
 	}
vendor/github.com/containers/image/v5/storage/storage_image.go (4 changes; generated, vendored)
@@ -463,7 +463,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
 // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 // May use and/or update cache.
 func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
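The widened TryReusingBlob contract lets a destination that already holds the blob in a different compression format say so, keeping the written manifest consistent with the stored bytes. A hedged, self-contained sketch of a toy destination honoring that contract — fakeDestination and its index are invented; compression.Zstd and types.Compress are assumed from the library:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// fakeDestination is a toy stand-in for a transport's image destination.
type fakeDestination struct {
	// stored maps a requested digest to the digest/size of a zstd
	// re-encoding this destination already holds.
	stored map[digest.Digest]types.BlobInfo
}

// tryReusingBlob mirrors the documented contract: on reuse it returns
// (true, info, nil), filling the compression fields because the stored
// copy differs from what the source manifest advertises.
func (d *fakeDestination) tryReusingBlob(ctx context.Context, info types.BlobInfo) (bool, types.BlobInfo, error) {
	reused, ok := d.stored[info.Digest]
	if !ok {
		return false, types.BlobInfo{}, nil
	}
	algo := compression.Zstd
	reused.CompressionOperation = types.Compress
	reused.CompressionAlgorithm = &algo
	return true, reused, nil
}

func main() {
	want := digest.FromString("layer")
	dest := &fakeDestination{stored: map[digest.Digest]types.BlobInfo{
		want: {Digest: digest.FromString("layer.zst"), Size: 123},
	}}
	ok, info, _ := dest.tryReusingBlob(context.Background(), types.BlobInfo{Digest: want})
	fmt.Println(ok, info.Digest, info.CompressionOperation == types.Compress)
}
```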
vendor/github.com/containers/image/v5/tarball/doc.go (17 changes; generated, vendored)
@@ -5,11 +5,13 @@
 // package main
 //
 // import (
-//	"fmt"
+//	"context"
 //
 //	cp "github.com/containers/image/v5/copy"
+//	"github.com/containers/image/v5/signature"
 //	"github.com/containers/image/v5/tarball"
 //	"github.com/containers/image/v5/transports/alltransports"
+//	"github.com/containers/image/v5/types"
 //	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 // )
 //
@@ -39,7 +41,18 @@
 //	if err != nil {
 //		panic(err)
 //	}
-//	err = cp.Image(nil, dest, src, nil)
+//
+//	policy, err := signature.DefaultPolicy(nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	pc, err := signature.NewPolicyContext(policy)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer pc.Destroy()
+//	_, err = cp.Image(context.TODO(), pc, dest, src, nil)
 //	if err != nil {
 //		panic(err)
 //	}
vendor/github.com/containers/image/v5/types/types.go (25 changes; generated, vendored)
@@ -126,14 +126,18 @@ type BlobInfo struct {
 	Annotations map[string]string
 	MediaType   string
 	// CompressionOperation is used in Image.UpdateLayerInfos to instruct
-	// whether the original layer should be preserved or (de)compressed. The
-	// field defaults to preserve the original layer.
+	// whether the original layer's "compressed or not" should be preserved,
+	// possibly while changing the compression algorithm from one to another,
+	// or if it should be compressed or decompressed. The field defaults to
+	// preserve the original layer's compressedness.
 	// TODO: To remove together with CryptoOperation in re-design to remove
 	// field out of BlobInfo.
 	CompressionOperation LayerCompression
 	// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
 	// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
-	// set when `CompressionOperation == Compress`.
+	// set when `CompressionOperation == Compress` and MAY be set when
+	// `CompressionOperation == PreserveOriginal` and the compression type is
+	// being changed for an already-compressed layer.
 	CompressionAlgorithm *compression.Algorithm
 	// CryptoOperation is used in Image.UpdateLayerInfos to instruct
 	// whether the original layer was encrypted/decrypted
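Taken together, the two fields express a request such as "recompress this layer as zstd". A hedged sketch of populating them on a BlobInfo destined for Image.UpdateLayerInfos (the digest is a placeholder, and the field/constant names are assumed from the library as documented above):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	zstd := compression.Zstd
	layer := types.BlobInfo{
		Digest: digest.FromString("example layer"),
		Size:   -1, // unknown until the blob is re-encoded
		// Ask Image.UpdateLayerInfos to compress this layer...
		CompressionOperation: types.Compress,
		// ...using zstd, which determines the MIME type of the result.
		CompressionAlgorithm: &zstd,
	}
	fmt.Println(layer.Digest, layer.CompressionOperation == types.Compress, layer.CompressionAlgorithm != nil)
}
```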
@@ -194,6 +198,9 @@ type BICReplacementCandidate struct {
 //
 // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
 // users of the cache should just fall back to copying the blobs the usual way.
+//
+// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
+// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
 type BlobInfoCache interface {
 	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
 	// May return anyDigest if it is known to be uncompressed.
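Following the deprecation note, callers should obtain a cache from the library rather than implement the interface themselves; blobinfocache.DefaultCache is the entry point the comment names. A minimal usage sketch:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/blobinfocache"
	"github.com/containers/image/v5/types"
)

func main() {
	var sys *types.SystemContext // nil is accepted and means defaults
	cache := blobinfocache.DefaultCache(sys)
	// The returned types.BlobInfoCache can be handed to PutBlob /
	// TryReusingBlob; lookups that miss simply fall back to copying.
	fmt.Printf("%T\n", cache)
}
```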
@@ -306,7 +313,9 @@ type ImageDestination interface {
 	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 	// info.Digest must not be empty.
 	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+	// reflected in the manifest that will be written.
 	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 	// May use and/or update cache.
 	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
@@ -397,6 +406,12 @@ type Image interface {
 	// UpdatedImage returns a types.Image modified according to options.
 	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
 	// This does not change the state of the original Image object.
+	// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
+	// manifests of type options.ManifestMIMEType can not include layers that are compressed
+	// in accordance with the CompressionOperation and CompressionAlgorithm specified in one
+	// or more options.LayerInfos items, though retrying with a different
+	// options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
+	// values might succeed.
 	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
 	// SupportsEncryption returns an indicator that the image supports encryption
 	//
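A caller that iterates over candidate manifest MIME types can use this error to decide whether another attempt is worthwhile. A hedged sketch (the helper and its retry signal are illustrative, not library API):

```go
package example

import (
	"context"
	"errors"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// tryUpdate attempts a manifest update and reports whether retrying with
// a different options.ManifestMIMEType might succeed, per the contract
// documented above. Whether errors.As needs a value or a pointer target
// depends on the receiver of the error type's Error method (assumed to
// be a value receiver here).
func tryUpdate(ctx context.Context, img types.Image, opts types.ManifestUpdateOptions) (types.Image, bool, error) {
	updated, err := img.UpdatedImage(ctx, opts)
	if err != nil {
		var incompat manifest.ManifestLayerCompressionIncompatibilityError
		if errors.As(err, &incompat) {
			// The requested compression cannot be represented in this
			// manifest type; the caller may retry with another one.
			return nil, true, err
		}
		return nil, false, err
	}
	return updated, false, nil
}
```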
@@ -600,6 +615,8 @@ type SystemContext struct {
 	DockerDisableV1Ping bool
 	// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
 	DockerDisableDestSchema1MIMETypes bool
+	// If true, the physical pull source of docker transport images is logged as info level
+	DockerLogMirrorChoice bool
 	// Directory to use for OSTree temporary files
 	OSTreeTmpDirPath string
 
vendor/github.com/containers/image/v5/version/version.go (2 changes; generated, vendored)
@@ -6,7 +6,7 @@ const (
 	// VersionMajor is for API incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 9
+	VersionMinor = 10
 	// VersionPatch is for backwards-compatible bug fixes
 	VersionPatch = 0
 
vendor/github.com/klauspost/compress/flate/gen_inflate.go (294 deletions; generated, vendored; file removed)
@@ -1,294 +0,0 @@
-// +build generate
-
-//go:generate go run $GOFILE && gofmt -w inflate_gen.go
-
-package main
-
-import (
-    "os"
-    "strings"
-)
-
-func main() {
-    f, err := os.Create("inflate_gen.go")
-    if err != nil {
-        panic(err)
-    }
-    defer f.Close()
-    types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
-    names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
-    imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
-    f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.
-
-package flate
-
-import (
-`)
-
-    for _, imp := range imports {
-        f.WriteString("\t\"" + imp + "\"\n")
-    }
-    f.WriteString(")\n\n")
-
-    template := `
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) $FUNCNAME$() {
-    const (
-        stateInit = iota // Zero value must be stateInit
-        stateDict
-    )
-    fr := f.r.($TYPE$)
-
-    switch f.stepState {
-    case stateInit:
-        goto readLiteral
-    case stateDict:
-        goto copyHistory
-    }
-
-readLiteral:
-    // Read literal and/or (length, distance) according to RFC section 3.2.3.
-    {
-        var v int
-        {
-            // Inlined v, err := f.huffSym(f.hl)
-            // Since a huffmanDecoder can be empty or be composed of a degenerate tree
-            // with single element, huffSym must error on these two edge cases. In both
-            // cases, the chunks slice will be 0 for the invalid sequence, leading it
-            // satisfy the n == 0 check below.
-            n := uint(f.hl.maxRead)
-            // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-            // but is smart enough to keep local variables in registers, so use nb and b,
-            // inline call to moreBits and reassign b,nb back to f on return.
-            nb, b := f.nb, f.b
-            for {
-                for nb < n {
-                    c, err := fr.ReadByte()
-                    if err != nil {
-                        f.b = b
-                        f.nb = nb
-                        f.err = noEOF(err)
-                        return
-                    }
-                    f.roffset++
-                    b |= uint32(c) << (nb & regSizeMaskUint32)
-                    nb += 8
-                }
-                chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
-                n = uint(chunk & huffmanCountMask)
-                if n > huffmanChunkBits {
-                    chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
-                    n = uint(chunk & huffmanCountMask)
-                }
-                if n <= nb {
-                    if n == 0 {
-                        f.b = b
-                        f.nb = nb
-                        if debugDecode {
-                            fmt.Println("huffsym: n==0")
-                        }
-                        f.err = CorruptInputError(f.roffset)
-                        return
-                    }
-                    f.b = b >> (n & regSizeMaskUint32)
-                    f.nb = nb - n
-                    v = int(chunk >> huffmanValueShift)
-                    break
-                }
-            }
-        }
-
-        var length int
-        switch {
-        case v < 256:
-            f.dict.writeByte(byte(v))
-            if f.dict.availWrite() == 0 {
-                f.toRead = f.dict.readFlush()
-                f.step = (*decompressor).$FUNCNAME$
-                f.stepState = stateInit
-                return
-            }
-            goto readLiteral
-        case v == 256:
-            f.finishBlock()
-            return
-        // otherwise, reference to older data
-        case v < 265:
-            length = v - (257 - 3)
-        case v < maxNumLit:
-            val := decCodeToLen[(v - 257)]
-            length = int(val.length) + 3
-            n := uint(val.extra)
-            for f.nb < n {
-                c, err := fr.ReadByte()
-                if err != nil {
-                    if debugDecode {
-                        fmt.Println("morebits n>0:", err)
-                    }
-                    f.err = err
-                    return
-                }
-                f.roffset++
-                f.b |= uint32(c) << f.nb
-                f.nb += 8
-            }
-            length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
-            f.b >>= n & regSizeMaskUint32
-            f.nb -= n
-        default:
-            if debugDecode {
-                fmt.Println(v, ">= maxNumLit")
-            }
-            f.err = CorruptInputError(f.roffset)
-            return
-        }
-
-        var dist uint32
-        if f.hd == nil {
-            for f.nb < 5 {
-                c, err := fr.ReadByte()
-                if err != nil {
-                    if debugDecode {
-                        fmt.Println("morebits f.nb<5:", err)
-                    }
-                    f.err = err
-                    return
-                }
-                f.roffset++
-                f.b |= uint32(c) << f.nb
-                f.nb += 8
-            }
-            dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
-            f.b >>= 5
-            f.nb -= 5
-        } else {
-            // Since a huffmanDecoder can be empty or be composed of a degenerate tree
-            // with single element, huffSym must error on these two edge cases. In both
-            // cases, the chunks slice will be 0 for the invalid sequence, leading it
-            // satisfy the n == 0 check below.
-            n := uint(f.hd.maxRead)
-            // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-            // but is smart enough to keep local variables in registers, so use nb and b,
-            // inline call to moreBits and reassign b,nb back to f on return.
-            nb, b := f.nb, f.b
-            for {
-                for nb < n {
-                    c, err := fr.ReadByte()
-                    if err != nil {
-                        f.b = b
-                        f.nb = nb
-                        f.err = noEOF(err)
-                        return
-                    }
-                    f.roffset++
-                    b |= uint32(c) << (nb & regSizeMaskUint32)
-                    nb += 8
-                }
-                chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
-                n = uint(chunk & huffmanCountMask)
-                if n > huffmanChunkBits {
-                    chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
-                    n = uint(chunk & huffmanCountMask)
-                }
-                if n <= nb {
-                    if n == 0 {
-                        f.b = b
-                        f.nb = nb
-                        if debugDecode {
-                            fmt.Println("huffsym: n==0")
-                        }
-                        f.err = CorruptInputError(f.roffset)
-                        return
-                    }
-                    f.b = b >> (n & regSizeMaskUint32)
-                    f.nb = nb - n
-                    dist = uint32(chunk >> huffmanValueShift)
-                    break
-                }
-            }
-        }
-
-        switch {
-        case dist < 4:
-            dist++
-        case dist < maxNumDist:
-            nb := uint(dist-2) >> 1
-            // have 1 bit in bottom of dist, need nb more.
-            extra := (dist & 1) << (nb & regSizeMaskUint32)
-            for f.nb < nb {
-                c, err := fr.ReadByte()
-                if err != nil {
-                    if debugDecode {
-                        fmt.Println("morebits f.nb<nb:", err)
-                    }
-                    f.err = err
-                    return
-                }
-                f.roffset++
-                f.b |= uint32(c) << f.nb
-                f.nb += 8
-            }
-            extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
-            f.b >>= nb & regSizeMaskUint32
-            f.nb -= nb
-            dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-        default:
-            if debugDecode {
-                fmt.Println("dist too big:", dist, maxNumDist)
-            }
-            f.err = CorruptInputError(f.roffset)
-            return
-        }
-
-        // No check on length; encoding can be prescient.
-        if dist > uint32(f.dict.histSize()) {
-            if debugDecode {
-                fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
-            }
-            f.err = CorruptInputError(f.roffset)
-            return
-        }
-
-        f.copyLen, f.copyDist = length, int(dist)
-        goto copyHistory
-    }
-
-copyHistory:
-    // Perform a backwards copy according to RFC section 3.2.3.
-    {
-        cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
-        if cnt == 0 {
-            cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
-        }
-        f.copyLen -= cnt
-
-        if f.dict.availWrite() == 0 || f.copyLen > 0 {
-            f.toRead = f.dict.readFlush()
-            f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
-            f.stepState = stateDict
-            return
-        }
-        goto readLiteral
-    }
-}
-
-`
-    for i, t := range types {
-        s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
-        s = strings.Replace(s, "$TYPE$", t, -1)
-        f.WriteString(s)
-    }
-    f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
-    f.WriteString("\tswitch f.r.(type) {\n")
-    for i, t := range types {
-        f.WriteString("\t\tcase " + t + ":\n")
-        f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
-    }
-    f.WriteString("\t\tdefault:\n")
-    f.WriteString("\t\t\treturn f.huffmanBlockGeneric")
-    f.WriteString("\t}\n}\n")
-}
vendor/github.com/klauspost/compress/huff0/README.md (4 changes; generated, vendored)
@@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no
 
 ## News
 
-* Mar 2018: First implementation released. Consider this beta software for now.
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
 
 # Usage
 
vendor/github.com/klauspost/compress/zstd/decoder.go (7 changes; generated, vendored)
@@ -5,7 +5,6 @@
 package zstd
 
 import (
-	"bytes"
 	"errors"
 	"io"
 	"sync"
@@ -179,11 +178,13 @@ func (d *Decoder) Reset(r io.Reader) error {
 	}
 
 	// If bytes buffer and < 1MB, do sync decoding anyway.
-	if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
+	if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
+		var bb2 byter
+		bb2 = bb
 		if debug {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
-		b := bb.Bytes()
+		b := bb2.Bytes()
 		var dst []byte
 		if cap(d.current.b) > 0 {
 			dst = d.current.b
vendor/github.com/klauspost/compress/zstd/zstd.go (8 changes; generated, vendored)
@@ -4,6 +4,7 @@
 package zstd
 
 import (
+	"bytes"
 	"errors"
 	"log"
 	"math"
@@ -146,3 +147,10 @@ func load64(b []byte, i int) uint64 {
 	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
 		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
 }
+
+type byter interface {
+	Bytes() []byte
+	Len() int
+}
+
+var _ byter = &bytes.Buffer{}
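Asserting on the small byter interface instead of the concrete *bytes.Buffer means any in-memory reader exposing Bytes and Len gets the synchronous decode path. A self-contained sketch of a custom type opting in (byter is restated locally; memBlob is hypothetical):

```go
package main

import (
	"bytes"
	"fmt"
)

// byter matches the interface introduced in the diff above.
type byter interface {
	Bytes() []byte
	Len() int
}

// memBlob is a user-defined in-memory payload that satisfies byter,
// so a decoder doing `r.(byter)` would take the sync path for it too.
type memBlob []byte

func (m memBlob) Bytes() []byte { return m }
func (m memBlob) Len() int      { return len(m) }

func main() {
	for _, r := range []interface{}{&bytes.Buffer{}, memBlob("hello")} {
		if bb, ok := r.(byter); ok {
			fmt.Printf("%T: sync decode of %d bytes\n", bb, bb.Len())
		}
	}
}
```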
vendor/github.com/ulikunitz/xz/SECURITY.md (10 additions; generated, vendored; new file)
@@ -0,0 +1,10 @@
+# Security Policy
+
+## Supported Versions
+
+Currently the last minor version v0.5.x is supported.
+
+## Reporting a Vulnerability
+
+Report a vulnerability by creating a Github issue at
+<https://github.com/ulikunitz/xz/issues>. Expect a response in a week.
vendor/github.com/ulikunitz/xz/TODO.md (98 changes; generated, vendored)
@@ -8,19 +8,17 @@
 
 1. Review encoder and check for lzma improvements under xz.
 2. Fix binary tree matcher.
-3. Compare compression ratio with xz tool using comparable parameters
-   and optimize parameters
-4. Do some optimizations
-   - rename operation action and make it a simple type of size 8
-   - make maxMatches, wordSize parameters
-   - stop searching after a certain length is found (parameter sweetLen)
+3. Compare compression ratio with xz tool using comparable parameters and optimize parameters
+4. rename operation action and make it a simple type of size 8
+5. make maxMatches, wordSize parameters
+6. stop searching after a certain length is found (parameter sweetLen)
 
 ## Release v0.7
 
 1. Optimize code
 2. Do statistical analysis to get linear presets.
 3. Test sync.Pool compatability for xz and lzma Writer and Reader
-3. Fuzz optimized code.
+4. Fuzz optimized code.
 
 ## Release v0.8
@@ -44,53 +42,73 @@
 
 ## Package lzma
 
-### Release v0.6
+### v0.6
 
-- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
-  including
-  + simple scan at the dictionary head for the same byte
-  + use the killer byte (requiring matches to get longer, the first
-    test should be the byte that would make the match longer)
-
+* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including
+  * simple scan at the dictionary head for the same byte
+  * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer)
 
 ## Optimizations
 
-- There may be a lot of false sharing in lzma.State; check whether this
-  can be improved by reorganizing the internal structure of it.
-- Check whether batching encoding and decoding improves speed.
+* There may be a lot of false sharing in lzma.State; check whether this can be improved by reorganizing the internal structure of it.
+* Check whether batching encoding and decoding improves speed.
 
 ### DAG optimizations
 
-- Use full buffer to create minimal bit-length above range encoder.
-- Might be too slow (see v0.4)
+* Use full buffer to create minimal bit-length above range encoder.
+* Might be too slow (see v0.4)
 
 ### Different match finders
 
-- hashes with 2, 3 characters additional to 4 characters
-- binary trees with 2-7 characters (uint64 as key, use uint32 as
+* hashes with 2, 3 characters additional to 4 characters
+* binary trees with 2-7 characters (uint64 as key, use uint32 as
   pointers into an array)
-- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
   into an array with bit-stealing for the colors)
 
 ## Release Procedure
 
-- execute goch -l for all packages; probably with lower param like 0.5.
-- check orthography with gospell
-- Write release notes in doc/relnotes.
-- Update README.md
-- xb copyright . in xz directory to ensure all new files have Copyright
-  header
-- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
-  version files
-- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
-- Update TODO.md - write short log entry
-- git checkout master && git merge dev
-- git tag -a <version>
-- git push
+* execute goch -l for all packages; probably with lower param like 0.5.
+* check orthography with gospell
+* Write release notes in doc/relnotes.
+* Update README.md
+* xb copyright . in xz directory to ensure all new files have Copyright header
+* `VERSION=<version> go generate github.com/ulikunitz/xz/...` to update version files
+* Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+* Update TODO.md - write short log entry
+* `git checkout master && git merge dev`
+* `git tag -a <version>`
+* `git push`
 
 ## Log
 
-## 2020-08-19
+### 2020-12-17
+
+Release v0.5.9 fixes warnings, a typo and adds SECURITY.md.
+
+One fix is interesting.
+
+```go
+const (
+	a byte = 0x1
+	b      = 0x2
+)
+```
+
+The constants a and b don't have the same type. Correct is
+
+```go
+const (
+	a byte = 0x1
+	b byte = 0x2
+)
+```
+
+### 2020-08-19
 
 Release v0.5.8 fixes issue
 [issue #35](https://github.com/ulikunitz/xz/issues/35).
@@ -302,7 +320,7 @@ However in Francesco Campoy's presentation "Go for Javaneros
 (Javaïstes?)" is the idea that using an embedded field E, all the
 methods of E will be defined on T. If E is an interface T satisfies E.
 
-https://talks.golang.org/2014/go4java.slide#51
+<https://talks.golang.org/2014/go4java.slide#51>
 
 I have never used this, but it seems to be a cool idea.
 
@@ -327,11 +345,11 @@ and the opCodec.
 
 1. Implemented simple lzmago tool
 2. Tested tool against large 4.4G file
-   - compression worked correctly; tested decompression with lzma
-   - decompression hits a full buffer condition
+   * compression worked correctly; tested decompression with lzma
+   * decompression hits a full buffer condition
 3. Fixed a bug in the compressor and wrote a test for it
4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
 
 ### 2015-01-11
 
-- Release v0.2 because of the working LZMA encoder and decoder
+* Release v0.2 because of the working LZMA encoder and decoder
vendor/github.com/ulikunitz/xz/format.go (22 changes; generated, vendored)
@@ -47,9 +47,9 @@ const HeaderLen = 12
 // Constants for the checksum methods supported by xz.
 const (
 	None   byte = 0x0
-	CRC32       = 0x1
-	CRC64       = 0x4
-	SHA256      = 0xa
+	CRC32  byte = 0x1
+	CRC64  byte = 0x4
+	SHA256 byte = 0xa
 )
 
 // errInvalidFlags indicates that flags are invalid.
@@ -569,22 +569,6 @@ func readFilters(r io.Reader, count int) (filters []filter, err error) {
 	return []filter{f}, err
 }
 
-// writeFilters writes the filters.
-func writeFilters(w io.Writer, filters []filter) (n int, err error) {
-	for _, f := range filters {
-		p, err := f.MarshalBinary()
-		if err != nil {
-			return n, err
-		}
-		k, err := w.Write(p)
-		n += k
-		if err != nil {
-			return n, err
-		}
-	}
-	return n, nil
-}
-
 /*** Index ***/
 
 // record describes a block in the xz file index.
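The checksum-constant fix above, and the 2020-12-17 TODO.md log entry, hinge on the same Go rule: within a const block, a type annotation applies only to its own specification, so the untyped follow-on constants defaulted to int. A runnable check:

```go
package main

import "fmt"

const (
	None  byte = 0x0
	CRC32      = 0x1 // untyped: does NOT inherit byte from the line above
	CRC64 byte = 0x4 // explicit type, as in the fixed code
)

func main() {
	fmt.Printf("None=%T CRC32=%T CRC64=%T\n", None, CRC32, CRC64)
	// Output: None=uint8 CRC32=int CRC64=uint8
}
```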
vendor/github.com/ulikunitz/xz/lzma/bintree.go (5 changes; generated, vendored)
@@ -5,10 +5,7 @@
 package lzma
 
 import (
-	"bufio"
 	"errors"
-	"fmt"
-	"io"
 	"unicode"
 )
 
@@ -349,6 +346,7 @@ func dumpX(x uint32) string {
 	return string(a)
 }
 
+/*
 // dumpNode writes a representation of the node v into the io.Writer.
 func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
 	if v == null {
@@ -377,6 +375,7 @@ func (t *binTree) dump(w io.Writer) error {
 	t.dumpNode(bw, t.root, 0)
 	return bw.Flush()
 }
+*/
 
 func (t *binTree) distance(v uint32) int {
 	dist := int(t.front) - int(v)
vendor/github.com/ulikunitz/xz/lzma/bitops.go (2 changes; generated, vendored)
@@ -18,6 +18,7 @@ var ntz32Table = [32]int8{
 	30, 17, 8, 14, 29, 13, 28, 27,
 }
 
+/*
 // ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
 func ntz32(x uint32) int {
 	if x == 0 {
@@ -26,6 +27,7 @@ func ntz32(x uint32) int {
 	x = (x & -x) * ntz32Const
 	return int(ntz32Table[x>>27])
 }
+*/
 
 // nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
 func nlz32(x uint32) int {
vendor/github.com/ulikunitz/xz/lzma/decoder.go (2 changes; generated, vendored)
@@ -200,7 +200,7 @@ func (d *decoder) decompress() error {
 		op, err := d.readOp()
 		switch err {
 		case nil:
-			break
+			// break
 		case errEOS:
 			d.eos = true
 			if !d.rd.possiblyAtEnd() {
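The `break` to `// break` edit is behavior-preserving: Go switch cases never fall through implicitly, so a trailing break is redundant. A small demonstration:

```go
package main

import "fmt"

func describe(err error) string {
	switch err {
	case nil:
		// no break needed: control leaves the switch at the end of the case
	default:
		return "error: " + err.Error()
	}
	return "ok"
}

func main() {
	fmt.Println(describe(nil))                 // ok
	fmt.Println(describe(fmt.Errorf("boom")))  // error: boom
}
```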
vendor/github.com/ulikunitz/xz/lzma/decoderdict.go (7 changes; generated, vendored)
@@ -126,10 +126,3 @@ func (d *decoderDict) Available() int { return d.buf.Available() }
 
 // Read reads data from the buffer contained in the decoder dictionary.
 func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }
-
-// Buffered returns the number of bytes currently buffered in the
-// decoder dictionary.
-func (d *decoderDict) buffered() int { return d.buf.Buffered() }
-
-// Peek gets data from the buffer without advancing the rear index.
-func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }
vendor/github.com/ulikunitz/xz/lzma/directcodec.go (11 changes; generated, vendored)
@@ -4,21 +4,10 @@
 
 package lzma
 
-import "fmt"
-
 // directCodec allows the encoding and decoding of values with a fixed number
 // of bits. The number of bits must be in the range [1,32].
 type directCodec byte
 
-// makeDirectCodec creates a directCodec. The function panics if the number of
-// bits is not in the range [1,32].
-func makeDirectCodec(bits int) directCodec {
-	if !(1 <= bits && bits <= 32) {
-		panic(fmt.Errorf("bits=%d out of range", bits))
-	}
-	return directCodec(bits)
-}
-
 // Bits returns the number of bits supported by this codec.
 func (dc directCodec) Bits() int {
 	return int(dc)
vendor/github.com/ulikunitz/xz/lzma/distcodec.go (16 changes; generated, vendored)
@@ -20,8 +20,6 @@ const (
 	posSlotBits = 6
 	// number of align bits
 	alignBits = 4
-	// maximum position slot
-	maxPosSlot = 63
 )
 
 // distCodec provides encoding and decoding of distance values.
@@ -45,20 +43,6 @@ func (dc *distCodec) deepcopy(src *distCodec) {
 	dc.alignCodec.deepcopy(&src.alignCodec)
 }
 
-// distBits returns the number of bits required to encode dist.
-func distBits(dist uint32) int {
-	if dist < startPosModel {
-		return 6
-	}
-	// slot s > 3, dist d
-	// s = 2(bits(d)-1) + bit(d, bits(d)-2)
-	// s>>1 = bits(d)-1
-	// bits(d) = 32-nlz32(d)
-	// s>>1=31-nlz32(d)
-	// n = 5 + (s>>1) = 36 - nlz32(d)
-	return 36 - nlz32(dist)
-}
-
 // newDistCodec creates a new distance codec.
 func (dc *distCodec) init() {
 	for i := range dc.posSlotCodecs {
vendor/github.com/ulikunitz/xz/lzma/encoderdict.go (2 changes; generated, vendored)
@@ -19,7 +19,7 @@ type matcher interface {
 }
 
 // encoderDict provides the dictionary of the encoder. It includes an
-// addtional buffer atop of the actual dictionary.
+// additional buffer atop of the actual dictionary.
 type encoderDict struct {
 	buf buffer
 	m   matcher
vendor/github.com/ulikunitz/xz/lzma/header2.go (2 changes; generated, vendored)
@@ -264,7 +264,7 @@ type chunkState byte
 // state
 const (
 	start chunkState = 'S'
-	stop             = 'T'
+	stop  chunkState = 'T'
 )
 
 // errors for the chunk state handling
vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go (13 changes; generated, vendored)
@@ -56,19 +56,6 @@ func (lc *lengthCodec) init() {
 	lc.high = makeTreeCodec(8)
 }
 
-// lBits gives the number of bits used for the encoding of the l value
-// provided to the range encoder.
-func lBits(l uint32) int {
-	switch {
-	case l < 8:
-		return 4
-	case l < 16:
-		return 5
-	default:
-		return 10
-	}
-}
-
 // Encode encodes the length offset. The length offset l can be computed by
 // subtracting minMatchLen (2) from the actual length.
 //
vendor/github.com/ulikunitz/xz/lzma/literalcodec.go (7 changes; generated, vendored)
@@ -123,10 +123,3 @@ const (
 	minLP = 0
 	maxLP = 4
 )
-
-// minState and maxState define a range for the state values stored in
-// the State values.
-const (
-	minState = 0
-	maxState = 11
-)
vendor/github.com/ulikunitz/xz/lzma/operation.go (25 changes; generated, vendored)
@@ -5,7 +5,6 @@
 package lzma
 
 import (
-	"errors"
 	"fmt"
 	"unicode"
 )
@@ -24,30 +23,6 @@ type match struct {
 	n int
 }
 
-// verify checks whether the match is valid. If that is not the case an
-// error is returned.
-func (m match) verify() error {
-	if !(minDistance <= m.distance && m.distance <= maxDistance) {
-		return errors.New("distance out of range")
-	}
-	if !(1 <= m.n && m.n <= maxMatchLen) {
-		return errors.New("length out of range")
-	}
-	return nil
-}
-
-// l return the l-value for the match, which is the difference of length
-// n and 2.
-func (m match) l() uint32 {
-	return uint32(m.n - minMatchLen)
-}
-
-// dist returns the dist value for the match, which is one less of the
-// distance stored in the match.
-func (m match) dist() uint32 {
-	return uint32(m.distance - minDistance)
-}
-
 // Len returns the number of bytes matched.
 func (m match) Len() int {
 	return m.n
vendor/github.com/ulikunitz/xz/lzma/rangecodec.go (26 changes; generated, vendored)
@@ -131,32 +131,6 @@ type rangeDecoder struct {
 	code   uint32
 }
 
-// init initializes the range decoder, by reading from the byte reader.
-func (d *rangeDecoder) init() error {
-	d.nrange = 0xffffffff
-	d.code = 0
-
-	b, err := d.br.ReadByte()
-	if err != nil {
-		return err
-	}
-	if b != 0 {
-		return errors.New("newRangeDecoder: first byte not zero")
-	}
-
-	for i := 0; i < 4; i++ {
-		if err = d.updateCode(); err != nil {
-			return err
-		}
-	}
-
-	if d.code >= d.nrange {
-		return errors.New("newRangeDecoder: d.code >= d.nrange")
-	}
-
-	return nil
-}
-
 // newRangeDecoder initializes a range decoder. It reads five bytes from the
 // reader and therefore may return an error.
 func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
1 vendor/github.com/ulikunitz/xz/lzma/reader2.go generated vendored
@@ -48,7 +48,6 @@ type Reader2 struct {
 	chunkReader io.Reader
 
 	cstate chunkState
-	ctype  chunkType
 }
 
 // NewReader2 creates a reader for an LZMA2 chunk sequence.
6 vendor/github.com/ulikunitz/xz/lzma/state.go generated vendored
@@ -53,12 +53,6 @@ func (s *state) Reset() {
 	s.distCodec.init()
 }
 
-// initState initializes the state.
-func initState(s *state, p Properties) {
-	*s = state{Properties: p}
-	s.Reset()
-}
-
 // newState creates a new state from the give Properties.
 func newState(p Properties) *state {
 	s := &state{Properties: p}
15 vendor/github.com/ulikunitz/xz/reader.go generated vendored
@@ -26,13 +26,6 @@ type ReaderConfig struct {
 	SingleStream bool
 }
 
-// fill replaces all zero values with their default values.
-func (c *ReaderConfig) fill() {
-	if c.DictCap == 0 {
-		c.DictCap = 8 * 1024 * 1024
-	}
-}
-
 // Verify checks the reader parameters for Validity. Zero values will be
 // replaced by default values.
 func (c *ReaderConfig) Verify() error {
@@ -165,9 +158,6 @@ func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error)
 	return r, nil
 }
 
-// errIndex indicates an error with the xz file index.
-var errIndex = errors.New("xz: error in xz file index")
-
 // readTail reads the index body and the xz footer.
 func (r *streamReader) readTail() error {
 	index, n, err := readIndexBody(r.xz)
@@ -265,7 +255,6 @@ type blockReader struct {
 	n    int64
 	hash hash.Hash
 	r    io.Reader
-	err  error
 }
 
 // newBlockReader creates a new block reader.
@@ -315,10 +304,6 @@ func (br *blockReader) record() record {
 	return record{br.unpaddedSize(), br.uncompressedSize()}
 }
 
-// errBlockSize indicates that the size of the block in the block header
-// is wrong.
-var errBlockSize = errors.New("xz: wrong uncompressed size for block")
-
 // Read reads data from the block.
 func (br *blockReader) Read(p []byte) (n int, err error) {
 	n, err = br.r.Read(p)
4 vendor/github.com/ulikunitz/xz/writer.go generated vendored
@@ -6,6 +6,7 @@ package xz
 
 import (
 	"errors"
+	"fmt"
 	"hash"
 	"io"
 
@@ -190,6 +191,9 @@ func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
 		return nil, err
 	}
 	data, err := w.h.MarshalBinary()
+	if err != nil {
+		return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err)
+	}
 	if _, err = xz.Write(data); err != nil {
 		return nil, err
 	}
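The added check wraps the MarshalBinary failure with the %w verb, so callers get an annotated message but can still match the underlying error. A minimal sketch of the behavior (errHeader and marshalBinary are illustrative stand-ins, not xz APIs):

package main

import (
	"errors"
	"fmt"
)

var errHeader = errors.New("header marshal failed")

func marshalBinary() ([]byte, error) { return nil, errHeader }

func main() {
	_, err := marshalBinary()
	// Same wrapping pattern as the hunk above.
	wrapped := fmt.Errorf("w.h.MarshalBinary(): error %w", err)
	fmt.Println(wrapped)
	fmt.Println(errors.Is(wrapped, errHeader)) // true: %w keeps the chain
}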
3 vendor/github.com/vbauerster/mpb/v5/.travis.yml generated vendored
@@ -1,4 +1,7 @@
 language: go
+arch:
+  - amd64
+  - ppc64le
 
 go:
   - 1.14.x
11 vendor/github.com/vbauerster/mpb/v5/bar_option.go generated vendored
@@ -123,13 +123,20 @@ func makeExtFunc(filler BarFiller) extFunc {
 	}
 }
 
-// TrimSpace trims bar's edge spaces.
-func TrimSpace() BarOption {
+// BarFillerTrim bar filler is rendered with leading and trailing space
+// like ' [===] ' by default. With this option leading and trailing
+// space will be removed.
+func BarFillerTrim() BarOption {
 	return func(s *bState) {
 		s.trimSpace = true
 	}
 }
 
+// TrimSpace is an alias to BarFillerTrim.
+func TrimSpace() BarOption {
+	return BarFillerTrim()
+}
+
 // BarStyle overrides mpb.DefaultBarStyle which is "[=>-]<+".
 // It's ok to pass string containing just 5 runes, for example "╢▌▌░╟",
 // if you don't need to override '<' (reverse tip) and '+' (refill rune).
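mpb v5.4.0 renames the option and keeps TrimSpace as a thin alias, so existing callers compile unchanged. A hedged usage sketch, assuming the v5 API as vendored here:

package main

import "github.com/vbauerster/mpb/v5"

func main() {
	p := mpb.New()
	// New name and deprecated alias set the same trimSpace flag.
	newStyle := p.AddBar(100, mpb.BarFillerTrim())
	oldStyle := p.AddBar(100, mpb.TrimSpace())
	newStyle.IncrBy(100)
	oldStyle.IncrBy(100)
	p.Wait()
}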
2 vendor/github.com/vbauerster/mpb/v5/go.mod generated vendored
@@ -4,7 +4,7 @@ require (
 	github.com/VividCortex/ewma v1.1.1
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
 	github.com/mattn/go-runewidth v0.0.9
-	golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
+	golang.org/x/sys v0.0.0-20201218084310-7d0127a74742
 )
 
 go 1.14
4 vendor/github.com/vbauerster/mpb/v5/go.sum generated vendored
@@ -4,5 +4,5 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
-golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ=
+golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
32 vendor/github.com/vbauerster/mpb/v5/progress.go generated vendored
@@ -8,6 +8,7 @@ import (
 	"io"
 	"io/ioutil"
 	"log"
+	"math"
 	"os"
 	"sync"
 	"time"
@@ -40,7 +41,6 @@ type pState struct {
 	pMatrix          map[int][]chan int
 	aMatrix          map[int][]chan int
 	barShutdownQueue []*Bar
-	barPopQueue      []*Bar
 
 	// following are provided/overrided by user
 	idCount int
@@ -179,7 +179,7 @@ func (p *Progress) BarCount() int {
 	}
 }
 
-// Wait waits far all bars to complete and finally shutdowns container.
+// Wait waits for all bars to complete and finally shutdowns container.
 // After this method has been called, there is no way to reuse *Progress
 // instance.
 func (p *Progress) Wait() {
@@ -301,27 +301,18 @@ func (s *pState) flush(cw *cwriter.Writer) error {
 			delete(s.parkedBars, b)
 			b.toDrop = true
 		}
+		if s.popCompleted && !b.noPop {
+			lineCount -= b.extendedLines + 1
+			b.toDrop = true
+		}
 		if b.toDrop {
 			delete(bm, b)
 			s.heapUpdated = true
-		} else if s.popCompleted {
-			if b := b; !b.noPop {
-				defer func() {
-					s.barPopQueue = append(s.barPopQueue, b)
-				}()
-			}
 		}
 		b.cancel()
 	}
 	s.barShutdownQueue = s.barShutdownQueue[0:0]
 
-	for _, b := range s.barPopQueue {
-		delete(bm, b)
-		s.heapUpdated = true
-		lineCount -= b.extendedLines + 1
-	}
-	s.barPopQueue = s.barPopQueue[0:0]
-
 	for b := range bm {
 		heap.Push(&s.bHeap, b)
 	}
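The rewrite above replaces a defer-based pop queue with inline bookkeeping. The old code leaned on the fact that defers queued inside a loop run only when the enclosing function returns, and in LIFO order; a generic illustration of that behavior, unrelated to mpb's types:

package main

import "fmt"

func main() {
	for i := 0; i < 3; i++ {
		i := i // per-iteration copy, the pre-Go 1.22 idiom
		defer fmt.Println("deferred", i)
	}
	fmt.Println("loop done")
	// Output:
	// loop done
	// deferred 2
	// deferred 1
	// deferred 0
}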
@@ -370,7 +361,7 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
 	}
 
 	if s.popCompleted && !bs.noPop {
-		bs.priority = -1
+		bs.priority = -(math.MaxInt32 - s.idCount)
 	}
 
 	bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
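Using a fixed priority of -1 made every popped bar tie in the render heap; the new formula derives a distinct negative priority from the bar's creation counter, so popped bars keep creation order while still sorting below user-set priorities. A small ordering sketch (bar is a stand-in struct, not mpb's):

package main

import (
	"fmt"
	"math"
	"sort"
)

type bar struct{ id, priority int }

func main() {
	var bars []bar
	for id := 0; id < 3; id++ {
		// Old scheme: priority = -1 for every popped bar (ties).
		// New scheme: distinct values that rise with creation order.
		bars = append(bars, bar{id, -(math.MaxInt32 - id)})
	}
	sort.Slice(bars, func(i, j int) bool {
		return bars[i].priority < bars[j].priority
	})
	fmt.Println(bars) // ids print in creation order: 0, 1, 2
}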
@@ -382,8 +373,11 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
 
 func syncWidth(matrix map[int][]chan int) {
 	for _, column := range matrix {
-		column := column
-		go func() {
+		go maxWidthDistributor(column)
+	}
+}
+
+var maxWidthDistributor = func(column []chan int) {
 	var maxWidth int
 	for _, ch := range column {
 		if w := <-ch; w > maxWidth {
@@ -393,6 +387,4 @@ func syncWidth(matrix map[int][]chan int) {
 	for _, ch := range column {
 		ch <- maxWidth
 	}
-		}()
-	}
 }
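Moving the goroutine body into maxWidthDistributor removes the column := column shadow. The shadow was needed because, before Go 1.22, a closure started with go captured the single loop variable by reference; passing the value as an argument gives the same isolation. A minimal demonstration (generic, not mpb code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"a", "b", "c"} {
		wg.Add(1)
		// Passing name as an argument copies it per goroutine,
		// just as the named maxWidthDistributor call does above.
		go func(name string) {
			defer wg.Done()
			fmt.Println(name)
		}(name)
	}
	wg.Wait()
}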
9 vendor/modules.txt vendored
@@ -108,7 +108,7 @@ github.com/containers/common/pkg/umask
 github.com/containers/common/version
 # github.com/containers/conmon v2.0.20+incompatible
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.9.0
+# github.com/containers/image/v5 v5.10.0
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath
@@ -120,6 +120,7 @@ github.com/containers/image/v5/docker/policyconfiguration
 github.com/containers/image/v5/docker/reference
 github.com/containers/image/v5/docker/tarfile
 github.com/containers/image/v5/image
+github.com/containers/image/v5/internal/blobinfocache
 github.com/containers/image/v5/internal/iolimits
 github.com/containers/image/v5/internal/pkg/keyctl
 github.com/containers/image/v5/internal/pkg/platform
@@ -348,7 +349,7 @@ github.com/json-iterator/go
 # github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a
 github.com/juju/ansiterm
 github.com/juju/ansiterm/tabwriter
-# github.com/klauspost/compress v1.11.5
+# github.com/klauspost/compress v1.11.7
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -553,7 +554,7 @@ github.com/uber/jaeger-client-go/transport
 github.com/uber/jaeger-client-go/utils
 # github.com/uber/jaeger-lib v2.2.0+incompatible
 github.com/uber/jaeger-lib/metrics
-# github.com/ulikunitz/xz v0.5.8
+# github.com/ulikunitz/xz v0.5.9
 github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
@@ -562,7 +563,7 @@ github.com/ulikunitz/xz/lzma
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v5 v5.3.0
+# github.com/vbauerster/mpb/v5 v5.4.0
 github.com/vbauerster/mpb/v5
 github.com/vbauerster/mpb/v5/cwriter
 github.com/vbauerster/mpb/v5/decor