Merge pull request #14599 from rhatdan/VENDOR

Vendor in latest containers/image
openshift-ci[bot]
2022-06-17 14:59:17 +00:00
committed by GitHub
50 changed files with 1638 additions and 1148 deletions

go.mod (4 changed lines)

@@ -14,8 +14,8 @@ require (
github.com/containers/buildah v1.26.1-0.20220609225314-e66309ebde8c
github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
github.com/containers/ocicrypt v1.1.5
github.com/containers/psgo v1.7.2
github.com/containers/storage v1.41.1-0.20220616120034-7df64288ef35
github.com/coreos/go-systemd/v22 v22.3.2

go.sum (14 changed lines)

@@ -119,6 +119,7 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/ProtonMail/go-crypto v0.0.0-20220407094043-a94812496cf5/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -344,8 +345,9 @@ github.com/containers/common v0.48.1-0.20220608111710-dbecabbe82c9/go.mod h1:WBL
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4/go.mod h1:OsX9sFexyGF0FCNAjfcVFv3IwMqDyLyV/WQY/roLPcE=
github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471 h1:2mm1jEFATvpdFfp8lUB/yc237OqwruMvfIPiVn1Wpgg=
github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471/go.mod h1:KntCBNQn3qOuZmQuJ38ORyTozmWXiuo05Vef2S0Sm5M=
github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c h1:U2F/FaMt8gPP8sIpBfvXMCP5gAZfyxoYZ7lmu0dwsXc=
github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c/go.mod h1:hEf8L08Hrh/3fK4vLf5l7988MJmij2swfCBUzqgnhF4=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -353,8 +355,9 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f h1:hffElEaoDQfREHltc2wtFPd68BqDmzW6KkEDpuSRBjs=
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/ocicrypt v1.1.5 h1:UO+gBnBXvMvC7HTXLh0bPgLslfW8HlY+oxYcoSHBcZQ=
github.com/containers/ocicrypt v1.1.5/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
@@ -776,8 +779,9 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E=
@@ -1275,8 +1279,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk=
github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
github.com/sylabs/sif/v2 v2.7.1 h1:XXt9AP39sQfsMCGOGQ/XP9H47yqZOvAonalkaCaNIYM=
github.com/sylabs/sif/v2 v2.7.1/go.mod h1:bBse2nEFd3yHkmq6KmAOFEWQg5LdFYiQUdVcgamxlc8=
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -2061,6 +2066,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=

vendor/github.com/containers/image/v5/copy/blob.go generated vendored (new file, 170 lines)

@@ -0,0 +1,170 @@
package copy
import (
"context"
"io"
"github.com/containers/image/v5/internal/private"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcReader to dest,
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob.
func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers;
// that pipeline is built by updating stream.
// === Input: srcReader
stream := sourceStream{
reader: srcReader,
info: srcInfo,
}
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
}
stream.reader = digestingReader
// === Update progress bars
stream.reader = bar.ProxyReader(stream.reader)
// === Decrypt the stream, if required.
decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo)
if err != nil {
return types.BlobInfo{}, err
}
// === Detect compression of the input stream.
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
detectedCompression, err := blobPipelineDetectCompressionStep(&stream, srcInfo)
if err != nil {
return types.BlobInfo{}, err
}
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
if getOriginalLayerCopyWriter != nil {
stream.reader = io.TeeReader(stream.reader, getOriginalLayerCopyWriter(detectedCompression.decompressor))
originalLayerReader = stream.reader
}
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
// short-circuit conditions
canModifyBlob := !isConfig && ic.cannotModifyManifestReason == ""
// === Deal with layer compression/decompression if necessary
compressionStep, err := ic.blobPipelineCompressionStep(&stream, canModifyBlob, srcInfo, detectedCompression)
if err != nil {
return types.BlobInfo{}, err
}
defer compressionStep.close()
// === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
if decryptionStep.decrypting && toEncrypt {
// If nothing else, we can only set uploadedInfo.CryptoOperation to a single value.
// Before relaxing this, see the original pull request's review if there are other reasons to reject this.
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
}
encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep)
if err != nil {
return types.BlobInfo{}, err
}
// === Report progress using the ic.c.progress channel, if required.
if ic.c.progress != nil && ic.c.progressInterval > 0 {
progressReader := newProgressReader(
stream.reader,
ic.c.progress,
ic.c.progressInterval,
srcInfo,
)
defer progressReader.reportDone()
stream.reader = progressReader
}
// === Finally, send the layer stream to dest.
options := private.PutBlobOptions{
Cache: ic.c.blobInfoCache,
IsConfig: isConfig,
EmptyLayer: emptyLayer,
}
if !isConfig {
options.LayerIndex = &layerIndex
}
uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "writing blob")
}
uploadedInfo.Annotations = stream.info.Annotations
compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
if err := encryptionStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil {
return types.BlobInfo{}, err
}
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
// So, read everything from originalLayerReader, which will cause the rest to be
// sent there if we are not already at EOF.
if getOriginalLayerCopyWriter != nil {
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
_, err := io.Copy(io.Discard, originalLayerReader)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
}
}
if digestingReader.validationFailed { // Coverage: This should never happen.
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
}
if stream.info.Digest != "" && uploadedInfo.Digest != stream.info.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, stream.info.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
if err := compressionStep.recordValidatedDigestData(ic.c, uploadedInfo, srcInfo, encryptionStep, decryptionStep); err != nil {
return types.BlobInfo{}, err
}
}
return uploadedInfo, nil
}
// sourceStream encapsulates an input consumed by copyBlobFromStream, in progress of being built.
// This allows handlers of individual aspects to build the copy pipeline without _too much_
// specific cooperation by the caller.
//
// We are currently very far from a generalized plug-and-play API for building/consuming the pipeline
// without specific knowledge of various aspects in copyBlobFromStream; that may come one day.
type sourceStream struct {
reader io.Reader
info types.BlobInfo // corresponding to the data available in reader.
}
// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating errors that happen during read.
// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
type errorAnnotationReader struct {
reader io.Reader
}
// Read annotates any error that happens during read
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
n, err = r.reader.Read(b)
if err != io.EOF {
return n, errors.Wrapf(err, "happened during read")
}
return n, err
}
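
copyBlobFromStream above assembles the copy as a chain of io.Reader wrappers, each adding one concern: digest validation, progress reporting, tee-ing a side copy, and so on. A minimal standalone sketch of that pattern using only the Go standard library; all names here are illustrative, not part of the containers/image API:

package main

import (
    "crypto/sha256"
    "fmt"
    "hash"
    "io"
    "strings"
)

// digestingSketch mimics the validation step: it hashes everything read through it.
type digestingSketch struct {
    r io.Reader
    h hash.Hash
}

func (d *digestingSketch) Read(p []byte) (int, error) {
    n, err := d.r.Read(p)
    if n > 0 {
        d.h.Write(p[:n]) // hash.Hash.Write never returns an error
    }
    return n, err
}

func main() {
    // === Input: the source blob.
    var stream io.Reader = strings.NewReader("layer data")

    // === Validate a digest while copying.
    h := sha256.New()
    stream = &digestingSketch{r: stream, h: h}

    // === Tee a copy to a side consumer (stand-in for getOriginalLayerCopyWriter).
    var side strings.Builder
    stream = io.TeeReader(stream, &side)

    // === Destination: drain the pipeline (stand-in for dest.PutBlobWithOptions).
    if _, err := io.Copy(io.Discard, stream); err != nil {
        panic(err)
    }
    fmt.Printf("digest sha256:%x, side copy %q\n", h.Sum(nil), side.String())
}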

vendor/github.com/containers/image/v5/copy/compression.go generated vendored (new file, 320 lines)

@@ -0,0 +1,320 @@
package copy
import (
"io"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
type bpDetectCompressionStepData struct {
isCompressed bool
format compressiontypes.Algorithm // Valid if isCompressed
decompressor compressiontypes.DecompressorFunc // Valid if isCompressed
srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob.
}
// blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
// srcInfo is only used for error messages.
// Returns data for other steps.
func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobInfo) (bpDetectCompressionStepData, error) {
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return bpDetectCompressionStepData{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
}
stream.reader = reader
res := bpDetectCompressionStepData{
isCompressed: decompressor != nil,
format: format,
decompressor: decompressor,
}
if res.isCompressed {
res.srcCompressorName = format.Name()
} else {
res.srcCompressorName = internalblobinfocache.Uncompressed
}
if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
}
return res, nil
}
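
Because detection consumes the first bytes of the stream, the detector must hand back a replacement reader that still yields the full contents. A sketch of that peek-then-chain idea using bufio and gzip only (the real DetectCompressionFormat recognizes several formats):

package main

import (
    "bufio"
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
)

// detectGzip peeks at the stream without losing data: the returned reader
// yields the complete original contents, starting from the peeked bytes.
func detectGzip(r io.Reader) (isGzip bool, replacement io.Reader, err error) {
    br := bufio.NewReader(r)
    magic, err := br.Peek(2)
    if err != nil && err != io.EOF {
        return false, nil, err
    }
    return len(magic) == 2 && magic[0] == 0x1f && magic[1] == 0x8b, br, nil
}

func main() {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte("hello"))
    zw.Close()

    compressed, reader, err := detectGzip(&buf)
    if err != nil {
        panic(err)
    }
    fmt.Println("compressed:", compressed)

    // The chained reader still delivers the complete stream.
    zr, err := gzip.NewReader(reader)
    if err != nil {
        panic(err)
    }
    data, _ := io.ReadAll(zr)
    fmt.Println(string(data))
}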
// bpCompressionStepData contains data that the copy pipeline needs about the compression step.
type bpCompressionStepData struct {
operation types.LayerCompression // Operation to use for updating the blob metadata.
uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob.
closers []io.Closer // Objects to close after the upload is done, if any.
}
// blobPipelineCompressionStep updates *stream to compress and/or decompress it.
// srcInfo is primarily used for error messages.
// Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedDigestData,
// and must eventually call close.
func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModifyBlob bool, srcInfo types.BlobInfo,
detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
// short-circuit conditions
layerCompressionChangeSupported := ic.src.CanChangeLayerCompression(stream.info.MediaType)
if !layerCompressionChangeSupported {
logrus.Debugf("Compression change for blob %s (%q) not supported", srcInfo.Digest, stream.info.MediaType)
}
if canModifyBlob && layerCompressionChangeSupported {
for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
ic.bpcPreserveEncrypted,
ic.bpcCompressUncompressed,
ic.bpcRecompressCompressed,
ic.bpcDecompressCompressed,
} {
res, err := fn(stream, detected)
if err != nil {
return nil, err
}
if res != nil {
return res, nil
}
}
}
return ic.bpcPreserveOriginal(stream, detected, layerCompressionChangeSupported), nil
}
// bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if isOciEncrypted(stream.info.MediaType) {
logrus.Debugf("Using original blob without modification for encrypted blob")
// PreserveOriginal: compression cannot be applied to an encrypted blob unless it is decrypted first
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: nil,
srcCompressorName: internalblobinfocache.UnknownCompression,
uploadedCompressorName: internalblobinfocache.UnknownCompression,
}, nil
}
return nil, nil
}
// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
logrus.Debugf("Compressing blob on the fly")
var uploadedAlgorithm *compressiontypes.Algorithm
if ic.c.compressionFormat != nil {
uploadedAlgorithm = ic.c.compressionFormat
} else {
uploadedAlgorithm = defaultCompressionFormat
}
reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm)
// Note: reader must be closed on all return paths.
stream.reader = reader
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
Digest: "",
Size: -1,
}
return &bpCompressionStepData{
operation: types.Compress,
uploadedAlgorithm: uploadedAlgorithm,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: uploadedAlgorithm.Name(),
closers: []io.Closer{reader},
}, nil
}
return nil, nil
}
// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() {
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
decompressed, err := detected.decompressor(stream.reader)
if err != nil {
return nil, err
}
succeeded := false
defer func() {
if !succeeded {
decompressed.Close()
}
}()
recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat)
// Note: recompressed must be closed on all return paths.
stream.reader = recompressed
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
Digest: "",
Size: -1,
}
succeeded = true
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: ic.c.compressionFormat,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: ic.c.compressionFormat.Name(),
closers: []io.Closer{decompressed, recompressed},
}, nil
}
return nil, nil
}
// bpcDecompressCompressed checks if we should be decompressing a compressed input, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Decompress && detected.isCompressed {
logrus.Debugf("Blob will be decompressed")
s, err := detected.decompressor(stream.reader)
if err != nil {
return nil, err
}
// Note: s must be closed on all return paths.
stream.reader = s
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
Digest: "",
Size: -1,
}
return &bpCompressionStepData{
operation: types.Decompress,
uploadedAlgorithm: nil,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: internalblobinfocache.Uncompressed,
closers: []io.Closer{s},
}, nil
}
return nil, nil
}
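
In each branch above, once the wrapped reader changes the bytes, the previously known digest and size no longer describe the stream, so they are reset to "" and -1 and the wrapper is queued for closing. A standalone sketch of the decompress case, with gzip standing in for the DecompressorFunc:

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
)

// blobInfoSketch is a stand-in for types.BlobInfo: metadata describing the stream's bytes.
type blobInfoSketch struct {
    Digest string
    Size   int64
}

func main() {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte("layer contents"))
    zw.Close()
    info := blobInfoSketch{Digest: "sha256:of-the-compressed-bytes", Size: int64(buf.Len())}

    // Decompress on the fly: wrap the reader, then invalidate the metadata,
    // because the known digest and size described the compressed bytes.
    zr, err := gzip.NewReader(&buf)
    if err != nil {
        panic(err)
    }
    defer zr.Close() // like the closers list above: close once the consumer is done

    info = blobInfoSketch{Digest: "", Size: -1}
    data, err := io.ReadAll(zr)
    if err != nil {
        panic(err)
    }
    fmt.Printf("decompressed %d bytes; metadata now %+v\n", len(data), info)
}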
// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
layerCompressionChangeSupported bool) *bpCompressionStepData {
logrus.Debugf("Using original blob without modification")
// Remember if the original blob was compressed, and if so how, so that if
// LayerInfosForCopy() returned something that differs from what was in the
// source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
// it will be able to correctly derive the MediaType for the copied blob.
//
// But don't touch blobs in objects where we can't change compression,
// so that src.UpdatedImage() doesn't fail; assume that for such blobs
// LayerInfosForCopy() should not be making any changes in the first place.
var algorithm *compressiontypes.Algorithm
if layerCompressionChangeSupported && detected.isCompressed {
algorithm = &detected.format
} else {
algorithm = nil
}
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: algorithm,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: detected.srcCompressorName,
}
}
// updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
*operation = d.operation
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
*algorithm = d.uploadedAlgorithm
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range d.uploadedAnnotations {
(*annotations)[k] = v
}
}
// recordValidatedDigestData updates c.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
// This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
// Don't record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it's not trivially obvious, so let's be conservative for now.
// This crude approach also means we don't need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encryptionStep.encrypting && !decryptionStep.decrypting {
// If d.operation != types.PreserveOriginal, we now have two reliable digest values:
// srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
// uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
// (because stream.info.Digest == "", this must have been computed afresh).
switch d.operation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return errors.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
return nil
}
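
RecordDigestUncompressedPair maps a digest to the digest of its uncompressed form, which is what later lets TryReusingBlob substitute equivalent blobs. A hypothetical in-memory stand-in showing the direction of the mapping for each operation (the real cache is the persistent one in internal/blobinfocache):

package main

import "fmt"

// memoryCacheSketch is a hypothetical stand-in for the blob info cache.
type memoryCacheSketch struct {
    uncompressed map[string]string // any digest -> digest of its uncompressed form
}

func (m *memoryCacheSketch) RecordDigestUncompressedPair(anyDigest, uncompressed string) {
    m.uncompressed[anyDigest] = uncompressed
}

func main() {
    c := &memoryCacheSketch{uncompressed: map[string]string{}}
    srcDigest := "sha256:aaaa"      // verified digest of the blob read from the source
    uploadedDigest := "sha256:bbbb" // digest of the blob written to the destination

    // types.Compress: the source held the uncompressed form.
    c.RecordDigestUncompressedPair(uploadedDigest, srcDigest)
    // types.Decompress: the destination now holds the uncompressed form.
    c.RecordDigestUncompressedPair(srcDigest, uploadedDigest)

    fmt.Println(c.uncompressed)
}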
// close closes objects that carry state throughout the compression/decompression operation.
func (d *bpCompressionStepData) close() {
for _, c := range d.closers {
c.Close()
}
}
// doCompression reads all input from src and writes its compressed equivalent to dest.
func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil {
return err
}
buf := make([]byte, compressionBufferSize)
_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
if err != nil {
compressor.Close()
return err
}
return compressor.Close()
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
}
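
The defer above wraps CloseWithError in a closure precisely so that err is read when compressGoroutine returns, not when the defer statement runs; writing "defer dest.CloseWithError(err)" would capture the initial placeholder error. A minimal demonstration of the difference:

package main

import (
    "errors"
    "fmt"
)

func eager() (captured error) {
    err := errors.New("placeholder")
    defer func(e error) { captured = e }(err) // err evaluated NOW: placeholder
    err = nil
    return
}

func lazy() (captured error) {
    err := errors.New("placeholder")
    defer func() { captured = err }() // err evaluated at return time: nil
    err = nil
    return
}

func main() {
    fmt.Println("eager:", eager()) // eager: placeholder
    fmt.Println("lazy:", lazy())   // lazy: <nil>
}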
// compressedStream returns a stream of the input reader's contents compressed using algorithm, and a metadata map.
// The caller must close the returned reader.
// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
pipeReader, pipeWriter := io.Pipe()
annotations := map[string]string{}
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
return pipeReader, annotations
}
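
A self-contained sketch of the same pipe-based design with gzip: a goroutine compresses into the write end, the returned read end streams the result, and CloseWithError propagates the goroutine's outcome to the reader:

package main

import (
    "compress/gzip"
    "fmt"
    "io"
    "strings"
)

// compressedStreamSketch mirrors the shape of the function above, gzip-only.
func compressedStreamSketch(src io.Reader) io.ReadCloser {
    pr, pw := io.Pipe()
    go func() {
        zw := gzip.NewWriter(pw)
        _, err := io.Copy(zw, src)
        if cerr := zw.Close(); err == nil {
            err = cerr
        }
        pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
    }()
    return pr
}

func main() {
    rc := compressedStreamSketch(strings.NewReader("some layer data"))
    defer rc.Close()

    n, err := io.Copy(io.Discard, rc)
    if err != nil {
        panic(err)
    }
    fmt.Printf("wrote %d compressed bytes\n", n)
}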

vendor/github.com/containers/image/v5/copy/copy.go (modified)

@@ -12,8 +12,8 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagesource"
"github.com/containers/image/v5/internal/pkg/platform"
@@ -25,7 +25,6 @@ import (
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
encconfig "github.com/containers/ocicrypt/config"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -82,7 +81,7 @@ type copier struct {
type imageCopier struct {
c *copier
manifestUpdates *types.ManifestUpdateOptions
src types.Image
src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
@@ -349,13 +348,8 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
// (possibly remote) destination). It returns true, plus the destination's manifest, MIME type, and digest, if they compare equal.
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src types.Image, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
srcManifest, _, err := src.Manifest(ctx)
if err != nil {
return false, nil, "", "", errors.Wrapf(err, "reading manifest from image")
}
srcManifestDigest, err := manifest.Digest(srcManifest)
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
if err != nil {
return false, nil, "", "", errors.Wrapf(err, "calculating manifest digest")
}
@@ -620,11 +614,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
sourceManifest, _, err := src.Manifest(ctx)
if err != nil {
return nil, "", "", errors.Wrapf(err, "reading manifest from source image")
}
matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
return nil, "", "", errors.Wrapf(err, "computing digest of source image's manifest")
}
@@ -688,12 +678,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
cannotModifyManifestReason: cannotModifyManifestReason,
ociEncryptLayers: options.OciEncryptLayers,
}
// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there's a risk
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
// and we would reuse and sign it.
// Decide whether we can substitute blobs with semantic equivalents:
// - Don't do that if we can't modify the manifest at all
// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there's a risk
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
// and we would reuse and sign it.
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && options.SignBy == ""
if err := ic.updateEmbeddedDockerReference(); err != nil {
@@ -702,12 +694,23 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
// We compute preferredManifestMIMEType only to show it in error messages.
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
srcMIMEType: ic.src.ManifestMIMEType,
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
forceManifestMIMEType: options.ForceManifestMIMEType,
requiresOCIEncryption: destRequiresOciEncryption,
cannotModifyManifestReason: ic.cannotModifyManifestReason,
})
if err != nil {
return nil, "", "", err
}
// We set up this part of ic.manifestUpdates quite early, not just around the
// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
// (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
// on the expected destination format.
if manifestConversionPlan.preferredMIMETypeNeedsConversion {
ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
}
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
@@ -742,22 +745,22 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
retManifestType = preferredManifestMIMEType
retManifestType = manifestConversionPlan.preferredMIMEType
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
// because we failed to create a manifest of the specified type because the specific manifest type
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
// have other options available that could still succeed.
_, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
_, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
// We don't have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don't bother the user with MIME types if we have no choice.
return nil, "", "", err
}
// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let's bail out early and with a better error message.
@@ -766,8 +769,8 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
@@ -908,11 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// The manifest is used to extract the information whether a given
// layer is empty.
manifestBlob, manifestType, err := ic.src.Manifest(ctx)
if err != nil {
return err
}
man, err := manifest.FromBlob(manifestBlob, manifestType)
man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
if err != nil {
return err
}
@@ -1022,7 +1021,7 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool {
// stores the resulting config and manifest to the destination, and returns the stored manifest
// and its digest.
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
pendingImage := ic.src
var pendingImage types.Image = ic.src
if !ic.noPendingManifestUpdates() {
if ic.cannotModifyManifestReason != "" {
return nil, "", errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
@@ -1047,7 +1046,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
return nil, "", errors.Wrap(err, "reading manifest")
}
if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
if err := ic.copyConfig(ctx, pendingImage); err != nil {
return nil, "", err
}
@@ -1067,19 +1066,19 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
}
// copyConfig copies config.json, if any, from src to dest.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
if err := c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying config: %w", err)
}
defer c.concurrentBlobCopiesSemaphore.Release(1)
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool := c.newProgressPool()
progressPool := ic.c.newProgressPool()
defer progressPool.Wait()
bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
defer bar.Abort(false)
configBlob, err := src.ConfigBlob(ctx)
@@ -1087,7 +1086,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
}
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
}
@@ -1146,6 +1145,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// Don't read the layer from the source if we already have the blob, and optimizations are acceptable.
if canAvoidProcessingCompleteLayer {
canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
@@ -1154,7 +1157,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// the ImageDestination interface lets us pass in.
reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
CanSubstitute: ic.canSubstituteBlobs,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
@@ -1303,7 +1306,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
}
}
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.cannotModifyManifestReason == "", false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@@ -1333,350 +1336,3 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF
return digest.Canonical.FromReader(stream)
}
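
A DiffID is a digest over the uncompressed layer stream; digest.Canonical in the real code is SHA-256. A standalone sketch of computeDiffID's logic, with gzip standing in for the DecompressorFunc:

package main

import (
    "bytes"
    "compress/gzip"
    "crypto/sha256"
    "fmt"
    "io"
)

// computeDiffID digests the *uncompressed* contents of a (possibly
// compressed) layer stream, which is what a DiffID is.
func computeDiffID(stream io.Reader, decompress func(io.Reader) (io.ReadCloser, error)) (string, error) {
    if decompress != nil {
        s, err := decompress(stream)
        if err != nil {
            return "", err
        }
        defer s.Close()
        stream = s
    }
    h := sha256.New()
    if _, err := io.Copy(h, stream); err != nil {
        return "", err
    }
    return fmt.Sprintf("sha256:%x", h.Sum(nil)), nil
}

func main() {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte("layer tar contents"))
    zw.Close()

    diffID, err := computeDiffID(&buf, func(r io.Reader) (io.ReadCloser, error) {
        return gzip.NewReader(r)
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(diffID)
}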
// errorAnnotationReader wraps the io.Reader passed to PutBlob for annotating errors that happen during read.
// These errors are reported as PutBlob errors, so we would otherwise misleadingly attribute them to the copy destination.
type errorAnnotationReader struct {
reader io.Reader
}
// Read annotates any error that happens during read
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
n, err = r.reader.Read(b)
if err != io.EOF {
return n, errors.Wrapf(err, "happened during read")
}
return n, err
}
// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false
}
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
// === Process input through digestingReader to validate against the expected digest.
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
// use a separate validation failure indicator.
// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
// dest.PutBlob may detect that the layer already exists, in which case we don't
// read stream to the end, and validation does not happen.
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
}
var destStream io.Reader = digestingReader
// === Update progress bars
destStream = bar.ProxyReader(destStream)
// === Decrypt the stream, if required.
var decrypted bool
if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
newDesc := imgspecv1.Descriptor{
Annotations: srcInfo.Annotations,
}
var d digest.Digest
destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
}
srcInfo.Digest = d
srcInfo.Size = -1
for k := range srcInfo.Annotations {
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
delete(srcInfo.Annotations, k)
}
}
decrypted = true
}
// === Detect compression of the input stream.
// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
}
isCompressed := decompressor != nil
if expectedCompressionFormat, known := expectedCompressionFormats[srcInfo.MediaType]; known && isCompressed && compressionFormat.Name() != expectedCompressionFormat.Name() {
logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
}
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
if getOriginalLayerCopyWriter != nil {
destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
originalLayerReader = destStream
}
compressionMetadata := map[string]string{}
// === Deal with layer compression/decompression if necessary
// WARNING: If you are adding new reasons to change the blob, update also the OptimizeDestinationImageAlreadyExists
// short-circuit conditions
var inputInfo types.BlobInfo
var compressionOperation types.LayerCompression
var uploadCompressionFormat *compressiontypes.Algorithm
srcCompressorName := internalblobinfocache.Uncompressed
if isCompressed {
srcCompressorName = compressionFormat.Name()
}
var uploadCompressorName string
if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
// PreserveOriginal: compression cannot be applied to an encrypted blob unless it is decrypted first
logrus.Debugf("Using original blob without modification for encrypted blob")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
srcCompressorName = internalblobinfocache.UnknownCompression
uploadCompressionFormat = nil
uploadCompressorName = internalblobinfocache.UnknownCompression
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
compressionOperation = types.Compress
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
if c.compressionFormat != nil {
uploadCompressionFormat = c.compressionFormat
} else {
uploadCompressionFormat = defaultCompressionFormat
}
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go c.compressGoroutine(pipeWriter, destStream, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressorName = uploadCompressionFormat.Name()
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed &&
c.compressionFormat != nil && c.compressionFormat.Name() != compressionFormat.Name() {
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
compressionOperation = types.PreserveOriginal
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
}
defer s.Close()
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
uploadCompressionFormat = c.compressionFormat
go c.compressGoroutine(pipeWriter, s, compressionMetadata, *uploadCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressorName = uploadCompressionFormat.Name()
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
compressionOperation = types.Decompress
s, err := decompressor(destStream)
if err != nil {
return types.BlobInfo{}, err
}
defer s.Close()
destStream = s
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressionFormat = nil
uploadCompressorName = internalblobinfocache.Uncompressed
} else {
// PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
logrus.Debugf("Using original blob without modification")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
// Remember if the original blob was compressed, and if so how, so that if
// LayerInfosForCopy() returned something that differs from what was in the
// source's manifest, and UpdatedImage() needs to call UpdateLayerInfos(),
// it will be able to correctly derive the MediaType for the copied blob.
if isCompressed {
uploadCompressionFormat = &compressionFormat
} else {
uploadCompressionFormat = nil
}
uploadCompressorName = srcCompressorName
}
// === Encrypt the stream for valid mediatypes if ociEncryptConfig provided
var (
encrypted bool
finalizer ocicrypt.EncryptLayerFinalizer
)
if toEncrypt {
if decrypted {
return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
}
if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
var annotations map[string]string
if !decrypted {
annotations = srcInfo.Annotations
}
desc := imgspecv1.Descriptor{
MediaType: srcInfo.MediaType,
Digest: srcInfo.Digest,
Size: srcInfo.Size,
Annotations: annotations,
}
s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
}
destStream = s
finalizer = fin
inputInfo.Digest = ""
inputInfo.Size = -1
encrypted = true
}
}
// === Report progress using the c.progress channel, if required.
if c.progress != nil && c.progressInterval > 0 {
progressReader := newProgressReader(
destStream,
c.progress,
c.progressInterval,
srcInfo,
)
defer progressReader.reportDone()
destStream = progressReader
}
// === Finally, send the layer stream to dest.
options := private.PutBlobOptions{
Cache: c.blobInfoCache,
IsConfig: isConfig,
EmptyLayer: emptyLayer,
}
if !isConfig {
options.LayerIndex = &layerIndex
}
uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "writing blob")
}
uploadedInfo.Annotations = srcInfo.Annotations
uploadedInfo.CompressionOperation = compressionOperation
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
if decrypted {
uploadedInfo.CryptoOperation = types.Decrypt
} else if encrypted {
encryptAnnotations, err := finalizer()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
}
uploadedInfo.CryptoOperation = types.Encrypt
if uploadedInfo.Annotations == nil {
uploadedInfo.Annotations = map[string]string{}
}
for k, v := range encryptAnnotations {
uploadedInfo.Annotations[k] = v
}
}
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
// So, read everything from originalLayerReader, which will cause the rest to be
// sent there if we are not already at EOF.
if getOriginalLayerCopyWriter != nil {
logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
_, err := io.Copy(io.Discard, originalLayerReader)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
}
}
if digestingReader.validationFailed { // Coverage: This should never happen.
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
}
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
// Don't record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it's not trivially obvious, so let's be conservative for now.
// This crude approach also means we don't need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encrypted && !decrypted {
// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
// (because inputInfo.Digest == "", this must have been computed afresh).
switch compressionOperation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
}
}
if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
}
if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
}
}
// Copy all the metadata generated by the compressor into the annotations.
if uploadedInfo.Annotations == nil {
uploadedInfo.Annotations = map[string]string{}
}
for k, v := range compressionMetadata {
uploadedInfo.Annotations[k] = v
}
return uploadedInfo, nil
}
// doCompression reads all input from src and writes its compressed equivalent to dest.
func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm, compressionLevel *int) error {
compressor, err := compression.CompressStreamWithMetadata(dest, metadata, compressionFormat, compressionLevel)
if err != nil {
return err
}
buf := make([]byte, compressionBufferSize)
_, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close()
if err != nil {
compressor.Close()
return err
}
return compressor.Close()
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
}
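For orientation, a sketch of how a caller can wire compressGoroutine through an io.Pipe (c, srcReader and compressionFormat are assumed to be in scope):
pr, pw := io.Pipe()
go c.compressGoroutine(pw, srcReader, map[string]string{}, compressionFormat)
// pr now yields the compressed stream; a read error on pr reports whatever
// error compressGoroutine passed to CloseWithError (nil on success = clean EOF).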

View File

@ -1,24 +0,0 @@
package copy
import (
"strings"
"github.com/containers/image/v5/types"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
// This function will be moved to be part of OCI spec when adopted.
func isOciEncrypted(mediatype string) bool {
return strings.HasSuffix(mediatype, "+encrypted")
}
// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
layers := i.LayerInfos()
for _, l := range layers {
if isOciEncrypted(l.MediaType) {
return true
}
}
return false
}

View File

@ -0,0 +1,129 @@
package copy
import (
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
// This function will be moved to be part of OCI spec when adopted.
func isOciEncrypted(mediatype string) bool {
return strings.HasSuffix(mediatype, "+encrypted")
}
// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
layers := i.LayerInfos()
for _, l := range layers {
if isOciEncrypted(l.MediaType) {
return true
}
}
return false
}
// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
type bpDecryptionStepData struct {
decrypting bool // We are actually decrypting the stream
}
// blobPipelineDecryptionStep updates *stream to decrypt it, if necessary.
// srcInfo is only used for error messages.
// Returns data for other steps; the caller should eventually use updateCryptoOperation.
func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) {
if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil {
desc := imgspecv1.Descriptor{
Annotations: stream.info.Annotations,
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
if err != nil {
return nil, errors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
}
stream.reader = reader
stream.info.Digest = decryptedDigest
stream.info.Size = -1
for k := range stream.info.Annotations {
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
delete(stream.info.Annotations, k)
}
}
return &bpDecryptionStepData{
decrypting: true,
}, nil
}
return &bpDecryptionStepData{
decrypting: false,
}, nil
}
// updateCryptoOperation sets *operation, if necessary.
func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypto) {
if d.decrypting {
*operation = types.Decrypt
}
}
// bpdData contains data that the copy pipeline needs about the encryption step.
type bpEncryptionStepData struct {
encrypting bool // We are actually encrypting the stream
finalizer ocicrypt.EncryptLayerFinalizer
}
// blobPipelineEncryptionStep updates *stream to encrypt it, if required by toEncrypt.
// srcInfo is primarily used for error messages.
// Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo,
decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) {
if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
var annotations map[string]string
if !decryptionStep.decrypting {
annotations = srcInfo.Annotations
}
desc := imgspecv1.Descriptor{
MediaType: srcInfo.MediaType,
Digest: srcInfo.Digest,
Size: srcInfo.Size,
Annotations: annotations,
}
reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
if err != nil {
return nil, errors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
}
stream.reader = reader
stream.info.Digest = ""
stream.info.Size = -1
return &bpEncryptionStepData{
encrypting: true,
finalizer: finalizer,
}, nil
}
return &bpEncryptionStepData{
encrypting: false,
}, nil
}
// updateCryptoOperationAndAnnotations sets *operation and updates *annotations, if necessary.
func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *types.LayerCrypto, annotations *map[string]string) error {
if !d.encrypting {
return nil
}
encryptAnnotations, err := d.finalizer()
if err != nil {
return errors.Wrap(err, "Unable to finalize encryption")
}
*operation = types.Encrypt
if *annotations == nil {
*annotations = map[string]string{}
}
for k, v := range encryptAnnotations {
(*annotations)[k] = v
}
return nil
}
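A hedged sketch of how these steps compose inside the copy pipeline (stream, srcInfo, toEncrypt and uploadedInfo are assumed to be in scope; this is not literal code from the diff):
decStep, err := c.blobPipelineDecryptionStep(stream, srcInfo)
if err != nil {
	return err
}
encStep, err := c.blobPipelineEncryptionStep(stream, toEncrypt, srcInfo, decStep)
if err != nil {
	return err
}
// ... the (possibly re-encrypted) stream is uploaded here ...
decStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
if err := encStep.updateCryptoOperationAndAnnotations(&uploadedInfo.CryptoOperation, &uploadedInfo.Annotations); err != nil {
	return err
}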

View File

@ -38,31 +38,50 @@ func (os *orderedSet) append(s string) {
}
}
// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest.
// Note that the conversion will only happen later, through ic.src.UpdatedImage
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
// and a list of other possible alternatives, in order.
func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
_, srcType, err := ic.src.Manifest(ctx)
if err != nil { // This should have been cached?!
return "", nil, errors.Wrap(err, "reading manifest")
}
// determineManifestConversionInputs contains the inputs for determineManifestConversion.
type determineManifestConversionInputs struct {
srcMIMEType string // MIME type of the input manifest
destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()
forceManifestMIMEType string // User's choice of forced manifest MIME type
requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
}
// manifestConversionPlan contains the decisions made by determineManifestConversion.
type manifestConversionPlan struct {
// The preferred manifest MIME type (whether we are converting to it or using it unmodified).
// We compute this only to show it in error messages; without having to add this context
// in an error message, we would be happy enough to know only that no conversion is needed.
preferredMIMEType string
preferredMIMETypeNeedsConversion bool // True if using preferredMIMEType requires a conversion step.
otherMIMETypeCandidates []string // Other possible alternatives, in order
}
// determineManifestConversion returns a plan for what formats, and possibly conversions, to use based on in.
func determineManifestConversion(in determineManifestConversionInputs) (manifestConversionPlan, error) {
srcType := in.srcMIMEType
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType {
logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
srcType = normalizedSrcType
}
if forceManifestMIMEType != "" {
destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
destSupportedManifestMIMETypes := in.destSupportedManifestMIMETypes
if in.forceManifestMIMEType != "" {
destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
}
if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
preferredMIMEType: srcType,
otherMIMETypeCandidates: []string{},
}, nil
}
supportedByDest := map[string]struct{}{}
for _, t := range destSupportedManifestMIMETypes {
if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
supportedByDest[t] = struct{}{}
}
}
@ -79,13 +98,16 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
if _, ok := supportedByDest[srcType]; ok {
prioritizedTypes.append(srcType)
}
if ic.cannotModifyManifestReason != "" {
if in.cannotModifyManifestReason != "" {
// We could also drop this check and have the caller
// make the choice; it is already doing that to an extent, to improve error
// messages. But it is nice to hide the “if we can't modify, do no conversion”
// special case in here; the caller can then worry (or not) only about a good UI.
logrus.Debugf("We can't modify the manifest, hoping for the best...")
return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
return manifestConversionPlan{ // Take our chances - FIXME? Or should we fail without trying?
preferredMIMEType: srcType,
otherMIMETypeCandidates: []string{},
}, nil
}
// Then use our list of preferred types.
@ -102,15 +124,17 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
return "", nil, errors.New("Internal error: no candidate MIME types")
return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
}
preferredType := prioritizedTypes.list[0]
if preferredType != srcType {
ic.manifestUpdates.ManifestMIMEType = preferredType
} else {
res := manifestConversionPlan{
preferredMIMEType: prioritizedTypes.list[0],
otherMIMETypeCandidates: prioritizedTypes.list[1:],
}
res.preferredMIMETypeNeedsConversion = res.preferredMIMEType != srcType
if !res.preferredMIMETypeNeedsConversion {
logrus.Debugf("... will first try using the original manifest unmodified")
}
return preferredType, prioritizedTypes.list[1:], nil
return res, nil
}
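An illustrative call of the refactored function (the input values are assumptions, not taken from this diff):
plan, err := determineManifestConversion(determineManifestConversionInputs{
	srcMIMEType:                    srcMIMEType,
	destSupportedManifestMIMETypes: dest.SupportedManifestMIMETypes(),
	forceManifestMIMEType:          "", // no user override
	requiresOCIEncryption:          false,
	cannotModifyManifestReason:     "",
})
if err != nil {
	return err
}
if plan.preferredMIMETypeNeedsConversion {
	// convert to plan.preferredMIMEType; on failure, try
	// plan.otherMIMETypeCandidates in order
}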
// isMultiImage returns true if img is a list of images

View File

@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@ -140,8 +140,7 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src := newImageSource(ref)
return image.FromSource(ctx, sys, src)
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.

View File

@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
ctrImage "github.com/containers/image/v5/image"
ctrImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
@ -185,11 +185,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
return ctrImage.FromSource(ctx, sys, src)
return ctrImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.

View File

@ -6,7 +6,7 @@ import (
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@ -195,11 +195,7 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.

View File

@ -163,9 +163,8 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
func serverDefault() *tls.Config {
return &tls.Config{
// Avoid fallback to SSL protocols < TLS1.0
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
MinVersion: tls.VersionTLS10,
CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
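// Note: PreferServerCipherSuites was dropped above; crypto/tls has deprecated it and ignores it in current Go releases.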
}
}

View File

@ -9,7 +9,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"

View File

@ -1,400 +1,14 @@
package image
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/containers/image/v5/internal/image"
)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let's use the same values.
var GzippedEmptyLayer = []byte{
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
var GzippedEmptyLayer = image.GzippedEmptyLayer
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
type manifestSchema2 struct {
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
m *manifest.Schema2
}
func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
m, err := manifest.Schema2FromManifest(manifestBlob)
if err != nil {
return nil, err
}
return &manifestSchema2{
src: src,
m: m,
}, nil
}
// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
return &manifestSchema2{
src: src,
configBlob: configBlob,
m: manifest.Schema2FromComponents(config, layers),
}
}
func (m *manifestSchema2) serialize() ([]byte, error) {
return m.m.Serialize()
}
func (m *manifestSchema2) manifestMIMEType() string {
return m.m.MediaType
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
return m.m.ConfigInfo()
}
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
configBlob, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
// than OCI v1. This unmarshal makes sure we drop docker v2s2
// fields that aren't needed in OCI v1.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(configBlob, configOCI); err != nil {
return nil, err
}
return configOCI, nil
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
// The result is cached; it is OK to call this however often you need.
func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil {
return nil, err
}
defer stream.Close()
blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.ConfigDescriptor.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
return m.configBlob, nil
}
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}
// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
return false
}
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
getter := func(info types.BlobInfo) ([]byte, error) {
if info.Digest != m.ConfigInfo().Digest {
// Shouldn't ever happen
return nil, errors.New("asked for a different config blob")
}
config, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
return config, nil
}
return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
return false
}
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the CompressionOperation and CompressionAlgorithm specified in one or more
// options.LayerInfos items is anything other than gzip.
func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,
configBlob: m.configBlob,
m: manifest.Schema2Clone(m.m),
}
converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
})
if err != nil {
return nil, err
}
if converted != nil {
return converted, nil
}
// No conversion required, update manifest
if options.LayerInfos != nil {
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
return memoryImageFromManifest(&copy), nil
}
func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
return imgspecv1.Descriptor{
MediaType: d.MediaType,
Size: d.Size,
Digest: d.Digest,
URLs: d.URLs,
}
}
// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
configOCI, err := m.OCIConfig(ctx)
if err != nil {
return nil, err
}
configOCIBytes, err := json.Marshal(configOCI)
if err != nil {
return nil, err
}
config := imgspecv1.Descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
}
layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
for idx := range layers {
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
switch m.m.LayersDescriptors[idx].MediaType {
case manifest.DockerV2Schema2ForeignLayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
case manifest.DockerV2Schema2LayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
}
}
return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
}
// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
//
// Based on docker/distribution/manifest/schema1/config_builder.go
func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
dest := options.InformationOnly.Destination
var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
if options.LayerInfos != nil {
if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
len(options.LayerInfos), len(m.m.LayersDescriptors))
}
convertedLayerUpdates = []types.BlobInfo{}
}
configBytes, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
imageConfig := &manifest.Schema2Image{}
if err := json.Unmarshal(configBytes, imageConfig); err != nil {
return nil, err
}
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
history := make([]manifest.Schema1History, len(imageConfig.History))
nonemptyLayerIndex := 0
var parentV1ID string // Set in the loop
v1ID := ""
haveGzippedEmptyLayer := false
if len(imageConfig.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
}
for v2Index, historyEntry := range imageConfig.History {
parentV1ID = v1ID
v1Index := len(imageConfig.History) - 1 - v2Index
var blobDigest digest.Digest
if historyEntry.EmptyLayer {
emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that it's easier to just copy it than to worry about figuring out another location where to get it.
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "uploading empty layer")
}
if info.Digest != emptyLayerBlobInfo.Digest {
return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
}
haveGzippedEmptyLayer = true
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
}
blobDigest = emptyLayerBlobInfo.Digest
} else {
if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
}
blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
}
// AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
if err != nil {
return nil, err
}
v1ID = v
fakeImage := manifest.Schema1V1Compatibility{
ID: v1ID,
Parent: parentV1ID,
Comment: historyEntry.Comment,
Created: historyEntry.Created,
Author: historyEntry.Author,
ThrowAway: historyEntry.EmptyLayer,
}
fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
v1CompatibilityBytes, err := json.Marshal(&fakeImage)
if err != nil {
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
// Note that parentV1ID of the top layer is preserved when exiting this loop
}
// Now patch in real configuration for the top layer (v1Index == 0)
v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
if err != nil {
return nil, err
}
v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(v1Config)
if options.LayerInfos != nil {
options.LayerInfos = convertedLayerUpdates
}
m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
if err != nil {
return nil, err // This should never happen, we should have created all the components correctly.
}
return m1, nil
}
func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
if err := blobDigest.Validate(); err != nil {
return "", err
}
parts := append([]string{blobDigest.Hex()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}
func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Preserve everything we don't specifically know about.
// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
rawContents := map[string]*json.RawMessage{}
if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
return nil, err
}
delete(rawContents, "rootfs")
delete(rawContents, "history")
updates := map[string]interface{}{"id": v1ID}
if parentV1ID != "" {
updates["parent"] = parentV1ID
}
if throwaway {
updates["throwaway"] = throwaway
}
for field, value := range updates {
encoded, err := json.Marshal(value)
if err != nil {
return nil, err
}
rawContents[field] = (*json.RawMessage)(&encoded)
}
return json.Marshal(rawContents)
}
// SupportsEncryption returns whether encryption is supported for the manifest type
func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
return false
}
const GzippedEmptyLayerDigest = image.GzippedEmptyLayerDigest

View File

@ -6,17 +6,10 @@ package image
import (
"context"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
)
// imageCloser implements types.ImageCloser, perhaps allowing simple users
// to use a single object without having to keep a reference to a types.ImageSource
// only to call types.ImageSource.Close().
type imageCloser struct {
types.Image
src types.ImageSource
}
// FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
@ -31,33 +24,7 @@ type imageCloser struct {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
if err != nil {
return nil, err
}
return &imageCloser{
Image: img,
src: src,
}, nil
}
func (ic *imageCloser) Close() error {
return ic.src.Close()
}
// sourcedImage is a general set of utilities for working with container images,
// whatever is their underlying location (i.e. dockerImageSource-independent).
// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
// may not be a `sourcedImage` directly. However, most users of `types.Image`
// do not care, and those who care about `skopeo/docker.Image` know they do.
type sourcedImage struct {
*UnparsedImage
manifestBlob []byte
manifestMIMEType string
// genericManifest contains data corresponding to manifestBlob.
// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
// if you want to preserve the original manifest; use manifestBlob directly.
genericManifest
return image.FromSource(ctx, sys, src)
}
// FromUnparsedImage returns a types.Image implementation for unparsed.
@ -66,39 +33,5 @@ type sourcedImage struct {
//
// The Image must not be used after the underlying ImageSource is Close()d.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) {
// Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
// we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
// this is the only UnparsedImage implementation around, anyway.
// NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
if err != nil {
return nil, err
}
parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
if err != nil {
return nil, err
}
return &sourcedImage{
UnparsedImage: unparsed,
manifestBlob: manifestBlob,
manifestMIMEType: manifestMIMEType,
genericManifest: parsedManifest,
}, nil
}
// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
func (i *sourcedImage) Size() (int64, error) {
return -1, nil
}
// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
return i.manifestBlob, i.manifestMIMEType, nil
}
func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
return image.FromUnparsedImage(ctx, sys, unparsed)
}

View File

@ -1,95 +1,19 @@
package image
import (
"context"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// UnparsedImage implements types.UnparsedImage .
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
type UnparsedImage struct {
src types.ImageSource
instanceDigest *digest.Digest
cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
// A private cache for Manifest(), may be the empty string if guessing failed.
// Valid iff cachedManifest is not nil.
cachedManifestMIMEType string
cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
}
type UnparsedImage = image.UnparsedImage
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return &UnparsedImage{
src: src,
instanceDigest: instanceDigest,
}
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (i *UnparsedImage) Reference() types.ImageReference {
// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
return i.src.Reference()
}
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
if i.cachedManifest == nil {
m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
if err != nil {
return nil, "", err
}
// ImageSource.GetManifest does not do digest verification, but we do;
// this immediately protects also any user of types.Image.
if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
matches, err := manifest.MatchesDigest(m, digest)
if err != nil {
return nil, "", errors.Wrap(err, "computing manifest digest")
}
if !matches {
return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
}
}
i.cachedManifest = m
i.cachedManifestMIMEType = mt
}
return i.cachedManifest, i.cachedManifestMIMEType, nil
}
// expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known.
// The bool return value seems redundant with digest != ""; it is used explicitly
// to refuse (unexpected) situations when the digest exists but is "".
func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
if i.instanceDigest != nil {
return *i.instanceDigest, true
}
ref := i.Reference().DockerReference()
if ref != nil {
if canonical, ok := ref.(reference.Canonical); ok {
return canonical.Digest(), true
}
}
return "", false
}
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
if i.cachedSignatures == nil {
sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
if err != nil {
return nil, err
}
i.cachedSignatures = sigs
}
return i.cachedSignatures, nil
return image.UnparsedInstance(src, instanceDigest)
}

View File

@ -246,3 +246,12 @@ func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *ty
func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
return false
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
// to a different manifest format).
func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool {
return true // There are no MIME types in the manifest, so we must assume a valid image.
}

View File

@ -0,0 +1,413 @@
package image
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let's use the same values.
//
// This is publicly visible as c/image/image.GzippedEmptyLayer.
var GzippedEmptyLayer = []byte{
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
//
// This is publicly visible as c/image/image.GzippedEmptyLayerDigest.
const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
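As a quick consistency check (a sketch, not part of the diff), the digest constant can be verified against the blob:
if digest.FromBytes(GzippedEmptyLayer) != GzippedEmptyLayerDigest {
	panic("GzippedEmptyLayerDigest does not match GzippedEmptyLayer")
}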
type manifestSchema2 struct {
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
m *manifest.Schema2
}
func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
m, err := manifest.Schema2FromManifest(manifestBlob)
if err != nil {
return nil, err
}
return &manifestSchema2{
src: src,
m: m,
}, nil
}
// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
return &manifestSchema2{
src: src,
configBlob: configBlob,
m: manifest.Schema2FromComponents(config, layers),
}
}
func (m *manifestSchema2) serialize() ([]byte, error) {
return m.m.Serialize()
}
func (m *manifestSchema2) manifestMIMEType() string {
return m.m.MediaType
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
return m.m.ConfigInfo()
}
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
configBlob, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
// than OCI v1. This unmarshal makes sure we drop docker v2s2
// fields that aren't needed in OCI v1.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(configBlob, configOCI); err != nil {
return nil, err
}
return configOCI, nil
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
// The result is cached; it is OK to call this however often you need.
func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.configBlob == nil {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil {
return nil, err
}
defer stream.Close()
blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.m.ConfigDescriptor.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
return m.configBlob, nil
}
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}
// EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
return false
}
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
getter := func(info types.BlobInfo) ([]byte, error) {
if info.Digest != m.ConfigInfo().Digest {
// Shouldn't ever happen
return nil, errors.New("asked for a different config blob")
}
config, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
return config, nil
}
return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
return false
}
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the CompressionOperation and CompressionAlgorithm specified in one or more
// options.LayerInfos items is anything other than gzip.
func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,
configBlob: m.configBlob,
m: manifest.Schema2Clone(m.m),
}
converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
})
if err != nil {
return nil, err
}
if converted != nil {
return converted, nil
}
// No conversion required, update manifest
if options.LayerInfos != nil {
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
return memoryImageFromManifest(&copy), nil
}
func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
return imgspecv1.Descriptor{
MediaType: d.MediaType,
Size: d.Size,
Digest: d.Digest,
URLs: d.URLs,
}
}
// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) {
configOCI, err := m.OCIConfig(ctx)
if err != nil {
return nil, err
}
configOCIBytes, err := json.Marshal(configOCI)
if err != nil {
return nil, err
}
config := imgspecv1.Descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
}
layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
for idx := range layers {
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
switch m.m.LayersDescriptors[idx].MediaType {
case manifest.DockerV2Schema2ForeignLayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
case manifest.DockerV2SchemaLayerMediaTypeUncompressed:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayer
case manifest.DockerV2Schema2LayerMediaType:
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType)
}
}
return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil
}
// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestSchema2 object.
//
// Based on docker/distribution/manifest/schema1/config_builder.go
func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
dest := options.InformationOnly.Destination
var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil
if options.LayerInfos != nil {
if len(options.LayerInfos) != len(m.m.LayersDescriptors) {
return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers",
len(options.LayerInfos), len(m.m.LayersDescriptors))
}
convertedLayerUpdates = []types.BlobInfo{}
}
configBytes, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
}
imageConfig := &manifest.Schema2Image{}
if err := json.Unmarshal(configBytes, imageConfig); err != nil {
return nil, err
}
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
history := make([]manifest.Schema1History, len(imageConfig.History))
nonemptyLayerIndex := 0
var parentV1ID string // Set in the loop
v1ID := ""
haveGzippedEmptyLayer := false
if len(imageConfig.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
}
for v2Index, historyEntry := range imageConfig.History {
parentV1ID = v1ID
v1Index := len(imageConfig.History) - 1 - v2Index
var blobDigest digest.Digest
if historyEntry.EmptyLayer {
emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that it's easier to just copy it than to worry about figuring out another location where to get it.
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "uploading empty layer")
}
if info.Digest != emptyLayerBlobInfo.Digest {
return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
}
haveGzippedEmptyLayer = true
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo)
}
blobDigest = emptyLayerBlobInfo.Digest
} else {
if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
}
if options.LayerInfos != nil {
convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex])
}
blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
}
// AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
if err != nil {
return nil, err
}
v1ID = v
fakeImage := manifest.Schema1V1Compatibility{
ID: v1ID,
Parent: parentV1ID,
Comment: historyEntry.Comment,
Created: historyEntry.Created,
Author: historyEntry.Author,
ThrowAway: historyEntry.EmptyLayer,
}
fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
v1CompatibilityBytes, err := json.Marshal(&fakeImage)
if err != nil {
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
// Note that parentV1ID of the top layer is preserved when exiting this loop
}
// Now patch in real configuration for the top layer (v1Index == 0)
v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
if err != nil {
return nil, err
}
v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(v1Config)
if options.LayerInfos != nil {
options.LayerInfos = convertedLayerUpdates
}
m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
if err != nil {
return nil, err // This should never happen, we should have created all the components correctly.
}
return m1, nil
}
func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
if err := blobDigest.Validate(); err != nil {
return "", err
}
parts := append([]string{blobDigest.Hex()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil
}
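An illustrative derivation (inputs are made up): the v1 ID is hex(sha256(strings.Join(parts, " "))) over the blob digest's hex plus any extra components.
id, err := v1IDFromBlobDigestAndComponents(
	digest.Digest("sha256:"+strings.Repeat("0", 64)), // any syntactically valid digest
	"parent-v1-id",
)
// id is 64 lowercase hex characters; err is non-nil only for an invalid digest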
func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Preserve everything we don't specifically know about.
// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
rawContents := map[string]*json.RawMessage{}
if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
return nil, err
}
delete(rawContents, "rootfs")
delete(rawContents, "history")
updates := map[string]interface{}{"id": v1ID}
if parentV1ID != "" {
updates["parent"] = parentV1ID
}
if throwaway {
updates["throwaway"] = throwaway
}
for field, value := range updates {
encoded, err := json.Marshal(value)
if err != nil {
return nil, err
}
rawContents[field] = (*json.RawMessage)(&encoded)
}
return json.Marshal(rawContents)
}
// SupportsEncryption returns whether encryption is supported for the manifest type
func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
return false
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
// to a different manifest format).
func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool {
return m.m.CanChangeLayerCompression(mimeType)
}

View File

@ -12,9 +12,8 @@ import (
)
// genericManifest is an interface for parsing, modifying image manifests and related data.
// Note that the public methods are intended to be a subset of types.Image
// so that embedding a genericManifest into structs works.
// will support v1 one day...
// The public methods are related to types.Image so that embedding a genericManifest implements most of it,
// but there are also public methods that are only visible by packages that can import c/image/internal/image.
type genericManifest interface {
serialize() ([]byte, error)
manifestMIMEType() string
@ -51,6 +50,16 @@ type genericManifest interface {
// the process of updating a manifest between different manifest types was to update then convert.
// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
SupportsEncryption(ctx context.Context) bool
// The following methods are not a part of types.Image:
// ===
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
// to a different manifest format).
CanChangeLayerCompression(mimeType string) bool
}
// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.

View File

@ -7,6 +7,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
@ -84,6 +85,10 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
}
cb, err := m.ConfigBlob(ctx)
if err != nil {
return nil, err
@ -194,10 +199,15 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
}
// Create a copy of the descriptor.
config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
// The only difference between OCI and DockerSchema2 is the mediatypes. The
// Above, we have already checked that this manifest refers to an image, not an OCI artifact,
// so the only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
@ -233,7 +243,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
// We can't directly convert to V1, but we can transitively convert via a V2 image
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
}
// We can't directly convert images to V1, but we can transitively convert via a V2 image
m2, err := m.convertToManifestSchema2(ctx, options)
if err != nil {
return nil, err
@ -246,3 +260,12 @@ func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *ty
func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
return true
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
// to a different manifest format).
func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
return m.m.CanChangeLayerCompression(mimeType)
}

View File

@ -0,0 +1,134 @@
// Package image consolidates knowledge about various container image formats
// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
// and exposes all of them using a unified interface.
package image
import (
"context"
"github.com/containers/image/v5/types"
)
// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
// If reference points to a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
//
// The caller must call .Close() on the returned ImageCloser.
//
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
src, err := ref.NewImageSource(ctx, sys)
if err != nil {
return nil, err
}
img, err := FromSource(ctx, sys, src)
if err != nil {
src.Close()
return nil, err
}
return img, nil
}
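// A minimal usage sketch, not part of this diff. FromReference lives in an internal
// package here, so treat the import path as an assumption (external callers would go
// through the public c/image/v5/image wrappers); the reference string is illustrative.
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/transports/alltransports"
)

func printManifestInfo() error {
	ctx := context.Background()
	ref, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
	if err != nil {
		return err
	}
	img, err := image.FromReference(ctx, nil, ref) // a nil *types.SystemContext uses defaults
	if err != nil {
		return err
	}
	defer img.Close() // the caller must close the returned ImageCloser
	blob, mime, err := img.Manifest(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("manifest: %d bytes, MIME type %s\n", len(blob), mime)
	return nil
}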
// imageCloser implements types.ImageCloser, perhaps allowing simple users
// to use a single object without having to keep a reference to a types.ImageSource
// only to call types.ImageSource.Close().
type imageCloser struct {
types.Image
src types.ImageSource
}
// FromSource returns a types.ImageCloser implementation for the default instance of source.
// If source is a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
//
// The caller must call .Close() on the returned ImageCloser.
//
// FromSource “takes ownership” of the input ImageSource and will call src.Close()
// when the image is closed. (This does not prevent callers from using both the
// Image and ImageSource objects simultaneously, but it means that they only need to
// close the Image.)
//
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
//
// Most callers can use either FromUnparsedImage or FromReference instead.
//
// This is publicly visible as c/image/image.FromSource.
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
if err != nil {
return nil, err
}
return &imageCloser{
Image: img,
src: src,
}, nil
}
func (ic *imageCloser) Close() error {
return ic.src.Close()
}
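// Hedged sketch of the ownership rule above (caller code; ctx, sys, and ref are
// assumed to exist): on success, only the Image needs to be closed.
src, err := ref.NewImageSource(ctx, sys)
if err != nil {
	return err
}
img, err := image.FromSource(ctx, sys, src)
if err != nil {
	src.Close() // FromSource only takes ownership on success
	return err
}
defer img.Close() // this also closes src; no separate src.Close() is needed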
// SourcedImage is a general set of utilities for working with container images,
// whatever their underlying transport (i.e. ImageSource-independent).
// Note the existence of docker.Image and image.memoryImage: various instances
// of a types.Image may not be a SourcedImage directly.
//
// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
//
// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
type SourcedImage struct {
*UnparsedImage
ManifestBlob []byte // The manifest of the relevant instance
ManifestMIMEType string // MIME type of ManifestBlob
// genericManifest contains data corresponding to ManifestBlob.
// NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
// if you want to preserve the original manifest; use ManifestBlob directly.
genericManifest
}
// FromUnparsedImage returns a types.Image implementation for unparsed.
// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate single image.
//
// The Image must not be used after the underlying ImageSource is Close()d.
//
// This is publicly visible as c/image/image.FromUnparsedImage.
func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) {
// Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
// we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
// this is the only UnparsedImage implementation around, anyway.
// NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx)
if err != nil {
return nil, err
}
parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType)
if err != nil {
return nil, err
}
return &SourcedImage{
UnparsedImage: unparsed,
ManifestBlob: manifestBlob,
ManifestMIMEType: manifestMIMEType,
genericManifest: parsedManifest,
}, nil
}
// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
func (i *SourcedImage) Size() (int64, error) {
return -1, nil
}
// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) {
return i.ManifestBlob, i.ManifestMIMEType, nil
}
func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest)
}

View File

@ -0,0 +1,99 @@
package image
import (
"context"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// UnparsedImage implements types.UnparsedImage.
// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
//
// This is publicly visible as c/image/image.UnparsedImage.
type UnparsedImage struct {
src types.ImageSource
instanceDigest *digest.Digest
cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
// A private cache for Manifest(); may be the empty string if guessing failed.
// Valid iff cachedManifest is not nil.
cachedManifestMIMEType string
cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
}
// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest).
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list).
//
// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
//
// This is publicly visible as c/image/image.UnparsedInstance.
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return &UnparsedImage{
src: src,
instanceDigest: instanceDigest,
}
}
// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (i *UnparsedImage) Reference() types.ImageReference {
// Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
return i.src.Reference()
}
// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
if i.cachedManifest == nil {
m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
if err != nil {
return nil, "", err
}
// ImageSource.GetManifest does not do digest verification, but we do;
// this also immediately protects any user of types.Image.
if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
matches, err := manifest.MatchesDigest(m, digest)
if err != nil {
return nil, "", errors.Wrap(err, "computing manifest digest")
}
if !matches {
return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
}
}
i.cachedManifest = m
i.cachedManifestMIMEType = mt
}
return i.cachedManifest, i.cachedManifestMIMEType, nil
}
// expectedManifestDigest returns the expected value of the manifest digest, and an indicator of whether it is known.
// The bool return value seems redundant with digest != ""; it is used explicitly
// to refuse (unexpected) situations when the digest exists but is "".
func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
if i.instanceDigest != nil {
return *i.instanceDigest, true
}
ref := i.Reference().DockerReference()
if ref != nil {
if canonical, ok := ref.(reference.Canonical); ok {
return canonical.Digest(), true
}
}
return "", false
}
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
if i.cachedSignatures == nil {
sigs, err := i.src.GetSignatures(ctx, i.instanceDigest)
if err != nil {
return nil, err
}
i.cachedSignatures = sigs
}
return i.cachedSignatures, nil
}
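// Hedged sketch (caller code, not part of this diff) of the signature-verification
// flow recommended in the NOTEs above: build an UnparsedImage, verify it, and only
// then parse it into a full Image. The policy evaluation itself is elided.
unparsed := image.UnparsedInstance(src, nil) // nil digest selects the default instance
sigs, err := unparsed.Signatures(ctx)
if err != nil {
	return err
}
_ = sigs // a real caller evaluates sigs against a signature.PolicyContext here
img, err := image.FromUnparsedImage(ctx, sys, unparsed)
if err != nil {
	return err
}
_ = img // use the verified image; its lifetime is bounded by src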

View File

@ -0,0 +1,32 @@
package manifest
import "fmt"
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact).
//
// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don't provide a public constructor).
type NonImageArtifactError struct {
// Callers should not be blindly calling image-specific operations and only checking MIME types
// on failure; if they care about the artifact type, they should check before using it.
// If they blindly assume an image, they don't really need this value; just a type check
// is sufficient for basic "we can only pull images" UI.
//
// Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
// e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
// to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
// type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
// based on that kind of data.
//
// So, let's not expose this until a specific need is identified.
mimeType string
}
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
func NewNonImageArtifactError(mimeType string) error {
return NonImageArtifactError{mimeType: mimeType}
}
func (e NonImageArtifactError) Error() string {
return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
}
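// Illustrative detection sketch (caller code; img and ctx are assumptions, errors is
// the standard library package): this is the errors.As usage the comment above refers to.
var artifactErr manifest.NonImageArtifactError
if _, err := img.OCIConfig(ctx); errors.As(err, &artifactErr) {
	fmt.Println("skipping non-image artifact:", err) // e.g. fall back to a raw copy
}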

View File

@ -118,6 +118,18 @@ type compressionMIMETypeSet map[string]string
const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant
const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
// findCompressionMIMETypeSet returns the compressionMIMETypeSet in variantTable that contains mimeType as one of its values, or nil if not found
func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet {
for _, variants := range variantTable {
for _, mt := range variants {
if mt == mimeType {
return variants
}
}
}
return nil
}
// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
// to mean "no compression"), based on variantTable.
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
@ -130,29 +142,26 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type")
}
for _, variants := range variantTable {
for _, mt := range variants {
if mt == mimeType { // Found the variant
name := mtsUncompressed
if algorithm != nil {
name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
}
if res, ok := variants[name]; ok {
if res != mtsUnsupportedMIMEType {
return res, nil
}
if name != mtsUncompressed {
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
}
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
}
if name != mtsUncompressed {
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)}
}
// We can't very well say “the idea of no compression is unknown”
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
}
variants := findCompressionMIMETypeSet(variantTable, mimeType)
if variants != nil {
name := mtsUncompressed
if algorithm != nil {
name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
}
if res, ok := variants[name]; ok {
if res != mtsUnsupportedMIMEType {
return res, nil
}
if name != mtsUncompressed {
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)}
}
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
}
if name != mtsUncompressed {
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)}
}
// We can't very well say “the idea of no compression is unknown”
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)}
}
if algorithm != nil {
return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
@ -209,3 +218,13 @@ type ManifestLayerCompressionIncompatibilityError struct {
func (m ManifestLayerCompressionIncompatibilityError) Error() string {
return m.text
}
// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType
// Note that the caller still needs to worry about a specific algorithm not being supported.
func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool {
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return false
}
variants := findCompressionMIMETypeSet(variantTable, mimeType)
return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
}
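// Hypothetical variantTable entry (the media type strings are invented, not the real
// tables in this file) showing what the lookups above operate on:
var exampleVariants = []compressionMIMETypeSet{
	{
		mtsUncompressed: "application/vnd.example.layer.tar",
		"gzip":          "application/vnd.example.layer.tar+gzip",
		"zstd":          mtsUnsupportedMIMEType, // recognized but unsupported
	},
}
// compressionVariantsRecognizeMIMEType(exampleVariants, "application/vnd.example.layer.tar+gzip")
// returns true, while compressionVariantMIMEType with the zstd algorithm fails with a
// ManifestLayerCompressionIncompatibilityError, because that variant is marked unsupported.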

View File

@ -295,3 +295,11 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) {
}
return m.ConfigDescriptor.Digest.Hex(), nil
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
// algorithms depends not on the current format, but possibly on the target of a conversion.
func (m *Schema2) CanChangeLayerCompression(mimeType string) bool {
return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType)
}

View File

@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
@ -34,6 +35,10 @@ const (
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact).
type NonImageArtifactError = internalManifest.NonImageArtifactError
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
switch m {

View File

@ -5,6 +5,7 @@ import (
"fmt"
"strings"
internalManifest "github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
@ -115,6 +116,12 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
// CompressionAlgorithm that isn't supported by OCI.
//
// It's generally the caller's responsibility to determine whether a particular edit is acceptable, rather than relying on
// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (because UpdateLayerInfos needs
// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
// handled; that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
@ -151,6 +158,33 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
return nil
}
// getEncryptedMediaType maps the given mediatype to its encrypted counterpart, returning
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
for _, s := range strings.Split(mediatype, "+")[1:] {
if s == "encrypted" {
return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
}
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
switch unsuffixedMediatype {
case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
return mediatype + "+encrypted", nil
}
return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
}
// getDecryptedMediaType maps the given mediatype to its decrypted counterpart, returning
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
}
return strings.TrimSuffix(mediatype, "+encrypted"), nil
}
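// Round-trip illustration of the two helpers above (media type chosen as an example):
//   getEncryptedMediaType("application/vnd.oci.image.layer.v1.tar+gzip")
//     -> "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
//   getDecryptedMediaType("application/vnd.oci.image.layer.v1.tar+gzip+encrypted")
//     -> "application/vnd.oci.image.layer.v1.tar+gzip"
// Encrypting an already-encrypted type, or decrypting a type without the "+encrypted"
// suffix, returns an error instead.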
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *OCI1) Serialize() ([]byte, error) {
@ -159,6 +193,14 @@ func (m *OCI1) Serialize() ([]byte, error) {
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
// We could return at least the layers, but that's already available in a better format via types.Image.LayerInfos.
// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
// and is probably better served by failing; we can always re-visit that later if we fail now, but
// if we started returning some data for OCI artifacts now, we couldn't start failing in this function later.
return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
}
config, err := configGetter(m.ConfigInfo())
if err != nil {
return nil, err
@ -186,35 +228,39 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *OCI1) ImageID([]digest.Digest) (string, error) {
// The way m.Config.Digest “uniquely identifies” an image is
// by containing RootFS.DiffIDs, which identify the layers of the image.
// For non-image artifacts, we can't expect the config to change
// any time the other layers (semantically) change, so this approach of
// distinguishing objects only by m.Config.Digest doesn't work in general.
//
// Any caller of this method presumably wants to disambiguate the same
// images with a different representation, but doesn't want to disambiguate
// representations (by using a manifest digest). So, submitting a non-image
// artifact to such a caller indicates an expectation mismatch.
// So, we just fail here instead of inventing some other ID value (e.g.
// by combining the config and blob layer digests). That still
// gives us the option to not fail, and return some value, in the future,
// without committing to that approach now.
// (The only known caller of ImageID is storage/storageImageDestination.computeID,
// which can't work with non-image artifacts.)
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
}
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
return m.Config.Digest.Hex(), nil
}
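// Concrete illustration (digest value invented): with Config.MediaType equal to
// imgspecv1.MediaTypeImageConfig and Config.Digest "sha256:0123abcd...", ImageID
// returns "0123abcd..." (the hex part only). With an artifact config type such as
// "application/vnd.example.config.v1+json", it returns a NonImageArtifactError
// rather than inventing an ID.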
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
for _, s := range strings.Split(mediatype, "+")[1:] {
if s == "encrypted" {
return "", errors.Errorf("unsupportedmediatype: %v already encrypted", mediatype)
}
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
// algorithms depends not on the current format, but possibly on the target of a conversion.
func (m *OCI1) CanChangeLayerCompression(mimeType string) bool {
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return false
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
switch unsuffixedMediatype {
case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
return mediatype + "+encrypted", nil
}
return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
}
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
}
return strings.TrimSuffix(mediatype, "+encrypted"), nil
return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType)
}

View File

@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/oci/internal"
ocilayout "github.com/containers/image/v5/oci/layout"
@ -122,11 +122,7 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.

View File

@ -10,7 +10,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/oci/internal"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
@ -154,11 +154,7 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(sys, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
return image.FromReference(ctx, sys, ref)
}
// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together

View File

@ -8,7 +8,7 @@ import (
"github.com/containers/image/v5/docker/policyconfiguration"
"github.com/containers/image/v5/docker/reference"
genericImage "github.com/containers/image/v5/image"
genericImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
@ -132,11 +132,7 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(sys, ref)
if err != nil {
return nil, err
}
return genericImage.FromSource(ctx, sys, src)
return genericImage.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.

View File

@ -14,7 +14,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
@ -184,17 +184,7 @@ func (s *ostreeImageCloser) Size() (int64, error) {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
var tmpDir string
if sys == nil || sys.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = sys.OSTreeTmpDirPath
}
src, err := newImageSource(tmpDir, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.

View File

@ -9,7 +9,7 @@ import (
"sync"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
@ -158,11 +158,7 @@ func (b *BlobCache) ClearCache() error {
}
func (b *BlobCache) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := b.NewImageSource(ctx, sys)
if err != nil {
return nil, errors.Wrapf(err, "error creating new image %q", transports.ImageName(b.reference))
}
return image.FromSource(ctx, sys, src)
return image.FromReference(ctx, sys, b)
}
func (b *BlobCache) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {

View File

@ -9,7 +9,7 @@ import (
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
)
@ -139,11 +139,7 @@ func (ref sifReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.

View File

@ -16,7 +16,7 @@ import (
"sync/atomic"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
@ -486,7 +486,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually commited using s.commitLayer().
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
@ -641,7 +641,7 @@ func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo t
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually commited using s.commitLayer().
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
@ -941,7 +941,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
s.lock.Unlock()
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
if err != nil {
if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
return errors.Wrapf(err, "failed to put layer from digest and labels")
}
lastLayer = layer.ID

View File

@ -7,7 +7,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -67,16 +67,7 @@ func (r *tarballReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := r.NewImageSource(ctx, sys)
if err != nil {
return nil, err
}
img, err := image.FromSource(ctx, sys, src)
if err != nil {
src.Close()
return nil, err
}
return img, nil
return image.FromReference(ctx, sys, r)
}
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {

View File

@ -21,7 +21,7 @@ addons:
go_import_path: github.com/containers/ocicrypt
install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2
script:
- make

View File

@ -21,7 +21,7 @@ import (
"strings"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)
// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys

View File

@ -24,7 +24,7 @@ import (
"github.com/containers/ocicrypt/crypto/pkcs11"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)
// OcicryptConfig represents the format of an imgcrypt.conf config file

View File

@ -17,7 +17,7 @@ import (
"fmt"
"github.com/pkg/errors"
pkcs11uri "github.com/stefanberger/go-pkcs11uri"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)
// Pkcs11KeyFile describes the format of the pkcs11 (private) key file.

View File

@ -17,5 +17,5 @@ require (
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
google.golang.org/grpc v1.33.2
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0
)

View File

@ -111,7 +111,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -8,8 +8,7 @@
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
[![GoCenter Kudos][15]][16]
[![Become my sponsor][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@ -25,8 +24,8 @@
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
[16]: https://search.gocenter.io/github.com/imdario/mergo
[15]: https://img.shields.io/github/sponsors/imdario
[16]: https://github.com/sponsors/imdario
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
## Status
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules.
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
### Mergo in the wild
- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
- [tjpnz/structbot](https://github.com/tjpnz/structbot)
## Install
@ -168,7 +169,7 @@ func main() {
Note: if tests are failing due to a missing package, please execute:
go get gopkg.in/yaml.v2
go get gopkg.in/yaml.v3
### Transformers
@ -218,7 +219,6 @@ func main() {
}
```
## Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don
Written by [Dario Castañé](http://dario.im).
## Top Contributors
[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
## License
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).

View File

@ -2,4 +2,4 @@ module github.com/imdario/mergo
go 1.13
require gopkg.in/yaml.v2 v2.3.0
require gopkg.in/yaml.v3 v3.0.0

View File

@ -1,4 +1,4 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen}
}
if config.Transformers != nil && !isEmptyValue(dst) {
if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return

View File

@ -17,7 +17,7 @@ import (
var (
ErrNilArguments = errors.New("src and dst must not be nil")
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
ErrNotSupported = errors.New("only structs and maps are supported")
ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
return
}
vDst = reflect.ValueOf(dst).Elem()
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
err = ErrNotSupported
return
}

10
vendor/modules.txt vendored
View File

@ -159,7 +159,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.21.2-0.20220520105616-e594853d6471
# github.com/containers/image/v5 v5.21.2-0.20220617075545-929f14a56f5c
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -172,9 +172,11 @@ github.com/containers/image/v5/docker/policyconfiguration
github.com/containers/image/v5/docker/reference
github.com/containers/image/v5/image
github.com/containers/image/v5/internal/blobinfocache
github.com/containers/image/v5/internal/image
github.com/containers/image/v5/internal/imagedestination
github.com/containers/image/v5/internal/imagesource
github.com/containers/image/v5/internal/iolimits
github.com/containers/image/v5/internal/manifest
github.com/containers/image/v5/internal/pkg/platform
github.com/containers/image/v5/internal/private
github.com/containers/image/v5/internal/putblobdigest
@ -212,7 +214,7 @@ github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
github.com/containers/libtrust
# github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
# github.com/containers/ocicrypt v1.1.5
## explicit
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
@ -441,7 +443,7 @@ github.com/hashicorp/errwrap
# github.com/hashicorp/go-multierror v1.1.1
## explicit
github.com/hashicorp/go-multierror
# github.com/imdario/mergo v0.3.12
# github.com/imdario/mergo v0.3.13
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
@ -644,7 +646,7 @@ github.com/stefanberger/go-pkcs11uri
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/sylabs/sif/v2 v2.7.0
# github.com/sylabs/sif/v2 v2.7.1
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit