Vendor in containers/(buildah, common)
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
vendor/github.com/containers/image/v5/copy/compression.go (generated, vendored; 11 changed lines)
@@ -23,9 +23,9 @@ var (
 	// compressionBufferSize is the buffer size used to compress a blob
 	compressionBufferSize = 1048576

-	// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
+	// expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed
 	// using the algorithm that the media type says it should be compressed with
-	expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
+	expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{
 		imgspecv1.MediaTypeImageLayerGzip:      &compression.Gzip,
 		imgspecv1.MediaTypeImageLayerZstd:      &compression.Zstd,
 		manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
@@ -62,8 +62,8 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 		res.srcCompressorName = internalblobinfocache.Uncompressed
 	}

-	if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
-		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
+	if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
+		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name())
 	}
 	return res, nil
 }
@@ -172,7 +172,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
 // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
 func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
 	if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
-		ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
+		ic.compressionFormat != nil &&
+		(ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) {
 		// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
 		// re-compressed using the desired format.
 		logrus.Debugf("Blob will be converted")
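Note (not part of the commit): the practical effect of switching to expectedBaseCompressionFormats and BaseVariantName() is that a layer whose media type says "zstd" but whose detected compressor is "zstd:chunked" no longer triggers the mismatch debug message, because both share the same base variant. A minimal self-contained Go sketch of that comparison, using stand-in types rather than the c/image API:

package main

import "fmt"

// algo is a stand-in for a compression algorithm with an optional base variant.
type algo struct{ name, baseVariant string }

func (a algo) Name() string { return a.name }

// BaseVariantName mirrors the new accessor: it falls back to the algorithm's own name.
func (a algo) BaseVariantName() string {
	if a.baseVariant != "" {
		return a.baseVariant
	}
	return a.name
}

func main() {
	zstd := algo{name: "zstd"}
	zstdChunked := algo{name: "zstd:chunked", baseVariant: "zstd"}

	// The expected *base* format for the zstd OCI layer media type.
	expectedBase := map[string]algo{
		"application/vnd.oci.image.layer.v1.tar+zstd": zstd,
	}

	for _, detected := range []algo{zstd, zstdChunked} {
		expected := expectedBase["application/vnd.oci.image.layer.v1.tar+zstd"]
		mismatch := detected.BaseVariantName() != expected.Name()
		fmt.Printf("detected %q: mismatch warning = %v\n", detected.Name(), mismatch)
	}
}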
vendor/github.com/containers/image/v5/copy/digesting_reader.go (generated, vendored; 4 changed lines)
@@ -23,11 +23,11 @@ type digestingReader struct {
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
 	var digester digest.Digester
 	if err := expectedDigest.Validate(); err != nil {
-		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+		return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
 	}
 	digestAlgorithm := expectedDigest.Algorithm()
 	if !digestAlgorithm.Available() {
-		return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+		return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
 	}
 	digester = digestAlgorithm.Digester()
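Note (illustration only): the two checks whose error messages are reworded above come from the go-digest API. A small standalone sketch of the same Validate/Available/Digester sequence, assuming the github.com/opencontainers/go-digest package:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// An expected digest such as callers of newDigestingReader would pass in.
	expected := digest.FromString("hello")

	// The same two checks the constructor performs before building a digester.
	if err := expected.Validate(); err != nil {
		fmt.Println("invalid digest specification:", err)
		return
	}
	alg := expected.Algorithm()
	if !alg.Available() {
		fmt.Println("unsupported digest algorithm:", alg)
		return
	}

	// Hash a stream through the digester and compare, as the reader does on EOF.
	digester := alg.Digester()
	if _, err := io.Copy(digester.Hash(), strings.NewReader("hello")); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("match:", digester.Digest() == expected)
}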
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored; 34 changed lines)
@@ -383,7 +383,11 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,

 	compressionAlgos := set.New[string]()
 	for _, srcInfo := range ic.src.LayerInfos() {
-		if _, c := compressionEditsFromMIMEType(srcInfo); c != nil {
+		_, c, err := compressionEditsFromBlobInfo(srcInfo)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
 			compressionAlgos.Add(c.Name())
 		}
 	}
@@ -636,21 +640,28 @@ type diffIDResult struct {
 	err    error
 }

-// compressionEditsFromMIMEType returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
-// for types.BlobInfo, based on a MIME type of srcInfo.
-func compressionEditsFromMIMEType(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm) {
+// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
+// for types.BlobInfo.
+func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) {
 	// This MIME type → compression mapping belongs in manifest-specific code in our manifest
 	// package (but we should preferably replace/change UpdatedImage instead of productizing
 	// this workaround).
 	switch srcInfo.MediaType {
 	case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
-		return types.PreserveOriginal, &compression.Gzip
+		return types.PreserveOriginal, &compression.Gzip, nil
 	case imgspecv1.MediaTypeImageLayerZstd:
-		return types.PreserveOriginal, &compression.Zstd
+		tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+		if err != nil {
+			return types.PreserveOriginal, nil, err
+		}
+		if tocDigest != nil {
+			return types.PreserveOriginal, &compression.ZstdChunked, nil
+		}
+		return types.PreserveOriginal, &compression.Zstd, nil
 	case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
-		return types.Decompress, nil
+		return types.Decompress, nil, nil
 	default:
-		return types.PreserveOriginal, nil
+		return types.PreserveOriginal, nil, nil
 	}
 }

@@ -666,7 +677,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	// (Sadly UpdatedImage() is documented to not update MediaTypes from
 	// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
 	if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
-		srcInfo.CompressionOperation, srcInfo.CompressionAlgorithm = compressionEditsFromMIMEType(srcInfo)
+		op, algo, err := compressionEditsFromBlobInfo(srcInfo)
+		if err != nil {
+			return types.BlobInfo{}, "", err
+		}
+		srcInfo.CompressionOperation = op
+		srcInfo.CompressionAlgorithm = algo
 	}

 	ic.c.printCopyInfo("blob", srcInfo)
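Note (illustration only): the new compressionEditsFromBlobInfo consults layer annotations as well as the media type, so a zstd layer that carries a zstd:chunked TOC digest is classified as zstd:chunked. A self-contained sketch of that decision; the annotation key below is a placeholder, not the real key that chunkedToc.GetTOCDigest looks at:

package main

import "fmt"

// layerInfo is a pared-down stand-in for types.BlobInfo.
type layerInfo struct {
	MediaType   string
	Annotations map[string]string
}

// tocAnnotationKey is a placeholder; the real key is defined by the
// zstd:chunked format in c/storage, not reproduced here.
const tocAnnotationKey = "example.org/zstd-chunked.toc-digest"

// compressionNameFor mimics the decision in compressionEditsFromBlobInfo:
// the media type picks the base algorithm, and for zstd a TOC annotation
// upgrades the classification to the chunked variant.
func compressionNameFor(info layerInfo) string {
	switch info.MediaType {
	case "application/vnd.oci.image.layer.v1.tar+gzip":
		return "gzip"
	case "application/vnd.oci.image.layer.v1.tar+zstd":
		if _, ok := info.Annotations[tocAnnotationKey]; ok {
			return "zstd:chunked"
		}
		return "zstd"
	default:
		return "" // preserve original, no known algorithm
	}
}

func main() {
	plain := layerInfo{MediaType: "application/vnd.oci.image.layer.v1.tar+zstd"}
	chunked := layerInfo{
		MediaType:   "application/vnd.oci.image.layer.v1.tar+zstd",
		Annotations: map[string]string{tocAnnotationKey: "sha256:…"},
	}
	fmt.Println(compressionNameFor(plain))   // zstd
	fmt.Println(compressionNameFor(chunked)) // zstd:chunked
}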
vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored; 35 changed lines)
@@ -978,13 +978,10 @@ func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, t
 // This function can return nil reader when no url is supported by this function. In this case, the caller
 // should fallback to fetch the non-external blob (i.e. pull from the registry).
 func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
-	var (
-		resp *http.Response
-		err  error
-	)
 	if len(urls) == 0 {
 		return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
 	}
+	var remoteErrors []error
 	for _, u := range urls {
 		blobURL, err := url.Parse(u)
 		if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
@@ -993,24 +990,28 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
 		// NOTE: we must not authenticate on additional URLs as those
 		// can be abused to leak credentials or tokens. Please
 		// refer to CVE-2020-15157 for more information.
-		resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
-		if err == nil {
-			if resp.StatusCode != http.StatusOK {
-				err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
-				logrus.Debug(err)
-				resp.Body.Close()
-				continue
-			}
-			break
-		}
+		resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
+		if err != nil {
+			remoteErrors = append(remoteErrors, err)
+			continue
+		}
+		if resp.StatusCode != http.StatusOK {
+			err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+			remoteErrors = append(remoteErrors, err)
+			logrus.Debug(err)
+			resp.Body.Close()
+			continue
+		}
+		return resp.Body, getBlobSize(resp), nil
 	}
-	if resp == nil && err == nil {
+	if remoteErrors == nil {
 		return nil, 0, nil // fallback to non-external blob
 	}
-	if err != nil {
-		return nil, 0, err
-	}
-	return resp.Body, getBlobSize(resp), nil
+	err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0])
+	for _, e := range remoteErrors[1:] {
+		err = fmt.Errorf("%s, %w", err, e)
+	}
+	return nil, 0, err
 }

 func getBlobSize(resp *http.Response) int64 {
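Note (illustration only): getExternalBlob now records one error per attempted URL and only falls back to the registry when no URLs were usable at all. A standalone sketch of the same aggregation pattern, with made-up URLs and a made-up fetch helper; as in the diff, the fmt.Errorf("%s, %w", …) chain keeps a single-line message but leaves only the last error unwrappable:

package main

import (
	"errors"
	"fmt"
)

// fetchFrom is a stand-in for one attempt against a single mirror URL.
func fetchFrom(u string) error {
	return fmt.Errorf("fetching %q: connection refused", u)
}

func main() {
	urls := []string{"https://mirror-a.example/blob", "https://mirror-b.example/blob"}

	var remoteErrors []error
	for _, u := range urls {
		if err := fetchFrom(u); err != nil {
			remoteErrors = append(remoteErrors, err)
			continue
		}
		fmt.Println("fetched", u)
		return
	}
	if remoteErrors == nil {
		fmt.Println("no usable URLs, fall back to the registry")
		return
	}

	// Same shape as the diff: the first error is wrapped, the rest are appended.
	err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0])
	for _, e := range remoteErrors[1:] {
		err = fmt.Errorf("%s, %w", err, e)
	}
	fmt.Println(err)

	// errors.Join (Go 1.20+) would keep every error unwrappable, at the cost of
	// a multi-line message; the single-line form matches the diff instead.
	fmt.Println(errors.Is(err, remoteErrors[1])) // true: only the last %w stays unwrappable
}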
vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go (generated, vendored; 3 changed lines)
@@ -17,7 +17,8 @@ func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions
 			// The caller must re-compress to build those annotations.
 			return false
 		}
-		if candidateCompression == nil || (options.RequiredCompression.Name() != candidateCompression.Name()) {
+		if candidateCompression == nil ||
+			(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
 			return false
 		}
 	}
vendor/github.com/containers/image/v5/internal/manifest/manifest.go (generated, vendored; 7 changed lines)
@@ -169,7 +169,8 @@ func NormalizedMIMEType(input string) string {

 // CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values.
 func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool {
-	switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
+	// Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm().
+	switch algo.Name() {
 	case compressiontypes.GzipAlgorithmName:
 		return true
 	default:
@@ -182,7 +183,9 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes
 	if CompressionAlgorithmIsUniversallySupported(algo) {
 		return true
 	}
-	switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
+	// This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields.
+	// The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge.
+	switch algo.Name() {
 	case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName:
 		return mimeType == imgspecv1.MediaTypeImageManifest
 	default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere
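Note (illustration only): the rules in MIMETypeSupportsCompressionAlgorithm boil down to a small support matrix: gzip is accepted by every manifest format, while zstd and zstd:chunked are accepted only by OCI image manifests. Restated as a standalone sketch:

package main

import "fmt"

// mimeTypeSupportsCompression is a simplified restatement of the rules in the
// diff: gzip works with any manifest format, zstd and zstd:chunked only with
// OCI image manifests, everything else is unsupported.
func mimeTypeSupportsCompression(mimeType, algoName string) bool {
	switch algoName {
	case "gzip":
		return true
	case "zstd", "zstd:chunked":
		return mimeType == "application/vnd.oci.image.manifest.v1+json"
	default: // e.g. bzip2, xz
		return false
	}
}

func main() {
	docker := "application/vnd.docker.distribution.manifest.v2+json"
	oci := "application/vnd.oci.image.manifest.v1+json"

	fmt.Println(mimeTypeSupportsCompression(docker, "gzip")) // true
	fmt.Println(mimeTypeSupportsCompression(docker, "zstd")) // false
	fmt.Println(mimeTypeSupportsCompression(oci, "zstd"))    // true
}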
vendor/github.com/containers/image/v5/internal/manifest/oci_index.go (generated, vendored; 4 changed lines)
@@ -103,8 +103,8 @@ func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, an
 		*annotationsMap = map[string]string{}
 	}
 	for _, algo := range compressionAlgorithms {
-		switch algo.Name() {
-		case compression.ZstdAlgorithmName, compression.ZstdChunkedAlgorithmName: // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
+		switch algo.BaseVariantName() {
+		case compression.ZstdAlgorithmName:
			(*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
 		default:
 			continue
vendor/github.com/containers/image/v5/manifest/common.go (generated, vendored; 2 changed lines)
@@ -55,7 +55,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 	if variants != nil {
 		name := mtsUncompressed
 		if algorithm != nil {
-			name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
+			name = algorithm.BaseVariantName()
 		}
 		if res, ok := variants[name]; ok {
 			if res != mtsUnsupportedMIMEType {
vendor/github.com/containers/image/v5/pkg/compression/compression.go (generated, vendored; 12 changed lines)
@@ -19,19 +19,19 @@ type Algorithm = types.Algorithm

 var (
 	// Gzip compression.
-	Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
+	Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "",
 		[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
 	// Bzip2 compression.
-	Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
+	Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "",
 		[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
 	// Xz compression.
-	Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
+	Xz = internal.NewAlgorithm(types.XzAlgorithmName, "",
 		[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
 	// Zstd compression.
-	Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
+	Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "",
 		[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
-	// ZstdChunked is a Zstd compression with chunk metadta which allows random access to individual files.
-	ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
+	// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
+	ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName,
 		nil, ZstdDecompressor, compressor.ZstdCompressor)

 	compressionAlgorithms = map[string]Algorithm{
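Note (illustration only): the magic-byte prefixes in these declarations are what prefix-based detection keys on; ZstdChunked deliberately declares no prefix because its data is ordinary zstd on the wire and cannot be told apart by sniffing. A toy standalone sketch of prefix detection using the byte sequences shown above:

package main

import (
	"bytes"
	"fmt"
)

// The detection prefixes from the algorithm declarations above; zstd:chunked
// has none, since its frames look like plain zstd.
var prefixes = map[string][]byte{
	"gzip": {0x1F, 0x8B, 0x08},
	"zstd": {0x28, 0xb5, 0x2f, 0xfd},
}

// detect is a toy stand-in for prefix-based compression detection.
func detect(blob []byte) string {
	for name, p := range prefixes {
		if bytes.HasPrefix(blob, p) {
			return name
		}
	}
	return "uncompressed or unknown"
}

func main() {
	fmt.Println(detect([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00})) // zstd
	fmt.Println(detect([]byte("plain tar data")))             // uncompressed or unknown
}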
vendor/github.com/containers/image/v5/pkg/compression/internal/types.go (generated, vendored; 36 changed lines)
@@ -12,23 +12,28 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)

 // Algorithm is a compression algorithm that can be used for CompressStream.
 type Algorithm struct {
-	name         string
-	mime         string
-	prefix       []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
-	decompressor DecompressorFunc
-	compressor   CompressorFunc
+	name            string
+	baseVariantName string
+	prefix          []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
+	decompressor    DecompressorFunc
+	compressor      CompressorFunc
 }

 // NewAlgorithm creates an Algorithm instance.
+// nontrivialBaseVariantName is typically "".
 // This function exists so that Algorithm instances can only be created by code that
 // is allowed to import this internal subpackage.
-func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+	baseVariantName := name
+	if nontrivialBaseVariantName != "" {
+		baseVariantName = nontrivialBaseVariantName
+	}
 	return Algorithm{
-		name:         name,
-		mime:         mime,
-		prefix:       prefix,
-		decompressor: decompressor,
-		compressor:   compressor,
+		name:            name,
+		baseVariantName: baseVariantName,
+		prefix:          prefix,
+		decompressor:    decompressor,
+		compressor:      compressor,
 	}
 }
@@ -37,10 +42,11 @@ func (c Algorithm) Name() string {
 	return c.name
 }

-// InternalUnstableUndocumentedMIMEQuestionMark ???
-// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
-func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
-	return c.mime
+// BaseVariantName returns the name of the “base variant” of the compression algorithm.
+// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”).
+// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data.
+func (c Algorithm) BaseVariantName() string {
+	return c.baseVariantName
 }

 // AlgorithmCompressor returns the compressor field of algo.
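Note (illustration only): with this change, BaseVariantName() falls back to the algorithm's own name unless a nontrivial base variant was passed to NewAlgorithm. A short usage sketch against the public pkg/compression API, assuming a c/image version that already contains this commit:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	// Plain algorithms pass "" as the second NewAlgorithm argument and therefore
	// report themselves as their own base variant...
	fmt.Println(compression.Gzip.Name(), compression.Gzip.BaseVariantName()) // gzip gzip
	fmt.Println(compression.Zstd.Name(), compression.Zstd.BaseVariantName()) // zstd zstd

	// ...while zstd:chunked declares zstd as its base variant.
	fmt.Println(compression.ZstdChunked.Name())            // zstd:chunked
	fmt.Println(compression.ZstdChunked.BaseVariantName()) // zstd
}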
vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go (generated, vendored; 8 changed lines)
@@ -142,9 +142,13 @@ func WithFulcioAndInteractiveOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oi
 		}

 		logrus.Debugf("Starting interactive OIDC authentication for issuer %s", oidcIssuerURL.Redacted())
-		// This is intended to match oauthflow.DefaultIDTokenGetter, overriding only input/output
+		// This is intended to match oauthflow.DefaultIDTokenGetter (incl. the update in init()), overriding only input/output
+		htmlPage, err := oauth.GetInteractiveSuccessHTML(false, 10)
+		if err != nil {
+			return fmt.Errorf("formatting HTML content: %w", err)
+		}
 		tokenGetter := &oauthflow.InteractiveIDTokenGetter{
-			HTMLPage: oauth.InteractiveSuccessHTML,
+			HTMLPage: htmlPage,
 			Input:    interactiveInput,
 			Output:   interactiveOutput,
 		}
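Note (illustration only): the change renders the interactive-login success page up front instead of using the static oauth.InteractiveSuccessHTML constant. A heavily hedged sketch of the same wiring; the meaning of the GetInteractiveSuccessHTML arguments (assumed here to be an autoclose flag and a timeout) and the Input/Output field types (assumed io.Reader/io.Writer, satisfied by os.Stdin/os.Stdout) are not confirmed by this diff:

package main

import (
	"fmt"
	"os"

	"github.com/sigstore/sigstore/pkg/oauth"
	"github.com/sigstore/sigstore/pkg/oauthflow"
)

func main() {
	// Render the "success" page served after the browser round-trip, mirroring
	// the call in the diff; the argument values are copied from there.
	htmlPage, err := oauth.GetInteractiveSuccessHTML(false, 10)
	if err != nil {
		fmt.Fprintln(os.Stderr, "formatting HTML content:", err)
		os.Exit(1)
	}

	// Wire the page into an interactive token getter, as WithFulcioAndInteractiveOIDC does.
	tokenGetter := &oauthflow.InteractiveIDTokenGetter{
		HTMLPage: htmlPage,
		Input:    os.Stdin,
		Output:   os.Stdout,
	}
	_ = tokenGetter // the getter would then be handed to the OIDC flow
}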