mirror of https://github.com/containers/podman.git (synced 2025-05-21 09:05:56 +08:00)
vendor: bump c/image to 373c52a9466f
[NO NEW TESTS NEEDED]

Signed-off-by: Aditya R <arajan@redhat.com>
3 vendor/github.com/containerd/stargz-snapshotter/estargz/build.go generated vendored
@@ -436,9 +436,8 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
if err != nil {
if err == io.EOF {
break
} else {
return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
switch cleanEntryName(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark:
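The estargz change drops a redundant else branch: once io.EOF breaks out of the loop, the error return can follow unconditionally. For context, a self-contained sketch of the same archive/tar read loop (not estargz's actual importTar):

    package main

    import (
    	"archive/tar"
    	"bytes"
    	"fmt"
    	"io"
    )

    // readEntries iterates tar headers until io.EOF; any other error is a parse failure.
    func readEntries(r io.Reader) ([]string, error) {
    	tr := tar.NewReader(r)
    	var names []string
    	for {
    		h, err := tr.Next()
    		if err != nil {
    			if err == io.EOF {
    				break
    			}
    			return nil, fmt.Errorf("failed to parse tar file, %w", err)
    		}
    		names = append(names, h.Name)
    	}
    	return names, nil
    }

    func main() {
    	var buf bytes.Buffer
    	tw := tar.NewWriter(&buf)
    	tw.WriteHeader(&tar.Header{Name: "hello.txt", Typeflag: tar.TypeReg, Mode: 0600})
    	tw.Close()
    	fmt.Println(readEntries(&buf)) // [hello.txt] <nil>
    }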
2 vendor/github.com/containers/image/v5/copy/encryption.go generated vendored
@@ -70,7 +70,7 @@ func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypt
}
}

// bpdData contains data that the copy pipeline needs about the encryption step.
// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
type bpEncryptionStepData struct {
encrypting bool // We are actually encrypting the stream
finalizer ocicrypt.EncryptLayerFinalizer
2 vendor/github.com/containers/image/v5/copy/multiple.go generated vendored
@@ -340,7 +340,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
if err != nil {
return nil, err
}
sigs = append(sigs, newSigs...)
sigs = append(slices.Clone(sigs), newSigs...)

c.Printf("Storing list signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
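The append(slices.Clone(...), ...) pattern recurs throughout this bump. A minimal sketch, outside this commit, of the aliasing hazard it avoids (golang.org/x/exp/slices is already vendored in this tree):

    package main

    import (
    	"fmt"

    	"golang.org/x/exp/slices"
    )

    func main() {
    	backing := make([]string, 2, 4) // caller-created slice with spare capacity
    	backing[0], backing[1] = "sig1", "sig2"
    	sigs := backing[:2]

    	shared := append(sigs, "new")                // may write into backing's array
    	private := append(slices.Clone(sigs), "new") // always gets a fresh array

    	fmt.Println(&shared[2] == &backing[:3][2])  // true: the caller's array was modified
    	fmt.Println(&private[2] == &backing[:3][2]) // false: the clone owns its array
    }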
11 vendor/github.com/containers/image/v5/copy/single.go generated vendored
@@ -277,7 +277,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
if err != nil {
return copySingleImageResult{}, err
}
sigs = append(sigs, newSigs...)
sigs = append(slices.Clone(sigs), newSigs...)

if len(sigs) > 0 {
c.Printf("Storing signatures\n")
@@ -380,8 +380,9 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,

compressionAlgos := set.New[string]()
for _, srcInfo := range ic.src.LayerInfos() {
compression := compressionAlgorithmFromMIMEType(srcInfo)
compressionAlgos.Add(compression.Name())
if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil {
compressionAlgos.Add(c.Name())
}
}

algos, err := algorithmsByNames(compressionAlgos.Values())
@@ -743,7 +744,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
if srcInfo.Size != -1 {
bar.SetRefill(srcInfo.Size - bar.Current())
refill := srcInfo.Size - bar.Current()
bar.SetCurrent(srcInfo.Size)
bar.SetRefill(refill)
}
bar.mark100PercentComplete()
hideProgressBar = false
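The copyLayer change above is about ordering: the refill amount must be read from the bar's position before SetCurrent() moves it. A sketch with a hypothetical stand-in for c/image's mpb-backed progress bar (not the real type):

    package main

    import "fmt"

    // bar is a hypothetical stand-in for the mpb-backed progress bar.
    type bar struct{ current, refill int64 }

    func (b *bar) Current() int64     { return b.current }
    func (b *bar) SetCurrent(n int64) { b.current = n }
    func (b *bar) SetRefill(n int64)  { b.refill = n }

    func main() {
    	b := &bar{current: 30} // 30 bytes already transferred
    	srcSize := int64(100)

    	refill := srcSize - b.Current() // compute first, while Current() is still 30
    	b.SetCurrent(srcSize)
    	b.SetRefill(refill)
    	fmt.Println(b.current, b.refill) // 100 70
    }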
33 vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go generated vendored
@@ -2,6 +2,7 @@ package daemon

import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
@@ -85,12 +86,40 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
}
}()

err = imageLoad(ctx, c, reader)
}

// imageLoad accepts tar stream on reader and sends it to c
func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
err = fmt.Errorf("saving image to docker engine: %w", err)
return
return fmt.Errorf("starting a load operation in docker engine: %w", err)
}
defer resp.Body.Close()

// jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage,
// copied here to minimize dependencies.
type jsonError struct {
Message string `json:"message,omitempty"`
}
type jsonMessage struct {
Error *jsonError `json:"errorDetail,omitempty"`
}

dec := json.NewDecoder(resp.Body)
for {
var msg jsonMessage
if err := dec.Decode(&msg); err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("parsing docker load progress: %w", err)
}
if msg.Error != nil {
return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
}
}
return nil // No error reported = success
}

// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
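The new imageLoad() drains the engine's streamed JSON status messages and surfaces errorDetail as a Go error. The same decode loop, as a self-contained sketch against a canned stream:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"io"
    	"strings"
    )

    type jsonError struct {
    	Message string `json:"message,omitempty"`
    }
    type jsonMessage struct {
    	Error *jsonError `json:"errorDetail,omitempty"`
    }

    // checkLoadResponse reads JSON messages until EOF, failing if any reports an error.
    func checkLoadResponse(body io.Reader) error {
    	dec := json.NewDecoder(body)
    	for {
    		var msg jsonMessage
    		if err := dec.Decode(&msg); err != nil {
    			if err == io.EOF {
    				return nil // no error reported = success
    			}
    			return fmt.Errorf("parsing docker load progress: %w", err)
    		}
    		if msg.Error != nil {
    			return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
    		}
    	}
    }

    func main() {
    	stream := `{"stream":"Loaded image"}` + "\n" + `{"errorDetail":{"message":"no space left"}}`
    	fmt.Println(checkLoadResponse(strings.NewReader(stream))) // docker engine reported: no space left
    }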
3 vendor/github.com/containers/image/v5/docker/distribution_error.go generated vendored
@@ -24,6 +24,7 @@ import (

"github.com/docker/distribution/registry/api/errcode"
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
"golang.org/x/exp/slices"
)

// errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -105,7 +106,7 @@ func makeErrorList(err error) []error {
}

func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...))
}

// handleErrorResponse returns error parsed from HTTP response for an
5 vendor/github.com/containers/image/v5/docker/docker_client.go generated vendored
@@ -363,6 +363,11 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
hostname := registry
if registry == dockerHostname {
hostname = dockerV1Hostname
// A search term of library/foo does not find the library/foo image on the docker.io servers,
// which is surprising - and that Docker is modifying the search term client-side this same way,
// and it seems convenient to do the same thing.
// Read more here: https://github.com/containers/image/pull/2133#issue-1928524334
image = strings.TrimPrefix(image, "library/")
}

client, err := newDockerClient(sys, hostname, registry)
65 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored
@@ -137,7 +137,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests {
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
if err != nil {
@@ -341,39 +341,58 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// Then try reusing blobs from other locations.
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
for _, candidate := range candidates {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
var err error
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
continue
}
var candidateRepo reference.Named
if !candidate.UnknownLocation {
candidateRepo, err = parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
}
if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
requiredCompression := "nil"
if compressionAlgorithm != nil {
requiredCompression = compressionAlgorithm.Name()
}
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
if !candidate.UnknownLocation {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
} else {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
}
continue
}
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
if !candidate.UnknownLocation {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
}
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
// (the "from" parameter); in that case we might try to use these candidates as well.
//
// OTOH that would mean we can’t do the “blobExists” check, and if there is no match
// we could get an upload request that we would have to cancel.
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
} else {
logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
}

// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
// (the "from" parameter); in that case we might try to use these candidates as well.
//
// OTOH that would mean we can’t do the “blobExists” check, and if there is no match
// we could get an upload request that we would have to cancel.
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
}
// This digest is a known variant of this blob but we don’t
// have a recorded location in this registry, let’s try looking
// for it in the current repo.
candidateRepo = reference.TrimNamed(d.ref.ref)
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
@@ -688,6 +707,10 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
}
}

// To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it.
ociManifest.Layers = slices.Clone(ociManifest.Layers)
// We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to
// the slice in the original object (or in a newly allocated object).
for _, sig := range signatures {
mimeType := sig.UntrustedMIMEType()
payloadBlob := sig.UntrustedPayload()
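The first hunk here is a nil-pointer guard: *types.SystemContext is optional and callers may pass nil, so it must be checked before dereferencing. A tiny sketch with a hypothetical mirror of the field:

    package main

    import "fmt"

    // systemContext is a hypothetical stand-in for types.SystemContext, which may be nil.
    type systemContext struct{ DockerRegistryPushPrecomputeDigests bool }

    func shouldPrecompute(digest string, sys *systemContext) bool {
    	// Short-circuit on sys != nil, exactly like the guard added above.
    	return digest == "" && sys != nil && sys.DockerRegistryPushPrecomputeDigests
    }

    func main() {
    	fmt.Println(shouldPrecompute("", nil))                            // false, and no panic
    	fmt.Println(shouldPrecompute("", &systemContext{true}))           // true
    	fmt.Println(shouldPrecompute("sha256:abc", &systemContext{true})) // false: digest already known
    }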
22 vendor/github.com/containers/image/v5/image/unparsed.go generated vendored
@@ -2,6 +2,8 @@ package image

import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/unparsedimage"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
@@ -17,3 +19,23 @@ type UnparsedImage = image.UnparsedImage
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return image.UnparsedInstance(src, instanceDigest)
}

// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef
type unparsedWithRef struct {
private.UnparsedImage
ref types.ImageReference
}

func (uwr *unparsedWithRef) Reference() types.ImageReference {
return uwr.ref
}

// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef.
// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image
// based on a remote-registry policy.
func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage {
return &unparsedWithRef{
UnparsedImage: unparsedimage.FromPublic(wrappedInstance),
ref: replacementRef,
}
}
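unparsedWithRef relies on Go interface embedding: everything passes through to the wrapped image except Reference(), which is overridden. A hypothetical, self-contained mirror of that pattern (not c/image's actual types):

    package main

    import "fmt"

    // unparsedImage is a hypothetical, trimmed-down stand-in for private.UnparsedImage.
    type unparsedImage interface {
    	Reference() string
    	Manifest() string
    }

    type localImage struct{}

    func (localImage) Reference() string { return "containers-storage:foo" }
    func (localImage) Manifest() string  { return "{...}" }

    // unparsedWithRef overrides only Reference(); Manifest() falls through to the embedded value.
    type unparsedWithRef struct {
    	unparsedImage
    	ref string
    }

    func (u *unparsedWithRef) Reference() string { return u.ref }

    func main() {
    	w := &unparsedWithRef{unparsedImage: localImage{}, ref: "docker://registry.example.com/foo"}
    	// Signature policy can be evaluated against the remote reference while
    	// manifest and blob data still come from the local image.
    	fmt.Println(w.Reference(), w.Manifest()) // docker://registry.example.com/foo {...}
    }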
9 vendor/github.com/containers/image/v5/internal/blobinfocache/types.go generated vendored
@@ -32,7 +32,7 @@ type BlobInfoCache2 interface {
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
@@ -46,7 +46,8 @@ type BlobInfoCache2 interface {

// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
Location types.BICLocationReference
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}
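The struct change means a candidate can now describe a known blob variant without naming a registry location. A sketch of the two shapes using hypothetical mirror types (the real ones live in c/image's internal/blobinfocache and types packages):

    package main

    import "fmt"

    type bicLocationReference struct{ Opaque string }

    type bicReplacementCandidate2 struct {
    	Digest          string
    	CompressorName  string
    	UnknownLocation bool                 // true when Location is not set
    	Location        bicLocationReference // not set if UnknownLocation is true
    }

    func main() {
    	known := bicReplacementCandidate2{
    		Digest:         "sha256:aaaa",
    		CompressorName: "gzip",
    		Location:       bicLocationReference{Opaque: "docker.io/library/busybox"},
    	}
    	unknown := bicReplacementCandidate2{
    		Digest:          "sha256:aaaa",
    		CompressorName:  "zstd",
    		UnknownLocation: true, // a known variant of the blob, but nowhere recorded to fetch it from
    	}
    	fmt.Println(known.UnknownLocation, unknown.UnknownLocation) // false true
    }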
57 vendor/github.com/containers/image/v5/internal/image/oci.go generated vendored
@@ -196,14 +196,12 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
return m.convertToManifestSchema2(ctx, options)
}

// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions.
// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format.
// If not, it returns (nil, nil).
// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
// and edits *options to not try decryption again.
func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool {
return info.CryptoOperation == types.Decrypt
}) {
func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
if options == nil || options.LayerInfos == nil {
return nil, nil
}

@@ -212,19 +210,35 @@ func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.Manife
return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
}

res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionaly deviate.
updatedEdits := slices.Clone(options.LayerInfos)
for i, info := range options.LayerInfos {
if info.CryptoOperation == types.Decrypt {
res[i].CryptoOperation = types.Decrypt
updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate.
laterEdits := slices.Clone(options.LayerInfos)
needsOCIOnlyEdits := false
for i, edit := range options.LayerInfos {
// Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal
ociOnlyEdits[i].CompressionAlgorithm = nil

if edit.CryptoOperation == types.Decrypt {
needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas
ociOnlyEdits[i].CryptoOperation = types.Decrypt
laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
}

if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd ||
originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas.
ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation
ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm
laterEdits[i].CompressionOperation = types.PreserveOriginal
laterEdits[i].CompressionAlgorithm = nil
}
// Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
res[i].CompressionOperation = types.PreserveOriginal
res[i].CompressionAlgorithm = nil
}
options.LayerInfos = updatedEdits
return res, nil
if !needsOCIOnlyEdits {
return nil, nil
}

options.LayerInfos = laterEdits
return ociOnlyEdits, nil
}

// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
@@ -238,15 +252,15 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type

// Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
// which remove OCI-specific features, because trying to convert those layers would fail.
// So, do the layer updates for decryption.
// So, do the layer updates for decryption, and for conversions from Zstd.
ociManifest := m.m
layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options)
ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options)
if err != nil {
return nil, err
}
if layerDecryptEdits != nil {
if ociOnlyEdits != nil {
ociManifest = manifest.OCI1Clone(ociManifest)
if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil {
if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil {
return nil, err
}
}
@@ -275,9 +289,8 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
case imgspecv1.MediaTypeImageLayerZstd:
return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
// FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released
case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc:
ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc:
return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
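The constraint driving layerEditsOfOCIOnlyFeatures is that Docker schema2 has no zstd layer media type, so zstd layers need an OCI-side re-compression edit before conversion. A simplified illustration (the mapping below is illustrative, not the full table c/image uses):

    package main

    import "fmt"

    // ociToSchema2 maps convertible OCI layer media types to Docker schema2 ones.
    // zstd is deliberately absent: it cannot be represented in schema2.
    var ociToSchema2 = map[string]string{
    	"application/vnd.oci.image.layer.v1.tar":      "application/vnd.docker.image.rootfs.diff.tar",
    	"application/vnd.oci.image.layer.v1.tar+gzip": "application/vnd.docker.image.rootfs.diff.tar.gzip",
    }

    func main() {
    	for _, mt := range []string{
    		"application/vnd.oci.image.layer.v1.tar+gzip",
    		"application/vnd.oci.image.layer.v1.tar+zstd",
    	} {
    		if converted, ok := ociToSchema2[mt]; ok {
    			fmt.Println(mt, "->", converted)
    		} else {
    			fmt.Println(mt, "-> needs an OCI-only layer edit (re-compression) first")
    		}
    	}
    }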
4 vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go generated vendored
@@ -133,7 +133,9 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
}
}
if len(addedEntries) != 0 {
index.Manifests = append(index.Manifests, addedEntries...)
// slices.Clone() here to ensure a private backing array;
// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
}
return nil
}
6 vendor/github.com/containers/image/v5/internal/manifest/oci_index.go generated vendored
@@ -167,7 +167,9 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
}
}
if len(addedEntries) != 0 {
index.Manifests = append(index.Manifests, addedEntries...)
// slices.Clone() here to ensure the slice uses a private backing array;
// an external caller could have manually created OCI1IndexPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
}
if len(addedEntries) != 0 || updatedAnnotations {
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
@@ -220,7 +222,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip
case ic.manifestPosition != other.manifestPosition:
return ic.manifestPosition < other.manifestPosition
}
panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
}

// chooseInstance is a private equivalent to ChooseInstanceByCompression,
12 vendor/github.com/containers/image/v5/oci/archive/oci_src.go generated vendored
@@ -28,6 +28,18 @@ func (e ImageNotFoundError) Error() string {
return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
}

// ArchiveFileNotFoundError occurs when the archive file does not exist.
type ArchiveFileNotFoundError struct {
// ref is the image reference
ref ociArchiveReference
// path is the file path that was not present
path string
}

func (e ArchiveFileNotFoundError) Error() string {
return fmt.Sprintf("archive file not found: %q", e.path)
}

type ociArchiveImageSource struct {
impl.Compat
19 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go generated vendored
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"strings"

@@ -171,18 +172,24 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error)

// creates the temporary directory and copies the tarred content to it
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
src := ref.resolvedFile
arch, err := os.Open(src)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return tempDirOCIRef{}, ArchiveFileNotFoundError{ref: ref, path: src}
} else {
return tempDirOCIRef{}, err
}
}
defer arch.Close()

tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
}
src := ref.resolvedFile
dst := tempDirRef.tempDirectory

// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
arch, err := os.Open(src)
if err != nil {
return tempDirOCIRef{}, err
}
defer arch.Close()
if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)
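The restructured createUntarTempDir opens the archive first and converts fs.ErrNotExist into the new typed error, so callers can match on it with errors.As. A self-contained sketch of that pattern:

    package main

    import (
    	"errors"
    	"fmt"
    	"io/fs"
    	"os"
    )

    // archiveFileNotFoundError mirrors ArchiveFileNotFoundError above (minus the image reference).
    type archiveFileNotFoundError struct{ path string }

    func (e archiveFileNotFoundError) Error() string {
    	return fmt.Sprintf("archive file not found: %q", e.path)
    }

    func openArchive(path string) (*os.File, error) {
    	f, err := os.Open(path)
    	if err != nil {
    		if errors.Is(err, fs.ErrNotExist) {
    			return nil, archiveFileNotFoundError{path: path}
    		}
    		return nil, err
    	}
    	return f, nil
    }

    func main() {
    	_, err := openArchive("/nonexistent/archive.tar")
    	var notFound archiveFileNotFoundError
    	fmt.Println(errors.As(err, &notFound)) // true: callers can special-case the missing file
    }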
240 vendor/github.com/containers/image/v5/oci/layout/oci_delete.go generated vendored Normal file
@@ -0,0 +1,240 @@
package layout

import (
"context"
"encoding/json"
"fmt"
"io/fs"
"os"

"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)

// DeleteImage deletes the named image from the directory, if supported.
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
sharedBlobsDir := ""
if sys != nil && sys.OCISharedBlobDirPath != "" {
sharedBlobsDir = sys.OCISharedBlobDirPath
}

descriptor, descriptorIndex, err := ref.getManifestDescriptor()
if err != nil {
return err
}

var blobsUsedByImage map[digest.Digest]int

switch descriptor.MediaType {
case imgspecv1.MediaTypeImageManifest:
blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir)
case imgspecv1.MediaTypeImageIndex:
blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir)
default:
return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
}
if err != nil {
return err
}

blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir)
if err != nil {
return err
}

err = ref.deleteBlobs(blobsToDelete)
if err != nil {
return err
}

return ref.deleteReferenceFromIndex(descriptorIndex)
}

func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
manifest, err := ref.getManifest(descriptor, sharedBlobsDir)
if err != nil {
return nil, err
}
blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest)
blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference

return blobsUsedInManifest, nil
}

func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
if err != nil {
return nil, err
}
index, err := parseIndex(blobPath)
if err != nil {
return nil, err
}

blobsUsedInImageRefIndex := make(map[digest.Digest]int)
err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir)
if err != nil {
return nil, err
}
blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference

return blobsUsedInImageRefIndex, nil
}

// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map
func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
for _, descriptor := range index.Manifests {
destination[descriptor.Digest]++
switch descriptor.MediaType {
case imgspecv1.MediaTypeImageManifest:
manifest, err := ref.getManifest(&descriptor, sharedBlobsDir)
if err != nil {
return err
}
for digest, count := range ref.getBlobsUsedInManifest(manifest) {
destination[digest] += count
}
case imgspecv1.MediaTypeImageIndex:
blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
if err != nil {
return err
}
index, err := parseIndex(blobPath)
if err != nil {
return err
}
err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir)
if err != nil {
return err
}
default:
return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
}
}

return nil
}

func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int {
blobsUsedInManifest := make(map[digest.Digest]int, 0)

blobsUsedInManifest[manifest.Config.Digest]++
for _, layer := range manifest.Layers {
blobsUsedInManifest[layer.Digest]++
}

return blobsUsedInManifest
}

// This takes in a map of the digest and their usage count in the manifest to be deleted
// It will compare it to the digest usage in the root index, and return a set of the blobs that can be safely deleted
func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
rootIndex, err := ref.getIndex()
if err != nil {
return nil, err
}
blobsUsedInRootIndex := make(map[digest.Digest]int)
err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
if err != nil {
return nil, err
}

blobsToDelete := set.New[digest.Digest]()

for digest, count := range blobsUsedInRootIndex {
if count-blobsUsedByDescriptorToDelete[digest] == 0 {
blobsToDelete.Add(digest)
}
}

return blobsToDelete, nil
}

// This transport never generates layouts where blobs for an image are both in the local blobs directory
// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set.
//
// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what
// the other layouts sharing that directory are, and we might not even have permission to read them),
// so we can’t really delete any blobs in that case.
// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt,
// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently
// check for local blobs (but we should make no noise if the blobs are actually in the shared directory).
//
// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set
func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error {
for _, digest := range blobsToDelete.Values() {
blobPath, err := ref.blobPath(digest, "") //Only delete in the local directory, see comment above
if err != nil {
return err
}
err = deleteBlob(blobPath)
if err != nil {
return err
}
}

return nil
}

func deleteBlob(blobPath string) error {
logrus.Debug(fmt.Sprintf("Deleting blob at %q", blobPath))

err := os.Remove(blobPath)
if err != nil && !os.IsNotExist(err) {
return err
} else {
return nil
}
}

func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
index, err := ref.getIndex()
if err != nil {
return err
}

index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1)

return saveJSON(ref.indexPath(), index)
}

func saveJSON(path string, content any) error {
// If the file already exists, get its mode to preserve it
var mode fs.FileMode
existingfi, err := os.Stat(path)
if err != nil {
if !os.IsNotExist(err) {
return err
} else { // File does not exist, use default mode
mode = 0644
}
} else {
mode = existingfi.Mode()
}

file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return err
}
defer file.Close()

return json.NewEncoder(file).Encode(content)
}

func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) {
manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
if err != nil {
return nil, err
}

manifest, err := parseJSON[imgspecv1.Manifest](manifestPath)
if err != nil {
return nil, err
}

return manifest, nil
}
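The core of the new DeleteImage is reference counting: a blob may be deleted only if every use recorded in the root index belongs to the image being removed. The count comparison, reduced to a sketch:

    package main

    import "fmt"

    // blobsToDelete returns the digests whose entire root-index usage is accounted
    // for by the image being deleted, mirroring getBlobsToDelete above.
    func blobsToDelete(usedInRootIndex, usedByImage map[string]int) []string {
    	var res []string
    	for digest, count := range usedInRootIndex {
    		if count-usedByImage[digest] == 0 {
    			res = append(res, digest)
    		}
    	}
    	return res
    }

    func main() {
    	root := map[string]int{"sha256:cfg": 2, "sha256:layer": 1}
    	image := map[string]int{"sha256:cfg": 1, "sha256:layer": 1}
    	fmt.Println(blobsToDelete(root, image)) // [sha256:layer]; sha256:cfg is still used elsewhere
    }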
15 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go generated vendored
@@ -19,6 +19,7 @@ import (
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)

type ociImageDestination struct {
@@ -84,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (private.Im
// Per the OCI image specification, layouts MUST have a "blobs" subdirectory,
// but it MAY be empty (e.g. if we never end up calling PutBlob)
// https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19
if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil {
if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil {
return nil, err
}
return d, nil
@@ -271,8 +272,8 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
return
}
}
// It's a new entry to be added to the index.
d.index.Manifests = append(d.index.Manifests, *desc)
// It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created.
d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc)
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@@ -283,7 +284,13 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{
Version: imgspecv1.ImageLayoutVersion,
})
if err != nil {
return err
}
if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil {
return err
}
indexJSON, err := json.Marshal(d.index)
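The Commit() change replaces a hand-written JSON literal with a marshal of the spec's ImageLayout struct, giving the version string a single source of truth (the serialized form differs only in whitespace). A standalone check, using the real image-spec package that is vendored here:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
    	layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{
    		Version: imgspecv1.ImageLayoutVersion,
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(layoutBytes)) // {"imageLayoutVersion":"1.0.0"}
    }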
2 vendor/github.com/containers/image/v5/oci/layout/oci_src.go generated vendored
@@ -60,7 +60,7 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSo

client := &http.Client{}
client.Transport = tr
descriptor, err := ref.getManifestDescriptor()
descriptor, _, err := ref.getManifestDescriptor()
if err != nil {
return nil, err
}
62 vendor/github.com/containers/image/v5/oci/layout/oci_transport.go generated vendored
@@ -160,48 +160,56 @@ func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext)
// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together
// with an error.
func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
indexJSON, err := os.Open(ref.indexPath())
if err != nil {
return nil, err
}
defer indexJSON.Close()

index := &imgspecv1.Index{}
if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
return nil, err
}
return index, nil
return parseIndex(ref.indexPath())
}

func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
func parseIndex(path string) (*imgspecv1.Index, error) {
return parseJSON[imgspecv1.Index](path)
}

func parseJSON[T any](path string) (*T, error) {
content, err := os.Open(path)
if err != nil {
return nil, err
}
defer content.Close()

obj := new(T)
if err := json.NewDecoder(content).Decode(obj); err != nil {
return nil, err
}
return obj, nil
}

func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) {
index, err := ref.getIndex()
if err != nil {
return imgspecv1.Descriptor{}, err
return imgspecv1.Descriptor{}, -1, err
}

if ref.image == "" {
// return manifest if only one image is in the oci directory
if len(index.Manifests) != 1 {
// ask user to choose image when more than one image in the oci directory
return imgspecv1.Descriptor{}, ErrMoreThanOneImage
return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
}
return index.Manifests[0], nil
return index.Manifests[0], 0, nil
} else {
// if image specified, look through all manifests for a match
var unsupportedMIMETypes []string
for _, md := range index.Manifests {
for i, md := range index.Manifests {
if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
return md, nil
return md, i, nil
}
unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
}
}
if len(unsupportedMIMETypes) != 0 {
return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
}
}
return imgspecv1.Descriptor{}, ImageNotFoundError{ref}
return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
}

// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
@@ -211,7 +219,8 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor,
if !ok {
return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef")
}
return ociRef.getManifestDescriptor()
md, _, err := ociRef.getManifestDescriptor()
return md, err
}

// NewImageSource returns a types.ImageSource for this reference.
@@ -226,19 +235,14 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst
return newImageDestination(sys, ref)
}

// DeleteImage deletes the named image from the registry, if supported.
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
return errors.New("Deleting images not implemented for oci: images")
}

// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
func (ref ociReference) ociLayoutPath() string {
return filepath.Join(ref.dir, "oci-layout")
return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile)
}

// indexPath returns a path for the index.json within a directory using OCI conventions.
func (ref ociReference) indexPath() string {
return filepath.Join(ref.dir, "index.json")
return filepath.Join(ref.dir, imgspecv1.ImageIndexFile)
}

// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
@@ -246,9 +250,11 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
if err := digest.Validate(); err != nil {
return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
}
blobDir := filepath.Join(ref.dir, "blobs")
var blobDir string
if sharedBlobDir != "" {
blobDir = sharedBlobDir
} else {
blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
}
return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
}
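parseJSON is a small use of Go generics: one helper decodes any JSON file into a freshly allocated *T. The same shape, runnable against a temp file:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // parseJSON decodes the JSON file at path into a new *T, like the helper above.
    func parseJSON[T any](path string) (*T, error) {
    	content, err := os.Open(path)
    	if err != nil {
    		return nil, err
    	}
    	defer content.Close()

    	obj := new(T)
    	if err := json.NewDecoder(content).Decode(obj); err != nil {
    		return nil, err
    	}
    	return obj, nil
    }

    type layout struct {
    	Version string `json:"imageLayoutVersion"`
    }

    func main() {
    	path := filepath.Join(os.TempDir(), "oci-layout")
    	if err := os.WriteFile(path, []byte(`{"imageLayoutVersion":"1.0.0"}`), 0o644); err != nil {
    		panic(err)
    	}
    	l, err := parseJSON[layout](path)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(l.Version) // 1.0.0
    }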
vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go generated vendored
@@ -10,15 +10,20 @@ import (
"github.com/opencontainers/go-digest"
)

// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5

// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2.
// This is a heuristic/guess, and could well use a different value.
const replacementUnknownLocationAttempts = 2

// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
LastSeen time.Time // Time the candidate was last known to exist (either read or written)
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
}

// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
@@ -77,9 +82,22 @@ func (css *candidateSortState) Swap(i, j int) {
css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
}

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
func min(a, b int) int {
if a < b {
return a
}
return b
}

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
// number of entries to limit for known and unknown location separately, only to make testing simpler.
// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original
// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
// split unknown candidates and known candidates
// and limit them seperately.
var knownLocationCandidates []CandidateWithTime
var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
@@ -88,24 +106,34 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
primaryDigest: primaryDigest,
uncompressedDigest: uncompressedDigest,
})

resLength := len(cs)
if resLength > maxCandidates {
resLength = maxCandidates
for _, candidate := range cs {
if candidate.Candidate.UnknownLocation {
unknownLocationCandidates = append(unknownLocationCandidates, candidate)
} else {
knownLocationCandidates = append(knownLocationCandidates, candidate)
}
}
res := make([]blobinfocache.BICReplacementCandidate2, resLength)
for i := range res {
res[i] = cs[i].Candidate

knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
remainingCapacity := totalLimit - knownLocationCandidatesUsed
unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates)))
res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
for i := 0; i < knownLocationCandidatesUsed; i++ {
res[i] = knownLocationCandidates[i].Candidate
}
// If candidates with unknown location are found, lets add them to final list
for i := 0; i < unknownLocationCandidatesUsed; i++ {
res = append(res, unknownLocationCandidates[i].Candidate)
}
return res
}

// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
// the primary digest the user actually asked for, the corresponding uncompressed digest (if known, possibly equal to the primary digest) returns an
// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
}
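The replacement of the single maxCandidates cap is just arithmetic over two pools. Isolated into a sketch, with the same min() helper the file adds:

    package main

    import "fmt"

    func min(a, b int) int {
    	if a < b {
    		return a
    	}
    	return b
    }

    // limitCandidates fills up to totalLimit with known-location candidates, then
    // adds unknown-location ones, capped by noLocationLimit and by remaining room.
    func limitCandidates(known, unknown, totalLimit, noLocationLimit int) (usedKnown, usedUnknown int) {
    	usedKnown = min(known, totalLimit)
    	remaining := totalLimit - usedKnown
    	usedUnknown = min(noLocationLimit, min(remaining, unknown))
    	return
    }

    func main() {
    	fmt.Println(limitCandidates(7, 4, 5, 2)) // 5 0: known candidates already fill the limit
    	fmt.Println(limitCandidates(2, 4, 5, 2)) // 2 2: room remains, unknown ones capped at 2
    }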
54 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go generated vendored
@@ -133,24 +133,39 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
mem.compressors[blobDigest] = compressorName
}

// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
// with corresponding compression info from mem.compressors, and returns the result of appending
// them to candidates. v2Output allows including candidates with unknown location, and filters out
// candidates with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
compressorName := blobinfocache.UnknownCompression
if v, ok := mem.compressors[digest]; ok {
compressorName = v
}
if compressorName == blobinfocache.UnknownCompression && v2Output {
return candidates
}
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
for l, t := range locations {
compressorName, compressorKnown := mem.compressors[digest]
if !compressorKnown {
if requireCompressionInfo {
continue
}
compressorName = blobinfocache.UnknownCompression
if len(locations) > 0 {
for l, t := range locations {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
},
LastSeen: t,
})
}
} else if v2Output {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
Digest: digest,
CompressorName: compressorName,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: t,
LastSeen: time.Time{},
})
}
return candidates
@@ -166,7 +181,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}

// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
@@ -176,23 +191,24 @@ func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope type
return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}

func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
if otherDigests != nil {
for _, d := range otherDigests.Values() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
}
}
}
if uncompressedDigest != primaryDigest {
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
}
}
}
48
vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
generated
vendored
@ -57,7 +57,7 @@ type cache struct {

// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
// That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
// the case for callers of c/image, where image operations might be a small proportion of hte total runtime, and the cache is fairly
// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
// incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
// a Close method, so creating a lot of single-use caches could leak data.
//
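The refcounting scheme this comment motivates (and which the Open hunk below implements) can be sketched generically: the first Open creates the expensive resource, the last Close releases it. An illustrative stand-in, not the c/image code; the real cache stores a *sql.DB and tolerates open failures:

package main

import (
    "fmt"
    "sync"
)

// refCounted shares one expensive handle among concurrent users.
type refCounted struct {
    mu       sync.Mutex
    refCount int
    handle   string // stand-in for *sql.DB
}

func (r *refCounted) Open() string {
    r.mu.Lock()
    defer r.mu.Unlock()
    if r.refCount == 0 {
        r.handle = "opened-db" // rawOpen(path) in the real code
    }
    r.refCount++
    return r.handle
}

func (r *refCounted) Close() {
    r.mu.Lock()
    defer r.mu.Unlock()
    r.refCount--
    if r.refCount == 0 {
        r.handle = "" // db.Close() in the real code
    }
}

func main() {
    var pool refCounted
    h := pool.Open() // first user opens the database
    _ = pool.Open()  // second user shares the same handle
    pool.Close()
    pool.Close() // last user actually closes it
    fmt.Println("shared handle was:", h)
}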
@ -117,7 +117,7 @@ func (sqc *cache) Open() {
if sqc.refCount == 0 {
db, err := rawOpen(sqc.path)
if err != nil {
logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err)
logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
db = nil // But still increase sqc.refCount, because a .Close() will happen
}
sqc.db = db
@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {

// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
// Ideally we should be able to distinguish between read-only and read-write transctions, see the _txlock=exclusive dicussion.
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.

var zeroRes T // A zero value of T

@ -249,7 +249,7 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
//
// Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentialy more btree lookups)
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
// is probably costlier than comparing a few more bytes of data.
//
// Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
@ -427,11 +427,13 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
// location, and filters out candidates with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
var rows *sql.Rows
var err error
if requireCompressionInfo {
if v2Output {
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
"ON KnownLocations.digest = DigestCompressors.digest "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
@ -448,6 +450,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
}
defer rows.Close()

res := []prioritize.CandidateWithTime{}
for rows.Next() {
var location string
var time time.Time
@ -455,7 +458,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
if err := rows.Scan(&location, &time, &compressorName); err != nil {
return nil, fmt.Errorf("scanning candidate: %w", err)
}
candidates = append(candidates, prioritize.CandidateWithTime{
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
@ -467,10 +470,29 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through locations: %w", err)
}

if len(res) == 0 && v2Output {
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
if err != nil {
return nil, fmt.Errorf("scanning compressorName: %w", err)
}
if found {
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressor,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
}
}
candidates = append(candidates, res...)
return candidates, nil
}

// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
@ -483,11 +505,11 @@ func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope type
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
}

func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
var uncompressedDigest digest.Digest // = ""
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
res := []prioritize.CandidateWithTime{}
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo)
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
if err != nil {
return nil, err
}
@ -516,7 +538,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
return nil, err
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
if err != nil {
return nil, err
}
@ -527,7 +549,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
}

if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
if err != nil {
return nil, err
}
3
vendor/github.com/containers/image/v5/pkg/shortnames/shortnames.go
generated
vendored
@ -11,6 +11,7 @@ import (
"github.com/containers/image/v5/types"
"github.com/manifoldco/promptui"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
"golang.org/x/term"
)

@ -169,7 +170,7 @@ func (r *Resolved) Description() string {
// pull errors must equal the number of pull candidates.
func (r *Resolved) FormatPullErrors(pullErrors []error) error {
if len(pullErrors) > 0 && len(pullErrors) != len(r.PullCandidates) {
pullErrors = append(pullErrors,
pullErrors = append(slices.Clone(pullErrors),
fmt.Errorf("internal error: expected %d instead of %d errors for %d pull candidates",
len(r.PullCandidates), len(pullErrors), len(r.PullCandidates)))
}
2
vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
generated
vendored
@ -66,7 +66,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
if err != nil {
return err
}
tlsc.Certificates = append(tlsc.Certificates, cert)
tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
}
if strings.HasSuffix(f.Name(), ".key") {
keyName := f.Name()
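Both slices.Clone call sites in this commit (FormatPullErrors above, SetupCertificates here) guard against the same Go pitfall: append reuses the destination's backing array when spare capacity exists, so two appends from the same base slice can overwrite each other's element. A minimal, runnable demonstration:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    base := make([]string, 1, 2) // len 1, cap 2: one spare slot
    base[0] = "shared"

    a := append(base, "from-a") // reuses base's backing array
    b := append(base, "from-b") // overwrites the same slot!
    fmt.Println(a[1], b[1])     // from-b from-b

    c := append(slices.Clone(base), "from-c") // fresh backing array
    fmt.Println(a[1], c[1])                   // from-b from-c
}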
27
vendor/github.com/containers/image/v5/storage/storage_reference.go
generated
vendored
@ -10,6 +10,7 @@ import (

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
@ -283,3 +284,29 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(sys, s)
}

// ResolveReference finds the underlying storage image for a storage.Transport reference.
// It returns that image, and an updated reference which can be used to refer back to the _same_
// image again.
//
// This matters if the input reference contains a tagged name; the destination of the tag can
// move in local storage. The updated reference returned by this function contains the resolved
// image ID, so later uses of that updated reference will either continue to refer to the same
// image, or fail.
//
// Note that it _is_ possible for the later uses to fail, either because the image was removed
// completely, or because the name used in the reference was untagged (even if the underlying image
// ID still exists in local storage).
func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) {
sref, ok := ref.(*storageReference)
if !ok {
return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(),
transports.ImageName(ref))
}
clone := *sref // A shallow copy we can update
img, err := clone.resolveImage(nil)
if err != nil {
return nil, nil, err
}
return clone, img, nil
}
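A hedged usage sketch of the ResolveReference added above; the helper name pinImage is hypothetical, while the signature comes from the hunk itself:

package storageutil

import (
    istorage "github.com/containers/image/v5/storage"
    "github.com/containers/image/v5/types"
)

// pinImage resolves a possibly-tagged containers-storage reference once, and
// returns a reference that keeps denoting that same image even if the tag moves.
func pinImage(ref types.ImageReference) (types.ImageReference, error) {
    resolved, img, err := istorage.ResolveReference(ref)
    if err != nil {
        return nil, err
    }
    _ = img // the storage.Image is returned too, so callers need not look it up again
    return resolved, nil
}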
29
vendor/github.com/containers/image/v5/storage/storage_transport.go
generated
vendored
@ -48,9 +48,24 @@ type StoreTransport interface {
GetStoreIfSet() storage.Store
// GetImage retrieves the image from the transport's store that's named
// by the reference.
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Use storage.ResolveReference instead.
GetImage(types.ImageReference) (*storage.Image, error)
// GetStoreImage retrieves the image from a specified store that's named
// by the reference.
//
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
//
// Use storage.ResolveReference instead.
GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
// ParseStoreReference parses a reference, overriding any store
// specification that it may contain.
@ -290,6 +305,14 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
return s.ParseStoreReference(store, reference)
}

// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
//
// Use storage.ResolveReference instead.
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref != nil {
@ -306,6 +329,12 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe
return nil, storage.ErrImageUnknown
}

// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Use storage.ResolveReference instead.
func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
store, err := s.GetStore()
if err != nil {
4
vendor/github.com/containers/image/v5/types/types.go
generated
vendored
@ -445,7 +445,7 @@ type ImageCloser interface {
Close() error
}

// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
type ManifestUpdateOptions struct {
LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
EmbeddedDockerReference reference.Named
@ -457,7 +457,7 @@ type ManifestUpdateOptions struct {
// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
// only to make writing struct literals possible.
type ManifestUpdateInformation struct {
Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
}
4
vendor/github.com/containers/image/v5/version/version.go
generated
vendored
@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 28
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 1

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)

// Version is the specification version that the package types support.
1
vendor/github.com/containers/ocicrypt/Makefile
generated
vendored
@ -28,6 +28,7 @@ vendor:
go mod tidy

test:
go clean -testcache
go test ./... -test.v

generate-protobuf:
6
vendor/github.com/containers/ocicrypt/keywrap/pkcs11/keywrapper_pkcs11.go
generated
vendored
@ -41,7 +41,11 @@ func NewKeyWrapper() keywrap.KeyWrapper {
// WrapKeys wraps the session key for recipients and encrypts the optsData, which
// describes the symmetric key used for encrypting the layer
func (kw *pkcs11KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, append(ec.Parameters["pkcs11-pubkeys"], ec.Parameters["pkcs11-yamls"]...))
// append({}, ...) allocates a fresh backing array, and that's necessary to guarantee concurrent calls to WrapKeys (as in c/image/copy.Image)
// can't race writing to the same backing array.
pubKeys := append([][]byte{}, ec.Parameters["pkcs11-pubkeys"]...) // In Go 1.21, slices.Clone(ec.Parameters["pkcs11-pubkeys"])
pubKeys = append(pubKeys, ec.Parameters["pkcs11-yamls"]...)
pkcs11Recipients, err := addPubKeys(&ec.DecryptConfig, pubKeys)
if err != nil {
return nil, err
}
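The same aliasing concern as the slices.Clone hunks, expressed in the pre-Go-1.21 idiom used here: appending onto an empty slice literal forces a copy, so the merged slice owns its backing array. Isolated into a runnable sketch (merge is a hypothetical helper):

package main

import "fmt"

func merge(a, b [][]byte) [][]byte {
    out := append([][]byte{}, a...) // always copies; slices.Clone(a) in Go 1.21
    return append(out, b...)
}

func main() {
    a := [][]byte{[]byte("pub1")}
    b := [][]byte{[]byte("yaml1")}
    merged := merge(a, b)
    merged[0] = []byte("mutated") // does not affect a
    fmt.Printf("%s %s\n", a[0], merged[0])
}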
10
vendor/github.com/containers/ocicrypt/spec/spec.go
generated
vendored
@ -9,8 +9,12 @@ const (
MediaTypeLayerZstdEnc = "application/vnd.oci.image.layer.v1.tar+zstd+encrypted"
// MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers.
MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
// MediaTypeLayerGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers.
// MediaTypeLayerNonDistributableGzipEnc is MIME type used for non distributable encrypted gzip-compressed layers.
MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
// MediaTypeLayerZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers.
MediaTypeLayerNonDistributableZsdtEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
// MediaTypeLayerNonDistributableZstdEnc is MIME type used for non distributable encrypted zstd-compressed layers.
MediaTypeLayerNonDistributableZstdEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd+encrypted"
// MediaTypeLayerNonDistributableZsdtEnc is MIME type used for non distributable encrypted zstd-compressed layers.
//
// Deprecated: Use [MediaTypeLayerNonDistributableZstdEnc].
MediaTypeLayerNonDistributableZsdtEnc = MediaTypeLayerNonDistributableZstdEnc
)
6
vendor/github.com/containers/storage/.cirrus.yml
generated
vendored
@ -17,13 +17,13 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
FEDORA_NAME: "fedora-38"
FEDORA_NAME: "fedora-39"
DEBIAN_NAME: "debian-13"

# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20230816t191118z-f38f37d13"
IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@ -113,8 +113,6 @@ debian_testing_task: &debian_testing
TEST_DRIVER: "fuse-overlay-whiteout"
- env:
TEST_DRIVER: "btrfs"
- env:
TEST_DRIVER: "zfs"


lint_task:
20
vendor/github.com/containers/storage/layers.go
generated
vendored
@ -1245,8 +1245,8 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
if parentLayer != nil {
parent = parentLayer.ID
}
var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
var (
templateIDMappings *idtools.IDMappings
templateMetadata string
templateCompressedDigest digest.Digest
templateCompressedSize int64
@ -1274,11 +1274,6 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
} else {
templateIDMappings = &idtools.IDMappings{}
}
if parentLayer != nil {
parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
} else {
parentMappings = &idtools.IDMappings{}
}
if mountLabel != "" {
selinux.ReserveLabel(mountLabel)
}
@ -1353,6 +1348,12 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
IDMappings: idMappings,
}

var parentMappings, oldMappings *idtools.IDMappings
if parentLayer != nil {
parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
} else {
parentMappings = &idtools.IDMappings{}
}
if moreOptions.TemplateLayer != "" {
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer)
@ -1371,10 +1372,13 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err)
}
}
oldMappings = parentMappings
if parentLayer != nil {
oldMappings = parentMappings
}
}

if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
if oldMappings != nil &&
(!reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs())) {
if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
cleanupFailureContext = "in UpdateLayerIDMap"
return nil, -1, err
19
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
@ -955,14 +955,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
if options.ForceMask != nil {
// if ForceMask is in place, make sure lchown is disabled.
doChown = false
uid, gid, mode, err := GetFileOwner(dest)
if err == nil {
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
}
}
var rootHdr *tar.Header

// Iterate through the files in the archive.
loop:
@ -1007,6 +1001,9 @@ loop:
if err != nil {
return err
}
if rel == "." {
rootHdr = hdr
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
@ -1080,6 +1077,14 @@ loop:
return err
}
}

if options.ForceMask != nil && rootHdr != nil {
value := fmt.Sprintf("%d:%d:0%o", rootHdr.Uid, rootHdr.Gid, rootHdr.Mode)
if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil {
return err
}
}

return nil
}
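The xattr value written by both the deleted and the added code follows the same "uid:gid:0octal-mode" format; only the source of the numbers changed (the pre-existing directory owner before, the archive's root entry now). A standalone sketch of just that formatting, with hypothetical values:

package main

import "fmt"

func main() {
    uid, gid, mode := 0, 0, 0o755
    value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
    fmt.Println(value) // 0:0:0755, stored under idtools.ContainersOverrideXattr
}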
8
vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
generated
vendored
@ -8,7 +8,7 @@ import (
"crypto/rsa"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"sync"
"time"
@ -159,7 +159,7 @@ func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) (
// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
keys, err := r.keysFromRemote(ctx)
if err != nil {
return nil, fmt.Errorf("fetching keys %v", err)
return nil, fmt.Errorf("fetching keys %w", err)
}

for _, key := range keys {
@ -228,11 +228,11 @@ func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {

resp, err := doRequest(r.ctx, req)
if err != nil {
return nil, fmt.Errorf("oidc: get keys failed %v", err)
return nil, fmt.Errorf("oidc: get keys failed %w", err)
}
defer resp.Body.Close()

body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}
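These go-oidc hunks (and the two files that follow) make two mechanical changes: io.ReadAll replaces the deprecated ioutil.ReadAll, and %w replaces %v so callers can reach the cause with errors.Is/As. A standalone illustration of both:

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
)

func readAll(r io.Reader) ([]byte, error) {
    body, err := io.ReadAll(r) // was: ioutil.ReadAll(r)
    if err != nil {
        return nil, fmt.Errorf("fetching keys %w", err) // was: %v, which loses the error chain
    }
    return body, nil
}

func main() {
    body, err := readAll(strings.NewReader("{}"))
    fmt.Println(string(body), errors.Is(err, io.EOF)) // {} false
}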
6
vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
generated
vendored
@ -10,7 +10,7 @@ import (
"errors"
"fmt"
"hash"
"io/ioutil"
"io"
"mime"
"net/http"
"strings"
@ -211,7 +211,7 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
}
defer resp.Body.Close()

body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}
@ -332,7 +332,7 @@ func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource)
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
4
vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
generated
vendored
@ -7,7 +7,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"strings"
"time"
@ -182,7 +182,7 @@ func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src
}
defer resp.Body.Close()

body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}
11
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
@ -197,12 +197,13 @@ encodeLoop:

// Set m to a match at offset if it looks like that will improve compression.
improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
delta := s - offset
if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
return
}
if debugAsserts {
if offset <= 0 {
panic(offset)
if offset >= s {
panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
}
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
@ -343,8 +344,8 @@ encodeLoop:
if best.rep > 0 {
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
if debugAsserts && s <= nextEmit {
panic("s <= nextEmit")
if debugAsserts && s < nextEmit {
panic("s < nextEmit")
}
addLiterals(&seq, best.s)
2
vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
generated
vendored
@ -26,7 +26,7 @@ import (
"errors"
"fmt"

"github.com/theupdateframework/go-tuf/encrypted"
"github.com/secure-systems-lab/go-securesystemslib/encrypted"
)

const (
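go-securesystemslib's encrypted package is the maintained successor of the go-tuf package deleted at the end of this commit. Assuming, as the deleted file's exported surface (Encrypt, Decrypt, Marshal, Unmarshal) suggests, that the fork keeps the same API, the swap is import-only. A round-trip sketch under that assumption:

package main

import (
    "fmt"

    "github.com/secure-systems-lab/go-securesystemslib/encrypted"
)

func main() {
    ciphertext, err := encrypted.Encrypt([]byte("private key bytes"), []byte("passphrase"))
    if err != nil {
        panic(err)
    }
    plaintext, err := encrypted.Decrypt(ciphertext, []byte("passphrase"))
    if err != nil {
        panic(err)
    }
    fmt.Println(string(plaintext))
}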
2
vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
generated
vendored
@ -27,7 +27,7 @@ import (
const CosignSignatureType = "cosign container image signature"

// SimpleContainerImage describes the structure of a basic container image signature payload, as defined at:
// https://github.com/containers/image/blob/master/docs/containers-signature.5.md#json-data-format
// https://github.com/containers/image/blob/main/docs/containers-signature.5.md#json-data-format
type SimpleContainerImage struct {
Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature
Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image
126
vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
generated
vendored
@ -8,6 +8,7 @@
package sif

import (
"encoding"
"encoding/binary"
"errors"
"fmt"
@ -321,51 +322,6 @@ func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) {
return f, nil
}

func zeroData(fimg *FileImage, descr *rawDescriptor) error {
// first, move to data object offset
if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil {
return err
}

var zero [4096]byte
n := descr.Size
upbound := int64(4096)
for {
if n < 4096 {
upbound = n
}

if _, err := fimg.rw.Write(zero[:upbound]); err != nil {
return err
}
n -= 4096
if n <= 0 {
break
}
}

return nil
}

func resetDescriptor(fimg *FileImage, index int) error {
// If we remove the primary partition, set the global header Arch field to HdrArchUnknown
// to indicate that the SIF file doesn't include a primary partition and no dependency
// on any architecture exists.
if fimg.rds[index].isPartitionOfType(PartPrimSys) {
fimg.h.Arch = hdrArchUnknown
}

offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0]))

// first, move to descriptor offset
if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil {
return err
}

var emptyDesc rawDescriptor
return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc)
}

// addOpts accumulates object add options.
type addOpts struct {
t time.Time
@ -447,6 +403,26 @@ func (f *FileImage) isLast(d *rawDescriptor) bool {
return isLast
}

// zeroReader is an io.Reader that returns a stream of zero-bytes.
type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
for i := range b {
b[i] = 0
}
return len(b), nil
}

// zero overwrites the data object described by d with a stream of zero bytes.
func (f *FileImage) zero(d *rawDescriptor) error {
if _, err := f.rw.Seek(d.Offset, io.SeekStart); err != nil {
return err
}

_, err := io.CopyN(f.rw, zeroReader{}, d.Size)
return err
}
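zeroReader plus io.CopyN replaces the hand-rolled 4096-byte loop deleted above, letting the standard library handle the chunking. The same technique in a standalone, runnable form:

package main

import (
    "bytes"
    "fmt"
    "io"
)

type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
    for i := range b {
        b[i] = 0
    }
    return len(b), nil
}

func main() {
    var buf bytes.Buffer
    // Copy exactly 16 zero bytes (d.Size in the real code).
    if _, err := io.CopyN(&buf, zeroReader{}, 16); err != nil {
        panic(err)
    }
    fmt.Println(buf.Len(), bytes.Equal(buf.Bytes(), make([]byte, 16))) // 16 true
}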
// truncateAt truncates f at the start of the padded data object described by d.
func (f *FileImage) truncateAt(d *rawDescriptor) error {
start := d.Offset + d.Size - d.SizeWithPadding
@ -530,7 +506,7 @@ func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
}

if do.zero {
if err := zeroData(f, d); err != nil {
if err := f.zero(d); err != nil {
return fmt.Errorf("%w", err)
}
}
@ -546,15 +522,17 @@ func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
f.h.DescriptorsFree++
f.h.ModifiedAt = do.t.Unix()

index := 0
for i, od := range f.rds {
if od.ID == id {
index = i
break
}
// If we remove the primary partition, set the global header Arch field to HdrArchUnknown
// to indicate that the SIF file doesn't include a primary partition and no dependency
// on any architecture exists.
if d.isPartitionOfType(PartPrimSys) {
f.h.Arch = hdrArchUnknown
}

if err := resetDescriptor(f, index); err != nil {
// Reset rawDescriptor with an empty struct
*d = rawDescriptor{}

if err := f.writeDescriptors(); err != nil {
return fmt.Errorf("%w", err)
}

@ -676,3 +654,45 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {

return nil
}

// SetMetadata sets the metadata of the data object with id to md, according to opts.
//
// By default, the image/object modification times are set to the current time for
// non-deterministic images, and unset otherwise. To override this, consider using
// OptSetDeterministic or OptSetWithTime.
func (f *FileImage) SetMetadata(id uint32, md encoding.BinaryMarshaler, opts ...SetOpt) error {
so := setOpts{}

if !f.isDeterministic() {
so.t = time.Now()
}

for _, opt := range opts {
if err := opt(&so); err != nil {
return fmt.Errorf("%w", err)
}
}

rd, err := f.getDescriptor(WithID(id))
if err != nil {
return fmt.Errorf("%w", err)
}

if err := rd.setExtra(md); err != nil {
return fmt.Errorf("%w", err)
}

rd.ModifiedAt = so.t.Unix()

if err := f.writeDescriptors(); err != nil {
return fmt.Errorf("%w", err)
}

f.h.ModifiedAt = so.t.Unix()

if err := f.writeHeader(); err != nil {
return fmt.Errorf("%w", err)
}

return nil
}
27
vendor/github.com/theupdateframework/go-tuf/LICENSE
generated
vendored
@ -1,27 +0,0 @@
Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Prime Directive, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
226
vendor/github.com/theupdateframework/go-tuf/encrypted/encrypted.go
generated
vendored
@ -1,226 +0,0 @@
// Package encrypted provides a simple, secure system for encrypting data
// symmetrically with a passphrase.
//
// It uses scrypt to derive a key from the passphrase and the NaCl secret box
// cipher for authenticated encryption.
package encrypted

import (
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"

"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
)

const saltSize = 32

const (
boxKeySize = 32
boxNonceSize = 24
)

const (
// N parameter was chosen to be ~100ms of work using the default implementation
// on the 2.3GHz Core i7 Haswell processor in a late-2013 Apple Retina Macbook
// Pro (it takes ~113ms).
scryptN = 32768
scryptR = 8
scryptP = 1
)

const (
nameScrypt = "scrypt"
nameSecretBox = "nacl/secretbox"
)

type data struct {
KDF scryptKDF `json:"kdf"`
Cipher secretBoxCipher `json:"cipher"`
Ciphertext []byte `json:"ciphertext"`
}

type scryptParams struct {
N int `json:"N"`
R int `json:"r"`
P int `json:"p"`
}

func newScryptKDF() (scryptKDF, error) {
salt := make([]byte, saltSize)
if err := fillRandom(salt); err != nil {
return scryptKDF{}, err
}
return scryptKDF{
Name: nameScrypt,
Params: scryptParams{
N: scryptN,
R: scryptR,
P: scryptP,
},
Salt: salt,
}, nil
}

type scryptKDF struct {
Name string `json:"name"`
Params scryptParams `json:"params"`
Salt []byte `json:"salt"`
}

func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) {
return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize)
}

// CheckParams checks that the encoded KDF parameters are what we expect them to
// be. If we do not do this, an attacker could cause a DoS by tampering with
// them.
func (s *scryptKDF) CheckParams() error {
if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP {
return errors.New("encrypted: unexpected kdf parameters")
}
return nil
}

func newSecretBoxCipher() (secretBoxCipher, error) {
nonce := make([]byte, boxNonceSize)
if err := fillRandom(nonce); err != nil {
return secretBoxCipher{}, err
}
return secretBoxCipher{
Name: nameSecretBox,
Nonce: nonce,
}, nil
}

type secretBoxCipher struct {
Name string `json:"name"`
Nonce []byte `json:"nonce"`

encrypted bool
}

func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte {
var keyBytes [boxKeySize]byte
var nonceBytes [boxNonceSize]byte

if len(key) != len(keyBytes) {
panic("incorrect key size")
}
if len(s.Nonce) != len(nonceBytes) {
panic("incorrect nonce size")
}

copy(keyBytes[:], key)
copy(nonceBytes[:], s.Nonce)

// ensure that we don't re-use nonces
if s.encrypted {
panic("Encrypt must only be called once for each cipher instance")
}
s.encrypted = true

return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes)
}

func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) {
var keyBytes [boxKeySize]byte
var nonceBytes [boxNonceSize]byte

if len(key) != len(keyBytes) {
panic("incorrect key size")
}
if len(s.Nonce) != len(nonceBytes) {
// return an error instead of panicking since the nonce is user input
return nil, errors.New("encrypted: incorrect nonce size")
}

copy(keyBytes[:], key)
copy(nonceBytes[:], s.Nonce)

res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes)
if !ok {
return nil, errors.New("encrypted: decryption failed")
}
return res, nil
}

// Encrypt takes a passphrase and plaintext, and returns a JSON object
// containing ciphertext and the details necessary to decrypt it.
func Encrypt(plaintext, passphrase []byte) ([]byte, error) {
k, err := newScryptKDF()
if err != nil {
return nil, err
}
key, err := k.Key(passphrase)
if err != nil {
return nil, err
}

c, err := newSecretBoxCipher()
if err != nil {
return nil, err
}

data := &data{
KDF: k,
Cipher: c,
}
data.Ciphertext = c.Encrypt(plaintext, key)

return json.Marshal(data)
}

// Marshal encrypts the JSON encoding of v using passphrase.
func Marshal(v interface{}, passphrase []byte) ([]byte, error) {
data, err := json.MarshalIndent(v, "", "\t")
if err != nil {
return nil, err
}
return Encrypt(data, passphrase)
}

// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and
// tries to decrypt it using passphrase. If successful, it returns the
// plaintext.
func Decrypt(ciphertext, passphrase []byte) ([]byte, error) {
data := &data{}
if err := json.Unmarshal(ciphertext, data); err != nil {
return nil, err
}

if data.KDF.Name != nameScrypt {
return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name)
}
if data.Cipher.Name != nameSecretBox {
return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name)
}
if err := data.KDF.CheckParams(); err != nil {
return nil, err
}

key, err := data.KDF.Key(passphrase)
if err != nil {
return nil, err
}

return data.Cipher.Decrypt(data.Ciphertext, key)
}

// Unmarshal decrypts the data using passphrase and unmarshals the resulting
// plaintext into the value pointed to by v.
func Unmarshal(data []byte, v interface{}, passphrase []byte) error {
decrypted, err := Decrypt(data, passphrase)
if err != nil {
return err
}
return json.Unmarshal(decrypted, v)
}

func fillRandom(b []byte) error {
_, err := io.ReadFull(rand.Reader, b)
return err
}