vendor: update containers/common
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>

vendor/github.com/containers/storage/pkg/chunked/cache_linux.go (generated, vendored): 2 changed lines

@@ -297,7 +297,7 @@ func (c *layersCache) load() error {
 		// the cache file is either not present or broken. Try to generate it from the TOC.
 		l, err = c.createCacheFileFromTOC(r.ID)
-		if err != nil {
+		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
 			logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err)
 		}
 		if l != nil {
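
The fix above is the standard Go sentinel-error pattern: the warning is now logged only when the failure is not the expected "layer unknown" case. A minimal self-contained sketch of the pattern, with a hypothetical sentinel standing in for storage.ErrLayerUnknown:

package main

import (
	"errors"
	"fmt"
)

// errLayerUnknown stands in for storage.ErrLayerUnknown (hypothetical here).
var errLayerUnknown = errors.New("layer unknown")

func createCache(id string) error {
	// Simulate a lookup failure wrapped with context.
	return fmt.Errorf("layer %q: %w", id, errLayerUnknown)
}

func main() {
	err := createCache("abc123")
	// errors.Is walks the wrap chain, so the expected sentinel is
	// recognized and skipped instead of producing a spurious warning.
	if err != nil && !errors.Is(err, errLayerUnknown) {
		fmt.Printf("Error creating cache file: %v\n", err)
	}
}
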

vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored): 6 changed lines

@@ -2,6 +2,7 @@ package chunked

 import (
 	archivetar "archive/tar"
+	"bytes"
 	"errors"
 	"fmt"
 	"io"
@@ -14,6 +15,8 @@ import (
 	"github.com/klauspost/pgzip"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
 	expMaps "golang.org/x/exp/maps"
 )

@@ -256,7 +259,8 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
 		}
 	}

-	if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error {
+	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
+	if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
 		e, ok := pendingFiles[hdr.Name]
 		if !ok {
 			return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)
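
The local iterateTarSplit helper (deleted at the bottom of this commit) is replaced here by asm.IterateHeaders from the vendored tar-split module. A self-contained sketch of the new call shape, assuming a tar-split version that provides IterateHeaders; the one-file archive is invented for illustration:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/vbatts/tar-split/archive/tar"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// Build a one-file tar archive in memory.
	var tarBuf bytes.Buffer
	tw := tar.NewWriter(&tarBuf)
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o644, Size: 5}); err != nil {
		panic(err)
	}
	tw.Write([]byte("hello"))
	tw.Close()

	// Record a tar-split while draining the stream, as the storage code does at pull time.
	var splitBuf bytes.Buffer
	r, err := asm.NewInputTarStream(&tarBuf, storage.NewJSONPacker(&splitBuf), nil)
	if err != nil {
		panic(err)
	}
	io.Copy(io.Discard, r)

	// Replay only the headers from the recorded tar-split metadata.
	unpacker := storage.NewJSONUnpacker(bytes.NewReader(splitBuf.Bytes()))
	err = asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
		fmt.Printf("entry: %s (%d bytes)\n", hdr.Name, hdr.Size)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
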

vendor/github.com/containers/storage/pkg/chunked/storage.go (generated, vendored): 19 changed lines

@@ -23,3 +23,22 @@ type ErrBadRequest struct { //nolint: errname
 func (e ErrBadRequest) Error() string {
 	return "bad request"
 }
+
+// ErrFallbackToOrdinaryLayerDownload is a custom error type that
+// suggests to the caller that a fallback mechanism can be used
+// instead of a hard failure.
+type ErrFallbackToOrdinaryLayerDownload struct {
+	Err error
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
+	return c.Err.Error()
+}
+
+func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
+	return c.Err
+}
+
+func newErrFallbackToOrdinaryLayerDownload(err error) error {
+	return ErrFallbackToOrdinaryLayerDownload{Err: err}
+}
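
Because the new type carries the underlying cause and implements Unwrap, callers can detect it with errors.As and downgrade to an ordinary full-layer pull instead of failing hard. A runnable sketch of the mechanics; the type is reproduced from the diff so the example stands alone, and getDiffer is a stand-in for the real GetDiffer:

package main

import (
	"errors"
	"fmt"
)

// Reproduced from the vendored package so this sketch runs standalone.
type ErrFallbackToOrdinaryLayerDownload struct{ Err error }

func (c ErrFallbackToOrdinaryLayerDownload) Error() string { return c.Err.Error() }
func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error { return c.Err }

func getDiffer() error {
	// Simulates GetDiffer signalling a soft failure.
	return ErrFallbackToOrdinaryLayerDownload{Err: errors.New("partial images are disabled")}
}

func main() {
	err := getDiffer()
	var fallback ErrFallbackToOrdinaryLayerDownload
	if errors.As(err, &fallback) {
		// Not fatal: the caller can retry with a plain, full-layer download.
		fmt.Println("falling back to ordinary download:", fallback.Err)
		return
	}
	fmt.Println("hard failure:", err)
}
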

vendor/github.com/containers/storage/pkg/chunked/storage_linux.go (generated, vendored): 69 changed lines

@@ -143,11 +143,13 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
 }

 // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
+// If it returns an error that implements IsErrFallbackToOrdinaryLayerDownload, the caller can
+// retry the operation with a different method.
 func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
 	pullOptions := store.PullOptions()

 	if !parseBooleanPullOption(pullOptions, "enable_partial_images", true) {
-		return nil, errors.New("enable_partial_images not configured")
+		return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
 	}

 	zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
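
Both enable_partial_images above and convert_images below are read from the store's pull options, typically populated from storage.conf. The helper's body is not shown in this diff; a plausible sketch of the lookup-with-default pattern it implements (the body here is an assumption, only the signature is taken from the calls in this diff):

package main

import (
	"fmt"
	"strings"
)

// Assumed shape of parseBooleanPullOption: a string-map lookup that
// falls back to a default when the key is unset.
func parseBooleanPullOption(pullOptions map[string]string, name string, def bool) bool {
	if value, ok := pullOptions[name]; ok {
		return strings.ToLower(value) == "true"
	}
	return def
}

func main() {
	opts := map[string]string{"enable_partial_images": "true"}
	fmt.Println(parseBooleanPullOption(opts, "enable_partial_images", true)) // true
	fmt.Println(parseBooleanPullOption(opts, "convert_images", false))       // false (default)
}
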
@@ -157,29 +159,54 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
 		return nil, errors.New("both zstd:chunked and eStargz TOC found")
 	}

-	if hasZstdChunkedTOC {
-		zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
-		if err != nil {
-			return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
-		}
-		return makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
-	}
-	if hasEstargzTOC {
-		estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
-		if err != nil {
-			return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
-		}
-		return makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
+	convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
+
+	if !hasZstdChunkedTOC && !hasEstargzTOC && !convertImages {
+		return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("no TOC found and convert_images is not configured"))
 	}

-	return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
+	var err error
+	var differ graphdriver.Differ
+	// At this point one of hasZstdChunkedTOC, hasEstargzTOC or convertImages is true.
+	if hasZstdChunkedTOC {
+		zstdChunkedTOCDigest, err2 := digest.Parse(zstdChunkedTOCDigestString)
+		if err2 != nil {
+			return nil, err2
+		}
+		differ, err = makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
+		if err == nil {
+			logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
+			return differ, err
+		}
+	} else if hasEstargzTOC {
+		estargzTOCDigest, err2 := digest.Parse(estargzTOCDigestString)
+		if err2 != nil {
+			return nil, err
+		}
+		differ, err = makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
+		if err == nil {
+			logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
+			return differ, err
+		}
+	}
+	// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
+	if convertImages {
+		logrus.Debugf("Created differ to convert blob %q", blobDigest)
+		return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
+	}
+
+	logrus.Debugf("Could not create differ for blob %q: %v", blobDigest, err)
+
+	// If the error is a bad request to the server, then signal to the caller that it can try a different method. This can be done
+	// only when convert_images is disabled.
+	var badRequestErr ErrBadRequest
+	if errors.As(err, &badRequestErr) {
+		err = newErrFallbackToOrdinaryLayerDownload(err)
+	}
+	return nil, err
 }

 func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
-	if !parseBooleanPullOption(pullOptions, "convert_images", false) {
-		return nil, errors.New("convert_images not configured")
-	}
-
 	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
@@ -947,11 +974,9 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd i
 		}

 		if _, ok := err.(ErrBadRequest); ok {
-			// If the server cannot handle at least 64 chunks in a single request, just give up.
-			if len(chunksToRequest) < 64 {
+			if len(chunksToRequest) == 1 {
 				return err
 			}
-
 			// Merge more chunks to request
 			missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
 			calculateChunksToRequest()
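
The last hunk tightens the retry loop: previously the code gave up as soon as fewer than 64 chunks were being requested; now it keeps halving the request until even a single chunk fails. A toy sketch of that back-off shape (names and the server's range limit are invented, not the package's API):

package main

import (
	"errors"
	"fmt"
)

var errBadRequest = errors.New("bad request") // stands in for ErrBadRequest

// fetch stands in for the multi-range registry request; this fake
// server rejects any request with more than 16 ranges.
func fetch(numChunks int) error {
	if numChunks > 16 {
		return errBadRequest
	}
	return nil
}

func main() {
	numChunks := 100 // 100 missing ranges to request
	for {
		err := fetch(numChunks)
		if err == nil {
			fmt.Printf("succeeded with %d merged ranges\n", numChunks)
			return
		}
		if !errors.Is(err, errBadRequest) || numChunks == 1 {
			fmt.Println("giving up:", err)
			return
		}
		// Merge adjacent ranges so the next request asks for half as many,
		// mirroring mergeMissingChunks(missingParts, len(chunksToRequest)/2).
		numChunks /= 2
	}
}
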

vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go (generated, vendored): 1 changed line

@@ -1,5 +1,4 @@
 //go:build !linux
-// +build !linux

 package chunked

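
This drops the legacy // +build line: since Go 1.17 the //go:build form alone fully determines the constraint, and gofmt keeps the two in sync only for older toolchains. The file header after the change is simply:

//go:build !linux

package chunked
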

vendor/github.com/containers/storage/pkg/chunked/tar_split_linux.go (generated, vendored): 68 changed lines (file deleted)

@@ -1,68 +0,0 @@
-package chunked
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-
-	"github.com/vbatts/tar-split/archive/tar"
-	"github.com/vbatts/tar-split/tar/storage"
-)
-
-// iterateTarSplit calls handler for each tar header in tarSplit
-func iterateTarSplit(tarSplit []byte, handler func(hdr *tar.Header) error) error {
-	// This, strictly speaking, hard-codes undocumented assumptions about how github.com/vbatts/tar-split/tar/asm.NewInputTarStream
-	// forms the tar-split contents. Pragmatically, NewInputTarStream should always produce storage.FileType entries at least
-	// for every non-empty file, which constraints it basically to the output we expect.
-	//
-	// Specifically, we assume:
-	// - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions
-	// - (There is a FileType entry for every tar header, we ignore it)
-	// - Trailing padding of a file, if any, is included in the next SegmentType entry
-	// - At the end, there may be SegmentType entries just for the terminating zero blocks.
-
-	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
-	for {
-		tsEntry, err := unpacker.Next()
-		if err != nil {
-			if err == io.EOF {
-				return nil
-			}
-			return fmt.Errorf("reading tar-split entries: %w", err)
-		}
-		switch tsEntry.Type {
-		case storage.SegmentType:
-			payload := tsEntry.Payload
-			// This is horrible, but we don’t know how much padding to skip. (It can be computed from the previous hdr.Size for non-sparse
-			// files, but for sparse files that is set to the logical size.)
-			//
-			// First, assume that all padding is zero bytes.
-			// A tar header starts with a file name, which might in principle be empty, but
-			// at least https://github.com/opencontainers/image-spec/blob/main/layer.md#populate-initial-filesystem suggests that
-			// the tar name should never be empty (it should be ".", or maybe "./").
-			//
-			// This will cause us to skip all zero bytes in the trailing blocks, but that’s fine.
-			i := 0
-			for i < len(payload) && payload[i] == 0 {
-				i++
-			}
-			payload = payload[i:]
-			tr := tar.NewReader(bytes.NewReader(payload))
-			hdr, err := tr.Next()
-			if err != nil {
-				if err == io.EOF { // Probably the last entry, but let’s let the unpacker drive that.
-					break
-				}
-				return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err)
-			}
-			if err := handler(hdr); err != nil {
-				return err
-			}
-
-		case storage.FileType:
-			// Nothing
-		default:
-			return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type)
-		}
-	}
-}