Update containers/common to latest main

Update the containers/common dependency to the latest main with the
needed changes in Podmansh.

Signed-off-by: phoenix <felix.niederwanger@suse.com>
This commit is contained in:
Author: phoenix
Date: 2024-05-23 10:53:20 +02:00
parent fa05adba67
commit 4fd425429b
74 changed files with 731 additions and 399 deletions

View File

@@ -823,81 +823,90 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if strings.ToLower(field) == "version" {
switch strings.ToLower(field) {
case "version":
toc.Version = iter.ReadInt()
continue
}
if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
var m internal.FileMetadata
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch strings.ToLower(field) {
case "type":
m.Type = iter.ReadString()
case "name":
m.Name = iter.ReadString()
case "linkname":
m.Linkname = iter.ReadString()
case "mode":
m.Mode = iter.ReadInt64()
case "size":
m.Size = iter.ReadInt64()
case "uid":
m.UID = iter.ReadInt()
case "gid":
m.GID = iter.ReadInt()
case "modtime":
time, err := time.Parse(time.RFC3339, iter.ReadString())
if err != nil {
return nil, err
case "entries":
for iter.ReadArray() {
var m internal.FileMetadata
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch strings.ToLower(field) {
case "type":
m.Type = iter.ReadString()
case "name":
m.Name = iter.ReadString()
case "linkname":
m.Linkname = iter.ReadString()
case "mode":
m.Mode = iter.ReadInt64()
case "size":
m.Size = iter.ReadInt64()
case "uid":
m.UID = iter.ReadInt()
case "gid":
m.GID = iter.ReadInt()
case "modtime":
time, err := time.Parse(time.RFC3339, iter.ReadString())
if err != nil {
return nil, err
}
m.ModTime = &time
case "accesstime":
time, err := time.Parse(time.RFC3339, iter.ReadString())
if err != nil {
return nil, err
}
m.AccessTime = &time
case "changetime":
time, err := time.Parse(time.RFC3339, iter.ReadString())
if err != nil {
return nil, err
}
m.ChangeTime = &time
case "devmajor":
m.Devmajor = iter.ReadInt64()
case "devminor":
m.Devminor = iter.ReadInt64()
case "digest":
m.Digest = iter.ReadString()
case "offset":
m.Offset = iter.ReadInt64()
case "endoffset":
m.EndOffset = iter.ReadInt64()
case "chunksize":
m.ChunkSize = iter.ReadInt64()
case "chunkoffset":
m.ChunkOffset = iter.ReadInt64()
case "chunkdigest":
m.ChunkDigest = iter.ReadString()
case "chunktype":
m.ChunkType = iter.ReadString()
case "xattrs":
m.Xattrs = make(map[string]string)
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
m.Xattrs[key] = iter.ReadString()
}
default:
iter.Skip()
}
m.ModTime = &time
case "accesstime":
time, err := time.Parse(time.RFC3339, iter.ReadString())
if err != nil {
return nil, err
}
m.AccessTime = &time
case "changetime":
time, err := time.Parse(time.RFC3339, iter.ReadString())
if err != nil {
return nil, err
}
m.ChangeTime = &time
case "devmajor":
m.Devmajor = iter.ReadInt64()
case "devminor":
m.Devminor = iter.ReadInt64()
case "digest":
m.Digest = iter.ReadString()
case "offset":
m.Offset = iter.ReadInt64()
case "endoffset":
m.EndOffset = iter.ReadInt64()
case "chunksize":
m.ChunkSize = iter.ReadInt64()
case "chunkoffset":
m.ChunkOffset = iter.ReadInt64()
case "chunkdigest":
m.ChunkDigest = iter.ReadString()
case "chunktype":
m.ChunkType = iter.ReadString()
case "xattrs":
m.Xattrs = make(map[string]string)
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
m.Xattrs[key] = iter.ReadString()
}
default:
iter.Skip()
}
if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
m.Digest = digestSha256Empty
}
toc.Entries = append(toc.Entries, m)
}
if m.Type == TypeReg && m.Size == 0 && m.Digest == "" {
m.Digest = digestSha256Empty
case "tarsplitdigest": // strings.ToLower("tarSplitDigest")
s := iter.ReadString()
d, err := digest.Parse(s)
if err != nil {
return nil, fmt.Errorf("Invalid tarSplitDigest %q: %w", s, err)
}
toc.Entries = append(toc.Entries, m)
toc.TarSplitDigest = d
default:
iter.Skip()
}
}

View File

@@ -133,37 +133,36 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
}
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, []byte, int64, error) {
// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
offsetMetadata := annotations[internal.ManifestInfoKey]
if offsetMetadata == "" {
return nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey)
}
var manifestChunk ImageSourceChunk
var manifestLengthUncompressed, manifestType uint64
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &manifestChunk.Offset, &manifestChunk.Length, &manifestLengthUncompressed, &manifestType); err != nil {
return nil, nil, 0, err
return nil, nil, nil, 0, err
}
// The tarSplit… values are valid if tarSplitChunk.Offset > 0
var tarSplitChunk ImageSourceChunk
var tarSplitLengthUncompressed uint64
var tarSplitChecksum string
if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil {
return nil, nil, 0, err
return nil, nil, nil, 0, err
}
tarSplitChecksum = annotations[internal.TarSplitChecksumKey]
}
if manifestType != internal.ManifestTypeCRFS {
return nil, nil, 0, errors.New("invalid manifest type")
return nil, nil, nil, 0, errors.New("invalid manifest type")
}
// set a reasonable limit
if manifestChunk.Length > (1<<20)*50 {
return nil, nil, 0, errors.New("manifest too big")
return nil, nil, nil, 0, errors.New("manifest too big")
}
if manifestLengthUncompressed > (1<<20)*50 {
return nil, nil, 0, errors.New("manifest too big")
return nil, nil, nil, 0, errors.New("manifest too big")
}
chunks := []ImageSourceChunk{manifestChunk}
@@ -172,7 +171,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
}
parts, errs, err := blobStream.GetBlobAt(chunks)
if err != nil {
return nil, nil, 0, err
return nil, nil, nil, 0, err
}
readBlob := func(len uint64) ([]byte, error) {
@@ -197,32 +196,37 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
manifest, err := readBlob(manifestChunk.Length)
if err != nil {
return nil, nil, 0, err
return nil, nil, nil, 0, err
}
decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String())
if err != nil {
return nil, nil, 0, err
return nil, nil, nil, 0, fmt.Errorf("validating and decompressing TOC: %w", err)
}
toc, err := unmarshalToc(decodedBlob)
if err != nil {
return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
}
decodedTarSplit := []byte{}
if tarSplitChunk.Offset > 0 {
tarSplit, err := readBlob(tarSplitChunk.Length)
if err != nil {
return nil, nil, 0, err
return nil, nil, nil, 0, err
}
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, tarSplitChecksum)
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String())
if err != nil {
return nil, nil, 0, err
return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err)
}
}
return decodedBlob, decodedTarSplit, int64(manifestChunk.Offset), err
return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err
}
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedCompressedChecksum)
if err != nil {
return nil, err
return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err)
}
blobDigester := d.Algorithm().Digester()

View File

@@ -18,8 +18,9 @@ import (
)
type TOC struct {
Version int `json:"version"`
Entries []FileMetadata `json:"entries"`
Version int `json:"version"`
Entries []FileMetadata `json:"entries"`
TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"`
}
type FileMetadata struct {
@@ -84,9 +85,10 @@ func GetType(t byte) (string, error) {
const (
ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written.
// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
ManifestTypeCRFS = 1
@@ -133,8 +135,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
manifestOffset := offset + zstdSkippableFrameHeader
toc := TOC{
Version: 1,
Entries: metadata,
Version: 1,
Entries: metadata,
TarSplitDigest: tarSplitData.Digest,
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
@@ -170,7 +173,6 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
return err
}
outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {

View File

@@ -79,6 +79,7 @@ type compressedFileType int
type chunkedDiffer struct {
stream ImageSourceSeekable
manifest []byte
toc *internal.TOC // The parsed contents of manifest, or nil if not yet available
tarSplit []byte
layersCache *layersCache
tocOffset int64
@@ -314,7 +315,7 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
}
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
@@ -331,6 +332,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
fileType: fileTypeZstdChunked,
layersCache: layersCache,
manifest: manifest,
toc: toc,
storeOpts: storeOpts,
stream: iss,
tarSplit: tarSplit,
@@ -1701,7 +1703,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
if tocDigest == nil {
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest")
}
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
@@ -1712,6 +1714,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
// fill the chunkedDiffer with the data we just read.
c.fileType = fileTypeZstdChunked
c.manifest = manifest
c.toc = toc
c.tarSplit = tarSplit
c.tocOffset = tocOffset
@@ -1732,9 +1735,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
}
// Generate the manifest
toc, err := unmarshalToc(c.manifest)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
toc := c.toc
if toc == nil {
toc_, err := unmarshalToc(c.manifest)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
toc = toc_
}
output := graphdriver.DriverWithDifferOutput{