update c/image and c/storage to latest

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
Paul Holzinger
2023-06-26 16:11:36 +02:00
parent 4445a5040a
commit b80fd54a56
31 changed files with 395 additions and 153 deletions

View File

@@ -256,9 +256,11 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
}
sigs = append(sigs, newSigs...)
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
if len(sigs) > 0 {
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
}
return manifestBytes, retManifestType, retManifestDigest, nil

View File

@@ -17,8 +17,8 @@ import (
"strings"
"time"
"dario.cat/mergo"
"github.com/containers/storage/pkg/homedir"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v3"

View File

@@ -628,18 +628,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if err != nil {
return "", err
}
switch len(options.Options) {
case 0:
case 1:
if options.Options[0] == "ro" {
for _, opt := range options.Options {
if opt == "ro" {
// ignore "ro" option
break
continue
}
fallthrough
default:
return "", fmt.Errorf("btrfs driver does not support mount options")
}
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}

View File

@@ -187,6 +187,8 @@ type DriverWithDifferOutput struct {
UncompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
TarSplit []byte
TOCDigest digest.Digest
}
// Differ defines the interface for using a custom differ.

View File

@@ -60,6 +60,8 @@ const (
FsMagicCephFs = FsMagic(0x00C36400)
// FsMagicCIFS filesystem id for CIFS
FsMagicCIFS = FsMagic(0xFF534D42)
// FsMagicEROFS filesystem id for EROFS
FsMagicEROFS = FsMagic(0xE0F5E1E2)
// FsMagicFHGFS filesystem id for FHGFS
FsMagicFHGFSFs = FsMagic(0x19830326)
// FsMagicIBRIX filesystem id for IBRIX
@@ -106,6 +108,7 @@ var (
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicEcryptfs: "ecryptfs",
FsMagicEROFS: "erofs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",
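
For orientation, an FsMagic value comes from statfs(2) on the driver's home directory and is then looked up in the table above. A minimal standalone sketch of that detection (the helper name is invented; golang.org/x/sys/unix is assumed):

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// detectFsMagic returns the raw filesystem magic for path, which callers
// can compare against constants such as FsMagicEROFS (0xE0F5E1E2).
func detectFsMagic(path string) (int64, error) {
    var buf unix.Statfs_t
    if err := unix.Statfs(path, &buf); err != nil {
        return 0, err
    }
    return int64(buf.Type), nil
}

func main() {
    magic, err := detectFsMagic("/")
    if err != nil {
        panic(err)
    }
    fmt.Printf("fs magic: 0x%X\n", magic)
}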

View File

@@ -55,6 +55,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
options := MountOpts{
MountLabel: mountLabel,
Options: []string{"ro"},
}
layerFs, err := driver.Get(id, options)
if err != nil {

View File

@@ -1952,6 +1952,9 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri
if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) {
return err
}
diffOutput.UncompressedDigest = diffOutput.TOCDigest
return os.Rename(stagingDirectory, diff)
}

View File

@@ -226,15 +226,12 @@ func (d *Driver) Remove(id string) error {
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
switch len(options.Options) {
case 0:
case 1:
if options.Options[0] == "ro" {
for _, opt := range options.Options {
if opt == "ro" {
// ignore "ro" option
break
continue
}
fallthrough
default:
return "", fmt.Errorf("vfs driver does not support mount options")
}
if st, err := os.Stat(dir); err != nil {

View File

@@ -2392,8 +2392,26 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
layer.UncompressedDigest = diffOutput.UncompressedDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
if err = r.saveFor(layer); err != nil {
return err
if len(diffOutput.TarSplit) != 0 {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
if err != nil {
compressor = pgzip.NewWriter(&tsdata)
}
if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
}
if _, err := compressor.Write(diffOutput.TarSplit); err != nil {
compressor.Close()
return err
}
compressor.Close()
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
return err
}
if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
return err
}
}
for k, v := range diffOutput.BigData {
if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {
@@ -2403,6 +2421,9 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
}
if err = r.saveFor(layer); err != nil {
return err
}
return err
}
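
A sketch of the corresponding read path (assumed, not shown in this hunk): since the tar-split data is stored pgzip-compressed at r.tspath(layer.ID), reading it back is the inverse of the block above. readTarSplit is a hypothetical helper:

import (
    "io"
    "os"

    "github.com/klauspost/pgzip"
)

// readTarSplit opens a layer's tar-split file and undoes the pgzip
// compression applied when it was written.
func readTarSplit(tspath string) ([]byte, error) {
    f, err := os.Open(tspath)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    gz, err := pgzip.NewReader(f)
    if err != nil {
        return nil, err
    }
    defer gz.Close()
    return io.ReadAll(gz)
}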

View File

@@ -516,14 +516,14 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if field != "entries" {
if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch field {
case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
switch strings.ToLower(field) {
case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime":
count += len(iter.ReadStringAsSlice())
case "xattrs":
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
@@ -548,33 +548,33 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if field == "version" {
if strings.ToLower(field) == "version" {
toc.Version = iter.ReadInt()
continue
}
if field != "entries" {
if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
var m internal.FileMetadata
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch field {
switch strings.ToLower(field) {
case "type":
m.Type = getString(iter.ReadStringAsSlice())
case "name":
m.Name = getString(iter.ReadStringAsSlice())
case "linkName":
case "linkname":
m.Linkname = getString(iter.ReadStringAsSlice())
case "mode":
m.Mode = iter.ReadInt64()
case "size":
m.Size = iter.ReadInt64()
case "UID":
case "uid":
m.UID = iter.ReadInt()
case "GID":
case "gid":
m.GID = iter.ReadInt()
case "ModTime":
case "modtime":
time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
if err != nil {
return nil, err
@@ -592,23 +592,23 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
return nil, err
}
m.ChangeTime = &time
case "devMajor":
case "devmajor":
m.Devmajor = iter.ReadInt64()
case "devMinor":
case "devminor":
m.Devminor = iter.ReadInt64()
case "digest":
m.Digest = getString(iter.ReadStringAsSlice())
case "offset":
m.Offset = iter.ReadInt64()
case "endOffset":
case "endoffset":
m.EndOffset = iter.ReadInt64()
case "chunkSize":
case "chunksize":
m.ChunkSize = iter.ReadInt64()
case "chunkOffset":
case "chunkoffset":
m.ChunkOffset = iter.ReadInt64()
case "chunkDigest":
case "chunkdigest":
m.ChunkDigest = getString(iter.ReadStringAsSlice())
case "chunkType":
case "chunktype":
m.ChunkType = getString(iter.ReadStringAsSlice())
case "xattrs":
m.Xattrs = make(map[string]string)
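
To illustrate what the lowercased matching above accepts: a TOC entry like this hypothetical one (all values invented) now parses the same way whether the producer emitted keys such as "endOffset"/"linkName" or their all-lowercase forms:

const sampleTOCEntry = `{
    "type": "reg",
    "name": "usr/bin/example",
    "mode": 493,
    "size": 1024,
    "uid": 0,
    "gid": 0,
    "modtime": "2023-06-26T14:00:00Z",
    "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
    "offset": 4096,
    "endOffset": 5120
}`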

View File

@@ -150,22 +150,32 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.github.containers.zstd-chunked.* annotations when specified.
func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) {
footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
return nil, 0, errors.New("blob too small")
return nil, nil, 0, errors.New("blob too small")
}
manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
return nil, nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}
var offset, length, lengthUncompressed, manifestType uint64
var offsetTarSplit, lengthTarSplit, lengthUncompressedTarSplit uint64
tarSplitChecksumAnnotation := ""
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
return nil, 0, err
return nil, nil, 0, err
}
if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &offsetTarSplit, &lengthTarSplit, &lengthUncompressedTarSplit); err != nil {
return nil, nil, 0, err
}
tarSplitChecksumAnnotation = annotations[internal.TarSplitChecksumKey]
}
} else {
chunk := ImageSourceChunk{
@@ -174,39 +184,39 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
return nil, 0, err
return nil, nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
return nil, nil, 0, err
}
footer := make([]byte, footerSize)
if _, err := io.ReadFull(reader, footer); err != nil {
return nil, 0, err
return nil, nil, 0, err
}
offset = binary.LittleEndian.Uint64(footer[0:8])
length = binary.LittleEndian.Uint64(footer[8:16])
lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
manifestType = binary.LittleEndian.Uint64(footer[24:32])
if !isZstdChunkedFrameMagic(footer[32:40]) {
return nil, 0, errors.New("invalid magic number")
if !isZstdChunkedFrameMagic(footer[48:56]) {
return nil, nil, 0, errors.New("invalid magic number")
}
}
if manifestType != internal.ManifestTypeCRFS {
return nil, 0, errors.New("invalid manifest type")
return nil, nil, 0, errors.New("invalid manifest type")
}
// set a reasonable limit
if length > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
return nil, nil, 0, errors.New("manifest too big")
}
if lengthUncompressed > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
return nil, nil, 0, errors.New("manifest too big")
}
chunk := ImageSourceChunk{
@@ -214,47 +224,86 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
Length: length,
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
chunks := []ImageSourceChunk{chunk}
if offsetTarSplit > 0 {
chunkTarSplit := ImageSourceChunk{
Offset: offsetTarSplit,
Length: lengthTarSplit,
}
chunks = append(chunks, chunkTarSplit)
}
parts, errs, err := blobStream.GetBlobAt(chunks)
if err != nil {
return nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
return nil, nil, 0, err
}
manifest := make([]byte, length)
if _, err := io.ReadFull(reader, manifest); err != nil {
return nil, 0, err
readBlob := func(len uint64) ([]byte, error) {
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, err
}
blob := make([]byte, len)
if _, err := io.ReadFull(reader, blob); err != nil {
reader.Close()
return nil, err
}
if err := reader.Close(); err != nil {
return nil, err
}
return blob, nil
}
manifestDigester := digest.Canonical.Digester()
manifestChecksum := manifestDigester.Hash()
if _, err := manifestChecksum.Write(manifest); err != nil {
return nil, 0, err
}
d, err := digest.Parse(manifestChecksumAnnotation)
manifest, err := readBlob(length)
if err != nil {
return nil, 0, err
return nil, nil, 0, err
}
if manifestDigester.Digest() != d {
return nil, 0, errors.New("invalid manifest checksum")
decodedBlob, err := decodeAndValidateBlob(manifest, lengthUncompressed, manifestChecksumAnnotation)
if err != nil {
return nil, nil, 0, err
}
decodedTarSplit := []byte{}
if offsetTarSplit > 0 {
tarSplit, err := readBlob(lengthTarSplit)
if err != nil {
return nil, nil, 0, err
}
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, lengthUncompressedTarSplit, tarSplitChecksumAnnotation)
if err != nil {
return nil, nil, 0, err
}
}
return decodedBlob, decodedTarSplit, int64(offset), err
}
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedUncompressedChecksum)
if err != nil {
return nil, err
}
blobDigester := d.Algorithm().Digester()
blobChecksum := blobDigester.Hash()
if _, err := blobChecksum.Write(blob); err != nil {
return nil, err
}
if blobDigester.Digest() != d {
return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
}
decoder, err := zstd.NewReader(nil) //nolint:contextcheck
if err != nil {
return nil, 0, err
return nil, err
}
defer decoder.Close()
b := make([]byte, 0, lengthUncompressed)
if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
return decoded, int64(offset), nil
}
return manifest, int64(offset), nil
return decoder.DecodeAll(blob, b)
}
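
A condensed sketch of the footer handling above, assuming it sits alongside this code in the same package (internal.FooterSizeSupported is 56 after this change; the field offsets mirror the reads in this hunk):

// parseZstdChunkedFooter is illustrative only. Bytes 0..32 hold four
// little-endian uint64s, and the frame magic is verified at bytes 48..56
// of the 56-byte footer.
func parseZstdChunkedFooter(footer []byte) (offset, length, lengthUncompressed, manifestType uint64, err error) {
    if len(footer) != internal.FooterSizeSupported {
        return 0, 0, 0, 0, errors.New("unexpected footer size")
    }
    offset = binary.LittleEndian.Uint64(footer[0:8])
    length = binary.LittleEndian.Uint64(footer[8:16])
    lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
    manifestType = binary.LittleEndian.Uint64(footer[24:32])
    if !isZstdChunkedFrameMagic(footer[48:56]) {
        return 0, 0, 0, 0, errors.New("invalid magic number")
    }
    return offset, length, lengthUncompressed, manifestType, nil
}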

View File

@@ -6,13 +6,17 @@ package compressor
import (
"bufio"
"bytes"
"encoding/base64"
"io"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
const (
@@ -198,11 +202,55 @@ type chunk struct {
ChunkType string
}
type tarSplitData struct {
compressed *bytes.Buffer
digester digest.Digester
uncompressedCounter *ioutils.WriteCounter
zstd *zstd.Encoder
packer storage.Packer
}
func newTarSplitData(level int) (*tarSplitData, error) {
compressed := bytes.NewBuffer(nil)
digester := digest.Canonical.Digester()
zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
if err != nil {
return nil, err
}
uncompressedCounter := ioutils.NewWriteCounter(zstdWriter)
metaPacker := storage.NewJSONPacker(uncompressedCounter)
return &tarSplitData{
compressed: compressed,
digester: digester,
uncompressedCounter: uncompressedCounter,
zstd: zstdWriter,
packer: metaPacker,
}, nil
}
func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
// total written so far. Used to retrieve partial offsets in the file
dest := ioutils.NewWriteCounter(destFile)
tr := tar.NewReader(reader)
tarSplitData, err := newTarSplitData(level)
if err != nil {
return err
}
defer func() {
if tarSplitData.zstd != nil {
tarSplitData.zstd.Close()
}
}()
its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
if err != nil {
return err
}
tr := tar.NewReader(its)
tr.RawAccounting = true
buf := make([]byte, 4096)
@@ -214,7 +262,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
defer func() {
if zstdWriter != nil {
zstdWriter.Close()
zstdWriter.Flush()
}
}()
@@ -224,9 +271,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
if err := zstdWriter.Close(); err != nil {
return 0, err
}
if err := zstdWriter.Flush(); err != nil {
return 0, err
}
offset = dest.Count
zstdWriter.Reset(dest)
}
@@ -373,9 +417,11 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
rawBytes := tr.RawBytes()
if _, err := zstdWriter.Write(rawBytes); err != nil {
zstdWriter.Close()
return err
}
if err := zstdWriter.Flush(); err != nil {
zstdWriter.Close()
return err
}
if err := zstdWriter.Close(); err != nil {
@@ -383,7 +429,21 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
}
zstdWriter = nil
return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
if err := tarSplitData.zstd.Flush(); err != nil {
return err
}
if err := tarSplitData.zstd.Close(); err != nil {
return err
}
tarSplitData.zstd = nil
ts := internal.TarSplitData{
Data: tarSplitData.compressed.Bytes(),
Digest: tarSplitData.digester.Digest(),
UncompressedSize: tarSplitData.uncompressedCounter.Count,
}
return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
}
type zstdChunkedWriter struct {
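
For context on why the tar-split stream is worth recording here: it lets a consumer rebuild the byte-identical original tar later. A hypothetical consumer using tar-split's asm and storage packages (the helper name is invented, not part of this diff):

import (
    "bytes"
    "io"

    "github.com/vbatts/tar-split/tar/asm"
    "github.com/vbatts/tar-split/tar/storage"
)

// reassembleTar rebuilds the original tar stream from uncompressed tar-split
// data plus a FileGetter that supplies the file payloads.
func reassembleTar(tarSplit []byte, files storage.FileGetter, out io.Writer) error {
    unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
    rc := asm.NewOutputTarStream(files, unpacker)
    defer rc.Close()
    _, err := io.Copy(out, rc)
    return err
}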

View File

@@ -90,6 +90,8 @@ func GetType(t byte) (string, error) {
const (
ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
ManifestTypeCRFS = 1
@@ -97,7 +99,7 @@ const (
// FooterSizeSupported is the footer size supported by this implementation.
// Newer versions of the image format might increase this value, so reject
// any version that is not supported.
FooterSizeSupported = 40
FooterSizeSupported = 56
)
var (
@@ -125,9 +127,16 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
return nil
}
func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error {
type TarSplitData struct {
Data []byte
Digest digest.Digest
UncompressedSize int64
}
func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error {
// 8 is the size of the zstd skippable frame header + the frame size
manifestOffset := offset + 8
const zstdSkippableFrameHeader = 8
manifestOffset := offset + zstdSkippableFrameHeader
toc := TOC{
Version: 1,
@@ -167,13 +176,20 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
return err
}
outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
return err
}
// Store the offset to the manifest and its size in LE order
manifestDataLE := make([]byte, FooterSizeSupported)
binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest)))
binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest)))
binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS))
copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic)
return appendZstdSkippableFrame(dest, manifestDataLE)
}
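
The tar-split position annotation written above uses the same "%d:%d:%d" layout that readZstdChunkedManifest parses back with fmt.Sscanf. A toy round-trip with invented numbers:

value := fmt.Sprintf("%d:%d:%d", uint64(9000), uint64(512), int64(2048)) // offset:length:uncompressedLength
var offset, length, uncompressed uint64
if _, err := fmt.Sscanf(value, "%d:%d:%d", &offset, &length, &uncompressed); err != nil {
    panic(err)
}
fmt.Println(offset, length, uncompressed) // 9000 512 2048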

View File

@@ -55,6 +55,7 @@ type compressedFileType int
type chunkedDiffer struct {
stream ImageSourceSeekable
manifest []byte
tarSplit []byte
layersCache *layersCache
tocOffset int64
fileType compressedFileType
@@ -64,6 +65,8 @@ type chunkedDiffer struct {
gzipReader *pgzip.Reader
zstdReader *zstd.Decoder
rawReader io.Reader
tocDigest digest.Digest
}
var xattrsToIgnore = map[string]interface{}{
@@ -135,6 +138,26 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
return dstFile, st.Size(), nil
}
// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
if tocDigest, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok {
d, err := digest.Parse(tocDigest)
if err != nil {
return nil, err
}
return &d, nil
}
if tocDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
d, err := digest.Parse(tocDigest)
if err != nil {
return nil, err
}
return &d, nil
}
return nil, nil
}
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
if _, ok := annotations[internal.ManifestChecksumKey]; ok {
@@ -147,7 +170,7 @@
}
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
manifest, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
@@ -156,13 +179,20 @@
return nil, err
}
tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
}
return &chunkedDiffer{
copyBuffer: makeCopyBuffer(),
stream: iss,
manifest: manifest,
layersCache: layersCache,
tocOffset: tocOffset,
fileType: fileTypeZstdChunked,
layersCache: layersCache,
manifest: manifest,
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
tocDigest: tocDigest,
}, nil
}
@@ -176,6 +206,11 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
return nil, err
}
tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
}
return &chunkedDiffer{
copyBuffer: makeCopyBuffer(),
stream: iss,
@@ -183,6 +218,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
layersCache: layersCache,
tocOffset: tocOffset,
fileType: fileTypeEstargz,
tocDigest: tocDigest,
}, nil
}
@@ -363,6 +399,24 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption
return nil
}
func mapToSlice(inputMap map[uint32]struct{}) []uint32 {
var out []uint32
for value := range inputMap {
out = append(out, value)
}
return out
}
func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) {
uids := make(map[uint32]struct{})
gids := make(map[uint32]struct{})
for _, entry := range entries {
uids[uint32(entry.UID)] = struct{}{}
gids[uint32(entry.GID)] = struct{}{}
}
return mapToSlice(uids), mapToSlice(gids)
}
type originFile struct {
Root string
Path string
@@ -1271,12 +1325,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
}()
bigData := map[string][]byte{
bigDataKey: c.manifest,
}
output := graphdriver.DriverWithDifferOutput{
Differ: c,
BigData: bigData,
Differ: c,
TarSplit: c.tarSplit,
BigData: map[string][]byte{
bigDataKey: c.manifest,
},
TOCDigest: c.tocDigest,
}
storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
@@ -1305,6 +1360,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
var missingParts []missingPart
output.UIDs, output.GIDs = collectIDs(toc.Entries)
mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil {
return output, err
@@ -1579,6 +1636,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
if totalChunksSize > 0 {
logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
return output, nil
}
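
As a quick illustration of the new collectIDs helper above (hypothetical snippet, same package): duplicate owners collapse because the IDs pass through a set before being flattened to slices:

entries := []internal.FileMetadata{
    {UID: 0, GID: 0},
    {UID: 1000, GID: 1000},
    {UID: 0, GID: 0},
}
uids, gids := collectIDs(entries)
fmt.Println(len(uids), len(gids)) // 2 2; the slice order is unspecified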

View File

@@ -9,9 +9,16 @@ import (
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
digest "github.com/opencontainers/go-digest"
)
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
return nil, errors.New("format not supported on this architecture")
return nil, errors.New("format not supported on this system")
}
// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
return nil, errors.New("format not supported on this system")
}
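
A hypothetical caller of the new GetTOCDigest helper (layerAnnotations is invented; on unsupported systems the stub above returns an error, while on Linux the estargz annotation is checked first, then the zstd:chunked one, and nil, nil means neither is present):

tocDigest, err := chunked.GetTOCDigest(layerAnnotations)
if err != nil {
    log.Fatalf("look up TOC digest: %v", err)
}
if tocDigest != nil {
    fmt.Println("layer TOC digest:", tocDigest.String())
}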