Mirror of https://github.com/containers/podman.git (synced 2025-12-11 01:11:30 +08:00)
Bump c/storage v1.58.0, c/image v5.35.0, c/common v0.63.0
Bump: c/storage v1.58.0
      c/image  v5.35.0
      c/common v0.63.0

In preparation for Podman v5.5.0

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
vendor/github.com/containers/storage/pkg/chunked/bloom_filter_linux.go (generated, vendored): 2 changed lines
@@ -65,7 +65,7 @@ func (bf *bloomFilter) writeTo(writer io.Writer) error {
 	if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
 		return err
 	}
-	if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil {
+	if err := binary.Write(writer, binary.LittleEndian, bf.k); err != nil {
 		return err
 	}
 	if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {
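The cast can only be dropped because the k field is already a fixed-size integer; encoding/binary refuses to encode values without a fixed memory size. A minimal, self-contained sketch of the idea, using a stand-in struct whose k is assumed to be declared uint32 (not the vendored definition):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// Stand-in for the vendored type: assuming k is already a fixed-size
// uint32, binary.Write can encode it directly and uint32(bf.k) is a no-op.
type bloomFilter struct {
	bitArray []uint64
	k        uint32
}

func (bf *bloomFilter) writeTo(writer io.Writer) error {
	if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
		return err
	}
	// binary.Write accepts any value with a fixed memory size; a plain
	// uint32 field qualifies, so no conversion is required.
	if err := binary.Write(writer, binary.LittleEndian, bf.k); err != nil {
		return err
	}
	return binary.Write(writer, binary.LittleEndian, bf.bitArray)
}

func main() {
	var buf bytes.Buffer
	bf := &bloomFilter{bitArray: []uint64{1, 2}, k: 3}
	if err := bf.writeTo(&buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 28: 8 (length) + 4 (k) + 16 (bit array)
}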
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored): 2 changed lines
@@ -87,7 +87,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 		return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
 	}

-	size := int64(blobSize - footerSize - tocOffset)
+	size := blobSize - footerSize - tocOffset
 	// set a reasonable limit
 	if size > maxTocSize {
 		// Not errFallbackCanConvert: we would still use too much memory.
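This is the same cleanup as the uint64(tarSplitOffset), uint64(gap), and int64(chunk.EndOffset - chunk.Offset) hunks further down: the expression already has the target type (blobSize is int64 in the signature above, and footerSize and tocOffset are presumably int64 as well), so the wrapping conversion is a no-op of the kind linters such as unconvert report. A minimal sketch with illustrative names, not the vendored variables:

package main

import "fmt"

// When every operand already has the target type, the conversion
// wrapper changes nothing; the names here are illustrative only.
func tocSize(blobSize, footerSize, tocOffset int64) int64 {
	// Before: size := int64(blobSize - footerSize - tocOffset)
	// The difference of three int64 values is already int64.
	return blobSize - footerSize - tocOffset
}

func main() {
	fmt.Println(tocSize(1<<20, 64, 1<<19)) // 524224
}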
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go (generated, vendored): 2 changed lines
@@ -160,7 +160,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
 		return false, 0, io.EOF
 	}

-	for i := 0; i < len(b); i++ {
+	for i := range b {
 		holeLen, n, err := rc.reader.readByte()
 		if err != nil {
 			if err == io.EOF {
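For a slice, `for i := range b` visits exactly the indices 0 through len(b)-1, so the rewrite is behavior-preserving; the escaped() hunk in dump/dump.go below is the same family of cleanup, ranging directly over a []byte parameter instead of re-converting it. A small sketch with an illustrative buffer:

package main

import "fmt"

// Illustrative buffer fill: `for i := range b` iterates the same index
// sequence as the classic counting loop, with less room for off-by-one
// mistakes.
func fill(b []byte) {
	for i := range b { // identical indices to for i := 0; i < len(b); i++
		b[i] = byte(i)
	}
}

func main() {
	b := make([]byte, 4)
	fill(b)
	fmt.Println(b) // [0 1 2 3]
}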
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go (generated, vendored): 4 changed lines
@@ -43,7 +43,7 @@ func escaped(val []byte, escape int) string {
 	}

 	var result string
-	for _, c := range []byte(val) {
+	for _, c := range val {
 		hexEscape := false
 		var special string

@@ -214,7 +214,7 @@ func dumpNode(out io.Writer, added map[string]*minimal.FileMetadata, links map[s
 }

 // GenerateDump generates a dump of the TOC in the same format as `composefs-info dump`
-func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader, error) {
+func GenerateDump(tocI any, verityDigests map[string]string) (io.Reader, error) {
 	toc, ok := tocI.(*minimal.TOC)
 	if !ok {
 		return nil, fmt.Errorf("invalid TOC type")
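Since Go 1.18, `any` is a type alias for `interface{}`, so the new signature is identical to the old one and callers and type assertions are unaffected. The xattrsToIgnore and Artifacts hunks in storage_linux.go below are the same substitution. A sketch with a stand-in TOC type, not the minimal.TOC from the vendored package:

package main

import "fmt"

// Stand-in TOC type: `any` and `interface{}` are the same type, so the
// assertion behaves identically under either spelling.
type TOC struct{ Version int }

func generateDump(tocI any) (*TOC, error) {
	toc, ok := tocI.(*TOC) // identical semantics with interface{}
	if !ok {
		return nil, fmt.Errorf("invalid TOC type")
	}
	return toc, nil
}

func main() {
	toc, err := generateDump(&TOC{Version: 1})
	fmt.Println(toc.Version, err) // 1 <nil>
}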
vendor/github.com/containers/storage/pkg/chunked/internal/minimal/compression.go (generated, vendored): 2 changed lines
@@ -234,7 +234,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		Offset:                     manifestOffset,
 		LengthCompressed:           uint64(len(compressedManifest)),
 		LengthUncompressed:         uint64(len(manifest)),
-		OffsetTarSplit:             uint64(tarSplitOffset),
+		OffsetTarSplit:             tarSplitOffset,
 		LengthCompressedTarSplit:   uint64(len(tarSplitData.Data)),
 		LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
 	}
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go (generated, vendored): 10 changed lines
@@ -111,7 +111,7 @@ type chunkedDiffer struct {
 	useFsVerity graphdriver.DifferFsVerity
 }

-var xattrsToIgnore = map[string]interface{}{
+var xattrsToIgnore = map[string]any{
 	"security.selinux": true,
 }

@@ -1011,7 +1011,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
 			!missingParts[prevIndex].Hole && !missingParts[i].Hole &&
 			len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
 			missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
-			missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+			missingParts[prevIndex].SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
 			missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
 			missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
 		} else {
@@ -1069,7 +1069,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
 		} else {
 			gap := getGap(missingParts, i)
 			prev := &newMissingParts[len(newMissingParts)-1]
-			prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+			prev.SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
 			prev.Hole = false
 			prev.OriginFile = nil
 			if gap > 0 {
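Both mergeMissingChunks hunks drop a uint64(gap) conversion, which suggests getGap now returns uint64 directly. The surrounding logic coalesces two adjacent missing parts by folding the gap between them into the previous part's source-chunk length, so one contiguous byte range can be fetched instead of two. A sketch with illustrative stand-in types, assuming the gap is already uint64:

package main

import "fmt"

// Illustrative stand-ins, not the vendored types: adjacent missing parts
// are merged by extending the previous part to cover the gap plus the
// next part's bytes.
type sourceChunk struct{ Offset, Length uint64 }

func merge(prev, next *sourceChunk) {
	gap := next.Offset - (prev.Offset + prev.Length) // already uint64
	prev.Length += gap + next.Length                 // the addition the diff simplifies
}

func main() {
	a := &sourceChunk{Offset: 0, Length: 100}
	b := &sourceChunk{Offset: 120, Length: 50}
	merge(a, b)
	fmt.Println(a.Length) // 170 = 100 + 20 (gap) + 50
}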
@@ -1483,7 +1483,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			bigDataKey:          c.manifest,
 			chunkedLayerDataKey: lcdBigData,
 		},
-		Artifacts: map[string]interface{}{
+		Artifacts: map[string]any{
 			tocKey: toc,
 		},
 		TOCDigest: c.tocDigest,
@@ -1761,7 +1761,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff

 	// the file is missing, attempt to find individual chunks.
 	for _, chunk := range r.chunks {
-		compressedSize := int64(chunk.EndOffset - chunk.Offset)
+		compressedSize := chunk.EndOffset - chunk.Offset
 		size := remainingSize
 		if chunk.ChunkSize > 0 {
 			size = chunk.ChunkSize