mirror of https://github.com/containers/podman.git
vendor: update c/{buildah,common,image,storage}
Update to latest main to see if everything passes in preparation for the first 5.3 release candidate.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
vendor/github.com/containers/storage/.cirrus.yml (generated, vendored; 10 lines changed)

@@ -23,7 +23,7 @@ env:
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
     # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20240821t171500z-f40f39d13"
+    IMAGE_SUFFIX: "c20241010t105554z-f40f39d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@@ -180,6 +180,13 @@ gofix_task:
     build_script: go fix ./...
     test_script: git diff --exit-code

+codespell_task:
+    alias: codespell
+    container:
+        image: python
+    build_script: pip install codespell
+    test_script: codespell
+
 # Status aggregator for all tests. This task simply ensures a defined
 # set of tasks all passed, and allows confirming that based on the status
@@ -197,6 +204,7 @@ success_task:
         - vendor
         - cross
        - gofix
+        - codespell
     container:
        image: golang:1.21
    clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
vendor/github.com/containers/storage/.codespellrc (generated, vendored; new file, 3 lines)

@@ -0,0 +1,3 @@
+[codespell]
+skip = ./.git,./vendor,./tests/tools/vendor,AUTHORS
+ignore-words-list = afile,flate,prevend,Plack,worl
vendor/github.com/containers/storage/Makefile (generated, vendored; 2 lines changed)

@@ -46,7 +46,7 @@ containers-storage: ## build using gc on the host
 	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage

 codespell:
-	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w
+	codespell

 binary local-binary: containers-storage
vendor/github.com/containers/storage/drivers/driver.go (generated, vendored; 4 lines changed)

@@ -189,14 +189,14 @@ type Driver interface {
 type DriverWithDifferOutput struct {
 	Differ             Differ
 	Target             string
-	Size               int64
+	Size               int64 // Size of the uncompressed layer, -1 if unknown. Must be known if UncompressedDigest is set.
 	UIDs               []uint32
 	GIDs               []uint32
 	UncompressedDigest digest.Digest
 	CompressedDigest   digest.Digest
 	Metadata           string
 	BigData            map[string][]byte
-	TarSplit           []byte
+	TarSplit           []byte // nil if not available
 	TOCDigest          digest.Digest
 	// RootDirMode is the mode of the root directory of the layer, if specified.
 	RootDirMode *os.FileMode
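The two new field comments define a small contract for consumers: Size is -1 when unknown (and must be valid whenever UncompressedDigest is set), and TarSplit is nil when no tar-split metadata is available. A minimal sketch of a hypothetical consumer honoring those sentinels (describeOutput is illustrative, not part of the API; it assumes "fmt" and graphdriver "github.com/containers/storage/drivers" are imported):

// Hypothetical consumer of the documented sentinel values.
func describeOutput(out *graphdriver.DriverWithDifferOutput) string {
	size := "unknown"
	if out.Size != -1 {
		size = fmt.Sprintf("%d bytes", out.Size)
	}
	tarSplit := "absent"
	if out.TarSplit != nil { // nil means "not available", per the new comment
		tarSplit = fmt.Sprintf("%d bytes of tar-split metadata", len(out.TarSplit))
	}
	return fmt.Sprintf("uncompressed size: %s, tar-split: %s", size, tarSplit)
}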
vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go (generated, vendored; 48 lines changed)

@@ -18,6 +18,16 @@ package quota
 #include <linux/quota.h>
 #include <linux/dqblk_xfs.h>

+#ifndef FS_XFLAG_PROJINHERIT
+struct fsxattr {
+	__u32         fsx_xflags;
+	__u32         fsx_extsize;
+	__u32         fsx_nextents;
+	__u32         fsx_projid;
+	unsigned char fsx_pad[12];
+};
+#define FS_XFLAG_PROJINHERIT 0x00000200
+#endif
 #ifndef FS_IOC_FSGETXATTR
 #define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
 #endif
@@ -162,6 +172,11 @@ func NewControl(basePath string) (*Control, error) {
 		return nil, err
 	}

+	// Clear inherit flag from top-level directory if necessary.
+	if err := stripProjectInherit(basePath); err != nil {
+		return nil, err
+	}
+
 	//
 	// get first project id to be used for next container
 	//
@@ -339,6 +354,8 @@ func setProjectID(targetPath string, projectID uint32) error {
 	}
 	defer closeDir(dir)

+	logrus.Debugf("Setting quota project ID %d on %s", projectID, targetPath)
+
 	var fsx C.struct_fsxattr
 	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
 		uintptr(unsafe.Pointer(&fsx)))
@@ -346,6 +363,7 @@ func setProjectID(targetPath string, projectID uint32) error {
 		return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
 	}
 	fsx.fsx_projid = C.__u32(projectID)
+	fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
 	_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
 		uintptr(unsafe.Pointer(&fsx)))
 	if errno != 0 {
@@ -355,6 +373,36 @@ func setProjectID(targetPath string, projectID uint32) error {
 	return nil
 }

+// stripProjectInherit strips the project inherit flag from a directory.
+// Used on the top-level directory to ensure project IDs are only inherited for
+// files in directories we set quotas on - not the directories we want to set
+// the quotas on, as that would make everything use the same project ID.
+func stripProjectInherit(targetPath string) error {
+	dir, err := openDir(targetPath)
+	if err != nil {
+		return err
+	}
+	defer closeDir(dir)
+
+	var fsx C.struct_fsxattr
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
+		uintptr(unsafe.Pointer(&fsx)))
+	if errno != 0 {
+		return fmt.Errorf("failed to get xfs attrs for %s: %w", targetPath, errno)
+	}
+	if fsx.fsx_xflags&C.FS_XFLAG_PROJINHERIT != 0 {
+		// Flag is set, need to clear it.
+		logrus.Debugf("Clearing PROJINHERIT flag from directory %s", targetPath)
+		fsx.fsx_xflags = fsx.fsx_xflags &^ C.FS_XFLAG_PROJINHERIT
+		_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
+			uintptr(unsafe.Pointer(&fsx)))
+		if errno != 0 {
+			return fmt.Errorf("failed to clear PROJINHERIT for %s: %w", targetPath, errno)
+		}
+	}
+	return nil
+}
+
 // findNextProjectID - find the next project id to be used for containers
 // by scanning driver home directory to find used project ids
 func (q *Control) findNextProjectID() error {
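The clearing step relies on Go's &^ (AND NOT) operator, which drops only the PROJINHERIT bit and leaves every other xflag intact. A self-contained illustration of the idiom, reusing the constant value from the header fallback above (the flags value is made up):

package main

import "fmt"

const fsXflagProjinherit = 0x00000200 // FS_XFLAG_PROJINHERIT, per the fallback #define

func main() {
	flags := uint32(0x00000a02) // hypothetical fsx_xflags with PROJINHERIT set
	if flags&fsXflagProjinherit != 0 {
		// Clear only the PROJINHERIT bit; all other flags survive.
		flags = flags &^ fsXflagProjinherit
	}
	fmt.Printf("%#x\n", flags) // prints 0x802
}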
vendor/github.com/containers/storage/layers.go (generated, vendored; 15 lines changed)

@@ -136,9 +136,12 @@ type Layer struct {
 	TOCDigest digest.Digest `json:"toc-digest,omitempty"`

 	// UncompressedSize is the length of the blob that was last passed to
-	// ApplyDiff() or create(), after we decompressed it. If
-	// UncompressedDigest is not set, this should be treated as if it were
-	// an uninitialized value.
+	// ApplyDiff() or create(), after we decompressed it.
+	//
+	// - If UncompressedDigest is set, this must be set to a valid value.
+	// - Otherwise, if TOCDigest is set, this is either valid or -1.
+	// - If neither of this digests is set, this should be treated as if it were
+	//   an uninitialized value.
 	UncompressedSize int64 `json:"diff-size,omitempty"`

 	// CompressionType is the type of compression which we detected on the blob
@@ -1214,8 +1217,8 @@ func (r *layerStore) Size(name string) (int64, error) {
 	// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
 	// a zero value is not just present because it was never set to anything else (which can happen if the layer was
 	// created by a version of this library that didn't keep track of digest and size information).
-	if layer.TOCDigest != "" || layer.UncompressedDigest != "" {
-		return layer.UncompressedSize, nil
+	if layer.UncompressedDigest != "" || layer.TOCDigest != "" {
+		return layer.UncompressedSize, nil // This may return -1 if only TOCDigest is set
 	}
 	return -1, nil
 }
@@ -2510,7 +2513,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 		return err
 	}

-	if len(diffOutput.TarSplit) != 0 {
+	if diffOutput.TarSplit != nil {
 		tsdata := bytes.Buffer{}
 		compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
 		if err != nil {
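With this change Size() can return -1 for two different reasons: no digest was ever recorded, or only TOCDigest is set and the uncompressed size is unknown. A sketch of how a caller might fold those cases together (reportLayerSize and its sizer parameter are illustrative, assuming "fmt" is imported):

// Illustrative caller handling all three outcomes of layerStore.Size.
func reportLayerSize(sizer interface{ Size(name string) (int64, error) }, name string) {
	n, err := sizer.Size(name)
	switch {
	case err != nil:
		fmt.Printf("layer %s: %v\n", name, err)
	case n == -1:
		// Either no digest was recorded for the layer, or only
		// TOCDigest is set and the size was never computed.
		fmt.Printf("layer %s: size unknown\n", name)
	default:
		fmt.Printf("layer %s: %d bytes\n", name, n)
	}
}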
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go (generated, vendored; 10 lines changed)

@@ -182,6 +182,9 @@ func makeBinaryDigest(stringDigest string) ([]byte, error) {
 	return buf, nil
 }

+// loadLayerCache attempts to load the cache file for the specified layer.
+// If the cache file is not present or it it using a different cache file version, then
+// the function returns (nil, nil).
 func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
 	buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey)
 	if err != nil && !errors.Is(err, os.ErrNotExist) {
@@ -202,6 +205,9 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
 	if err != nil {
 		return nil, err
 	}
+	if cacheFile == nil {
+		return nil, nil
+	}
 	return c.createLayer(layerID, cacheFile, mmapBuffer)
 }

@@ -268,7 +274,7 @@ func (c *layersCache) load() error {
 	var newLayers []*layer
 	for _, r := range allLayers {
 		// The layer is present in the store and it is already loaded. Attempt to
-		// re-use it if mmap'ed.
+		// reuse it if mmap'ed.
 		if l, found := loadedLayers[r.ID]; found {
 			// If the layer is not marked for re-load, move it to newLayers.
 			if !l.reloadWithMmap {
@@ -618,6 +624,8 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
 	}, nil
 }

+// readCacheFileFromMemory reads a cache file from a buffer.
+// It can return (nil, nil) if the cache file uses a different file version that the one currently supported.
 func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) {
 	bigData := bytes.NewReader(bigDataBuffer)
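Both new doc comments describe the same soft-miss convention: (nil, nil) means "no error, but no usable cache either", typically because the file was written by a different cache-file version. A sketch of how a caller distinguishes that from a real failure (loadOrRebuild and its function parameters are hypothetical):

// Hypothetical caller of the (nil, nil) soft-miss convention used by
// loadLayerCache and readCacheFileFromMemory.
func loadOrRebuild(load func() (*layer, error), rebuild func() (*layer, error)) (*layer, error) {
	l, err := load()
	if err != nil {
		return nil, err // a real failure
	}
	if l == nil {
		// Cache absent, or written by an unsupported cache-file
		// version: rebuild it instead of failing.
		return rebuild()
	}
	return l, nil
}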
vendor/github.com/containers/storage/pkg/chunked/compression_linux.go (generated, vendored; 34 lines changed)

@@ -139,7 +139,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
 }

 // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream.
-// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset).
+// Returns (manifest blob, parsed manifest, tar-split blob or nil, manifest offset).
 func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) {
 	offsetMetadata := annotations[internal.ManifestInfoKey]
 	if offsetMetadata == "" {
@@ -214,7 +214,7 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Di
 		return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err)
 	}

-	decodedTarSplit := []byte{}
+	var decodedTarSplit []byte = nil
 	if toc.TarSplitDigest != "" {
 		if tarSplitChunk.Offset <= 0 {
 			return nil, nil, nil, 0, fmt.Errorf("TOC requires a tar-split, but the %s annotation does not describe a position", internal.TarSplitInfoKey)
@@ -288,6 +288,36 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
 	return nil
 }

+// tarSizeFromTarSplit computes the total tarball size, using only the tarSplit metadata
+func tarSizeFromTarSplit(tarSplit []byte) (int64, error) {
+	var res int64 = 0
+
+	unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
+	for {
+		entry, err := unpacker.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return -1, fmt.Errorf("reading tar-split entries: %w", err)
+		}
+		switch entry.Type {
+		case storage.SegmentType:
+			res += int64(len(entry.Payload))
+		case storage.FileType:
+			// entry.Size is the “logical size”, which might not be the physical size for sparse entries;
+			// but the way tar-split/tar/asm.WriteOutputTarStream combines FileType entries and returned files contents,
+			// sparse files are not supported.
+			// Also https://github.com/opencontainers/image-spec/blob/main/layer.md says
+			// > Sparse files SHOULD NOT be used because they lack consistent support across tar implementations.
+			res += entry.Size
+		default:
+			return -1, fmt.Errorf("unexpected tar-split entry type %q", entry.Type)
+		}
+	}
+	return res, nil
+}
+
 // ensureTimePointersMatch ensures that a and b are equal
 func ensureTimePointersMatch(a, b *time.Time) error {
 	// We didn’t always use “timeIfNotZero” when creating the TOC, so treat time.IsZero the same as nil.
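tarSizeFromTarSplit works because a tar stream is exactly the concatenation of the raw segments (headers, padding, end-of-archive marker) recorded verbatim in the tar-split JSON and the file payloads that were elided from it. For a hypothetical tar holding one 5-byte file, the accounting would look like this, assuming standard 512-byte tar framing:

// Worked example of the accounting tarSizeFromTarSplit performs.
func exampleTarSize() int64 {
	var res int64
	res += 512     // SegmentType: the file's 512-byte tar header
	res += 5       // FileType: the file payload (entry.Size)
	res += 512 - 5 // SegmentType: zero padding up to the next 512-byte block
	res += 2 * 512 // SegmentType: end-of-archive marker (two zero blocks)
	return res     // 2048 bytes total
}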
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go (generated, vendored; 81 lines changed)

@@ -89,7 +89,8 @@ type chunkedDiffer struct {
 	// is no TOC referenced by the manifest.
 	blobDigest digest.Digest

-	blobSize int64
+	blobSize            int64
+	uncompressedTarSize int64 // -1 if unknown

 	pullOptions map[string]string

@@ -216,6 +217,7 @@ func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blo
 		fsVerityDigests:      make(map[string]string),
 		blobDigest:           blobDigest,
 		blobSize:             blobSize,
+		uncompressedTarSize:  -1, // Will be computed later
 		convertToZstdChunked: true,
 		copyBuffer:           makeCopyBuffer(),
 		layersCache:          layersCache,
@@ -229,24 +231,33 @@ func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest
 	if err != nil {
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
 	}
+	var uncompressedTarSize int64 = -1
+	if tarSplit != nil {
+		uncompressedTarSize, err = tarSizeFromTarSplit(tarSplit)
+		if err != nil {
+			return nil, fmt.Errorf("computing size from tar-split: %w", err)
+		}
+	}
+
 	layersCache, err := getLayersCache(store)
 	if err != nil {
 		return nil, err
 	}

 	return &chunkedDiffer{
-		fsVerityDigests: make(map[string]string),
-		blobSize:        blobSize,
-		tocDigest:       tocDigest,
-		copyBuffer:      makeCopyBuffer(),
-		fileType:        fileTypeZstdChunked,
-		layersCache:     layersCache,
-		manifest:        manifest,
-		toc:             toc,
-		pullOptions:     pullOptions,
-		stream:          iss,
-		tarSplit:        tarSplit,
-		tocOffset:       tocOffset,
+		fsVerityDigests:     make(map[string]string),
+		blobSize:            blobSize,
+		uncompressedTarSize: uncompressedTarSize,
+		tocDigest:           tocDigest,
+		copyBuffer:          makeCopyBuffer(),
+		fileType:            fileTypeZstdChunked,
+		layersCache:         layersCache,
+		manifest:            manifest,
+		toc:                 toc,
+		pullOptions:         pullOptions,
+		stream:              iss,
+		tarSplit:            tarSplit,
+		tocOffset:           tocOffset,
 	}, nil
 }

@@ -261,16 +272,17 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig
 	}

 	return &chunkedDiffer{
-		fsVerityDigests: make(map[string]string),
-		blobSize:        blobSize,
-		tocDigest:       tocDigest,
-		copyBuffer:      makeCopyBuffer(),
-		fileType:        fileTypeEstargz,
-		layersCache:     layersCache,
-		manifest:        manifest,
-		pullOptions:     pullOptions,
-		stream:          iss,
-		tocOffset:       tocOffset,
+		fsVerityDigests:     make(map[string]string),
+		blobSize:            blobSize,
+		uncompressedTarSize: -1, // We would have to read and decompress the whole layer
+		tocDigest:           tocDigest,
+		copyBuffer:          makeCopyBuffer(),
+		fileType:            fileTypeEstargz,
+		layersCache:         layersCache,
+		manifest:            manifest,
+		pullOptions:         pullOptions,
+		stream:              iss,
+		tocOffset:           tocOffset,
 	}, nil
 }

@@ -1153,7 +1165,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff

 	var compressedDigest digest.Digest
 	var uncompressedDigest digest.Digest
-	var convertedBlobSize int64

 	if c.convertToZstdChunked {
 		fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
@@ -1185,7 +1196,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
-		convertedBlobSize = tarSize
+		c.uncompressedTarSize = tarSize
 		// fileSource is a O_TMPFILE file descriptor, so we
 		// need to keep it open until the entire file is processed.
 		defer fileSource.Close()
@@ -1255,6 +1266,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		TOCDigest:          c.tocDigest,
 		UncompressedDigest: uncompressedDigest,
 		CompressedDigest:   compressedDigest,
+		Size:               c.uncompressedTarSize,
 	}

 	// When the hard links deduplication is used, file attributes are ignored because setting them
@@ -1268,19 +1280,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff

 	var missingParts []missingPart

-	mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries)
+	mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
 	if err != nil {
 		return output, err
 	}

 	output.UIDs, output.GIDs = collectIDs(mergedEntries)
-	if convertedBlobSize > 0 {
-		// if the image was converted, store the original tar size, so that
-		// it can be recreated correctly.
-		output.Size = convertedBlobSize
-	} else {
-		output.Size = totalSizeFromTOC
-	}

 	if err := maybeDoIDRemap(mergedEntries, options); err != nil {
 		return output, err
@@ -1597,9 +1602,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
 	return false
 }

-func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) {
-	var totalFilesSize int64
-
+func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, error) {
 	countNextChunks := func(start int) int {
 		count := 0
 		for _, e := range entries[start:] {
@@ -1629,10 +1632,8 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			continue
 		}

-		totalFilesSize += e.Size
-
 		if e.Type == TypeChunk {
-			return nil, -1, fmt.Errorf("chunk type without a regular file")
+			return nil, fmt.Errorf("chunk type without a regular file")
 		}

 		if e.Type == TypeReg {
@@ -1668,7 +1669,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
 			lastChunkOffset = mergedEntries[i].chunks[j].Offset
 		}
 	}
-	return mergedEntries, totalFilesSize, nil
+	return mergedEntries, nil
 }

 // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the
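Taken together, these hunks make c.uncompressedTarSize the single source of truth for output.Size: it starts at -1 for raw conversion and estargz, is derived from tar-split for zstd:chunked, and is overwritten with the measured tarSize when a conversion actually runs. A condensed, illustrative sketch of that decision order (resolveUncompressedSize itself is not in the patch):

// Illustrative condensation of how the patched code arrives at the
// uncompressed size reported in DriverWithDifferOutput.Size.
func resolveUncompressedSize(converted bool, measuredTarSize int64, tarSplit []byte) (int64, error) {
	if converted {
		// ApplyDiff measured the tar stream while converting the blob.
		return measuredTarSize, nil
	}
	if tarSplit != nil {
		// zstd:chunked: computed up front from tar-split metadata.
		return tarSizeFromTarSplit(tarSplit)
	}
	return -1, nil // estargz: unknown without decompressing the whole layer
}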
vendor/github.com/containers/storage/pkg/idtools/idtools.go (generated, vendored; 2 lines changed)

@@ -367,7 +367,7 @@ func checkChownErr(err error, name string, uid, gid int) error {
 	return err
 }

-// Stat contains file states that can be overriden with ContainersOverrideXattr.
+// Stat contains file states that can be overridden with ContainersOverrideXattr.
 type Stat struct {
 	IDs  IDPair
 	Mode os.FileMode
vendor/github.com/containers/storage/store.go (generated, vendored; 2 lines changed)

@@ -2201,7 +2201,7 @@ func (s *store) ImageSize(id string) (int64, error) {
 		}
 		// The UncompressedSize is only valid if there's a digest to go with it.
 		n := layer.UncompressedSize
-		if layer.UncompressedDigest == "" {
+		if layer.UncompressedDigest == "" || n == -1 {
 			// Compute the size.
 			n, err = layerStore.DiffSize("", layer.ID)
 			if err != nil {
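ImageSize now treats -1 the same as a missing digest: in both cases the recorded UncompressedSize cannot be trusted, so the size is recomputed from the layer diff. In sketch form (cachedOrComputedSize is a hypothetical helper mirroring the patched condition):

// Use the cached size only when it is digest-backed and not the
// -1 "unknown" sentinel; otherwise fall back to computing it.
func cachedOrComputedSize(layer *Layer, compute func() (int64, error)) (int64, error) {
	n := layer.UncompressedSize
	if layer.UncompressedDigest == "" || n == -1 {
		return compute() // e.g. layerStore.DiffSize("", layer.ID)
	}
	return n, nil
}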
vendor/github.com/containers/storage/userns.go (generated, vendored; 87 lines changed)

@@ -1,18 +1,21 @@
 //go:build linux

 package storage

 import (
 	"fmt"
+	"os"
 	"os/user"
 	"path/filepath"
 	"strconv"

 	drivers "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/unshare"
 	"github.com/containers/storage/types"
+	securejoin "github.com/cyphar/filepath-securejoin"
 	libcontainerUser "github.com/moby/sys/user"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )

@@ -85,40 +88,59 @@ const nobodyUser = 65534
 // parseMountedFiles returns the maximum UID and GID found in the /etc/passwd and
 // /etc/group files.
 func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
+	var (
+		passwd *os.File
+		group  *os.File
+		size   int
+		err    error
+	)
 	if passwdFile == "" {
-		passwdFile = filepath.Join(containerMount, "etc/passwd")
+		passwd, err = secureOpen(containerMount, "/etc/passwd")
+	} else {
+		// User-specified override from a volume. Will not be in
+		// container root.
+		passwd, err = os.Open(passwdFile)
 	}
-	if groupFile == "" {
-		groupFile = filepath.Join(containerMount, "etc/group")
-	}
-
-	size := 0
-
-	users, err := libcontainerUser.ParsePasswdFile(passwdFile)
 	if err == nil {
-		for _, u := range users {
-			// Skip the "nobody" user otherwise we end up with 65536
-			// ids with most images
-			if u.Name == "nobody" || u.Name == "nogroup" {
-				continue
-			}
-			if u.Uid > size && u.Uid != nobodyUser {
-				size = u.Uid + 1
-			}
-			if u.Gid > size && u.Gid != nobodyUser {
-				size = u.Gid + 1
+		defer passwd.Close()
+
+		users, err := libcontainerUser.ParsePasswd(passwd)
+		if err == nil {
+			for _, u := range users {
+				// Skip the "nobody" user otherwise we end up with 65536
+				// ids with most images
+				if u.Name == "nobody" || u.Name == "nogroup" {
+					continue
+				}
+				if u.Uid > size && u.Uid != nobodyUser {
+					size = u.Uid + 1
+				}
+				if u.Gid > size && u.Gid != nobodyUser {
+					size = u.Gid + 1
+				}
 			}
 		}
 	}

-	groups, err := libcontainerUser.ParseGroupFile(groupFile)
+	if groupFile == "" {
+		group, err = secureOpen(containerMount, "/etc/group")
+	} else {
+		// User-specified override from a volume. Will not be in
+		// container root.
+		group, err = os.Open(groupFile)
+	}
 	if err == nil {
-		for _, g := range groups {
-			if g.Name == "nobody" || g.Name == "nogroup" {
-				continue
-			}
-			if g.Gid > size && g.Gid != nobodyUser {
-				size = g.Gid + 1
+		defer group.Close()
+
+		groups, err := libcontainerUser.ParseGroup(group)
+		if err == nil {
+			for _, g := range groups {
+				if g.Name == "nobody" || g.Name == "nogroup" {
+					continue
+				}
+				if g.Gid > size && g.Gid != nobodyUser {
+					size = g.Gid + 1
+				}
 			}
 		}
 	}
@@ -309,3 +331,14 @@ func getAutoUserNSIDMappings(
 	gidMap := append(availableGIDs.zip(requestedContainerGIDs), additionalGIDMappings...)
 	return uidMap, gidMap, nil
 }
+
+// Securely open (read-only) a file in a container mount.
+func secureOpen(containerMount, file string) (*os.File, error) {
+	tmpFile, err := securejoin.OpenInRoot(containerMount, file)
+	if err != nil {
+		return nil, err
+	}
+	defer tmpFile.Close()
+
+	return securejoin.Reopen(tmpFile, unix.O_RDONLY)
+}
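secureOpen is the security-relevant piece: securejoin.OpenInRoot resolves the path with the container mount as the root, so a symlink such as /etc/passwd -> /host/etc/passwd inside the image cannot escape onto the host, and securejoin.Reopen then reopens the already-resolved handle read-only. A standalone sketch of the same pattern (the mount path is illustrative):

package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
	"golang.org/x/sys/unix"
)

func main() {
	// Resolve /etc/passwd with the container mount as the root;
	// symlinks inside the image cannot point outside of it.
	handle, err := securejoin.OpenInRoot("/var/lib/containers/merged", "/etc/passwd")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer handle.Close()

	// Reopen the resolved handle with the flags we actually want.
	f, err := securejoin.Reopen(handle, unix.O_RDONLY)
	if err != nil {
		fmt.Println("reopen:", err)
		return
	}
	defer f.Close()
	fmt.Println("opened", f.Name())
}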
vendor/github.com/containers/storage/userns_unsupported.go (generated, vendored; new file, 14 lines)

@@ -0,0 +1,14 @@
+//go:build !linux
+
+package storage
+
+import (
+	"errors"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/containers/storage/types"
+)
+
+func (s *store) getAutoUserNS(_ *types.AutoUserNsOptions, _ *Image, _ rwLayerStore, _ []roLayerStore) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, errors.New("user namespaces are not supported on this platform")
+}