vendor: update c/{common,storage}

Closes: https://github.com/containers/podman/issues/25572

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
Author: Giuseppe Scrivano
Date:   2025-03-26 11:24:16 +01:00
Parent: b58250b35d
Commit: 7f592742b8

27 changed files with 759 additions and 277 deletions


@@ -23,7 +23,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20250131t121915z-f41f40d13"
IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -170,13 +170,13 @@ vendor_task:
cross_task:
alias: cross
container:
image: golang:1.22
image: golang:1.23
build_script: make cross
gofix_task:
alias: gofix
container:
image: golang:1.22
image: golang:1.23
build_script: go fix ./...
test_script: git diff --exit-code


@@ -1,123 +1,14 @@
# Contributing to Containers/Storage
We'd love to have you join the community! Below summarizes the processes
that we follow.
We'd love to have you join the community! [Learn how to contribute](https://github.com/containers/common/blob/main/CONTRIBUTING.md) to the Containers Group Projects.
## Topics
Please note that the following information is specific to this project:
* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)
<!--
* [Becoming a Maintainer](#becoming-a-maintainer)
-->
## Reporting Issues
Before reporting an issue, check our backlog of
[open issues](https://github.com/containers/storage/issues)
to see if someone else has already reported it. If so, feel free to add
your scenario, or additional information, to the discussion. Or simply
"subscribe" to it to be notified when it is updated.
If you find a new issue with the project we'd love to hear about it! The most
important aspect of a bug report is that it includes enough information for
us to reproduce it. So, please include as much detail as possible and try
to remove the extra stuff that doesn't really relate to the issue itself.
The easier it is for us to reproduce it, the faster it'll be fixed!
Please don't include any private/sensitive information in your issue!
## Submitting Pull Requests
No Pull Request (PR) is too small! Typos, additional comments in the code,
new testcases, bug fixes, new features, more documentation, ... it's all
welcome!
While bug fixes can first be identified via an "issue", that is not required.
It's ok to just open up a PR with the fix, but make sure you include the same
information you would have included in an issue - like how to reproduce it.
PRs for new features should include some background on what use cases the
new code is trying to address. When possible and when it makes sense, try to break-up
larger PRs into smaller ones - it's easier to review smaller
code changes. But only if those smaller ones make sense as stand-alone PRs.
Regardless of the type of PR, all PRs should include:
* well documented code changes
* additional testcases. Ideally, they should fail w/o your code change applied
* documentation changes
Squash your commits into logical pieces of work that might want to be reviewed
separate from the rest of the PRs. But, squashing down to just one commit is ok
too since in the end the entire PR will be reviewed anyway. When in doubt,
squash.
PRs that fix issues should include a reference like `Closes #XXXX` in the
commit message so that github will automatically close the referenced issue
when the PR is merged.
<!--
All PRs require at least two LGTMs (Looks Good To Me) from maintainers.
-->
### Sign your PRs
The sign-off is a line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
* We don't typically require 2 LGTMs for this repository.
## Communications
For general questions, or discussions, please use the
IRC group on `irc.freenode.net` called `container-projects`
that has been setup.
@@ -139,6 +30,6 @@ approval, or if the person requests to be removed then it is automatic.
Normally, a maintainer will only be removed if they are considered to be
inactive for a long period of time or are viewed as disruptive to the community.
The current list of maintainers can be found in the
[MAINTAINERS](MAINTAINERS) file.
-->


@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 1.64.6
GOLANGCI_LINT_VERSION := 1.64.8
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs


@@ -859,23 +859,26 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
tarWithOptionsTo := func(dest io.WriteCloser, srcPath string, options *TarOptions) (result error) {
// Fix the source path to work with long path names. This is a no-op
// on platforms other than Windows.
srcPath = fixVolumePathPrefix(srcPath)
defer func() {
if err := dest.Close(); err != nil && result == nil {
result = err
}
}()
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return nil, err
}
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
if err != nil {
return err
}
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(dest, options.Compression)
if err != nil {
return err
}
compressWriter, err := CompressStream(pipeWriter, options.Compression)
if err != nil {
return nil, err
}
go func() {
ta := newTarWriter(
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
compressWriter,
@@ -885,16 +888,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
ta.CopyPass = options.CopyPass
includeFiles := options.IncludeFiles
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Errorf("Can't close pipe writer: %s", err)
if err := compressWriter.Close(); err != nil && result == nil {
result = err
}
}()
@@ -908,7 +905,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
stat, err := os.Lstat(srcPath)
if err != nil {
return
return err
}
if !stat.IsDir() {
@@ -916,22 +913,22 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// 'walk' will error if "file/." is stat-ed and "file" is not a
// directory. So, we must split the source path and use the
// basename as the include.
if len(options.IncludeFiles) > 0 {
if len(includeFiles) > 0 {
logrus.Warn("Tar: Can't archive a file with includes")
}
dir, base := SplitPathDirEntry(srcPath)
srcPath = dir
options.IncludeFiles = []string{base}
includeFiles = []string{base}
}
if len(options.IncludeFiles) == 0 {
options.IncludeFiles = []string{"."}
if len(includeFiles) == 0 {
includeFiles = []string{"."}
}
seen := make(map[string]bool)
for _, include := range options.IncludeFiles {
for _, include := range includeFiles {
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
@@ -1026,10 +1023,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
return nil
}); err != nil {
logrus.Errorf("%s", err)
return
return err
}
}
return ta.TarWriter.Close()
}
pipeReader, pipeWriter := io.Pipe()
go func() {
err := tarWithOptionsTo(pipeWriter, srcPath, options)
if pipeErr := pipeWriter.CloseWithError(err); pipeErr != nil {
logrus.Errorf("Can't close pipe writer: %s", pipeErr)
}
}()
return pipeReader, nil
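
The restructuring above funnels all of the tar/compress work into a helper that writes to a single destination, and the surrounding goroutine hands the helper's return value to the reader via pipeWriter.CloseWithError instead of just logging Close failures. A minimal sketch of that producer-to-pipe error-propagation pattern, with a hypothetical produce function standing in for the real archiving code:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// produce stands in for the archiving half: it writes the payload into w
// and returns the first error it encounters.
func produce(w io.Writer) error {
	if _, err := io.WriteString(w, "payload"); err != nil {
		return err
	}
	return errors.New("simulated mid-stream failure")
}

// stream wires the producer to a pipe; CloseWithError forwards the
// producer's error (or EOF when nil) to whoever reads from the pipe.
func stream() io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		// CloseWithError(nil) is equivalent to Close, so one call
		// covers both the success and the failure path.
		_ = pw.CloseWithError(produce(pw))
	}()
	return pr
}

func main() {
	data, err := io.ReadAll(stream())
	fmt.Printf("read %q, err: %v\n", data, err)
}
```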
@@ -1216,9 +1221,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
r := tarArchive
if decompress {

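This hunk and the three that follow drop the `ExcludePatterns == nil` normalization; a nil slice already behaves like an empty one for length checks and iteration, which is presumably why the explicit substitution is no longer needed. A trivial illustration:

```go
package main

import "fmt"

func main() {
	var nilPatterns []string    // what a zero-value TarOptions.ExcludePatterns looks like
	emptyPatterns := []string{} // what the removed code used to substitute

	// Both report length 0, so `len(patterns) == 0` guards behave identically.
	fmt.Println(len(nilPatterns), len(emptyPatterns))

	// Ranging over a nil slice simply executes zero iterations,
	// exactly like ranging over an empty one.
	for range nilPatterns {
		fmt.Println("never reached")
	}
}
```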

@@ -31,9 +31,6 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
aufsTempdir := ""


@@ -69,9 +69,6 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
options = &archive.TarOptions{}
options.InUserNS = unshare.IsRootless()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMappings.RootPair()


@@ -98,9 +98,6 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
options.InUserNS = true
}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
data, err := json.Marshal(options)
if err != nil {


@@ -1,4 +0,0 @@
package chrootarchive
func init() {
}


@@ -1,4 +0,0 @@
package chrootarchive
func init() {
}


@@ -13,7 +13,7 @@ import (
func Exists(path string) error {
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
// simply checking the existence of a file.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0)
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS)
if err != nil {
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
@@ -25,7 +25,7 @@ func Exists(path string) error {
func Lexists(path string) error {
// It uses unix.Faccessat which is a faster operation compared to os.Stat for
// simply checking the existence of a file.
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW)
err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS)
if err != nil {
return &os.PathError{Op: "faccessat", Path: path, Err: err}
}
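
For reference, unix.Faccessat takes a directory file descriptor, a path, an access mode, and flags; the added unix.AT_EACCESS flag asks the kernel to evaluate the check against the effective rather than the real user and group IDs. A standalone sketch of the same existence probe (Linux-only, using golang.org/x/sys/unix):

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// exists mirrors the vendored helper: F_OK only probes for existence,
// and AT_EACCESS makes the kernel use the effective UID/GID for any
// permission evaluation along the way.
func exists(path string) error {
	if err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS); err != nil {
		return &os.PathError{Op: "faccessat", Path: path, Err: err}
	}
	return nil
}

func main() {
	fmt.Println(exists("/etc/hostname"))
	fmt.Println(exists("/no/such/path"))
}
```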


@@ -5,6 +5,7 @@ package idtools
import (
"errors"
"os/user"
"sync"
"unsafe"
)
@@ -13,16 +14,14 @@ import (
#include <shadow/subid.h>
#include <stdlib.h>
#include <stdio.h>
const char *Prog = "storage";
FILE *shadow_logfd = NULL;
struct subid_range get_range(struct subid_range *ranges, int i)
{
shadow_logfd = stderr;
return ranges[i];
}
#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
# define subid_init libsubid_init
# define subid_get_uid_ranges get_subuid_ranges
# define subid_get_gid_ranges get_subgid_ranges
#endif
@@ -30,6 +29,10 @@ struct subid_range get_range(struct subid_range *ranges, int i)
*/
import "C"
var (
onceInit sync.Once
)
func readSubid(username string, isUser bool) (ranges, error) {
var ret ranges
uidstr := ""
@@ -42,6 +45,10 @@ func readSubid(username string, isUser bool) (ranges, error) {
uidstr = u.Uid
}
onceInit.Do(func() {
C.subid_init(C.CString("storage"), C.stderr)
})
cUsername := C.CString(username)
defer C.free(unsafe.Pointer(cUsername))

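The libsubid setup moves from C-side globals (Prog, shadow_logfd) to a single subid_init call guarded by sync.Once, so initialization runs exactly once even if readSubid is called from several goroutines. A cgo-free sketch of that once-guarded initialization pattern, with a hypothetical initLibrary standing in for C.subid_init:

```go
package main

import (
	"fmt"
	"sync"
)

var onceInit sync.Once

// initLibrary is a hypothetical stand-in for C.subid_init("storage", C.stderr):
// a non-idempotent setup call that must run exactly once.
func initLibrary() {
	fmt.Println("library initialized")
}

// lookupRanges stands in for readSubid: every caller funnels through the
// same sync.Once, so only the first call performs the initialization.
func lookupRanges(username string) string {
	onceInit.Do(initLibrary)
	return "ranges for " + username
}

func main() {
	var wg sync.WaitGroup
	for _, u := range []string{"alice", "bob", "carol"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			fmt.Println(lookupRanges(name))
		}(u)
	}
	wg.Wait()
}
```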

@@ -2830,7 +2830,11 @@ func (s *store) Version() ([][2]string, error) {
return [][2]string{}, nil
}
func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) {
if err := validateMountOptions(mountOpts); err != nil {
return "", err
}
// We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
// in startUsingGraphDriver().
if err := s.startUsingGraphDriver(); err != nil {
@@ -2842,57 +2846,61 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) {
if err != nil {
return "", err
}
if options.UidMaps != nil || options.GidMaps != nil {
options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps)
}
var imageHomeStore roImageStore
// function used to have a scope for rlstore.StopWriting()
tryMount := func() (string, error) {
if err := rlstore.startWriting(); err != nil {
return "", err
}
defer rlstore.stopWriting()
for _, s := range lstores {
if err := s.startReading(); err != nil {
return "", err
}
defer rlstore.stopWriting()
if rlstore.Exists(id) {
return rlstore.Mount(id, options)
}
return "", nil
defer s.stopReading()
}
mountPoint, err := tryMount()
if mountPoint != "" || err != nil {
return mountPoint, err
if err := s.imageStore.startWriting(); err != nil {
return "", err
}
defer s.imageStore.stopWriting()
// check if the layer is in a read-only store, and return a better error message
for _, store := range lstores {
if err := store.startReading(); err != nil {
return "", err
}
exists := store.Exists(id)
store.stopReading()
if exists {
return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly)
cimage, err := s.imageStore.Get(id)
if err == nil {
imageHomeStore = s.imageStore
} else {
for _, s := range s.roImageStores {
if err := s.startReading(); err != nil {
return "", err
}
defer s.stopReading()
cimage, err = s.Get(id)
if err == nil {
imageHomeStore = s
break
}
}
}
if cimage == nil {
return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
return "", ErrLayerUnknown
}
func (s *store) MountImage(id string, mountOpts []string, mountLabel string) (string, error) {
// Append ReadOnly option to mountOptions
img, err := s.Image(id)
idmappingsOpts := types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, rlstore, lstores, idmappingsOpts)
if err != nil {
return "", err
}
if err := validateMountOptions(mountOpts); err != nil {
return "", err
if len(ilayer.UIDMap) > 0 || len(ilayer.GIDMap) > 0 {
return "", fmt.Errorf("cannot create an image with canonical UID/GID mappings in a read-only store")
}
options := drivers.MountOpts{
MountLabel: mountLabel,
Options: append(mountOpts, "ro"),
}
return s.mount(img.TopLayer, options)
return rlstore.Mount(ilayer.ID, options)
}
func (s *store) Mount(id, mountLabel string) (string, error) {
@@ -2914,7 +2922,43 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
}
}
}
return s.mount(id, options)
// We need to make sure the home mount is present when the Mount is done, which happens by possibly reinitializing the graph driver
// in startUsingGraphDriver().
if err := s.startUsingGraphDriver(); err != nil {
return "", err
}
defer s.stopUsingGraphDriver()
rlstore, lstores, err := s.bothLayerStoreKindsLocked()
if err != nil {
return "", err
}
if options.UidMaps != nil || options.GidMaps != nil {
options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps)
}
if err := rlstore.startWriting(); err != nil {
return "", err
}
defer rlstore.stopWriting()
if rlstore.Exists(id) {
return rlstore.Mount(id, options)
}
// check if the layer is in a read-only store, and return a better error message
for _, store := range lstores {
if err := store.startReading(); err != nil {
return "", err
}
exists := store.Exists(id)
store.stopReading()
if exists {
return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrStoreIsReadOnly)
}
}
return "", ErrLayerUnknown
}
func (s *store) Mounted(id string) (int, error) {
@@ -2938,7 +2982,23 @@ func (s *store) UnmountImage(id string, force bool) (bool, error) {
if err != nil {
return false, err
}
return s.Unmount(img.TopLayer, force)
return writeToLayerStore(s, func(lstore rwLayerStore) (bool, error) {
for _, layerID := range img.MappedTopLayers {
l, err := lstore.Get(layerID)
if err != nil {
if err == ErrLayerUnknown {
continue
}
return false, err
}
// check if the layer with the canonical mapping is in the mapped top layers
if len(l.UIDMap) == 0 && len(l.GIDMap) == 0 {
return lstore.unmount(l.ID, force, false)
}
}
return lstore.unmount(img.TopLayer, force, false)
})
}
func (s *store) Unmount(id string, force bool) (bool, error) {
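
Taken together, MountImage now resolves the image itself (including images that live in read-only stores), picks the layer to mount via imageTopLayerForMapping, and always mounts it read-only, while UnmountImage prefers an unmapped entry from MappedTopLayers before falling back to TopLayer. A rough usage sketch against the public Store API; the store paths and image ID below are illustrative assumptions:

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Illustrative store locations; real callers normally load these from
	// storage.conf instead of hard-coding them.
	opts := types.StoreOptions{
		RunRoot:         "/run/containers/storage",
		GraphRoot:       "/var/lib/containers/storage",
		GraphDriverName: "overlay",
	}
	store, err := storage.GetStore(opts)
	if err != nil {
		panic(err)
	}

	const imageID = "0123456789abcdef" // hypothetical image ID

	// MountImage appends "ro" to the mount options, so the returned path is
	// a read-only view of the image's top layer.
	mountPoint, err := store.MountImage(imageID, nil, "")
	if err != nil {
		panic(err)
	}
	fmt.Println("image mounted at", mountPoint)

	// With force=false the layer store's mount reference count is respected;
	// the returned bool reports whether the layer ended up unmounted.
	unmounted, err := store.UnmountImage(imageID, false)
	fmt.Println("unmounted:", unmounted, "err:", err)
}
```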