vendor: bump c/storage
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
vendor/github.com/containers/storage/VERSION (generated, vendored) | 2

@@ -1 +1 @@
-1.50.2
+1.50.3-dev
vendor/github.com/containers/storage/drivers/overlay/check.go (generated, vendored) | 33

@@ -275,3 +275,36 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
 	}()
 	return true, nil
 }
+
+// supportsDataOnlyLayers checks if the kernel supports mounting a overlay file system
+// that uses data-only layers.
+func supportsDataOnlyLayers(home string) (bool, error) {
+	layerDir, err := os.MkdirTemp(home, "compat")
+	if err != nil {
+		return false, err
+	}
+	defer func() {
+		_ = os.RemoveAll(layerDir)
+	}()
+
+	mergedDir := filepath.Join(layerDir, "merged")
+	lowerDir := filepath.Join(layerDir, "lower")
+	lowerDirDataOnly := filepath.Join(layerDir, "lower-data")
+	upperDir := filepath.Join(layerDir, "upper")
+	workDir := filepath.Join(layerDir, "work")
+
+	_ = idtools.MkdirAs(mergedDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(lowerDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(lowerDirDataOnly, 0o700, 0, 0)
+	_ = idtools.MkdirAs(upperDir, 0o700, 0, 0)
+	_ = idtools.MkdirAs(workDir, 0o700, 0, 0)
+
+	opts := fmt.Sprintf("lowerdir=%s::%s,upperdir=%s,workdir=%s,metacopy=on", lowerDir, lowerDirDataOnly, upperDir, workDir)
+	flags := uintptr(0)
+	if err := unix.Mount("overlay", mergedDir, "overlay", flags, opts); err != nil {
+		return false, err
+	}
+	_ = unix.Unmount(mergedDir, unix.MNT_DETACH)
+
+	return true, nil
+}
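Note on the probe above: a lower layer introduced by an empty field (that is, a "::" separator in lowerdir) is treated by the kernel as data-only, contributing file data but not directory entries. A minimal standalone sketch of how such an options string is assembled, mirroring the patch; the paths are only illustrative, not taken from the commit:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Regular lower layers are joined with ":".
	regularLowers := []string{"/tmp/compat/lower"}
	// Data-only layers are appended after a "::" separator.
	dataOnlyLowers := []string{"/tmp/compat/lower-data"}

	lowerDirs := strings.Join(regularLowers, ":")
	if len(dataOnlyLowers) > 0 {
		lowerDirs += "::" + strings.Join(dataOnlyLowers, "::")
	}

	// Mirrors the option string built by supportsDataOnlyLayers above.
	fmt.Printf("lowerdir=%s,upperdir=%s,workdir=%s,metacopy=on\n",
		lowerDirs, "/tmp/compat/upper", "/tmp/compat/work")
}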
vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go (generated, vendored) | 4

@@ -93,7 +93,7 @@ func generateComposeFsBlob(toc []byte, composefsDir string) error {

 	fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
 	if err != nil {
-		return fmt.Errorf("failed to open output file: %w", err)
+		return fmt.Errorf("failed to open output file %q: %w", destFile, err)
 	}
 	outFd := os.NewFile(uintptr(fd), "outFd")

@@ -166,7 +166,7 @@ func hasACL(path string) (bool, error) {

 func mountComposefsBlob(dataDir, mountPoint string) error {
 	blobFile := getComposefsBlob(dataDir)
-	loop, err := loopback.AttachLoopDevice(blobFile)
+	loop, err := loopback.AttachLoopDeviceRO(blobFile)
 	if err != nil {
 		return err
 	}
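For context, generateComposeFsBlob opens the destination with unix.Openat and then wraps the raw descriptor in an *os.File so standard Go I/O can be used; the patched error message additionally quotes the path with %q. A small hedged sketch of that pattern, with a hypothetical path that is not part of the commit:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	path := "/tmp/example.blob"
	// O_EXCL makes the open fail if the file already exists, as in the patch.
	fd, err := unix.Openat(unix.AT_FDCWD, path,
		unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
	if err != nil {
		// %q quotes the path, which is what the new error message adds.
		fmt.Printf("failed to open output file %q: %v\n", path, err)
		return
	}
	// Wrap the raw descriptor; closing the *os.File also closes the fd.
	out := os.NewFile(uintptr(fd), path)
	defer out.Close()

	if _, err := out.WriteString("hello\n"); err != nil {
		fmt.Println("write failed:", err)
	}
}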
vendor/github.com/containers/storage/drivers/overlay/mount.go (generated, vendored) | 21

@@ -141,14 +141,27 @@ func mountOverlayFromMain() {
 	// the new value for the list of lowers, because it's shorter.
 	if lowerv != "" {
 		lowers := strings.Split(lowerv, ":")
-		for i := range lowers {
-			lowerFd, err := unix.Open(lowers[i], unix.O_RDONLY, 0)
+		var newLowers []string
+		dataOnly := false
+		for _, lowerPath := range lowers {
+			if lowerPath == "" {
+				dataOnly = true
+				continue
+			}
+			lowerFd, err := unix.Open(lowerPath, unix.O_RDONLY, 0)
 			if err != nil {
 				fatal(err)
 			}
-			lowers[i] = fmt.Sprintf("%d", lowerFd)
+			var lower string
+			if dataOnly {
+				lower = fmt.Sprintf(":%d", lowerFd)
+				dataOnly = false
+			} else {
+				lower = fmt.Sprintf("%d", lowerFd)
+			}
+			newLowers = append(newLowers, lower)
 		}
-		lowerv = strings.Join(lowers, ":")
+		lowerv = strings.Join(newLowers, ":")
 	}

 	// Reconstruct the Label field.
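When the lowerdir string is split on ":", a data-only layer (which sits behind a "::" in the original string) shows up as an empty element; the loop above uses that empty element as a marker and re-emits the following entry with a leading ":" so the final Join restores the "::" separator. A tiny sketch of that split-and-rejoin behaviour, using made-up paths and fake file descriptors rather than anything from the patch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "a::b" means "b" is a data-only lower layer.
	lowerv := "/lower/a::/lower/b"
	parts := strings.Split(lowerv, ":")
	fmt.Printf("%q\n", parts) // ["/lower/a" "" "/lower/b"]

	// Rewrite each path as a (fake) fd, keeping the data-only marker.
	fds := map[string]int{"/lower/a": 7, "/lower/b": 8}
	var out []string
	dataOnly := false
	for _, p := range parts {
		if p == "" {
			dataOnly = true
			continue
		}
		if dataOnly {
			out = append(out, fmt.Sprintf(":%d", fds[p]))
			dataOnly = false
		} else {
			out = append(out, fmt.Sprintf("%d", fds[p]))
		}
	}
	fmt.Println(strings.Join(out, ":")) // 7::8
}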
vendor/github.com/containers/storage/drivers/overlay/overlay.go (generated, vendored) | 58

@@ -1447,7 +1447,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == ""

 	if len(optsList) == 0 {
-		optsList = strings.Split(d.options.mountOptions, ",")
+		if d.options.mountOptions != "" {
+			optsList = strings.Split(d.options.mountOptions, ",")
+		}
 	} else {
 		// If metacopy=on is present in d.options.mountOptions it must be present in the mount
 		// options otherwise the kernel refuses to follow the metacopy xattr.

@@ -1540,7 +1542,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		}
 	}()

-	maybeAddComposefsMount := func(lowerID string, i int) (string, error) {
+	maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
 		composefsBlob := d.getComposefsData(lowerID)
 		_, err = os.Stat(composefsBlob)
 		if err != nil {

@@ -1551,6 +1553,10 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		}
 		logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID)

+		if readWrite && i == 0 {
+			return "", fmt.Errorf("cannot mount a composefs layer as writeable")
+		}
+
 		dest := filepath.Join(composefsLayers, fmt.Sprintf("%d", i))
 		if err := os.MkdirAll(dest, 0o700); err != nil {
 			return "", err

@@ -1571,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO

 	diffDir := path.Join(workDirBase, "diff")

-	if dest, err := maybeAddComposefsMount(id, 0); err != nil {
+	if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
 		return "", err
 	} else if dest != "" {
 		diffDir = dest

@@ -1623,7 +1629,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 			return "", err
 		}
 		lowerID := filepath.Base(filepath.Dir(linkContent))
-		composefsMount, err := maybeAddComposefsMount(lowerID, i+1)
+		composefsMount, err := maybeAddComposefsMount(lowerID, i+1, readWrite)
 		if err != nil {
 			return "", err
 		}

@@ -1655,8 +1661,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		optsList = append(optsList, "metacopy=on", "redirect_dir=on")
 	}

-	absLowers = append(absLowers, composeFsLayers...)
-
 	if len(absLowers) == 0 {
 		absLowers = append(absLowers, path.Join(dir, "empty"))
 	}

@@ -1750,11 +1754,20 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		absLowers = newAbsDir
 	}

+	lowerDirs := strings.Join(absLowers, ":")
+	if len(composeFsLayers) > 0 {
+		composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::")
+		lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs
+	}
+	// absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent
+	// its usage.
+	absLowers = nil //nolint:ineffassign
+
 	var opts string
 	if readWrite {
-		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workdir)
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workdir)
 	} else {
-		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
+		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs)
 	}
 	if len(optsList) > 0 {
 		opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))

@@ -1798,9 +1811,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	if readWrite {
 		diffDir := path.Join(id, "diff")
 		workDir := path.Join(id, "work")
-		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), diffDir, workDir)
+		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDirs, diffDir, workDir)
 	} else {
-		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
+		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, lowerDirs)
 	}
 	if len(optsList) > 0 {
 		opts = strings.Join(append([]string{opts}, optsList...), ",")

@@ -2007,11 +2020,34 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
 	return os.RemoveAll(stagingDirectory)
 }

+func (d *Driver) supportsDataOnlyLayers() (bool, error) {
+	feature := "dataonly-layers"
+	overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature)
+	if err == nil {
+		if overlayCacheResult {
+			logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
+			return true, nil
+		}
+		logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
+		return false, errors.New(overlayCacheText)
+	}
+	supportsDataOnly, err := supportsDataOnlyLayers(d.home)
+	if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil {
+		return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2)
+	}
+	return supportsDataOnly, err
+}
+
 func (d *Driver) useComposeFs() bool {
 	if !composeFsSupported() || unshare.IsRootless() {
 		return false
 	}
-	return true
+	supportsDataOnlyLayers, err := d.supportsDataOnlyLayers()
+	if err != nil {
+		logrus.Debugf("Check for data-only layers failed with: %v", err)
+		return false
+	}
+	return supportsDataOnlyLayers
 }

 // ApplyDiff applies the changes in the new layer using the specified function
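The new d.supportsDataOnlyLayers follows the driver's usual feature-probe pattern: consult a cached result under runhome first, and only fall back to the real kernel probe (recording its outcome) on a cache miss. A rough standalone sketch of that pattern; checkCache and recordCache below are hypothetical stand-ins for the driver's cachedFeatureCheck/cachedFeatureRecord helpers, not their actual implementation:

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// checkCache reads a previously recorded "true"/"false" result, one file per feature.
func checkCache(runhome, feature string) (bool, error) {
	b, err := os.ReadFile(filepath.Join(runhome, feature))
	if err != nil {
		return false, err // cache miss
	}
	return strings.TrimSpace(string(b)) == "true", nil
}

// recordCache stores the probe outcome for the next caller.
func recordCache(runhome, feature string, supported bool) error {
	return os.WriteFile(filepath.Join(runhome, feature), []byte(fmt.Sprintf("%v", supported)), 0o644)
}

// supportsFeature checks the cache, then probes and records on a miss.
func supportsFeature(runhome, feature string, probe func() (bool, error)) (bool, error) {
	if cached, err := checkCache(runhome, feature); err == nil {
		if cached {
			return true, nil
		}
		return false, errors.New("feature previously detected as unsupported")
	}
	supported, err := probe()
	if err2 := recordCache(runhome, feature, supported); err2 != nil {
		return false, fmt.Errorf("recording feature support status: %w", err2)
	}
	return supported, err
}

func main() {
	dir, _ := os.MkdirTemp("", "feature-cache")
	defer os.RemoveAll(dir)

	ok, err := supportsFeature(dir, "dataonly-layers", func() (bool, error) { return true, nil })
	fmt.Println(ok, err) // true <nil> (probed)
	ok, err = supportsFeature(dir, "dataonly-layers", func() (bool, error) { return false, nil })
	fmt.Println(ok, err) // true <nil> (served from cache)
}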
vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go (generated, vendored) | 18

@@ -114,6 +114,16 @@ func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File
 // AttachLoopDevice attaches the given sparse file to the next
 // available loopback device. It returns an opened *os.File.
 func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
+	return attachLoopDevice(sparseName, false)
+}
+
+// AttachLoopDeviceRO attaches the given sparse file opened read-only to
+// the next available loopback device. It returns an opened *os.File.
+func AttachLoopDeviceRO(sparseName string) (loop *os.File, err error) {
+	return attachLoopDevice(sparseName, true)
+}
+
+func attachLoopDevice(sparseName string, readonly bool) (loop *os.File, err error) {
 	// Try to retrieve the next available loopback device via syscall.
 	// If it fails, we discard error and start looping for a
 	// loopback from index 0.

@@ -122,8 +132,14 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
 		logrus.Debugf("Error retrieving the next available loopback: %s", err)
 	}

+	var sparseFile *os.File
+
 	// OpenFile adds O_CLOEXEC
-	sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0o644)
+	if readonly {
+		sparseFile, err = os.OpenFile(sparseName, os.O_RDONLY, 0o644)
+	} else {
+		sparseFile, err = os.OpenFile(sparseName, os.O_RDWR, 0o644)
+	}
 	if err != nil {
 		logrus.Errorf("Opening sparse file: %v", err)
 		return nil, ErrAttachLoopbackDevice
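With the split into a shared attachLoopDevice helper, callers now choose between read-write and read-only attachment explicitly; mountComposefsBlob above switches to the read-only variant because the composefs blob is never written through the loop device. A minimal usage sketch (needs root and a real backing file; the path below is hypothetical):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/loopback"
)

func main() {
	// Attach the backing file read-only; writes through the resulting
	// /dev/loopN device will be rejected by the kernel.
	loop, err := loopback.AttachLoopDeviceRO("/var/lib/containers/blob.img")
	if err != nil {
		fmt.Println("attach failed:", err)
		return
	}
	defer loop.Close()

	fmt.Println("attached as", loop.Name()) // e.g. /dev/loop3
}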
vendor/github.com/containers/storage/pkg/system/rm.go (generated, vendored) | 6

@@ -28,7 +28,7 @@ func EnsureRemoveAll(dir string) error {

 	// track retries
 	exitOnErr := make(map[string]int)
-	maxRetry := 100
+	maxRetry := 1000

 	// Attempt a simple remove all first, this avoids the more expensive
 	// RecursiveUnmount call if not needed.

@@ -38,7 +38,7 @@ func EnsureRemoveAll(dir string) error {

 	// Attempt to unmount anything beneath this dir first
 	if err := mount.RecursiveUnmount(dir); err != nil {
-		logrus.Debugf("RecusiveUnmount on %s failed: %v", dir, err)
+		logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
 	}

 	for {

@@ -94,6 +94,6 @@ func EnsureRemoveAll(dir string) error {
 			return err
 		}
 		exitOnErr[pe.Path]++
-		time.Sleep(100 * time.Millisecond)
+		time.Sleep(10 * time.Millisecond)
 	}
 }
vendor/github.com/containers/storage/storage.conf
generated
vendored
5
vendor/github.com/containers/storage/storage.conf
generated
vendored
@ -27,9 +27,8 @@ runroot = "/run/containers/storage"
|
||||
# restorecon -R -v /NEWSTORAGEPATH
|
||||
graphroot = "/var/lib/containers/storage"
|
||||
|
||||
# Optional value for image storage location
|
||||
# If set, it must be different than graphroot.
|
||||
|
||||
# Optional alternate location of image store if a location separate from the
|
||||
# container store is required. If set, it must be different than graphroot.
|
||||
# imagestore = ""
|
||||
|
||||
|
||||
|
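For context, the reworded comment describes the optional split image store. A host that wants read-only image layers on a separate volume might set it like this; the paths are only an example, not part of the patch:

# /etc/containers/storage.conf (excerpt)
[storage]
driver = "overlay"
runroot = "/run/containers/storage"
graphroot = "/var/lib/containers/storage"
# Alternate location for the image store; must be different from graphroot.
imagestore = "/mnt/containers/image-store"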
vendor/github.com/containers/storage/store.go (generated, vendored) | 25

@@ -2666,34 +2666,23 @@ func (s *store) DeleteContainer(id string) error {
 		}

 		var wg multierror.Group
-		wg.Go(func() error { return s.containerStore.Delete(id) })

 		middleDir := s.graphDriverName + "-containers"

 		wg.Go(func() error {
 			gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
-			// attempt a simple rm -rf first
-			if err := os.RemoveAll(gcpath); err == nil {
-				return nil
-			}
-			// and if it fails get to the more complicated cleanup
 			return system.EnsureRemoveAll(gcpath)
 		})

 		wg.Go(func() error {
 			rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
-			// attempt a simple rm -rf first
-			if err := os.RemoveAll(rcpath); err == nil {
-				return nil
-			}
-			// and if it fails get to the more complicated cleanup
 			return system.EnsureRemoveAll(rcpath)
 		})

 		if multierr := wg.Wait(); multierr != nil {
 			return multierr.ErrorOrNil()
 		}
-		return nil
+		return s.containerStore.Delete(id)
 	})
 }

@@ -3418,16 +3407,16 @@ func (s *store) Shutdown(force bool) ([]string, error) {
 			err = fmt.Errorf("a layer is mounted: %w", ErrLayerUsedByContainer)
 		}
 		if err == nil {
-			err = s.graphDriver.Cleanup()
 			// We don’t retain the lastWrite value, and treat this update as if someone else did the .Cleanup(),
 			// so that we reload after a .Shutdown() the same way other processes would.
 			// Shutdown() is basically an error path, so reliability is more important than performance.
 			if _, err2 := s.graphLock.RecordWrite(); err2 != nil {
-				if err == nil {
-					err = err2
-				} else {
-					err = fmt.Errorf("(graphLock.RecordWrite failed: %v) %w", err2, err)
-				}
+				err = fmt.Errorf("(graphLock.RecordWrite failed: %w", err2)
 			}
+			// Do the Cleanup() only after we are sure that the change was recorded with RecordWrite(), so that
+			// the next user picks it.
+			if err == nil {
+				err = s.graphDriver.Cleanup()
+			}
 		}
 		return mounted, err
vendor/github.com/containers/storage/types/options.go (generated, vendored) | 5

@@ -220,9 +220,8 @@ type StoreOptions struct {
 	// GraphRoot is the filesystem path under which we will store the
 	// contents of layers, images, and containers.
 	GraphRoot string `json:"root,omitempty"`
-	// Image Store is the location of image store which is seperated from the
-	// container store. Usually this is not recommended unless users wants
-	// seperate store for image and containers.
+	// Image Store is the alternate location of image store if a location
+	// separate from the container store is required.
 	ImageStore string `json:"imagestore,omitempty"`
 	// RootlessStoragePath is the storage path for rootless users
 	// default $HOME/.local/share/containers/storage