Create quota before _data dir for volumes

This resolves an ordering issue that prevented quotas from being
applied. XFS quotas are applied recursively, but only for
subdirectories created after the quota is applied; if we create
`_data` before the quota, and then use `_data` for all data in
the volume, the quota will never be used by the volume.

Also, add a test that volume quotas are working as designed using
an XFS formatted loop device in the system tests. This should
prevent any further regressions on basic quota functionality,
such as quotas being shared between volumes.

Fixes #25368
Fixes https://issues.redhat.com/browse/RHEL-82198
Fixes https://issues.redhat.com/browse/RHEL-82199

Signed-off-by: Matt Heon <mheon@redhat.com>
This commit is contained in:
Matt Heon
2025-02-27 10:38:53 -05:00
committed by openshift-cherrypick-robot
parent 7b0a999baf
commit bff9da4e3a
4 changed files with 96 additions and 11 deletions

View File

@ -168,16 +168,11 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti
if err := idtools.SafeChown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", volPathRoot, volume.config.UID, volume.config.GID, err)
}
fullVolPath := filepath.Join(volPathRoot, "_data")
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
}
if err := idtools.SafeChown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
}
if err := LabelVolumePath(fullVolPath, volume.config.MountLabel); err != nil {
return nil, err
}
// Setting quotas must happen *before* the _data inner directory
// is created, as the volume must be empty for the quota to be
// properly applied - if any subdirectories exist before the
// quota is applied, the quota will not be applied to them.
switch {
case volume.config.DisableQuota:
if volume.config.Size > 0 || volume.config.Inodes > 0 {
@ -206,10 +201,20 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti
// subdirectory - so the quota ID assignment logic works
// properly.
if err := q.SetQuota(volPathRoot, quota); err != nil {
return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err)
return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, volPathRoot, err)
}
}
fullVolPath := filepath.Join(volPathRoot, "_data")
if err := os.MkdirAll(fullVolPath, 0755); err != nil {
return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
}
if err := idtools.SafeChown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
}
if err := LabelVolumePath(fullVolPath, volume.config.MountLabel); err != nil {
return nil, err
}
volume.config.MountPoint = fullVolPath
}

View File

@ -151,6 +151,7 @@ Requires: openssl
Requires: socat
Requires: buildah
Requires: gnupg
Requires: xfsprogs
%description tests
%{summary}

View File

@ -0,0 +1,78 @@
#!/usr/bin/env bats -*- bats -*-
#
# podman volume XFS quota tests
#
# bats file_tags=distro-integration
#
load helpers
# Per-test setup: run the standard suite setup, then remove all volumes so
# each test starts with a clean volume store. The '?' tells run_podman to
# accept any exit status — volume rm -a fails harmlessly when no volumes exist.
function setup() {
    basic_setup
    run_podman '?' volume rm -a
}
# Per-test teardown: tear down containers/volumes, then undo the XFS loop
# device scaffolding built by the quota test, in strict reverse order of
# creation: unmount the volume path, remove it, detach the loop device(s)
# backed by our image file, and finally delete the image file itself.
# All podman cleanup uses '?' (any exit status) so teardown is best-effort.
function teardown() {
    run_podman '?' rm -af -t 0
    run_podman '?' volume rm -a

    loop=$PODMAN_TMPDIR/disk.img
    vol_path=$PODMAN_TMPDIR/volpath
    # Only clean up if the test actually created the backing image file.
    if [ -f ${loop} ]; then
        if [ -d ${vol_path} ]; then
            # Must unmount before removing the directory, or rm -rf would
            # delete live volume data on the mounted filesystem.
            if mountpoint ${vol_path}; then
                umount "$vol_path"
            fi
            rm -rf "$vol_path"
        fi

        # Detach every loop device whose backing file is our image.
        # losetup -l output: BACK-FILE NAME, one pair per line.
        while read path dev; do
            if [[ "$path" == "$loop" ]]; then
                losetup -d $dev
            fi
        done < <(losetup -l --noheadings --output BACK-FILE,NAME)

        rm -f $loop
    fi

    basic_teardown
}
# Regression test for volume size quotas (#25368): build an XFS filesystem on
# a loop device, mount it with project quotas (pquota) as podman's volume
# path, and verify that each volume's size quota is enforced independently —
# i.e. quotas are not shared between volumes.
@test "podman volumes with XFS quotas" {
    # XFS project quotas require root and a local --volumepath override.
    skip_if_rootless "Quotas are only possible with root"
    skip_if_remote "Requires --root flag, not possible w/ remote"

    # Minimum XFS filesystem size is 300mb
    loop=$PODMAN_TMPDIR/disk.img
    fallocate -l 300m ${loop}
    # losetup -f --show prints the device it attached (e.g. /dev/loop0).
    run -0 losetup -f --show $loop
    loop_dev="$output"
    mkfs.xfs $loop_dev

    # Isolated storage/run dirs so this test's podman state is self-contained.
    safe_opts=$(podman_isolation_opts ${PODMAN_TMPDIR})
    vol_path=$PODMAN_TMPDIR/volpath
    mkdir -p $vol_path
    safe_opts="$safe_opts --volumepath=$vol_path"
    # pquota enables XFS project quotas; without it SetQuota cannot work.
    mount -t xfs -o defaults,pquota $loop_dev $vol_path

    # Two volumes with different size limits; if quotas leaked between
    # volumes, the checks below would not differ per volume.
    vol_one="testvol1"
    run_podman $safe_opts volume create --opt o=size=2m $vol_one
    vol_two="testvol2"
    run_podman $safe_opts volume create --opt o=size=4m $vol_two

    ctrname="testctr"
    run_podman $safe_opts run -d --name=$ctrname -i -v $vol_one:/one -v $vol_two:/two $IMAGE top

    # vol_one (2m): first 1MB write fits; the next 1MB must hit the quota
    # (expected exit status 1 from dd, with ENOSPC).
    run_podman $safe_opts exec $ctrname dd if=/dev/zero of=/one/oneMB bs=1M count=1
    run_podman 1 $safe_opts exec $ctrname dd if=/dev/zero of=/one/twoMB bs=1M count=1
    assert "$output" =~ "No space left on device"
    # vol_two (4m): 3MB fits, proving it did not inherit vol_one's 2m limit;
    # one more 1MB write must then fail against its own 4m quota.
    run_podman $safe_opts exec $ctrname dd if=/dev/zero of=/two/threeMB bs=1M count=3
    run_podman 1 $safe_opts exec $ctrname dd if=/dev/zero of=/two/oneMB bs=1M count=1
    assert "$output" =~ "No space left on device"

    run_podman $safe_opts rm -f -t 0 $ctrname
    run_podman $safe_opts volume rm -af
}
# vim: filetype=sh

View File

@ -92,6 +92,7 @@ Requirements
- socat
- buildah
- gnupg
- xfsprogs
Further Details