mirror of
https://github.com/containers/podman.git
synced 2025-12-09 15:19:35 +08:00
CI: test overlay and vfs

We're only testing vfs in CI. That's bad. #18822 tried to remedy that,
but that only worked on system tests, not e2e.

Here we introduce CI_DESIRED_STORAGE, to be set in .cirrus.yml in the
same vein as all the other CI_DESIRED_X. Since it's 2023, we default to
overlay, testing vfs only in priorfedora.

Fixes required:
- e2e tests:
  - in cleanup, umount ROOT/overlay to avoid leaking mounts
- system tests:
  - fix a few badly-written tests that assumed/hardcoded overlay
  - buildx test: add weird exception to device-number test
  - mount tests: add special case code for vfs
  - unprivileged test: disable one section that is N/A on vfs

Signed-off-by: Ed Santiago <santiago@redhat.com>
This commit is contained in:
@@ -33,6 +33,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
. "github.com/onsi/gomega/gexec"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -993,7 +994,17 @@ func rmAll(podmanBin string, path string) {
|
||||
GinkgoWriter.Printf("%v\n", err)
|
||||
}
|
||||
} else {
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
// When using overlay as root, podman leaves a stray mount behind.
|
||||
// This leak causes remote tests to take a loooooong time, which
|
||||
// then causes Cirrus to time out. Unmount that stray.
|
||||
overlayPath := path + "/root/overlay"
|
||||
if _, err := os.Stat(overlayPath); err == nil {
|
||||
if err = unix.Unmount(overlayPath, unix.MNT_DETACH); err != nil {
|
||||
GinkgoWriter.Printf("Error unmounting %s: %v\n", overlayPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = os.RemoveAll(path); err != nil {
|
||||
GinkgoWriter.Printf("%q\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package integration
|
||||
|
||||
var (
|
||||
STORAGE_FS = "vfs" //nolint:revive,stylecheck
|
||||
STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_FS = "vfs" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
|
||||
STORAGE_FS = "overlay" //nolint:revive,stylecheck
|
||||
STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_FS = "overlay" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck
|
||||
CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, NGINX_IMAGE, REDIS_IMAGE, REGISTRY_IMAGE, INFRA_IMAGE, CITEST_IMAGE, HEALTHCHECK_IMAGE, SYSTEMD_IMAGE, fedoraToolbox} //nolint:revive,stylecheck
|
||||
NGINX_IMAGE = "quay.io/libpod/alpine_nginx:latest" //nolint:revive,stylecheck
|
||||
BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package integration
|
||||
|
||||
var (
|
||||
STORAGE_FS = "vfs" //nolint:revive,stylecheck
|
||||
STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_FS = "vfs" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_OPTIONS = "--storage-driver vfs" //nolint:revive,stylecheck
|
||||
STORAGE_FS = "overlay" //nolint:revive,stylecheck
|
||||
STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_FS = "overlay" //nolint:revive,stylecheck
|
||||
ROOTLESS_STORAGE_OPTIONS = "--storage-driver overlay" //nolint:revive,stylecheck
|
||||
CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, NGINX_IMAGE, REDIS_IMAGE, REGISTRY_IMAGE, INFRA_IMAGE, CITEST_IMAGE, HEALTHCHECK_IMAGE, SYSTEMD_IMAGE, fedoraToolbox} //nolint:revive,stylecheck
|
||||
NGINX_IMAGE = "quay.io/lsm5/alpine_nginx-aarch64:latest" //nolint:revive,stylecheck
|
||||
BB_GLIBC = "docker.io/library/busybox:glibc" //nolint:revive,stylecheck
|
||||
|
||||
@@ -3,8 +3,8 @@ package integration
|
||||
var (
|
||||
STORAGE_FS = "overlay"
|
||||
STORAGE_OPTIONS = "--storage-driver overlay"
|
||||
ROOTLESS_STORAGE_FS = "vfs"
|
||||
ROOTLESS_STORAGE_OPTIONS = "--storage-driver vfs"
|
||||
ROOTLESS_STORAGE_FS = "overlay"
|
||||
ROOTLESS_STORAGE_OPTIONS = "--storage-driver overlay"
|
||||
CACHE_IMAGES = []string{ALPINE, BB, fedoraMinimal, NGINX_IMAGE, REDIS_IMAGE, INFRA_IMAGE, CITEST_IMAGE}
|
||||
NGINX_IMAGE = "quay.io/libpod/alpine_nginx-ppc64le:latest"
|
||||
BB_GLIBC = "docker.io/ppc64le/busybox:glibc"
|
||||
|
||||
@@ -216,6 +216,21 @@ var _ = Describe("Podman Info", func() {
|
||||
Expect(session.ErrorToString()).To(Equal("Error: unsupported database backend: \"bogus\""))
|
||||
})
|
||||
|
||||
// Verify that the storage driver in use matches what CI requested.
// CI_DESIRED_STORAGE is defined in .cirrus.yml; outside of Cirrus it
// may legitimately be unset, in which case the check is skipped.
It("Podman info: check desired storage driver", func() {
	// defined in .cirrus.yml
	want := os.Getenv("CI_DESIRED_STORAGE")
	if want == "" {
		// Unset is only acceptable when we're not running under Cirrus;
		// inside Cirrus it means .cirrus.yml is misconfigured.
		if os.Getenv("CIRRUS_CI") != "" {
			Fail("CIRRUS_CI is set, but CI_DESIRED_STORAGE is not! See #20161")
		}
		Skip("CI_DESIRED_STORAGE is not set--this is OK because we're not running under Cirrus")
	}

	// Ask podman itself which graph driver is active, and compare.
	session := podmanTest.Podman([]string{"info", "--format", "{{.Store.GraphDriverName}}"})
	session.WaitWithDefaultTimeout()
	Expect(session).To(ExitCleanly())
	Expect(session.OutputToString()).To(Equal(want), ".Store.GraphDriverName from podman info")
})
|
||||
|
||||
It("Podman info: check lock count", Serial, func() {
|
||||
// This should not run on architectures and OSes that use the file locks backend.
|
||||
// Which, for now, is Linux + RISCV and FreeBSD, neither of which are in CI - so
|
||||
|
||||
@@ -54,6 +54,7 @@ function setup() {
|
||||
'Cgroups:{{.Host.CgroupsVersion}}+{{.Host.CgroupManager}}'
|
||||
'Net:{{.Host.NetworkBackend}}'
|
||||
'DB:{{.Host.DatabaseBackend}}'
|
||||
'Store:{{.Store.GraphDriverName}}'
|
||||
)
|
||||
run_podman info --format "$(IFS='/' echo ${want[@]})"
|
||||
echo "# $output" >&3
|
||||
|
||||
@@ -117,6 +117,22 @@ host.slirp4netns.executable | $expr_path
|
||||
is "$db_backend" "$CI_DESIRED_DATABASE" "CI_DESIRED_DATABASE (from .cirrus.yml)"
|
||||
}
|
||||
|
||||
@test "podman info - confirm desired storage driver" {
|
||||
if [[ -z "$CI_DESIRED_STORAGE" ]]; then
|
||||
# When running in Cirrus, CI_DESIRED_STORAGE *must* be defined
|
||||
# in .cirrus.yml so we can double-check that all CI VMs are
|
||||
# using overlay or vfs as desired.
|
||||
if [[ -n "$CIRRUS_CI" ]]; then
|
||||
die "CIRRUS_CI is set, but CI_DESIRED_STORAGE is not! See #20161"
|
||||
fi
|
||||
|
||||
# Not running under Cirrus (e.g., gating tests, or dev laptop).
|
||||
# Totally OK to skip this test.
|
||||
skip "CI_DESIRED_STORAGE is unset--OK, because we're not in Cirrus"
|
||||
fi
|
||||
|
||||
is "$(podman_storage_driver)" "$CI_DESIRED_STORAGE" "podman storage driver is not CI_DESIRED_STORAGE (from .cirrus.yml)"
|
||||
}
|
||||
|
||||
# 2021-04-06 discussed in watercooler: RHEL must never use crun, even if
|
||||
# using cgroups v2.
|
||||
@@ -163,7 +179,7 @@ host.slirp4netns.executable | $expr_path
|
||||
@test "podman --root PATH info - basic output" {
|
||||
if ! is_remote; then
|
||||
run_podman --storage-driver=vfs --root ${PODMAN_TMPDIR}/nothing-here-move-along info --format '{{ .Store.GraphOptions }}'
|
||||
is "$output" "map\[\]" "'podman --root should reset Graphoptions to []"
|
||||
is "$output" "map\[\]" "'podman --root should reset GraphOptions to []"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
@@ -337,28 +337,34 @@ Deleted: $pauseID"
|
||||
@test "podman pull image with additional store" {
|
||||
skip_if_remote "only works on local"
|
||||
|
||||
# overlay or vfs
|
||||
local storagedriver="$(podman_storage_driver)"
|
||||
|
||||
local imstore=$PODMAN_TMPDIR/imagestore
|
||||
local sconf=$PODMAN_TMPDIR/storage.conf
|
||||
cat >$sconf <<EOF
|
||||
[storage]
|
||||
driver="overlay"
|
||||
driver="$storagedriver"
|
||||
|
||||
[storage.options]
|
||||
additionalimagestores = [ "$imstore/root" ]
|
||||
EOF
|
||||
|
||||
skopeo copy containers-storage:$IMAGE \
|
||||
containers-storage:\[overlay@$imstore/root+$imstore/runroot\]$IMAGE
|
||||
containers-storage:\[${storagedriver}@${imstore}/root+${imstore}/runroot\]$IMAGE
|
||||
|
||||
# IMPORTANT! Use -2/-1 indices, not 0/1, because $SYSTEMD_IMAGE may be
|
||||
# present in store, and if it is it will precede $IMAGE.
|
||||
CONTAINERS_STORAGE_CONF=$sconf run_podman images -a -n --format "{{.Repository}}:{{.Tag}} {{.ReadOnly}}"
|
||||
is "${lines[0]}" "$IMAGE false" "image from readonly store"
|
||||
is "${lines[1]}" "$IMAGE true" "image from readwrite store"
|
||||
assert "${#lines[*]}" -ge 2 "at least 2 lines from 'podman images'"
|
||||
is "${lines[-2]}" "$IMAGE false" "image from readonly store"
|
||||
is "${lines[-1]}" "$IMAGE true" "image from readwrite store"
|
||||
|
||||
CONTAINERS_STORAGE_CONF=$sconf run_podman images -a -n --format "{{.Id}}"
|
||||
id=${lines[0]}
|
||||
id=${lines[-1]}
|
||||
|
||||
CONTAINERS_STORAGE_CONF=$sconf run_podman pull -q $IMAGE
|
||||
is "$output" "$id" "Should only print one line"
|
||||
is "$output" "$id" "pull -q $IMAGE, using storage.conf"
|
||||
|
||||
run_podman --root $imstore/root rmi --all
|
||||
}
|
||||
|
||||
@@ -29,8 +29,18 @@ load helpers
|
||||
|
||||
# umount, and make sure files are gone
|
||||
run_podman umount $c_name
|
||||
if [ -e "$mount_path/$f_path" ]; then
|
||||
die "Mounted file exists even after umount: $mount_path/$f_path"
|
||||
if [[ -e "$mount_path/$f_path" ]]; then
|
||||
# With vfs, umount is a NOP: the path always exists as long as the
|
||||
# container exists. But with overlay, umount should truly remove.
|
||||
if [[ "$(podman_storage_driver)" != "vfs" ]]; then
|
||||
die "Mounted file exists even after umount: $mount_path/$f_path"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Remove the container. Now even with vfs the file must be gone.
|
||||
run_podman rm $c_name
|
||||
if [[ -e "$mount_path/$f_path" ]]; then
|
||||
die "Mounted file exists even after container rm: $mount_path/$f_path"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -242,7 +252,10 @@ EOF
|
||||
# umount, and make sure files are gone
|
||||
run_podman umount $external_cid
|
||||
if [ -d "$mount_path" ]; then
|
||||
die "'podman umount' did not umount"
|
||||
# Under VFS, mountpoint always exists even despite umount
|
||||
if [[ "$(podman_storage_driver)" != "vfs" ]]; then
|
||||
die "'podman umount' did not umount $mount_path"
|
||||
fi
|
||||
fi
|
||||
buildah rm $external_cid
|
||||
}
|
||||
|
||||
@@ -77,9 +77,14 @@ EOF
|
||||
assert "${lines[0]}" = "${lines[5]}" "devnum( / ) = devnum( /[etc )"
|
||||
assert "${lines[0]}" = "${lines[7]}" "devnum( / ) = devnum( /etc )"
|
||||
assert "${lines[6]}" = "${lines[8]}" "devnum( /[etc/foo, ) = devnum( /etc/bar] )"
|
||||
# ...then, each volume should be different
|
||||
assert "${lines[0]}" != "${lines[3]}" "devnum( / ) != devnum( volume0 )"
|
||||
assert "${lines[0]}" != "${lines[6]}" "devnum( / ) != devnum( volume1 )"
|
||||
# ...then, check volumes; these differ between overlay and vfs.
|
||||
# Under Overlay (usual case), these will be different. On VFS, they're the same.
|
||||
local op="!="
|
||||
if [[ "$(podman_storage_driver)" == "vfs" ]]; then
|
||||
op="="
|
||||
fi
|
||||
assert "${lines[0]}" $op "${lines[3]}" "devnum( / ) $op devnum( volume0 )"
|
||||
assert "${lines[0]}" $op "${lines[6]}" "devnum( / ) $op devnum( volume1 )"
|
||||
|
||||
# FIXME: is this expected? I thought /a/b/c and /[etc/foo, would differ
|
||||
assert "${lines[3]}" = "${lines[6]}" "devnum( volume0 ) = devnum( volume1 )"
|
||||
|
||||
@@ -50,6 +50,15 @@ if chmod +w "$path"; then
|
||||
die "Able to chmod $path"
|
||||
fi
|
||||
|
||||
EOF
|
||||
|
||||
# Under overlay, and presumably any future storage drivers, we
|
||||
# should never be able to read or write $path.
|
||||
#
|
||||
# Under VFS, though, if podman has *ever* been run with --uidmap,
|
||||
# all images become world-accessible. So don't bother checking.
|
||||
if [[ $(podman_storage_driver) != "vfs" ]]; then
|
||||
cat >>$test_script <<EOF
|
||||
if [ -d "$path" ]; then
|
||||
if ls "$path" >/dev/null; then
|
||||
die "Able to run 'ls $path' without error"
|
||||
@@ -67,8 +76,9 @@ else
|
||||
fi
|
||||
fi
|
||||
|
||||
exit 0
|
||||
EOF
|
||||
fi
|
||||
echo "exit 0" >>$test_script
|
||||
chmod 755 $PODMAN_TMPDIR $test_script
|
||||
|
||||
# get podman image and container storage directories
|
||||
|
||||
@@ -553,6 +553,18 @@ function podman_runtime() {
|
||||
basename "${output:-[null]}"
|
||||
}
|
||||
|
||||
# podman_storage_driver: echo the active storage driver name.
#
# Queries 'podman info' for .Store.GraphDriverName and echoes it.
# Only 'overlay' and 'vfs' are recognized; anything else aborts,
# so that uses of this helper get reviewed when a new driver appears.
function podman_storage_driver() {
    run_podman info --format '{{.Store.GraphDriverName}}' >/dev/null
    # Should there ever be a new driver
    if [[ "$output" != "overlay" ]] && [[ "$output" != "vfs" ]]; then
        die "Unknown storage driver '$output'; if this is a new driver, please review uses of this function in tests."
    fi
    echo "$output"
}
|
||||
|
||||
# rhbz#1895105: rootless journald is unavailable except to users in
|
||||
# certain magic groups; which our testuser account does not belong to
|
||||
# (intentional: that is the RHEL default, so that's the setup we test).
|
||||
|
||||
Reference in New Issue
Block a user