Files
podman/test/apiv2/45-system.at
Matthew Heon 07a8eb8295 Ensure that the DF endpoint updated volume refcount
The field was already exposed in the `system df` output
so this just required a bit of plumbing and testing.

As part of this, fix `podman system df` volume in-use logic.
Previously, volumes were only considered to be in use if the
container using them was running. This does not match Docker's
behavior, where a volume is considered in use as long as a
container exists that uses the volume, even if said container is
not running.

Fixes #15720

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
2022-09-12 16:43:24 -04:00

90 lines
2.9 KiB
Bash

# -*- sh -*-
#
# system related tests
#

## ensure system is clean before we start measuring anything
t POST 'libpod/system/prune?volumes=true&all=true' params='' 200

## podman system df -- both compat (Docker) and libpod endpoints report empty
t GET system/df 200 '{"LayersSize":0,"Images":[],"Containers":[],"Volumes":[],"BuildCache":[],"BuilderSize":0}'
t GET libpod/system/df 200 '{"Images":[],"Containers":[],"Volumes":[]}'

# Create volume. We expect df to report this volume next invocation of system/df
t GET libpod/info 200
volumepath=$(jq -r ".store.volumePath" <<<"$output")
t POST libpod/volumes/create name=foo1 201 \
  .Name=foo1 \
  .Driver=local \
  .Mountpoint=$volumepath/foo1/_data \
  .CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* \
  .Labels={} \
  .Options={}

t GET system/df 200 '.Volumes[0].Name=foo1'
t GET libpod/system/df 200 '.Volumes[0].VolumeName=foo1'

# Verify that no containers reference the volume
t GET system/df 200 '.Volumes[0].UsageData.RefCount=0'

# Make a container using the volume
podman pull $IMAGE &>/dev/null
t POST containers/create Image=$IMAGE Volumes='{"/test":{}}' HostConfig='{"Binds":["foo1:/test"]}' 201 \
  .Id~[0-9a-f]\\{64\\}
cid=$(jq -r '.Id' <<<"$output")

# Verify that one container references the volume.
# NOTE: the container is merely created, not running -- a volume counts as
# in-use as long as any container references it (matches Docker behavior).
t GET system/df 200 '.Volumes[0].UsageData.RefCount=1'

# Remove the container
t DELETE containers/$cid?v=true 204

# Verify that no containers reference the volume
t GET system/df 200 '.Volumes[0].UsageData.RefCount=0'

# Create two more volumes to test pruning
t POST libpod/volumes/create \
  Name=foo2 \
  Label='{"testlabel1":""}' \
  Options='{"type":"tmpfs","o":"nodev,noexec"}' \
  201 \
  .Name=foo2 \
  .Driver=local \
  .Mountpoint=$volumepath/foo2/_data \
  .CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* \
  .Labels.testlabel1="" \
  .Options.o=nodev,noexec

t POST libpod/volumes/create \
  Name=foo3 \
  Label='{"testlabel1":"testonly"}' \
  Options='{"type":"tmpfs","o":"nodev,noexec"}' \
  201 \
  .Name=foo3 \
  .Driver=local \
  .Mountpoint=$volumepath/foo3/_data \
  .CreatedAt~[0-9]\\{4\\}-[0-9]\\{2\\}-[0-9]\\{2\\}.* \
  .Labels.testlabel1=testonly \
  .Options.o=nodev,noexec

t GET system/df 200 '.Volumes | length=3'
t GET libpod/system/df 200 '.Volumes | length=3'

# Prune volumes with a non-matching label filter
t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1=idontmatch"]}' params='' 200

# nothing should have been pruned
t GET system/df 200 '.Volumes | length=3'
t GET libpod/system/df 200 '.Volumes | length=3'

# only foo3 should be pruned because of filter
t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1=testonly"]}' params='' 200 .VolumePruneReports[0].Id=foo3
# only foo2 should be pruned because of filter
t POST 'libpod/system/prune?volumes=true&filters={"label":["testlabel1"]}' params='' 200 .VolumePruneReports[0].Id=foo2
# foo1, the last remaining volume should be pruned without any filters applied
t POST 'libpod/system/prune?volumes=true' params='' 200 .VolumePruneReports[0].Id=foo1
# TODO add other system prune tests for pods / images