Mirror of https://github.com/containers/podman.git
System tests: teardown: clean up volumes
One of the system tests was creating a volume and not cleaning up after itself. Fix that: do cleanup in the test itself. And, add a 'volume rm -af' to global teardown() to leave things clean for the next tests.

Also, OOPS! Correct some instances of 'podman' in two system tests to 'run_podman'. And remove an unused (misleading) variable.

And, one more: in auto-update test, unit file, use $PODMAN, not /usr/bin/podman.

UGH! Yet one more: found/fixed a 'run<space>podman'.

Signed-off-by: Ed Santiago <santiago@redhat.com>
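For context on why the bare 'podman' and 'run podman' invocations below are bugs: the suite's run_podman helper runs $PODMAN, the binary under test, and fails the test when the exit status is not the expected one. A minimal sketch of the idea, with simplified and assumed details (the real helper also handles timeouts, logging, and more):

# Sketch only -- not the suite's actual helper.
function run_podman() {
    local expected_rc=0
    case "$1" in
        [0-9]*)  expected_rc=$1; shift ;;   # explicit expected exit code
        '?')     expected_rc=-1; shift ;;   # '?' = any exit status is fine
    esac

    # $PODMAN points at the binary under test (often a locally built
    # ./bin/podman), not whatever 'podman' happens to be first on $PATH.
    run $PODMAN "$@"

    if [[ $expected_rc -ge 0 ]] && [[ $status -ne $expected_rc ]]; then
        die "exit status is $status; expected $expected_rc"
    fi
}

By contrast, 'run podman ...' (bats' built-in run plus whatever podman is on $PATH) neither targets $PODMAN nor checks the exit status, and a bare 'podman ...' bypasses $PODMAN and never populates $output for later assertions.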
@@ -44,7 +44,7 @@ function _require_crun() {
     run_podman pod create --uidmap 0:200000:5000 --name=$random_pod_name
     run_podman pod start $random_pod_name
     run_podman pod inspect --format '{{.InfraContainerID}}' $random_pod_name
-    run podman inspect --format '{{.HostConfig.IDMappings.UIDMap}}' $output
+    run_podman inspect --format '{{.HostConfig.IDMappings.UIDMap}}' $output
     is "$output" ".*0:200000:5000" "UID Map Successful"

     # Remove the pod and the pause image
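For readers unfamiliar with the flag exercised above: --uidmap takes CONTAINER_UID:HOST_UID:LENGTH triples, so 0:200000:5000 maps container UIDs 0-4999 onto host UIDs 200000-204999, which is exactly the substring the test's regex looks for in .HostConfig.IDMappings.UIDMap. A hypothetical standalone illustration (pod name and cleanup are made up, not from the test):

podman pod create --uidmap 0:200000:5000 --name demo_pod
podman pod start demo_pod
infra=$(podman pod inspect --format '{{.InfraContainerID}}' demo_pod)
podman inspect --format '{{.HostConfig.IDMappings.UIDMap}}' $infra
# prints a mapping containing "0:200000:5000"
podman pod rm -f demo_pod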
@@ -69,7 +69,7 @@ Log[-1].Output | \"Uh-oh on stdout!\\\nUh-oh on stderr!\"
 "

     # now the on-failure should kick in and kill the container
-    podman wait healthcheck_c
+    run_podman wait healthcheck_c

     # Clean up
     run_podman rm -t 0 -f healthcheck_c
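The switch from a bare 'podman wait' to 'run_podman wait' matters twice over here: it runs $PODMAN rather than whatever podman is on $PATH, and it captures the command's stdout (the container's exit code) in $output where a later assertion can use it. A hypothetical follow-up check, not taken from the actual test:

run_podman wait healthcheck_c
# 'wait' prints the container's exit code; 137 (128+9, SIGKILL) is an
# assumed value for a container killed by the on-failure action.
is "$output" "137" "container was killed by the healthcheck on-failure action"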
@@ -241,9 +241,8 @@ function _confirm_update() {
 @test "podman auto-update - label io.containers.autoupdate=local" {
     generate_service localtest local
     image=quay.io/libpod/localtest:latest
-    podman commit --change CMD=/bin/bash $cname $image
-    podman image inspect --format "{{.ID}}" $image
-    imageID="$output"
+    run_podman commit --change CMD=/bin/bash $cname $image
+    run_podman image inspect --format "{{.ID}}" $image

     _wait_service_ready container-$cname.service
     run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
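The dropped imageID line is the 'unused (misleading) variable' from the commit message: because the preceding 'podman image inspect' did not go through run_podman (or bats' run), it never set $output, so the assignment would have captured leftover output from some earlier command. Roughly:

podman image inspect --format "{{.ID}}" $image   # prints the ID, but leaves $output untouched
imageID="$output"                                # grabs stale output, not the image ID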
@@ -393,7 +392,7 @@ After=network-online.target

 [Service]
 Type=oneshot
-ExecStart=/usr/bin/podman auto-update
+ExecStart=$PODMAN auto-update
 Environment="http_proxy=${http_proxy}"
 Environment="HTTP_PROXY=${HTTP_PROXY}"
 Environment="https_proxy=${https_proxy}"
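The ExecStart fix only works because the unit file is written out by the test with shell expansion in effect (the ${http_proxy} lines above expand the same way), so $PODMAN resolves to the binary under test at generation time instead of a hardcoded /usr/bin/podman that may not be the build being tested. A minimal sketch of that pattern; the unit name, path, and surrounding commands are assumptions, not the test's actual code:

# Because the heredoc delimiter is unquoted, $PODMAN and ${http_proxy}
# are expanded by the shell when the file is written.
UNIT_DIR=${UNIT_DIR:-$HOME/.config/systemd/user}      # assumed location
cat > $UNIT_DIR/podman-auto-update-demo.service <<EOF
[Unit]
Description=Podman auto-update demo
After=network-online.target

[Service]
Type=oneshot
ExecStart=$PODMAN auto-update
Environment="http_proxy=${http_proxy}"
EOF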
@@ -97,6 +97,7 @@ EOF
     run_podman rm c_mount

     run_podman rm c_uidmap c_uidmap_v
+    run_podman volume rm foo
 }

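This hunk is the 'do cleanup in the test itself' half of the fix: the test creates a named volume 'foo' for its mount/uidmap checks and now removes it before returning instead of leaking it to later tests. The general shape of the pattern, with the create/run lines here being assumed placeholders rather than the test's actual commands:

run_podman volume create foo
run_podman run --name c_uidmap_v -v foo:/data $IMAGE true   # assumed usage of the volume
run_podman rm c_uidmap_v
run_podman volume rm foo    # remove what the test created, inside the test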
@@ -95,6 +95,7 @@ function basic_teardown() {
     run_podman '?' pod rm -t 0 --all --force --ignore
     run_podman '?' rm -t 0 --all --force --ignore
     run_podman '?' network prune --force
+    run_podman '?' volume rm -a -f

     command rm -rf $PODMAN_TMPDIR
 }
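The new global-teardown line follows the convention of its neighbors: the leading '?' tells run_podman to accept any exit status, so teardown does not fail a run just because there were no volumes left to remove. As a standalone, hedged sketch:

# '?' = any exit status is acceptable; a plain run_podman would insist on rc=0.
run_podman '?' volume rm -a -f                 # best-effort: remove all volumes
run_podman volume ls --format '{{.Name}}'      # should now print nothing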