#!/usr/bin/env bats load helpers load helpers.network LOOPDEVICE= # Emergency cleanup if loop test fails function teardown() { if [[ -n "$LOOPDEVICE" ]]; then losetup -d $LOOPDEVICE fi basic_teardown } # CANNOT BE PARALLELIZED: requires empty pod list @test "podman pod - basic tests" { run_podman pod list --noheading is "$output" "" "baseline: empty results from list --noheading" run_podman pod ls -n is "$output" "" "baseline: empty results from ls -n" run_podman pod ps --noheading is "$output" "" "baseline: empty results from ps --noheading" } # bats test_tags=ci:parallel @test "podman pod top - containers in different PID namespaces" { # With infra=false, we don't get a /pause container no_infra='--infra=false' run_podman pod create $no_infra podid="$output" # Start two containers... run_podman run -d --pod $podid $IMAGE top -d 2 cid1="$output" run_podman run -d --pod $podid $IMAGE top -d 2 cid2="$output" # ...and wait for them to actually start. wait_for_output "PID \+PPID \+USER " $cid1 wait_for_output "PID \+PPID \+USER " $cid2 # Both containers have emitted at least one top-like line. # Now run 'pod top', and expect two 'top -d 2' processes running. run_podman pod top $podid is "$output" ".*root.*top -d 2.*root.*top -d 2" "two 'top' containers" # By default (podman pod create w/ default --infra) there should be # a /pause container. if [ -z "$no_infra" ]; then is "$output" ".*0 \+1 \+0 \+[0-9. ?s]\+/pause" "there is a /pause container" fi # Cannot remove pod while containers are still running. Error messages # differ slightly between local and remote; these are the common elements. run_podman 125 pod rm $podid assert "${lines[0]}" =~ "Error: not all containers could be removed from pod $podid: removing pod containers.*" \ "pod rm while busy: error message line 1 of 3" assert "${lines[1]}" =~ "cannot remove container .* as it is running - running or paused containers cannot be removed without force: container state improper" \ "pod rm while busy: error message line 2 of 3" assert "${lines[2]}" =~ "cannot remove container .* as it is running - running or paused containers cannot be removed without force: container state improper" \ "pod rm while busy: error message line 3 of 3" # Clean up run_podman --noout pod rm -f -t 0 $podid is "$output" "" "output should be empty" } # bats test_tags=ci:parallel @test "podman pod create - custom volumes" { skip_if_remote "CONTAINERS_CONF_OVERRIDE only affects server side" image="i.do/not/exist:image" tmpdir=$PODMAN_TMPDIR/pod-test mkdir -p $tmpdir containersconf=$tmpdir/containers.conf cat >$containersconf <$containersconf <$port_in/tcp $c_name" \ "output of 'podman ps'" # send a random string to the container. This will cause the container # to output the string to its logs, then exit. teststring=$(random_string 30) echo "$teststring" > /dev/tcp/127.0.0.1/$port_out # Confirm that the container log output is the string we sent it. run_podman wait $cid run_podman logs $cid is "$output" "$teststring" "test string received on container" # Finally, confirm the infra-container and -command. We run this late, # not at pod creation, to give the infra container time to start & log. run_podman logs $infra_cid is "$output" "Confirmed: testimage pause invoked as $infra_command" \ "pod ran with our desired infra container + command" # Clean up run_podman rm $cid run_podman pod rm -t 0 -f --pod-id-file $pod_id_file if [[ -e $pod_id_file ]]; then die "pod-id-file $pod_id_file should be removed along with pod" fi run_podman rmi $infra_image } # bats test_tags=ci:parallel @test "podman pod create should fail when infra-name is already in use" { local infra_name="infra_container_$(safename)" local infra_image="quay.io/libpod/k8s-pause:3.5" local pod_name="p-$(safename)" run_podman --noout pod create --name $pod_name --infra-name "$infra_name" --infra-image "$infra_image" is "$output" "" "output from pod create should be empty" run_podman 125 pod create --infra-name "$infra_name" assert "$output" =~ "^Error: .*: the container name \"$infra_name\" is already in use by .* You have to remove that container to be able to reuse that name: that name is already in use" \ "Trying to create two pods with same infra-name" run_podman pod rm -f $pod_name run_podman rmi $infra_image } # bats test_tags=ci:parallel @test "podman pod create --share" { local pod_name="p-$(safename)" run_podman 125 pod create --share bogus --name $pod_name is "$output" ".*invalid kernel namespace to share: bogus. Options are: cgroup, ipc, net, pid, uts or none" \ "pod test for bogus --share option" run_podman pod create --share ipc --name $pod_name run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}" is "$output" "[ipc]" run_podman run --rm --pod $pod_name --hostname foobar $IMAGE hostname is "$output" "foobar" "--hostname should work with non share UTS namespace" run_podman pod create --share +pid --replace --name $pod_name run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}" for ns in uts pid ipc net; do is "$output" ".*$ns" done run_podman pod rm -f $pod_name } # bats test_tags=ci:parallel @test "podman pod create --pod new:$POD --hostname" { local pod_name="p-$(safename)" run_podman run --rm --pod "new:$pod_name" --hostname foobar $IMAGE hostname is "$output" "foobar" "--hostname should work when creating a new:pod" run_podman pod rm $pod_name run_podman run --rm --pod "new:$pod_name" $IMAGE hostname is "$output" "$pod_name" "new:POD should have hostname name set to podname" run_podman pod rm $pod_name } # bats test_tags=ci:parallel @test "podman rm --force to remove infra container" { local pod_name="p-$(safename)" run_podman create --pod "new:$pod_name" $IMAGE container_ID="$output" run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name infra_ID="$output" run_podman 125 container rm $infra_ID is "$output" ".* and cannot be removed without removing the pod" run_podman 125 container rm --force $infra_ID is "$output" ".* and cannot be removed without removing the pod" run_podman container rm --depend $infra_ID is "$output" ".*$infra_ID.*" is "$output" ".*$container_ID.*" # Now make sure that --force --all works as well run_podman create --pod "new:$pod_name" $IMAGE container_1_ID="$output" run_podman create --pod "$pod_name" $IMAGE container_2_ID="$output" run_podman create $IMAGE container_3_ID="$output" run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name infra_ID="$output" run_podman container rm --force --depend $infra_ID assert "$output" =~ ".*$infra_ID.*" "removed infra container" assert "$output" =~ ".*$container_1_ID.*" "removed container 1" assert "$output" =~ ".*$container_2_ID.*" "removed container 2" assert "$output" !~ ".*$container_3_ID.*" "container 3 should not have been removed!" run_podman container rm $container_3_ID } # bats test_tags=ci:parallel @test "podman pod create share net" { podname="p-$(safename)" run_podman pod create --name $podname run_podman pod inspect $podname --format {{.InfraConfig.HostNetwork}} is "$output" "false" "Default network sharing should be false" run_podman pod rm $podname run_podman pod create --share ipc --network private $podname run_podman pod inspect $podname --format {{.InfraConfig.HostNetwork}} is "$output" "false" "Private network sharing with only ipc should be false" run_podman pod rm $podname run_podman pod create --name $podname --share net --network private run_podman pod inspect $podname --format {{.InfraConfig.HostNetwork}} is "$output" "false" "Private network sharing with only net should be false" run_podman pod create --share net --network host --replace $podname run_podman pod inspect $podname --format {{.InfraConfig.HostNetwork}} is "$output" "true" "Host network sharing with only net should be true" run_podman pod rm $podname run_podman pod create --name $podname --share ipc --network host run_podman pod inspect $podname --format {{.InfraConfig.HostNetwork}} is "$output" "true" "Host network sharing with only ipc should be true" run_podman pod rm $podname } # bats test_tags=ci:parallel @test "pod exit policies" { # Test setting exit policies run_podman pod create podID="$output" run_podman pod inspect $podID --format "{{.ExitPolicy}}" is "$output" "continue" "default exit policy" run_podman pod rm $podID run_podman pod create --exit-policy stop podID="$output" run_podman pod inspect $podID --format "{{.ExitPolicy}}" is "$output" "stop" "custom exit policy" run_podman pod rm $podID run_podman 125 pod create --exit-policy invalid is "$output" "Error: .*running pod create option: invalid pod exit policy: \"invalid\"" "invalid exit policy" # Test exit-policy behaviour run_podman pod create --exit-policy continue podID="$output" run_podman run --pod $podID $IMAGE true run_podman pod inspect $podID --format "{{.State}}" _ensure_pod_state $podID Degraded run_podman pod rm $podID run_podman pod create --exit-policy stop podID="$output" run_podman run --pod $podID $IMAGE true run_podman pod inspect $podID --format "{{.State}}" _ensure_pod_state $podID Exited run_podman pod rm -t -1 -f $podID } # bats test_tags=ci:parallel @test "pod exit policies - play kube" { # play-kube sets the exit policy to "stop" local name="p-$(safename)" kubeFile="apiVersion: v1 kind: Pod metadata: name: $name spec: containers: - command: - \"true\" image: $IMAGE name: ctr restartPolicy: OnFailure" echo "$kubeFile" > $PODMAN_TMPDIR/test.yaml run_podman play kube $PODMAN_TMPDIR/test.yaml run_podman pod inspect $name --format "{{.ExitPolicy}}" is "$output" "stop" "custom exit policy" _ensure_pod_state $name Exited run_podman pod rm $name } # bats test_tags=ci:parallel @test "pod resource limits" { skip_if_remote "resource limits only implemented on non-remote" skip_if_rootless "resource limits only work with root" skip_if_cgroupsv1 "resource limits only meaningful on cgroups V2" # create loopback device lofile=${PODMAN_TMPDIR}/disk.img fallocate -l 1k ${lofile} LOOPDEVICE=$(losetup --show -f $lofile) # tr needed because losetup seems to use %2d lomajmin=$(losetup -l --noheadings --output MAJ:MIN $LOOPDEVICE | tr -d ' ') run grep -w bfq /sys/block/$(basename ${LOOPDEVICE})/queue/scheduler if [ $status -ne 0 ]; then losetup -d $LOOPDEVICE LOOPDEVICE= skip "BFQ scheduler is not supported on the system" fi echo bfq > /sys/block/$(basename ${LOOPDEVICE})/queue/scheduler # FIXME: #15464: blkio-weight-device not working expected_limits=" cpu.max | 500000 100000 memory.max | 5242880 memory.swap.max | 1068498944 io.bfq.weight | default 50 io.max | $lomajmin rbps=1048576 wbps=1048576 riops=max wiops=max " defer-assertion-failures for cgm in systemd cgroupfs; do local name="p-resources-$cgm-$(safename)" run_podman --cgroup-manager=$cgm pod create --name=$name --cpus=5 --memory=5m --memory-swap=1g --cpu-shares=1000 --cpuset-cpus=0 --cpuset-mems=0 --device-read-bps=${LOOPDEVICE}:1mb --device-write-bps=${LOOPDEVICE}:1mb --blkio-weight=50 run_podman --cgroup-manager=$cgm pod start $name run_podman pod inspect --format '{{.CgroupPath}}' $name local cgroup_path="$output" while read unit expect; do local actual=$(< /sys/fs/cgroup/$cgroup_path/$unit) is "$actual" "$expect" "resource limit under $cgm: $unit" done < <(parse_table "$expected_limits") run_podman --cgroup-manager=$cgm pod rm -f $name done # Clean up, and prevent duplicate cleanup in teardown losetup -d $LOOPDEVICE LOOPDEVICE= } # CANNOT BE PARALLELIZED: rm -a @test "podman pod ps doesn't race with pod rm" { # create a few pods for i in {0..10}; do run_podman pod create done # and delete them "${PODMAN_CMD[@]}" pod rm -a & # pod ps should not fail while pods are deleted run_podman pod ps -q # wait for pod rm -a wait } # CANNOT BE PARALLELIZED: naked ps @test "podman pod rm --force bogus" { run_podman 1 pod rm bogus is "$output" "Error: .*bogus.*: no such pod" "Should print error" run_podman pod rm -t -1 --force bogus is "$output" "" "Should print no output" run_podman pod create --name testpod run_podman pod rm --force bogus testpod assert "$output" =~ "[0-9a-f]{64}" "rm pod" run_podman pod ps -q assert "$output" = "" "no pods listed" } # bats test_tags=ci:parallel @test "podman pod create on failure" { podname="p-$(safename)" nwname="n-$(safename)" run_podman 125 pod create --network $nwname --name $podname # FIXME: podman and podman-remote do not return the same error message # but consistency would be nice is "$output" "Error: .*unable to find network with name or ID $nwname: network not found" # Make sure the pod doesn't get created on failure run_podman 1 pod exists $podname } # bats test_tags=ci:parallel @test "podman pod create restart tests" { podname="p-$(safename)" run_podman pod create --restart=on-failure --name $podname run_podman create --name test-ctr --pod $podname $IMAGE run_podman container inspect --format '{{ .HostConfig.RestartPolicy.Name }}' test-ctr is "$output" "on-failure" "container inherits from pod" run_podman create --replace --restart=always --name test-ctr --pod $podname $IMAGE run_podman container inspect --format '{{ .HostConfig.RestartPolicy.Name }}' test-ctr is "$output" "always" "container overrides restart policy from pod" run_podman pod rm -f $podname } # Helper used by pod ps --filter test. Creates one pod or container # with a UNIQUE two-character CID prefix. function thingy_with_unique_id() { local what="$1"; shift # pod or container local how="$1"; shift # e.g. "--name p1c1 --pod p1" while :;do local try_again= run_podman $what create $how # This is our return value; it propagates up to caller's namespace id="$output" # Make sure the first two characters aren't already used in an ID for existing_id in "$@"; do if [[ -z "$try_again" ]]; then if [[ "${existing_id:0:2}" == "${id:0:2}" ]]; then run_podman $what rm $id try_again=1 fi fi done if [[ -z "$try_again" ]]; then # Nope! groovy! caller gets $id return fi done } # bats test_tags=ci:parallel @test "podman pod ps --filter" { local -A podid local -A ctrid # Setup: create three pods, each with three containers, all of them with # unique (distinct) first two characters of their pod/container ID. for p in 1 2 3;do # no infra, please! That creates an extra container with a CID # that may collide with our other ones, and it's too hard to fix. podname="p-${p}-$(safename)" thingy_with_unique_id "pod" "--infra=false --name $podname" \ ${podid[*]} ${ctrid[*]} podid[$p]=$id for c in 1 2 3; do thingy_with_unique_id "container" \ "--pod $podname --name $podname-c${c} $IMAGE true" \ ${podid[*]} ${ctrid[*]} ctrid[$p$c]=$id done done # for debugging; without this, on test failure it's too hard to # associate IDs with names run_podman pod ps run_podman ps -a # Normally (sequential Bats) we can do equality checks on ps output, # because thingy_with_unique_id() guarantees that we won't have collisions # in the first two characters of the hash. When running in parallel, # there's no such guarantee. local op="=" if [[ -n "$PARALLEL_JOBSLOT" ]]; then op="=~" fi # Test: ps and filter for each pod and container, by ID defer-assertion-failures for p in 1 2 3; do local pid=${podid[$p]} local podname="p-$p-$(safename)" # Search by short pod ID, longer pod ID, pod ID regex, and pod name # ps by short ID, longer ID, regex, and name for filter in "id=${pid:0:2}" "id=${pid:0:10}" "id=^${pid:0:2}" "name=$podname"; do run_podman pod ps --filter=$filter --format '{{.Name}}:{{.Id}}' assert "$output" $op "$podname:${pid:0:12}" "pod $p, filter=$filter" done # ps by negation (regex) of our pid, should find all other pods f1="^[^${pid:0:1}]" f2="^.[^${pid:1:1}]" run_podman pod ps --filter=id="$f1" --filter=id="$f2" --format '{{.Name}}' assert "${#lines[*]}" -ge "2" "filter=$f1 + $f2 finds at least 2 pods" assert "$output" !~ "$podname" "filter=$f1 + $f2 does not find pod $p" # Confirm that the other two pods _are_ in our list for notp in 1 2 3; do if [[ $notp -ne $p ]]; then assert "$output" =~ "p-$notp-$(safename)" "filter=$f1 + $f2 finds pod $notp" fi done # Search by *container* ID for c in 1 2 3;do local cid=${ctrid[$p$c]} local podname="p-$p-$(safename)" for filter in "ctr-ids=${cid:0:2}" "ctr-ids=^${cid:0:2}.*"; do run_podman pod ps --filter=$filter --format '{{.Name}}:{{.Id}}' assert "$output" $op "$podname:${pid:0:12}" \ "pod $p, container $c, filter=$filter" done done done # Multiple filters, multiple pods run_podman pod ps --filter=ctr-ids=${ctrid[12]} \ --filter=ctr-ids=${ctrid[23]} \ --filter=ctr-ids=${ctrid[31]} \ --format='{{.Name}}' --sort=name assert "$(echo $output)" == "p-1-$(safename) p-2-$(safename) p-3-$(safename)" \ "multiple ctr-ids filters" # Clean up run_podman pod rm -f ${podid[*]} } # bats test_tags=ci:parallel @test "podman pod cleans cgroup and keeps limits" { skip_if_remote "we cannot check cgroup settings" skip_if_rootless_cgroupsv1 "rootless cannot use cgroups on v1" for infra in true false; do run_podman pod create --infra=$infra --memory=256M podid="$output" run_podman run -d --pod $podid $IMAGE top -d 2 run_podman pod inspect $podid --format "{{.CgroupPath}}" result="$output" assert "$result" =~ "/" ".CgroupPath is a valid path" if is_cgroupsv2; then cgroup_path=/sys/fs/cgroup/$result else cgroup_path=/sys/fs/cgroup/memory/$result fi if test ! -e $cgroup_path; then die "the cgroup $cgroup_path does not exist" fi run_podman pod stop -t 0 $podid if test -e $cgroup_path; then die "the cgroup $cgroup_path should not exist after pod stop" fi run_podman pod start $podid if test ! -e $cgroup_path; then die "the cgroup $cgroup_path does not exist" fi # validate that cgroup limits are in place after a restart # issue #19175 if is_cgroupsv2; then memory_limit_file=$cgroup_path/memory.max else memory_limit_file=$cgroup_path/memory.limit_in_bytes fi assert "$(< $memory_limit_file)" = "268435456" "Contents of $memory_limit_file" run_podman pod rm -t 0 -f $podid if test -e $cgroup_path; then die "the cgroup $cgroup_path should not exist after pod rm" fi done } # vim: filetype=sh