podman generate kube - add actual tests
This exposed a nasty bug in our system-test setup: Ubuntu (runc) was writing
a scratch containers.conf file, and setting CONTAINERS_CONF to point to it.
This was well-intentionedly introduced in #10199 as part of our long sad
history of not testing runc. What I did not understand at that time is that
CONTAINERS_CONF is **dangerous**: it does not mean "I will read standard
containers.conf and then override", it means "I will **IGNORE** standard
containers.conf and use only the settings in this file"! So on Ubuntu we
were losing all the default settings: capabilities, sysctls, all. Yes, this
is documented in containers.conf(5), but it is such a huge violation of POLA
that I need to repeat it.

In #14972, as yet another attempt to fix our runc crisis, I introduced a new
runc-override mechanism: create a custom /etc/containers/containers.conf
when OCI_RUNTIME=runc. Unlike the CONTAINERS_CONF envariable, the /etc file
actually means what you think it means: "read the default file first, then
override with the /etc file contents". I.e., we get the desired defaults.
But I didn't remember this helpers.bash workaround, so our runc testing has
actually been flawed: we have not been testing with the system
containers.conf.

This commit removes the no-longer-needed and never-actually-wanted
workaround and, by virtue of testing the cap-drops in kube generate, adds a
regression test to make sure this never happens again. It's a little scary
that we haven't been testing capabilities.

Also scary: this PR requires python, for converting yaml to json. I think
that should be safe: python3 'import yaml' and 'import json' work fine on a
RHEL8.7 VM from 1minutetip.

Signed-off-by: Ed Santiago <santiago@redhat.com>
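A hedged sketch of the distinction the message describes (the file paths and
the `podman info` probe are illustrative, not part of this commit):

    # Environment variable: podman reads ONLY this file. The settings it
    # would normally pick up from the system containers.conf (default
    # capability drops, sysctls, ...) are lost.
    CONTAINERS_CONF=/tmp/scratch.conf podman info

    # /etc drop-in: podman reads its defaults first, then overrides just
    # the keys present in the drop-in -- e.g. to force runc:
    #     # /etc/containers/containers.conf
    #     [engine]
    #     runtime = "runc"
    podman info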
@@ -5,11 +5,167 @@

load helpers

@test "podman kube generate - basic" {
# standard capability drop list
capabilities='{"drop":["CAP_MKNOD","CAP_NET_RAW","CAP_AUDIT_WRITE"]}'

# Warning that is emitted once on containers, multiple times on pods
kubernetes_63='Truncation Annotation: .* Kubernetes only allows 63 characters'

# filter: convert yaml to json, because bash+yaml=madness
function yaml2json() {
    egrep -v "$kubernetes_63" | python3 -c 'import yaml
import json
import sys
json.dump(yaml.safe_load(sys.stdin), sys.stdout)'
}
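
# (Illustrative usage, not part of the commit: the filter reads YAML on
# stdin and writes compact JSON on stdout, after first dropping the
# Kubernetes-63-char warning lines, e.g.:
#     yaml2json <<<$'a: 1\nb: [2, 3]'    # -> {"a": 1, "b": [2, 3]}
# which is what lets the tests below query individual fields with jq.)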

###############################################################################
# BEGIN tests

@test "podman kube generate - usage message" {
    run_podman kube generate --help
    is "$output" ".*podman.* kube generate \[options\] {CONTAINER...|POD...|VOLUME...}"
    run_podman generate kube --help
    is "$output" ".*podman.* generate kube \[options\] {CONTAINER...|POD...|VOLUME...}"
}

@test "podman kube generate - container" {
    cname=c$(random_string 15)
    run_podman container create --name $cname $IMAGE top
    run_podman kube generate $cname

    # Convert yaml to json, and dump to stdout (to help in case of errors)
    json=$(yaml2json <<<"$output")
    jq . <<<"$json"

    # What we expect to see. This is by necessity an incomplete list.
    # For instance, it does not include org.opencontainers.image.base.*
    # because sometimes we get that, sometimes we don't. No clue why.
    #
    # And, unfortunately, if new fields are added to the YAML, we won't
    # test those unless a developer remembers to add them here.
    #
    # Reasons for doing it this way, instead of straight-comparing yaml:
    #    1) the arbitrariness of the org.opencontainers.image.base annotations
    #    2) YAML order is nondeterministic, so on a pod with two containers
    #       (as in the pod test below) we cannot rely on cname1/cname2.
    expect="
apiVersion | = | v1
kind | = | Pod

metadata.annotations.\"io.kubernetes.cri-o.TTY/$cname\" | = | false
metadata.annotations.\"io.podman.annotations.autoremove/$cname\" | = | FALSE
metadata.annotations.\"io.podman.annotations.init/$cname\" | = | FALSE
metadata.annotations.\"io.podman.annotations.privileged/$cname\" | = | FALSE
metadata.annotations.\"io.podman.annotations.publish-all/$cname\" | = | FALSE

metadata.creationTimestamp | =~ | [0-9T:-]\\+Z
metadata.labels.app | = | ${cname}-pod
metadata.name | = | ${cname}-pod

spec.containers[0].command | = | [\"top\"]
spec.containers[0].image | = | $IMAGE
spec.containers[0].name | = | $cname

spec.containers[0].securityContext.capabilities | = | $capabilities

status | = | null
"

    # Parse and check all those
    while read key op expect; do
        actual=$(jq -r -c ".$key" <<<"$json")
        assert "$actual" $op "$expect" ".$key"
    done < <(parse_table "$expect")
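
    # (Illustrative note, not part of the commit: parse_table is a
    # helpers.bash utility; it skips blank lines and splits each
    # "lhs | op | rhs" row on the pipes, so each iteration above sees,
    # e.g.:
    #     key=apiVersion  op='='  expect=v1
    # before assert compares jq's output against the expectation.)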

    if ! is_remote; then
        count=$(egrep -c "$kubernetes_63" <<<"$output")
        assert "$count" = 1 "1 instance of the Kubernetes-63-char warning"
    fi

    run_podman rm $cname
}

@test "podman kube generate - pod" {
    local pname=p$(random_string 15)
    local cname1=c1$(random_string 15)
    local cname2=c2$(random_string 15)

    run_podman pod create --name $pname --publish 9999:8888

    # Needs at least one container. Error is slightly different between
    # regular and remote podman:
    #    regular: Error: pod ... only has...
    #    remote:  Error: error generating YAML: pod ... only has...
    run_podman 125 kube generate $pname
    assert "$output" =~ "Error: .* only has an infra container"

    run_podman container create --name $cname1 --pod $pname $IMAGE top
    run_podman container create --name $cname2 --pod $pname $IMAGE bottom
    run_podman kube generate $pname

    json=$(yaml2json <<<"$output")
    jq . <<<"$json"

    # See container test above for description of this table
    expect="
apiVersion | = | v1
kind | = | Pod

metadata.annotations.\"io.kubernetes.cri-o.ContainerType/$cname1\" | = | container
metadata.annotations.\"io.kubernetes.cri-o.ContainerType/$cname2\" | = | container
metadata.annotations.\"io.kubernetes.cri-o.SandboxID/$cname1\" | =~ | [0-9a-f]\\{56\\}
metadata.annotations.\"io.kubernetes.cri-o.SandboxID/$cname2\" | =~ | [0-9a-f]\\{56\\}
metadata.annotations.\"io.kubernetes.cri-o.TTY/$cname1\" | = | false
metadata.annotations.\"io.kubernetes.cri-o.TTY/$cname2\" | = | false
metadata.annotations.\"io.podman.annotations.autoremove/$cname1\" | = | FALSE
metadata.annotations.\"io.podman.annotations.autoremove/$cname2\" | = | FALSE
metadata.annotations.\"io.podman.annotations.init/$cname1\" | = | FALSE
metadata.annotations.\"io.podman.annotations.init/$cname2\" | = | FALSE
metadata.annotations.\"io.podman.annotations.privileged/$cname1\" | = | FALSE
metadata.annotations.\"io.podman.annotations.privileged/$cname2\" | = | FALSE
metadata.annotations.\"io.podman.annotations.publish-all/$cname1\" | = | FALSE
metadata.annotations.\"io.podman.annotations.publish-all/$cname2\" | = | FALSE

metadata.creationTimestamp | =~ | [0-9T:-]\\+Z
metadata.labels.app | = | ${pname}
metadata.name | = | ${pname}

spec.hostname | = | $pname
spec.restartPolicy | = | Never

spec.containers[0].command | = | [\"top\"]
spec.containers[0].image | = | $IMAGE
spec.containers[0].name | = | $cname1
spec.containers[0].ports[0].containerPort | = | 8888
spec.containers[0].ports[0].hostPort | = | 9999
spec.containers[0].resources | = | {}

spec.containers[1].command | = | [\"bottom\"]
spec.containers[1].image | = | $IMAGE
spec.containers[1].name | = | $cname2
spec.containers[1].ports | = | null
spec.containers[1].resources | = | {}

spec.containers[0].securityContext.capabilities | = | $capabilities

status | = | {}
"

    while read key op expect; do
        actual=$(jq -r -c ".$key" <<<"$json")
        assert "$actual" $op "$expect" ".$key"
    done < <(parse_table "$expect")

    # Why 4? Maybe two for each container?
    if ! is_remote; then
        count=$(egrep -c "$kubernetes_63" <<<"$output")
        assert "$count" = 4 "instances of the Kubernetes-63-char warning"
    fi

    run_podman rm $cname1 $cname2
    run_podman pod rm $pname
    run_podman rmi $(pause_image)
}

# vim: filetype=sh
@@ -36,20 +36,6 @@ fi
# That way individual tests can override with their own setup/teardown,
# while retaining the ability to include these if they so desire.

# Some CI systems set this to runc, overriding the default crun.
if [[ -n $OCI_RUNTIME ]]; then
    if [[ -z $CONTAINERS_CONF ]]; then
        # FIXME: BATS provides no mechanism for end-of-run cleanup[1]; how
        # can we avoid leaving this file behind when we finish?
        #    [1] https://github.com/bats-core/bats-core/issues/39
        export CONTAINERS_CONF=$(mktemp --tmpdir=${BATS_TMPDIR:-/tmp} podman-bats-XXXXXXX.containers.conf)
        cat >$CONTAINERS_CONF <<EOF
[engine]
runtime="$OCI_RUNTIME"
EOF
    fi
fi
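
# (Illustrative note, not part of the diff: with OCI_RUNTIME=runc the
# scratch file written above contains only
#     [engine]
#     runtime="runc"
# and, because CONTAINERS_CONF replaces the system containers.conf rather
# than overlaying it, every other system default -- capability drops,
# sysctls -- was silently lost. That is the bug the commit message
# describes, and why this whole block is being removed.)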

# Setup helper: establish a test environment with exactly the images needed
function basic_setup() {
    # Clean up all containers