Merge pull request #16709 from vrothberg/fix-16515

kube sdnotify: run proxies for the lifespan of the service
Authored by OpenShift Merge Robot, committed by GitHub on 2022-12-07 18:10:31 -05:00
6 changed files with 216 additions and 114 deletions


@@ -397,6 +397,10 @@ EOF
     systemctl start $service_name
     systemctl is-active $service_name
+    # Make sure that Podman is the service's MainPID
+    run systemctl show --property=MainPID --value $service_name
+    is "$(</proc/$output/comm)" "podman" "podman is the service mainPID"
     # The name of the service container is predictable: the first 12 characters
     # of the hash of the YAML file followed by the "-service" suffix
     yaml_sha=$(sha256sum $yaml_source)
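The new assertion checks that the unit's MainPID belongs to podman itself rather than conmon, which is the user-visible effect of keeping the sd-notify proxies alive for the service. A standalone sketch of the same checks, assuming the YAML lives at /tmp/pod.yaml and was started through a podman-kube@ style unit (both names are placeholders):

# Placeholder names; adjust to the actual YAML path and unit.
yaml=/tmp/pod.yaml
unit="podman-kube@$(systemd-escape "$yaml").service"

# MainPID must be a podman process (add --user for rootless units).
main_pid=$(systemctl show --property=MainPID --value "$unit")
cat "/proc/$main_pid/comm"        # expected: podman

# The service container name is the first 12 hex digits of the YAML's
# SHA-256 digest plus a "-service" suffix.
sha=$(sha256sum "$yaml" | cut -c1-12)
podman container inspect "${sha}-service" --format '{{.Name}}'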
@@ -422,13 +426,13 @@ EOF
     # container.
     run_podman pod kill test_pod
     for i in {0..5}; do
-        run systemctl is-failed $service_name
-        if [[ $output == "failed" ]]; then
+        run systemctl is-active $service_name
+        if [[ $output == "inactive" ]]; then
             break
         fi
         sleep 0.5
     done
-    is "$output" "failed" "systemd service transitioned to 'failed' state"
+    is "$output" "inactive" "systemd service transitioned to 'inactive' state: $service_name"
     # Now stop and start the service again.
     systemctl stop $service_name
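The service is now expected to end up 'inactive' rather than 'failed' after `podman pod kill`, because the podman process that owns the notify proxies keeps running until the workload is gone and then exits cleanly. For context, the kind of template unit these tests drive looks roughly like the sketch below, written as a heredoc in the same style the tests use for YAML; the paths and options are illustrative, not necessarily the exact template podman ships:

# Sketch of a podman-kube@-style template unit. Options are illustrative only.
mkdir -p ~/.config/systemd/user
cat > ~/.config/systemd/user/podman-kube@.service <<'EOF'
[Unit]
Description=Run a Kubernetes YAML via podman kube play
Wants=network-online.target
After=network-online.target

[Service]
Type=notify
NotifyAccess=all
ExecStart=/usr/bin/podman kube play --replace --service-container=true %I
ExecStop=/usr/bin/podman kube down %I
TimeoutStopSec=70

[Install]
WantedBy=default.target
EOF
systemctl --user daemon-reload
systemctl --user start "podman-kube@$(systemd-escape /tmp/pod.yaml).service"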


@@ -206,9 +206,10 @@ metadata:
     app: test
   name: test_pod
 spec:
+  restartPolicy: "Never"
   containers:
   - command:
-    - top
+    - true
     image: $IMAGE
     name: test
     resources: {}
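With `restartPolicy: "Never"` and a command that exits immediately, `kube play --service-container=true` now runs until every container, including the service container, has stopped. A minimal reproduction outside of bats, assuming the YAML above is saved as /tmp/test_pod.yaml (podman names the workload container <pod>-<container>, i.e. test_pod-test):

yaml=/tmp/test_pod.yaml        # placeholder path for the YAML above
podman kube play --service-container=true "$yaml"

# By the time kube play returns, both containers should have exited.
sha=$(sha256sum "$yaml" | cut -c1-12)
podman container wait "${sha}-service" test_pod-test
podman ps -a --filter name=test_pod-test --format '{{.Status}}'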
@@ -219,26 +220,26 @@ EOF
     yaml_sha=$(sha256sum $yaml_source)
     service_container="${yaml_sha:0:12}-service"
     export NOTIFY_SOCKET=$PODMAN_TMPDIR/conmon.sock
     _start_socat
     wait_for_file $_SOCAT_LOG
+    # Will run until all containers have stopped.
     run_podman play kube --service-container=true $yaml_source
+    run_podman container wait $service_container test_pod-test
     # Make sure the containers have the correct policy.
     run_podman container inspect test_pod-test $service_container --format "{{.Config.SdNotifyMode}}"
     is "$output" "ignore
 ignore"
-    run_podman container inspect $service_container --format "{{.State.ConmonPid}}"
-    mainPID="$output"
     wait_for_file $_SOCAT_LOG
     # The 'echo's help us debug failed runs
     run cat $_SOCAT_LOG
     echo "socat log:"
     echo "$output"
-    is "$output" "MAINPID=$mainPID
+    # The "with policies" test below checks the MAINPID.
+    is "$output" "MAINPID=.*
 READY=1" "sdnotify sent MAINPID and READY"
     _stop_socat
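`_start_socat`, `_stop_socat`, and `$_SOCAT_LOG` are helpers from the test suite that are not part of this diff; something along these lines is enough to capture what podman forwards to NOTIFY_SOCKET (socket and log paths are placeholders):

# Placeholder paths; the real helper lives elsewhere in the test suite.
export NOTIFY_SOCKET=/tmp/notify.sock
notify_log=/tmp/notify.log

# sd_notify messages are datagrams; append each one to the log.
socat -u UNIX-RECVFROM:"$NOTIFY_SOCKET",fork "OPEN:$notify_log,creat,append" &
socat_pid=$!

# ... run "podman kube play --service-container=true ..." here ...

# Expect something like "MAINPID=<podman pid>" and "READY=1".
cat "$notify_log"
kill "$socat_pid"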
@@ -272,11 +273,12 @@ metadata:
     io.containers.sdnotify: "container"
     io.containers.sdnotify/b: "conmon"
 spec:
+  restartPolicy: "Never"
   containers:
   - command:
     - /bin/sh
     - -c
-    - 'printenv NOTIFY_SOCKET; echo READY; while ! test -f /stop;do sleep 0.1;done;systemd-notify --ready'
+    - 'printenv NOTIFY_SOCKET; while ! test -f /stop;do sleep 0.1;done'
     image: $_FEDORA
     name: a
   - command:
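The `io.containers.sdnotify` annotations above set a pod-wide policy of "container" and override container b to "conmon". The same policies exist outside of kube play as `podman run --sdnotify=...`; a rough illustration, with placeholder image names and assuming NOTIFY_SOCKET points at a listening socket such as the socat sketch above (the "container" policy needs systemd-notify inside the image):

# conmon policy: readiness is reported on the container's behalf as soon as
# conmon has started it.
podman run -d --sdnotify=conmon --name sd_conmon quay.io/libpod/testimage:latest sleep 100

# container policy: podman only forwards READY once the workload itself sends
# it over the notify socket mounted into the container.
podman run -d --sdnotify=container --name sd_container registry.fedoraproject.org/fedora:latest \
    sh -c 'sleep 2; systemd-notify --ready; sleep 100'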
@@ -332,22 +334,28 @@ ignore"
run_podman logs $container_a
is "${lines[0]}" "/run/notify/notify.sock" "NOTIFY_SOCKET is passed to container"
# Instruct the container to send the READY
# Send the READY message. Doing it in an exec session helps debug
# potential issues.
run_podman exec --env NOTIFY_SOCKET="/run/notify/notify.sock" $container_a /usr/bin/systemd-notify --ready
# Instruct the container to stop
run_podman exec $container_a /bin/touch /stop
run_podman container inspect $service_container --format "{{.State.ConmonPid}}"
main_pid="$output"
run_podman container wait $container_a
run_podman container inspect $container_a --format "{{.State.ExitCode}}"
is "$output" "0" "container exited cleanly after sending READY message"
wait_for_file $_SOCAT_LOG
# The 'echo's help us debug failed runs
run cat $_SOCAT_LOG
echo "socat log:"
echo "$output"
is "$output" "MAINPID=$main_pid
is "$output" "MAINPID=.*
READY=1" "sdnotify sent MAINPID and READY"
# Make sure that Podman is the service's MainPID
main_pid=$(awk -F= '{print $2}' <<< ${lines[0]})
is "$(</proc/$main_pid/comm)" "podman" "podman is the service mainPID"
_stop_socat
# Clean up pod and pause image
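The final assertions pull the MAINPID out of the first line of the socat log and verify it belongs to podman rather than conmon, which is exactly what "run proxies for the lifespan of the service" buys. The same check in isolation, with the log path assumed from the socat sketch above:

notify_log=/tmp/notify.log     # placeholder, see the socat sketch

main_pid=$(awk -F= '/^MAINPID=/{print $2; exit}' "$notify_log")
cat "/proc/$main_pid/comm"     # expected: podman, not conmon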


@@ -123,7 +123,25 @@ spec:
     name: test
     resources: {}
 EOF
-    run_podman play kube --service-container=true $yaml_source
+    # Run `play kube` in the background as it will wait for the service
+    # container to exit.
+    timeout --foreground -v --kill=10 60 \
+        $PODMAN play kube --service-container=true $yaml_source &>/dev/null &
+    # Wait for the container to be running
+    container_a=test_pod-test
+    for i in $(seq 1 20); do
+        run_podman "?" container wait $container_a --condition="running"
+        if [[ $status == 0 ]]; then
+            break
+        fi
+        sleep 0.5
+        # Just for debugging
+        run_podman ps -a
+    done
+    if [[ $status != 0 ]]; then
+        die "container $container_a did not start"
+    fi
     # The name of the service container is predictable: the first 12 characters
     # of the hash of the YAML file followed by the "-service" suffix
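Since `play kube` with a service container now stays in the foreground until the service container exits, the test backgrounds it under `timeout` and polls with `podman container wait --condition=running`. The same pattern, including teardown, sketched outside of bats with an assumed YAML path:

yaml=/tmp/test_pod.yaml        # placeholder
timeout --foreground -v --kill=10 60 \
    podman kube play --service-container=true "$yaml" &>/dev/null &

# Poll until the workload container reports "running".
for i in $(seq 1 20); do
    podman container wait test_pod-test --condition=running && break
    sleep 0.5
done

# Tearing the play down stops the service container, which in turn lets the
# backgrounded kube play exit.
podman kube down "$yaml"
wait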