kube play: support auto updates and rollbacks

Add auto-update support to `podman kube play`.  Auto-update policies can
be configured for:
 * the entire pod via the `io.containers.autoupdate` annotation
 * a specific container via the `io.containers.autoupdate/$name` annotation

To make use of rollbacks, the `io.containers.sdnotify` policy should be
set to `container` so that the workload running _inside_ the container
can send the READY message over the NOTIFY_SOCKET once it is ready; a
sketch of such a payload follows below.  For further details on auto
updates and rollbacks, please refer to the dedicated article [1].
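
As a minimal sketch (adapted from the rollback test added in this
commit), the container's entrypoint can report readiness itself; the
`/runme` path is just the name the test uses:

```sh
#!/bin/sh
# Runs inside the container.  With io.containers.sdnotify=container,
# Podman passes the NOTIFY_SOCKET into the container so the workload
# itself can signal when it is ready.
printenv NOTIFY_SOCKET            # sanity check: the socket must be set
systemd-notify --ready            # send READY=1 over the NOTIFY_SOCKET
while :; do sleep 0.1; done       # keep the container running
```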

Since auto updates and rollbacks are based on Podman's systemd
integration, the k8s YAML must be executed via the `podman-kube@`
systemd template.  For further details on how to run k8s YAML in
systemd via Podman, please refer to the dedicated article [2].
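
For instance, assuming the YAML below is stored at `/tmp/test.yaml` (a
hypothetical path), the template instance is named after the escaped
file path, as in the tests below:

```sh
yaml=/tmp/test.yaml   # hypothetical location of the k8s YAML
systemctl start podman-kube@$(systemd-escape $yaml).service
systemctl is-active podman-kube@$(systemd-escape $yaml).service
```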

An example k8s YAML may look as follows:
```YAML
apiVersion: v1
kind: Pod
metadata:
  annotations:
    io.containers.autoupdate: "local"
    io.containers.autoupdate/b: "registry"
  labels:
    app: test
  name: test_pod
spec:
  containers:
  - command:
    - top
    image: alpine
    name: a
  - command:
    - top
    image: alpine
    name: b
```
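
Once the pod runs in the template, `podman auto-update` applies the
policies configured above; a `--dry-run` first shows what would be
updated without touching the containers (the format string matches the
one used in the tests below):

```sh
podman auto-update --dry-run \
    --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
# Expect one line per container, roughly:
#   podman-kube@<escaped path>.service,<id> (test_pod-a),alpine,false,local
#   podman-kube@<escaped path>.service,<id> (test_pod-b),alpine,false,registry
```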

[1] https://www.redhat.com/sysadmin/podman-auto-updates-rollbacks
[2] https://www.redhat.com/sysadmin/kubernetes-workloads-podman-systemd

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
Valentin Rothberg
2022-08-30 11:17:25 +02:00
parent bdfc4df1f2
commit 274d34a25a
9 changed files with 240 additions and 43 deletions


@@ -301,24 +301,16 @@ LISTEN_FDNAMES=listen_fdnames" | sort)
 }
 
 @test "podman-kube@.service template" {
-    # If running from a podman source directory, build and use the source
-    # version of the play-kube-@ unit file
-    unit_name="podman-kube@.service"
-    unit_file="contrib/systemd/system/${unit_name}"
-    if [[ -e ${unit_file}.in ]]; then
-        echo "# [Building & using $unit_name from source]" >&3
-        # Force regenerating unit file (existing one may have /usr/bin path)
-        rm -f $unit_file
-        BINDIR=$(dirname $PODMAN) make $unit_file
-        cp $unit_file $UNIT_DIR/$unit_name
-    fi
-
+    install_kube_template
     # Create the YAML file
     yaml_source="$PODMAN_TMPDIR/test.yaml"
     cat >$yaml_source <<EOF
 apiVersion: v1
 kind: Pod
 metadata:
+  annotations:
+    io.containers.autoupdate: "local"
+    io.containers.autoupdate/b: "registry"
   labels:
     app: test
   name: test_pod
@@ -327,8 +319,11 @@ spec:
   - command:
     - top
     image: $IMAGE
-    name: test
-    resources: {}
+    name: a
+  - command:
+    - top
+    image: $IMAGE
+    name: b
 EOF

     # Dispatch the YAML file
@@ -349,6 +344,12 @@ EOF
     run_podman 125 container rm $service_container
     is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"

+    # Add a simple `auto-update --dry-run` test here to avoid too much redundancy
+    # with 255-auto-update.bats
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,local.*" "global auto-update policy gets applied"
+    is "$output" ".*$service_name,.* (test_pod-b),$IMAGE,false,registry.*" "container-specified auto-update policy gets applied"
+
     # Kill the pod and make sure the service is not running.
     # The restart policy is set to "never" since there is no
     # design yet for propagating exit codes up to the service


@@ -266,8 +266,6 @@ EOF

     # Generate a healthy image that will run correctly.
     run_podman build -t quay.io/libpod/$image -f $dockerfile1
-    podman image inspect --format "{{.ID}}" $image
-    oldID="$output"

     generate_service $image local /runme --sdnotify=container noTag
     _wait_service_ready container-$cname.service
@@ -277,7 +275,7 @@ EOF

     # Generate an unhealthy image that will fail.
     run_podman build -t quay.io/libpod/$image -f $dockerfile2
-    podman image inspect --format "{{.ID}}" $image
+    run_podman image inspect --format "{{.ID}}" $image
     newID="$output"

     run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
@@ -409,4 +407,97 @@ EOF
     _confirm_update $cname $ori_image
 }

+@test "podman-kube@.service template with rollback" {
+    # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
+    # assume that we work only with crun, nothing else.
+    # [copied from 260-sdnotify.bats]
+    runtime=$(podman_runtime)
+    if [[ "$runtime" != "crun" ]]; then
+        skip "this test only works with crun, not $runtime"
+    fi
+
+    install_kube_template
+
+    dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
+    cat >$dockerfile1 <<EOF
+FROM quay.io/libpod/fedora:31
+RUN echo -e "#!/bin/sh\n\
+printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
+trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
+>> /runme
+RUN chmod +x /runme
+EOF
+
+    dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
+    cat >$dockerfile2 <<EOF
+FROM quay.io/libpod/fedora:31
+RUN echo -e "#!/bin/sh\n\
+exit 1" >> /runme
+RUN chmod +x /runme
+EOF
+
+    local_image=localhost/image:$(random_string 10)
+
+    # Generate a healthy image that will run correctly.
+    run_podman build -t $local_image -f $dockerfile1
+    run_podman image inspect --format "{{.ID}}" $local_image
+    oldID="$output"
+
+    # Create the YAML file
+    yaml_source="$PODMAN_TMPDIR/test.yaml"
+    cat >$yaml_source <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    io.containers.autoupdate: "registry"
+    io.containers.autoupdate/b: "local"
+    io.containers.sdnotify/b: "container"
+  labels:
+    app: test
+  name: test_pod
+spec:
+  containers:
+  - command:
+    - top
+    image: $IMAGE
+    name: a
+  - command:
+    - /runme
+    image: $local_image
+    name: b
+EOF
+
+    # Dispatch the YAML file
+    service_name="podman-kube@$(systemd-escape $yaml_source).service"
+    systemctl start $service_name
+    systemctl is-active $service_name
+
+    # Make sure the containers are properly configured
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
+    is "$output" ".*$service_name,.* (test_pod-b),$local_image,false,local.*" "container-specified auto-update policy gets applied"
+
+    # Generate a broken image that will fail.
+    run_podman build -t $local_image -f $dockerfile2
+    run_podman image inspect --format "{{.ID}}" $local_image
+    newID="$output"
+
+    assert "$oldID" != "$newID" "broken image really is a new one"
+
+    # Make sure container b sees the new image
+    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
+    is "$output" ".*$service_name,.* (test_pod-b),$local_image,pending,local.*" "container b sees the new image"
+
+    # Now update and check for the rollback
+    run_podman auto-update --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
+    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,rolled back,registry.*" "container a was rolled back as the update of b failed"
+    is "$output" ".*$service_name,.* (test_pod-b),$local_image,rolled back,local.*" "container b was rolled back as its update has failed"
+
+    # Clean up
+    systemctl stop $service_name
+    run_podman rmi -f $(pause_image) $local_image $newID $oldID
+    rm -f $UNIT_DIR/$unit_name
+}
+
 # vim: filetype=sh


@@ -32,3 +32,17 @@ journalctl() {
 systemd-run() {
     command systemd-run $_DASHUSER "$@";
 }
+
+install_kube_template() {
+    # If running from a podman source directory, build and use the source
+    # version of the play-kube-@ unit file
+    unit_name="podman-kube@.service"
+    unit_file="contrib/systemd/system/${unit_name}"
+    if [[ -e ${unit_file}.in ]]; then
+        echo "# [Building & using $unit_name from source]" >&3
+        # Force regenerating unit file (existing one may have /usr/bin path)
+        rm -f $unit_file
+        BINDIR=$(dirname $PODMAN) make $unit_file
+        cp $unit_file $UNIT_DIR/$unit_name
+    fi
+}