auto-update: simple rollback

Add support for simple rollbacks during `podman auto-update`.  Rollbacks
are enabled by default.  If a systemd unit cannot be restarted after an
update, the previous image will be retagged and the unit will be
restarted a second time.

Add system tests for rollbacks.  Also fix a bug in the restart
sequence: we have to wait on the restart job's result channel to
actually know whether the restart was successful or not.

NOTE: To make rollbacks really useful, users must run their containers
with `--sdnotify=container` such that the containers send the ready
message over the (mounted) socket.  This way, restarting the systemd
units during auto update will block until the message has been received
(or a timeout kicked in).
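
A rough usage sketch of the full flow, assuming illustrative image and
container names (all flags shown are existing podman flags):

```
# Create a container that sends the READY message itself and is labeled
# for registry-based auto updates.
podman create --name myapp --sdnotify=container \
    --label "io.containers.autoupdate=registry" \
    registry.example.com/myapp:latest

# Generate a systemd unit for it and start the service.
podman generate systemd --new --files --name myapp
mv container-myapp.service ~/.config/systemd/user/
systemctl --user daemon-reload
systemctl --user enable --now container-myapp.service

# Check for pending updates, then apply them; rollback is on by default.
podman auto-update --dry-run
podman auto-update
```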

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
Valentin Rothberg
2021-07-28 15:19:04 +02:00
parent 117583c293
commit 30df551bde
8 changed files with 249 additions and 44 deletions


@ -54,6 +54,7 @@ func init() {
_ = autoUpdateCommand.RegisterFlagCompletionFunc(authfileFlagName, completion.AutocompleteDefault)
flags.BoolVar(&autoUpdateOptions.DryRun, "dry-run", false, "Check for pending updates")
flags.BoolVar(&autoUpdateOptions.Rollback, "rollback", true, "Rollback to previous image if update fails")
flags.StringVar(&autoUpdateOptions.format, "format", "", "Change the output format to JSON or a Go template")
_ = autoUpdateCommand.RegisterFlagCompletionFunc("format", common.AutocompleteFormat(autoUpdateOutput{}))
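
As a brief usage sketch, the new flag can be disabled explicitly to keep the previous behavior:

```
# Apply pending updates but skip the automatic rollback on failure.
podman auto-update --rollback=false
```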


@ -51,6 +51,15 @@ The `UPDATED` field indicates the availability of a new image with "pending".
Change the default output format. This can be of a supported type like 'json' or a Go template.
Valid placeholders for the Go template are listed below:
#### **--rollback**=*true|false*
If restarting a systemd unit after updating the image fails, roll back to the previous image and restart the unit once more. Defaults to true.
Please note that detecting whether a systemd unit has failed works best when the container sends the READY message via SDNOTIFY. In that case, restarting the unit will wait until the message has been received or a timeout kicks in. Without it, restarting the systemd unit may be reported as successful even though the container fails shortly afterwards.
For a container to send the READY message via SDNOTIFY it must be created with the `--sdnotify=container` option (see podman-run(1)). The application running inside the container can then execute `systemd-notify --ready` when ready or use the sdnotify bindings of the specific programming language (e.g., sd_notify(3)).
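
A minimal entrypoint sketch (the application path is a placeholder) showing how a container can signal readiness:

```
#!/bin/sh
# Tell systemd the service is ready, then hand over to the real workload.
systemd-notify --ready
exec /usr/local/bin/myapp
```
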
| **Placeholder** | **Description** |
| --------------- | -------------------------------------- |
| .Unit | Name of the systemd unit |
@ -132,4 +141,4 @@ $ podman auto-update
```
## SEE ALSO
**[podman(1)](podman.1.md)**, **[podman-generate-systemd(1)](podman-generate-systemd.1.md)**, **[podman-run(1)](podman-run.1.md)**, systemd.unit(5)
**[podman(1)](podman.1.md)**, **[podman-generate-systemd(1)](podman-generate-systemd.1.md)**, **[podman-run(1)](podman-run.1.md)**, sd_notify(3), systemd.unit(5)


@ -88,7 +88,7 @@ func ValidateImageReference(imageName string) error {
} else if err != nil {
repo, err := reference.Parse(imageName)
if err != nil {
return errors.Wrap(err, "error enforcing fully-qualified docker transport reference for auto updates")
return errors.Wrap(err, "enforcing fully-qualified docker transport reference for auto updates")
}
if _, ok := repo.(reference.NamedTagged); !ok {
return errors.Errorf("auto updates require fully-qualified image references (no tag): %q", imageName)
@ -181,13 +181,13 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
cid := ctr.ID()
rawImageName := ctr.RawImageName()
if rawImageName == "" {
return nil, errors.Errorf("error registry auto-updating container %q: raw-image name is empty", cid)
return nil, errors.Errorf("registry auto-updating container %q: raw-image name is empty", cid)
}
labels := ctr.Labels()
unit, exists := labels[systemdDefine.EnvVariable]
if !exists {
return nil, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
return nil, errors.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
}
report := &entities.AutoUpdateReport{
@ -201,7 +201,7 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
if _, updated := updatedRawImages[rawImageName]; updated {
logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
if err := restartSystemdUnit(ctr, unit, conn); err != nil {
if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
return report, err
}
report.Updated = "true"
@ -211,7 +211,7 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
authfile := getAuthfilePath(ctr, options)
needsUpdate, err := newerRemoteImageAvailable(ctx, runtime, image, rawImageName, authfile)
if err != nil {
return report, errors.Wrapf(err, "error registry auto-updating container %q: image check for %q failed", cid, rawImageName)
return report, errors.Wrapf(err, "registry auto-updating container %q: image check for %q failed", cid, rawImageName)
}
if !needsUpdate {
@ -225,16 +225,30 @@ func autoUpdateRegistry(ctx context.Context, image *libimage.Image, ctr *libpod.
}
if _, err := updateImage(ctx, runtime, rawImageName, options); err != nil {
return report, errors.Wrapf(err, "error registry auto-updating container %q: image update for %q failed", cid, rawImageName)
return report, errors.Wrapf(err, "registry auto-updating container %q: image update for %q failed", cid, rawImageName)
}
updatedRawImages[rawImageName] = true
logrus.Infof("Auto-updating container %q using registry image %q", cid, rawImageName)
if err := restartSystemdUnit(ctr, unit, conn); err != nil {
return report, err
updateErr := restartSystemdUnit(ctx, ctr, unit, conn)
if updateErr == nil {
report.Updated = "true"
return report, nil
}
report.Updated = "true"
if !options.Rollback {
return report, updateErr
}
// To fall back, simply retag the old image and restart the service.
if err := image.Tag(rawImageName); err != nil {
return report, errors.Wrap(err, "falling back to previous image")
}
if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
return report, errors.Wrap(err, "restarting unit with old image during fallback")
}
report.Updated = "rolled back"
return report, nil
}
@ -243,13 +257,13 @@ func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.C
cid := ctr.ID()
rawImageName := ctr.RawImageName()
if rawImageName == "" {
return nil, errors.Errorf("error locally auto-updating container %q: raw-image name is empty", cid)
return nil, errors.Errorf("locally auto-updating container %q: raw-image name is empty", cid)
}
labels := ctr.Labels()
unit, exists := labels[systemdDefine.EnvVariable]
if !exists {
return nil, errors.Errorf("error auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
return nil, errors.Errorf("auto-updating container %q: no %s label found", ctr.ID(), systemdDefine.EnvVariable)
}
report := &entities.AutoUpdateReport{
@ -263,7 +277,7 @@ func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.C
needsUpdate, err := newerLocalImageAvailable(runtime, image, rawImageName)
if err != nil {
return report, errors.Wrapf(err, "error locally auto-updating container %q: image check for %q failed", cid, rawImageName)
return report, errors.Wrapf(err, "locally auto-updating container %q: image check for %q failed", cid, rawImageName)
}
if !needsUpdate {
@ -277,23 +291,47 @@ func autoUpdateLocally(ctx context.Context, image *libimage.Image, ctr *libpod.C
}
logrus.Infof("Auto-updating container %q using local image %q", cid, rawImageName)
if err := restartSystemdUnit(ctr, unit, conn); err != nil {
return report, err
updateErr := restartSystemdUnit(ctx, ctr, unit, conn)
if updateErr == nil {
report.Updated = "true"
return report, nil
}
report.Updated = "true"
if !options.Rollback {
return report, updateErr
}
// To fall back, simply retag the old image and restart the service.
if err := image.Tag(rawImageName); err != nil {
return report, errors.Wrap(err, "falling back to previous image")
}
if err := restartSystemdUnit(ctx, ctr, unit, conn); err != nil {
return report, errors.Wrap(err, "restarting unit with old image during fallback")
}
report.Updated = "rolled back"
return report, nil
}
// restartSystemdUnit restarts the systemd unit the container is running in.
func restartSystemdUnit(ctr *libpod.Container, unit string, conn *dbus.Conn) error {
_, err := conn.RestartUnit(unit, "replace", nil)
if err != nil {
return errors.Wrapf(err, "error auto-updating container %q: restarting systemd unit %q failed", ctr.ID(), unit)
func restartSystemdUnit(ctx context.Context, ctr *libpod.Container, unit string, conn *dbus.Conn) error {
restartChan := make(chan string)
if _, err := conn.RestartUnitContext(ctx, unit, "replace", restartChan); err != nil {
return errors.Wrapf(err, "auto-updating container %q: restarting systemd unit %q failed", ctr.ID(), unit)
}
logrus.Infof("Successfully restarted systemd unit %q of container %q", unit, ctr.ID())
return nil
// Wait for the restart to finish and actually check if it was
// successful or not.
result := <-restartChan
switch result {
case "done":
logrus.Infof("Successfully restarted systemd unit %q of container %q", unit, ctr.ID())
return nil
default:
return errors.Errorf("auto-updating container %q: restarting systemd unit %q failed: expected %q but received %q", ctr.ID(), unit, "done", result)
}
}
// imageContainersMap generates a map[image ID] -> [containers using the image]


@ -8,6 +8,9 @@ type AutoUpdateOptions struct {
// pending, it will be indicated in the Updated field of
// AutoUpdateReport.
DryRun bool
// If restarting the service with the new image failed, restart it
// another time with the previous image.
Rollback bool
}
// AutoUpdateReport contains the results from running auto-update.


@ -74,8 +74,7 @@ func filterCommonContainerFlags(command []string, argCount int) []string {
case s == "--sdnotify", s == "--cgroups":
i++
continue
case strings.HasPrefix(s, "--sdnotify="),
strings.HasPrefix(s, "--rm="),
case strings.HasPrefix(s, "--rm="),
strings.HasPrefix(s, "--cgroups="):
continue
}


@ -258,7 +258,6 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
}
startCommand = append(startCommand,
"run",
"--sdnotify=conmon",
"--cgroups=no-conmon",
"--rm",
)
@ -273,6 +272,7 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
fs.String("name", "", "")
fs.Bool("replace", false, "")
fs.StringArrayP("env", "e", nil, "")
fs.String("sdnotify", "", "")
fs.Parse(remainingCmd)
remainingCmd = filterCommonContainerFlags(remainingCmd, fs.NArg())
@ -294,6 +294,13 @@ func executeContainerTemplate(info *containerInfo, options entities.GenerateSyst
return "", err
}
// Default to --sdnotify=conmon unless already set by the
// container.
hasSdnotifyParam := fs.Lookup("sdnotify").Changed
if !hasSdnotifyParam {
startCommand = append(startCommand, "--sdnotify=conmon")
}
if !hasDetachParam {
// Enforce detaching
//


@ -130,7 +130,29 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman container run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN "foo=arg \"with \" space"
ExecStart=/usr/bin/podman container run --cgroups=no-conmon --rm --sdnotify=conmon -d --replace --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN "foo=arg \"with \" space"
Type=notify
NotifyAccess=all
[Install]
WantedBy=multi-user.target default.target
`
goodWithNameAndSdnotify := `# jadda-jadda.service
# autogenerated by Podman CI
[Unit]
Description=Podman jadda-jadda.service
Documentation=man:podman-generate-systemd(1)
Wants=network-online.target
After=network-online.target
RequiresMountsFor=/var/run/containers/storage
[Service]
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman container run --cgroups=no-conmon --rm -d --replace --sdnotify=container --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN "foo=arg \"with \" space"
Type=notify
NotifyAccess=all
@ -152,7 +174,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
Type=notify
NotifyAccess=all
@ -174,7 +196,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --pod-id-file %t/pod-foobar.pod-id-file --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --pod-id-file %t/pod-foobar.pod-id-file --sdnotify=conmon --replace -d --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
Type=notify
NotifyAccess=all
@ -196,7 +218,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --replace --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon --replace --detach --name jadda-jadda --hostname hello-world awesome-image:latest command arg1 ... argN
Type=notify
NotifyAccess=all
@ -218,7 +240,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d awesome-image:latest
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon -d awesome-image:latest
Type=notify
NotifyAccess=all
@ -241,7 +263,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=102
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm ` +
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon ` +
detachparam +
` awesome-image:latest
Type=notify
@ -267,7 +289,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=102
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name test -p 80:80 awesome-image:latest somecmd --detach=false
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon -d --replace --name test -p 80:80 awesome-image:latest somecmd --detach=false
Type=notify
NotifyAccess=all
@ -289,7 +311,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=102
ExecStart=/usr/bin/podman --events-backend none --runroot /root run --sdnotify=conmon --cgroups=no-conmon --rm -d awesome-image:latest
ExecStart=/usr/bin/podman --events-backend none --runroot /root run --cgroups=no-conmon --rm --sdnotify=conmon -d awesome-image:latest
Type=notify
NotifyAccess=all
@ -311,7 +333,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman container run --sdnotify=conmon --cgroups=no-conmon --rm -d awesome-image:latest
ExecStart=/usr/bin/podman container run --cgroups=no-conmon --rm --sdnotify=conmon -d awesome-image:latest
Type=notify
NotifyAccess=all
@ -333,7 +355,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name test --log-driver=journald --log-opt=tag={{.Name}} awesome-image:latest
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon -d --replace --name test --log-driver=journald --log-opt=tag={{.Name}} awesome-image:latest
Type=notify
NotifyAccess=all
@ -355,7 +377,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --replace --name test awesome-image:latest sh -c "kill $$$$ && echo %%\\"
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon -d --replace --name test awesome-image:latest sh -c "kill $$$$ && echo %%\\"
Type=notify
NotifyAccess=all
@ -377,7 +399,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --conmon-pidfile=foo --cidfile=foo awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo alpine
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon -d --conmon-pidfile=foo --cidfile=foo awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo alpine
Type=notify
NotifyAccess=all
@ -399,7 +421,7 @@ RequiresMountsFor=/var/run/containers/storage
Environment=PODMAN_SYSTEMD_UNIT=%n
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm --pod-id-file %t/pod-foobar.pod-id-file -d --conmon-pidfile=foo --cidfile=foo awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo --pod-id-file /tmp/pod-foobar.pod-id-file alpine
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --pod-id-file %t/pod-foobar.pod-id-file --sdnotify=conmon -d --conmon-pidfile=foo --cidfile=foo awesome-image:latest podman run --cgroups=foo --conmon-pidfile=foo --cidfile=foo --pod-id-file /tmp/pod-foobar.pod-id-file alpine
Type=notify
NotifyAccess=all
@ -422,7 +444,7 @@ Environment=PODMAN_SYSTEMD_UNIT=%n
Environment=FOO=abc "BAR=my test" USER=%%a
Restart=always
TimeoutStopSec=70
ExecStart=/usr/bin/podman run --sdnotify=conmon --cgroups=no-conmon --rm -d --env FOO --env=BAR --env=MYENV=2 -e USER awesome-image:latest
ExecStart=/usr/bin/podman run --cgroups=no-conmon --rm --sdnotify=conmon -d --env FOO --env=BAR --env=MYENV=2 -e USER awesome-image:latest
Type=notify
NotifyAccess=all
@ -547,6 +569,25 @@ WantedBy=multi-user.target default.target
false,
false,
},
{"good with name and sdnotify",
containerInfo{
Executable: "/usr/bin/podman",
ServiceName: "jadda-jadda",
ContainerNameOrID: "jadda-jadda",
RestartPolicy: "always",
PIDFile: "/run/containers/storage/overlay-containers/639c53578af4d84b8800b4635fa4e680ee80fd67e0e6a2d4eea48d1e3230f401/userdata/conmon.pid",
StopTimeout: 10,
PodmanVersion: "CI",
CreateCommand: []string{"I'll get stripped", "container", "run", "--sdnotify=container", "--name", "jadda-jadda", "--hostname", "hello-world", "awesome-image:latest", "command", "arg1", "...", "argN", "foo=arg \"with \" space"},
EnvVariable: define.EnvVariable,
GraphRoot: "/var/lib/containers/storage",
RunRoot: "/var/run/containers/storage",
},
goodWithNameAndSdnotify,
true,
false,
false,
},
{"good with explicit short detach param",
containerInfo{
Executable: "/usr/bin/podman",


@ -26,9 +26,17 @@ function teardown() {
done < $SNAME_FILE
rm -f $SNAME_FILE
run_podman ? rmi quay.io/libpod/alpine:latest
run_podman ? rmi quay.io/libpod/busybox:latest
run_podman ? rmi quay.io/libpod/localtest:latest
run_podman ? rmi -f \
quay.io/libpod/alpine:latest \
quay.io/libpod/busybox:latest \
quay.io/libpod/localtest:latest \
quay.io/libpod/autoupdatebroken:latest \
quay.io/libpod/test:latest \
quay.io/libpod/fedora:31
# The rollback tests may leave some dangling images behind, so let's prune
# them to leave a clean state.
run_podman ? image prune -f
basic_teardown
}
@ -43,18 +51,30 @@ function teardown() {
function generate_service() {
local target_img_basename=$1
local autoupdate=$2
local command=$3
local extraArgs=$4
local noTag=$5
# Unless specified, set a default command.
if [[ -z "$command" ]]; then
command="top -d 120"
fi
# Container name. Include the autoupdate type, to make debugging easier.
# IMPORTANT: variable 'cname' is passed (out of scope) up to caller!
cname=c_${autoupdate//\'/}_$(random_string)
target_img="quay.io/libpod/$target_img_basename:latest"
run_podman tag $IMAGE $target_img
if [[ -z "$noTag" ]]; then
run_podman tag $IMAGE $target_img
fi
if [[ -n "$autoupdate" ]]; then
label="--label io.containers.autoupdate=$autoupdate"
else
label=""
fi
run_podman run -d --name $cname $label $target_img top -d 120
run_podman create $extraArgs --name $cname $label $target_img $command
(cd $UNIT_DIR; run_podman generate systemd --new --files --name $cname)
echo "container-$cname" >> $SNAME_FILE
@ -128,6 +148,38 @@ function _confirm_update() {
_confirm_update $cname $ori_image
}
@test "podman auto-update - label io.containers.autoupdate=image with rollback" {
# Note: the autoupdatebroken image is intentionally empty so it cannot be
# executed, which forces a rollback.  The rollback test for the local policy
# exercises the case where the container doesn't send a ready message.
image=quay.io/libpod/autoupdatebroken
run_podman tag $IMAGE $image
generate_service autoupdatebroken image
_wait_service_ready container-$cname.service
run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
is "$output" ".*container-$cname.service,$image:latest,pending,registry.*" "Image update is pending."
run_podman container inspect --format "{{.Image}}" $cname
oldID="$output"
run_podman inspect --format "{{.ID}}" $cname
containerID="$output"
run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
is "$output" "Trying to pull.*" "Image is updated."
is "$output" ".*container-$cname.service,$image:latest,rolled back,registry.*" "Image has been rolled back."
run_podman container inspect --format "{{.Image}}" $cname
is "$output" "$oldID" "container rolled back to previous image"
run_podman container inspect --format "{{.ID}}" $cname
if [[ $output == $containerID ]]; then
die "container has not been restarted during rollback (previous id: $containerID, current id: $output)"
fi
}
@test "podman auto-update - label io.containers.autoupdate=disabled" {
generate_service alpine disabled
@ -168,6 +220,61 @@ function _confirm_update() {
_confirm_update $cname $ori_image
}
@test "podman auto-update - label io.containers.autoupdate=local with rollback" {
# sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
# assume that we work only with crun, nothing else.
# [copied from 260-sdnotify.bats]
runtime=$(podman_runtime)
if [[ "$runtime" != "crun" ]]; then
skip "this test only works with crun, not $runtime"
fi
dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
cat >$dockerfile1 <<EOF
FROM quay.io/libpod/fedora:31
RUN echo -e "#!/bin/sh\n\
printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
>> /runme
RUN chmod +x /runme
EOF
dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
cat >$dockerfile2 <<EOF
FROM quay.io/libpod/fedora:31
RUN echo -e "#!/bin/sh\n\
exit 1" >> /runme
RUN chmod +x /runme
EOF
image=test
# Generate a healthy image that will run correctly.
run_podman build -t quay.io/libpod/$image -f $dockerfile1
run_podman image inspect --format "{{.ID}}" $image
oldID="$output"
generate_service $image local /runme --sdnotify=container noTag
_wait_service_ready container-$cname.service
run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,false,local.*" "No update available"
# Generate an unhealthy image that will fail.
run_podman build -t quay.io/libpod/$image -f $dockerfile2
run_podman image inspect --format "{{.ID}}" $image
newID="$output"
run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,pending,local.*" "Image updated is pending"
# Note: we rollback automatically by default.
run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,rolled back,local.*" "Rolled back to old image"
# Make sure that the new container is not using the new image ID anymore.
_confirm_update $cname $newID
}
@test "podman auto-update with multiple services" {
# Preserve original image ID, to confirm that it changes (or not)
run_podman inspect --format "{{.Id}}" $IMAGE