libpod: fix wait and exit-code logic

This commit addresses three intertwined bugs to fix an issue when using
Gitlab runner on Podman.  The three bug fixes are not split into
separate commits as tests won't pass otherwise; avoidable noise when
bisecting future issues.

1) Podman conflated states: even when asking to wait for the `exited`
   state, Podman returned as soon as a container transitioned to
   `stopped`.  The issue surfaced in Gitlab tests failing [1] as
   `conmon`'s buffers have not (yet) been emptied when attaching to a
   container right after a wait.  The race window was extremely narrow,
   and I only managed to reproduce with the Gitlab runner [1] unit
   tests.

2) The clearer separation between `exited` and `stopped` revealed a race
   condition predating the changes.  If a container is configured for
   autoremoval (e.g., via `run --rm`), the "run" process competes with
   the "cleanup" process running in the background.  The window of the
   race condition was sufficiently large that the "cleanup" process had
   already removed the container and storage before the "run" process
   could read the exit code; the "run" process hence waited indefinitely.

   Address the exit-code race condition by recording exit codes in the
   main libpod database.  Exit codes can now be read from the database.
   When waiting for a container to exit, Podman first waits for the
   container to transition to `exited` and will then query the database
   for its exit code. Outdated exit codes are pruned during cleanup
   (i.e., non-performance critical) and when refreshing the database
   after a reboot.  An exit code is considered outdated when it is older
   than 5 minutes.

   While the race condition predates this change, the waiting process
   has apparently always been fast enough in catching the exit code due
   to issue 1): `exited` and `stopped` were conflated.  The waiting
   process hence caught the exit code after the container transitioned
   to `stopped` but before it `exited` and got removed.

3) With 1) and 2), Podman is now waiting for a container to properly
   transition to the `exited` state.  Some tests did not pass after 1)
   and 2) which revealed the third bug: `conmon` was executed with its
   working directory pointing to the OCI runtime bundle of the
   container.  The changed working directory broke resolving relative
   paths in the "cleanup" process.  The "cleanup" process errored out
   before actually cleaning up the container, and the waiting "main"
   process ran indefinitely - or until hitting a timeout.  Fix the issue by
   executing `conmon` with the same working directory as Podman.

Note that fixing 3) *may* address a number of issues we have seen in the
past where for *some* reason cleanup processes did not fire.

[1] https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27119#note_970712864

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>

[MH: Minor reword of commit message]

Signed-off-by: Matthew Heon <mheon@redhat.com>
This commit is contained in:
Valentin Rothberg
2022-06-10 12:38:28 +02:00
committed by Matthew Heon
parent 15188dce05
commit 30e7cbccc1
10 changed files with 312 additions and 99 deletions

View File

@ -2,6 +2,7 @@ package libpod
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
@ -490,41 +491,84 @@ func (c *Container) RemoveArtifact(name string) error {
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait(ctx context.Context) (int32, error) {
return c.WaitWithInterval(ctx, DefaultWaitInterval)
return c.WaitForExit(ctx, DefaultWaitInterval)
}
// WaitWithInterval blocks until the container to exit and returns its exit
// code. The argument is the interval at which checks the container's status.
func (c *Container) WaitWithInterval(ctx context.Context, waitTimeout time.Duration) (int32, error) {
// WaitForExit blocks until the container exits and returns its exit code. The
// argument is the interval at which checks the container's status.
func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration) (int32, error) {
if !c.valid {
return -1, define.ErrCtrRemoved
}
exitFile, err := c.exitFilePath()
if err != nil {
return -1, err
}
chWait := make(chan error, 1)
id := c.ID()
var conmonTimer time.Timer
conmonTimerSet := false
go func() {
<-ctx.Done()
chWait <- define.ErrCanceled
}()
for {
// ignore errors here (with exception of cancellation), it is only used to avoid waiting
// too long.
_, e := WaitForFile(exitFile, chWait, waitTimeout)
if e == define.ErrCanceled {
return -1, define.ErrCanceled
getExitCode := func() (bool, int32, error) {
containerRemoved := false
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
}
stopped, code, err := c.isStopped()
if err := c.syncContainer(); err != nil {
if !errors.Is(err, define.ErrNoSuchCtr) {
return false, -1, err
}
containerRemoved = true
}
// If conmon is not alive anymore set a timer to make sure
// we're returning even if conmon has forcefully been killed.
if !conmonTimerSet && !containerRemoved {
conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
switch {
case errors.Is(err, define.ErrNoSuchCtr):
containerRemoved = true
case err != nil:
return false, -1, err
case !conmonAlive:
timerDuration := time.Second * 20
conmonTimer = *time.NewTimer(timerDuration)
conmonTimerSet = true
}
}
if !containerRemoved {
// If conmon is dead for more than $timerDuration or if the
// container has exited properly, try to look up the exit code.
select {
case <-conmonTimer.C:
logrus.Debugf("Exceeded conmon timeout waiting for container %s to exit", id)
default:
if !c.ensureState(define.ContainerStateExited, define.ContainerStateConfigured) {
return false, -1, nil
}
}
}
exitCode, err := c.runtime.state.GetContainerExitCode(id)
if err != nil {
return true, -1, err
}
return true, exitCode, nil
}
for {
hasExited, exitCode, err := getExitCode()
if hasExited {
return exitCode, err
}
if err != nil {
return -1, err
}
if stopped {
return code, nil
select {
case <-ctx.Done():
return -1, fmt.Errorf("waiting for exit code of container %s canceled", id)
default:
time.Sleep(pollInterval)
}
}
}
@ -551,11 +595,12 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou
wantedStates := make(map[define.ContainerStatus]bool, len(conditions))
for _, condition := range conditions {
if condition == define.ContainerStateStopped || condition == define.ContainerStateExited {
switch condition {
case define.ContainerStateExited, define.ContainerStateStopped:
waitForExit = true
continue
default:
wantedStates[condition] = true
}
wantedStates[condition] = true
}
trySend := func(code int32, err error) {
@ -572,7 +617,7 @@ func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeou
go func() {
defer wg.Done()
code, err := c.WaitWithInterval(ctx, waitTimeout)
code, err := c.WaitForExit(ctx, waitTimeout)
trySend(code, err)
}()
}