podman/libpod/runtime_img.go
Matthew Heon e19e0de5fa Introduce graph-based pod container removal
Originally, during pod removal, we locked every container in the
pod at once, did a number of validity checks to ensure everything
was safe, and then removed all the containers in the pod.

A deadlock was recently discovered with this approach. In brief,
we cannot lock the entire pod (or much more than a single
container at a time) without causing a deadlock. As such, we
converted to an approach where we just looped over each container
in the pod, removing them individually. Unfortunately, this
removed much of the validity checking of the earlier approach,
allowing for unintended and unsafe removals. Infra containers
could be removed while containers in the pod still depended on
them, for example.

There's no easy way to do validity checks while in a simple loop,
so I implemented a version of the graph-traversal logic that
currently handles pod start. This version acts in the reverse
order of startup: startup begins with containers that depend on
nothing and moves outwards, while removal begins with containers
that nothing depends on and moves inwards. By doing graph
traversal, we can guarantee that nothing is removed while
something that depends on it still exists - so the infra
container should be the last thing removed from a pod, for
example.
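
To make the ordering concrete, here is a minimal sketch of removal
in reverse dependency order. This is an illustration only, not the
code in this commit: the node type, the removeNode/removeAll names,
and the remove callback are invented for the example, and the real
graph-traversal code must also handle locking and error collection.

type node struct {
	id         string
	dependents []*node // containers that depend on this container
}

// removeNode removes a container only after everything that depends on it
// has already been removed (assuming an acyclic dependency graph), so the
// infra container ends up being removed last.
func removeNode(n *node, removed map[string]bool, remove func(id string) error) error {
	if removed[n.id] {
		return nil
	}
	for _, dep := range n.dependents {
		if err := removeNode(dep, removed, remove); err != nil {
			return err
		}
	}
	if err := remove(n.id); err != nil {
		return err
	}
	removed[n.id] = true
	return nil
}

// removeAll drives removal for every container in the pod; the shared
// "removed" set makes this a single reverse-topological pass.
func removeAll(nodes []*node, remove func(id string) error) error {
	removed := make(map[string]bool)
	for _, n := range nodes {
		if err := removeNode(n, removed, remove); err != nil {
			return err
		}
	}
	return nil
}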

In the (unlikely) case that a graph of the pod's containers
cannot be built (most likely impossible without database editing),
the old method of pod removal has been retained to ensure that
even misbehaving pods can be forcibly evicted from the state.

I'm fairly confident that this resolves the problem, but there
are a lot of assumptions around dependency structure built into
the original pod removal code and I am not 100% sure I have
captured all of them.

Fixes #15526

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
2022-09-14 13:44:48 -04:00


package libpod

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	buildahDefine "github.com/containers/buildah/define"
	"github.com/containers/buildah/imagebuildah"
	"github.com/containers/common/libimage"
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/podman/v4/libpod/define"
	"github.com/containers/podman/v4/libpod/events"
	"github.com/containers/podman/v4/pkg/util"
	"github.com/sirupsen/logrus"
)
// Runtime API

// RemoveContainersForImageCallback returns a callback that can be used in
// `libimage`. When forcefully removing images, containers using the image
// should be removed as well. The callback allows for more graceful removal as
// we can use the libpod-internal removal logic.
func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage.RemoveContainerFunc {
	return func(imageID string) error {
		if !r.valid {
			return define.ErrRuntimeStopped
		}
		ctrs, err := r.state.AllContainers()
		if err != nil {
			return err
		}
		for _, ctr := range ctrs {
			if ctr.config.RootfsImageID != imageID {
				continue
			}
			var timeout *uint
			if ctr.config.IsInfra {
				pod, err := r.state.Pod(ctr.config.Pod)
				if err != nil {
					return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", ctr.ID(), ctr.config.Pod, err)
				}
				if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
					return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
				}
			} else {
				if err := r.removeContainer(ctx, ctr, true, false, false, false, timeout); err != nil {
					return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
				}
			}
		}

		// Note that `libimage` will take care of removing any leftover
		// containers from the storage.
		return nil
	}
}
// IsExternalContainerCallback returns a callback that can be used in
// `libimage` to figure out whether a given container is an external one. A
// container is considered external if it is not present in libpod's database.
func (r *Runtime) IsExternalContainerCallback(_ context.Context) libimage.IsExternalContainerFunc {
	// NOTE: pruning external containers is subject to race conditions
	// (e.g., when a container gets removed). To address this and similar
	// races, pruning would have to happen inside c/storage, with containers
	// labelled with "podman/libpod" along with callbacks similar to
	// libimage.
	return func(idOrName string) (bool, error) {
		_, err := r.LookupContainer(idOrName)
		if err == nil {
			return false, nil
		}
		if errors.Is(err, define.ErrNoSuchCtr) {
			return true, nil
		}
		return false, nil
	}
}
// newImageBuildCompleteEvent creates a new event based on the completion of
// an image build
func (r *Runtime) newImageBuildCompleteEvent(idOrName string) {
	e := events.NewEvent(events.Build)
	e.Type = events.Image
	e.Name = idOrName
	if err := r.eventer.Write(e); err != nil {
		logrus.Errorf("Unable to write build event: %q", err)
	}
}
// Build adds the runtime to the imagebuildah call
func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions, dockerfiles ...string) (string, reference.Canonical, error) {
	if options.Runtime == "" {
		options.Runtime = r.GetOCIRuntimePath()
	}
	// share the network interface between podman and buildah
	options.NetworkInterface = r.network

	id, ref, err := imagebuildah.BuildDockerfiles(ctx, r.store, options, dockerfiles...)

	// Write event for build completion
	r.newImageBuildCompleteEvent(id)
	return id, ref, err
}
// DownloadFromFile reads all of the content from the reader and temporarily
// saves it in $TMPDIR/importxyz, which is deleted after the image is imported
func DownloadFromFile(reader *os.File) (string, error) {
	outFile, err := ioutil.TempFile(util.Tmpdir(), "import")
	if err != nil {
		return "", fmt.Errorf("creating file: %w", err)
	}
	defer outFile.Close()

	logrus.Debugf("saving %s to %s", reader.Name(), outFile.Name())

	_, err = io.Copy(outFile, reader)
	if err != nil {
		return "", fmt.Errorf("saving %s to %s: %w", reader.Name(), outFile.Name(), err)
	}

	return outFile.Name(), nil
}
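
As a usage note, a caller would typically open the source file itself and
hand it to DownloadFromFile. The helper below is a hypothetical sketch, not
part of this file; the importFromTarball name is invented for illustration.

// importFromTarball is a hypothetical example: it stages a tarball in
// $TMPDIR via DownloadFromFile and returns the temporary path for the
// import logic to consume (and later clean up).
func importFromTarball(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	return DownloadFromFile(f)
}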