Mirror of https://github.com/containers/podman.git, synced 2025-07-17 17:43:23 +08:00

Originally, during pod removal, we locked every container in the pod at once, did a number of validity checks to ensure everything was safe, and then removed all the containers in the pod.

A deadlock was recently discovered with this approach. In brief, we cannot lock the entire pod (or much more than a single container at a time) without causing a deadlock. As such, we converted to an approach where we just looped over each container in the pod, removing them individually. Unfortunately, this removed a lot of the validity checking of the earlier approach, allowing for a lot of unintended bad things. Infra containers could be removed while containers in the pod still depended on them, for example.

There's no easy way to do validity checks while in a simple loop, so I implemented a version of our graph-traversal logic that currently handles pod start. This version acts in the reverse order of startup: startup starts from containers which depend on nothing and moves outwards, while removal acts on containers which have nothing depending on them and moves inwards. By doing graph traversal, we can guarantee that nothing is removed while something that depends on it still exists - so the infra container should be the last thing in a pod that is removed, for example.

In the (unlikely) case that a graph of the pod's containers cannot be built (most likely impossible without database editing), the old method of pod removal has been retained to ensure that even misbehaving pods can be forcibly evicted from the state.

I'm fairly confident that this resolves the problem, but there are a lot of assumptions around dependency structure built into the original pod removal code and I am not 100% sure I have captured all of them.

Fixes #15526

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
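To illustrate the removal order described above, here is a minimal, self-contained Go sketch. It is not the libpod implementation; the node type, removeNode helper, and container names are purely illustrative. The idea is that removal starts from containers nothing depends on, and a container is only removed once all of its dependents are gone, so the infra container always comes out last.

package main

import "fmt"

// node is an illustrative stand-in for a container in the pod's dependency
// graph: dependsOn points at containers this one needs (e.g. the infra
// container), dependedOnBy at containers that need it.
type node struct {
	id           string
	dependsOn    []*node
	dependedOnBy []*node
}

// removeNode removes n only once every container that depends on it is
// already gone, then recurses into n's own dependencies - the reverse of
// the start-up order, which begins with containers that depend on nothing.
func removeNode(n *node, removed map[string]bool, order *[]string) {
	if removed[n.id] {
		return
	}
	for _, dependent := range n.dependedOnBy {
		if !removed[dependent.id] {
			// A dependent still exists; the last dependent to go will
			// revisit n through the recursion below.
			return
		}
	}
	removed[n.id] = true
	*order = append(*order, n.id)
	for _, dep := range n.dependsOn {
		removeNode(dep, removed, order)
	}
}

func main() {
	infra := &node{id: "infra"}
	web := &node{id: "web", dependsOn: []*node{infra}}
	db := &node{id: "db", dependsOn: []*node{infra}}
	infra.dependedOnBy = []*node{web, db}

	removed := map[string]bool{}
	var order []string
	// Start from the containers nothing depends on, analogous to
	// graph.notDependedOnNodes in the real code.
	for _, n := range []*node{web, db} {
		removeNode(n, removed, &order)
	}
	fmt.Println(order) // [web db infra] - the infra container is removed last
}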
417 lines
13 KiB
Go
//go:build linux
// +build linux

package libpod

import (
	"context"
	"errors"
	"fmt"
	"path"
	"path/filepath"
	"strings"

	"github.com/containers/common/pkg/cgroups"
	"github.com/containers/common/pkg/config"
	"github.com/containers/podman/v4/libpod/define"
	"github.com/containers/podman/v4/libpod/events"
	"github.com/containers/podman/v4/pkg/rootless"
	"github.com/containers/podman/v4/pkg/specgen"
	"github.com/hashicorp/go-multierror"
	"github.com/sirupsen/logrus"
)

// NewPod makes a new, empty pod
func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, options ...PodCreateOption) (_ *Pod, deferredErr error) {
	if !r.valid {
		return nil, define.ErrRuntimeStopped
	}

	pod := newPod(r)

	// Set default namespace to runtime's namespace
	// Do so before options run so they can override it
	if r.config.Engine.Namespace != "" {
		pod.config.Namespace = r.config.Engine.Namespace
	}

	for _, option := range options {
		if err := option(pod); err != nil {
			return nil, fmt.Errorf("running pod create option: %w", err)
		}
	}

	// Allocate a lock for the pod
	lock, err := r.lockManager.AllocateLock()
	if err != nil {
		return nil, fmt.Errorf("allocating lock for new pod: %w", err)
	}
	pod.lock = lock
	pod.config.LockID = pod.lock.ID()

	defer func() {
		if deferredErr != nil {
			if err := pod.lock.Free(); err != nil {
				logrus.Errorf("Freeing pod lock after failed creation: %v", err)
			}
		}
	}()

	pod.valid = true

	// Check Cgroup parent sanity, and set it if it was not set
	if r.config.Cgroups() != "disabled" {
		switch r.config.Engine.CgroupManager {
		case config.CgroupfsCgroupsManager:
			canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(pod.config.CgroupParent)
			if canUseCgroup {
				// need to actually create parent here
				if pod.config.CgroupParent == "" {
					pod.config.CgroupParent = CgroupfsDefaultCgroupParent
				} else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
					return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
				}
				// If we are set to use pod cgroups, set the cgroup parent that
				// all containers in the pod will share
				if pod.config.UsePodCgroup {
					pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID())
					if p.InfraContainerSpec != nil {
						p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
						// cgroupfs + rootless = permission denied when creating the cgroup.
						if !rootless.IsRootless() {
							res, err := GetLimits(p.ResourceLimits)
							if err != nil {
								return nil, err
							}
							// Need to both create and update the cgroup
							// rather than create a new path in c/common for pod cgroup creation
							// just create as if it is a ctr and then update figures out that we need to
							// populate the resource limits on the pod level
							cgc, err := cgroups.New(pod.state.CgroupPath, &res)
							if err != nil {
								return nil, err
							}
							err = cgc.Update(&res)
							if err != nil {
								return nil, err
							}
						}
					}
				}
			}
		case config.SystemdCgroupsManager:
			if pod.config.CgroupParent == "" {
				if rootless.IsRootless() {
					pod.config.CgroupParent = SystemdDefaultRootlessCgroupParent
				} else {
					pod.config.CgroupParent = SystemdDefaultCgroupParent
				}
			} else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
				return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
			}
			// If we are set to use pod cgroups, set the cgroup parent that
			// all containers in the pod will share
			if pod.config.UsePodCgroup {
				cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.ResourceLimits)
				if err != nil {
					return nil, fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
				}
				pod.state.CgroupPath = cgroupPath
				if p.InfraContainerSpec != nil {
					p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath
				}
			}
		default:
			return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
		}
	}

	if pod.config.UsePodCgroup {
		logrus.Debugf("Got pod cgroup as %s", pod.state.CgroupPath)
	}

	if !pod.HasInfraContainer() && pod.SharesNamespaces() {
		return nil, errors.New("Pods must have an infra container to share namespaces")
	}
	if pod.HasInfraContainer() && !pod.SharesNamespaces() {
		logrus.Infof("Pod has an infra container, but shares no namespaces")
	}

	// Unless the user has specified a name, use a randomly generated one.
	// Note that name conflicts may occur (see #11735), so we need to loop.
	generateName := pod.config.Name == ""
	var addPodErr error
	for {
		if generateName {
			name, err := r.generateName()
			if err != nil {
				return nil, err
			}
			pod.config.Name = name
		}

		if p.InfraContainerSpec != nil && p.InfraContainerSpec.Hostname == "" {
			p.InfraContainerSpec.Hostname = pod.config.Name
		}
		if addPodErr = r.state.AddPod(pod); addPodErr == nil {
			return pod, nil
		}
		if !generateName || (!errors.Is(addPodErr, define.ErrPodExists) && !errors.Is(addPodErr, define.ErrCtrExists)) {
			break
		}
	}
	if addPodErr != nil {
		return nil, fmt.Errorf("adding pod to state: %w", addPodErr)
	}

	return pod, nil
}

// AddInfra adds the created infra container to the pod state
func (r *Runtime) AddInfra(ctx context.Context, pod *Pod, infraCtr *Container) (*Pod, error) {
	if !r.valid {
		return nil, define.ErrRuntimeStopped
	}
	pod.state.InfraContainerID = infraCtr.ID()
	if err := pod.save(); err != nil {
		return nil, err
	}
	pod.newPodEvent(events.Create)
	return pod, nil
}

// SavePod is a helper function to save the pod state from outside of libpod
func (r *Runtime) SavePod(pod *Pod) error {
	if !r.valid {
		return define.ErrRuntimeStopped
	}
	if err := pod.save(); err != nil {
		return err
	}
	pod.newPodEvent(events.Create)
	return nil
}

// DO NOT USE THIS FUNCTION DIRECTLY. Use removePod(), below. It will call
// removeMalformedPod() if necessary.
func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Container, force bool, timeout *uint, ctrNamedVolumes map[string]*ContainerNamedVolume) error {
	var removalErr error
	for _, ctr := range ctrs {
		err := func() error {
			ctrLock := ctr.lock
			ctrLock.Lock()
			defer func() {
				ctrLock.Unlock()
			}()

			if err := ctr.syncContainer(); err != nil {
				return err
			}

			for _, vol := range ctr.config.NamedVolumes {
				ctrNamedVolumes[vol.Name] = vol
			}

			return r.removeContainer(ctx, ctr, force, false, true, true, timeout)
		}()

		if removalErr == nil {
			removalErr = err
		} else {
			logrus.Errorf("Removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
		}
	}
	if removalErr != nil {
		return removalErr
	}

	// Clear infra container ID before we remove the infra container.
	// There is a potential issue if we don't do that, and removal is
	// interrupted between RemoveAllContainers() below and the pod's removal
	// later - we end up with a reference to a nonexistent infra container.
	p.state.InfraContainerID = ""
	if err := p.save(); err != nil {
		return err
	}

	// Remove all containers in the pod from the state.
	if err := r.state.RemovePodContainers(p); err != nil {
		// If this fails, there isn't much more we can do.
		// The containers in the pod are unusable, but they still exist,
		// so pod removal will fail.
		return err
	}

	return nil
}

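// removePod removes the pod's containers (if removeCtrs is set), any
// anonymous volumes they used, the pod cgroup, and finally the pod itself
// from the state, freeing its lock. Containers are removed via graph
// traversal so nothing is removed while another container still depends on
// it; if the graph cannot be built and force is set, removal falls back to
// removeMalformedPod() above.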
func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error {
	if err := p.updatePod(); err != nil {
		return err
	}

	ctrs, err := r.state.PodContainers(p)
	if err != nil {
		return err
	}
	numCtrs := len(ctrs)

	// If the only running container in the pod is the pause container, remove the pod and container unconditionally.
	pauseCtrID := p.state.InfraContainerID
	if numCtrs == 1 && ctrs[0].ID() == pauseCtrID {
		removeCtrs = true
		force = true
	}
	if !removeCtrs && numCtrs > 0 {
		return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
	}

	var removalErr error
	ctrNamedVolumes := make(map[string]*ContainerNamedVolume)

	// Build a graph of all containers in the pod.
	graph, err := BuildContainerGraph(ctrs)
	if err != nil {
		// We have to allow the pod to be removed.
		// But let's only do it if force is set.
		if !force {
			return fmt.Errorf("cannot create container graph for pod %s: %w", p.ID(), err)
		}

		removalErr = fmt.Errorf("creating container graph for pod %s failed, fell back to loop removal: %w", p.ID(), err)

		if err := r.removeMalformedPod(ctx, p, ctrs, force, timeout, ctrNamedVolumes); err != nil {
			logrus.Errorf("Error creating container graph for pod %s: %v. Falling back to loop removal.", p.ID(), err)
			return err
		}
	} else {
		ctrErrors := make(map[string]error)
		ctrsVisited := make(map[string]bool)

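		// Remove containers starting from those that nothing else depends
		// on; removeNode() works inwards so that dependencies (such as the
		// infra container) are only removed after all of their dependents.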
		for _, node := range graph.notDependedOnNodes {
			removeNode(ctx, node, p, force, timeout, false, ctrErrors, ctrsVisited, ctrNamedVolumes)
		}

		// This is gross, but I don't want to change the signature on
		// removePod - especially since any change here eventually has
		// to map down to one error unless we want to make a breaking
		// API change.
		if len(ctrErrors) > 0 {
			var allErrs error
			for id, err := range ctrErrors {
				allErrs = multierror.Append(allErrs, fmt.Errorf("removing container %s from pod %s: %w", id, p.ID(), err))
			}
			return allErrs
		}
	}

	for volName := range ctrNamedVolumes {
		volume, err := r.state.Volume(volName)
		if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
			logrus.Errorf("Retrieving volume %s: %v", volName, err)
			continue
		}
		if !volume.Anonymous() {
			continue
		}
		if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil {
			if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) {
				continue
			}
			logrus.Errorf("Removing volume %s: %v", volName, err)
		}
	}

	// Remove pod cgroup, if present
	if p.state.CgroupPath != "" {
		logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)

		switch p.runtime.config.Engine.CgroupManager {
		case config.SystemdCgroupsManager:
			if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
				if removalErr == nil {
					removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err)
				} else {
					logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
				}
			}
		case config.CgroupfsCgroupsManager:
			// Delete the cgroupfs cgroup
			// Make sure the conmon cgroup is deleted first
			// Since the pod is almost gone, don't bother failing
			// hard - instead, just log errors.
			conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
			conmonCgroup, err := cgroups.Load(conmonCgroupPath)
			if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
				if removalErr == nil {
					removalErr = fmt.Errorf("retrieving pod %s conmon cgroup: %w", p.ID(), err)
				} else {
					logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
				}
			}
			if err == nil {
				if err = conmonCgroup.Delete(); err != nil {
					if removalErr == nil {
						removalErr = fmt.Errorf("removing pod %s conmon cgroup: %w", p.ID(), err)
					} else {
						logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
					}
				}
			}
			cgroup, err := cgroups.Load(p.state.CgroupPath)
			if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
				if removalErr == nil {
					removalErr = fmt.Errorf("retrieving pod %s cgroup: %w", p.ID(), err)
				} else {
					logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
				}
			}
			if err == nil {
				if err := cgroup.Delete(); err != nil {
					if removalErr == nil {
						removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err)
					} else {
						logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
					}
				}
			}
		default:
			// This should be caught much earlier, but let's still
			// keep going so we make sure to evict the pod before
			// ending up with an inconsistent state.
			if removalErr == nil {
				removalErr = fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal)
			} else {
				logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID())
			}
		}
	}

	if err := p.maybeRemoveServiceContainer(); err != nil {
		return err
	}

	// Remove pod from state
	if err := r.state.RemovePod(p); err != nil {
		if removalErr != nil {
			logrus.Errorf("%v", removalErr)
		}
		return err
	}

	// Mark pod invalid
	p.valid = false
	p.newPodEvent(events.Remove)

	// Deallocate the pod lock
	if err := p.lock.Free(); err != nil {
		if removalErr == nil {
			removalErr = fmt.Errorf("freeing pod %s lock: %w", p.ID(), err)
		} else {
			logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
		}
	}

	return removalErr
}