Mirror of https://github.com/containers/podman.git (synced 2025-07-02 16:57:24 +08:00)
Merge pull request #18507 from mheon/fix_rm_depends
Fix `podman rm -fa` with dependencies
@@ -113,7 +113,10 @@ func removePods(namesOrIDs []string, rmOptions entities.PodRmOptions, printIDs b
return nil
}
setExitCode(err)
return append(errs, err)
errs = append(errs, err)
if !strings.Contains(err.Error(), define.ErrRemovingCtrs.Error()) {
return errs
}
}

// in the cli, first we print out all the successful attempts

@@ -128,6 +131,11 @@ func removePods(namesOrIDs []string, rmOptions entities.PodRmOptions, printIDs b
}
setExitCode(r.Err)
errs = append(errs, r.Err)
for ctr, err := range r.RemovedCtrs {
if err != nil {
errs = append(errs, fmt.Errorf("error removing container %s from pod %s: %w", ctr, r.Id, err))
}
}
}
}
return errs

@@ -359,7 +359,13 @@ func removeNode(ctx context.Context, node *containerNode, pod *Pod, force bool,
}

if !ctrErrored {
if err := node.container.runtime.removeContainer(ctx, node.container, force, false, true, false, timeout); err != nil {
opts := ctrRmOpts{
Force: force,
RemovePod: true,
Timeout: timeout,
}

if _, _, err := node.container.runtime.removeContainer(ctx, node.container, opts); err != nil {
ctrErrored = true
ctrErrors[node.id] = err
}
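The hunk above swaps removeNode's positional call for the new ctrRmOpts struct; removeNode itself belongs to a depth-first traversal that removes a container's dependents before the container and records a per-node error. A reduced, illustrative sketch of that traversal shape (the node type and field names here are stand-ins, not podman's):

package main

import "fmt"

type node struct {
	id         string
	dependedBy []*node // nodes that must be removed before this one
}

func removeNode(n *node, visited map[string]bool, errs map[string]error) {
	if visited[n.id] {
		return
	}
	visited[n.id] = true
	// Remove dependents first so nothing still references n.
	for _, dep := range n.dependedBy {
		removeNode(dep, visited, errs)
	}
	fmt.Println("removing", n.id)
	errs[n.id] = nil // record per-node success or failure
}

func main() {
	web := &node{id: "web"}
	infra := &node{id: "infra", dependedBy: []*node{web}}
	removeNode(infra, map[string]bool{}, map[string]error{})
}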
@@ -211,4 +211,8 @@ var (
// ErrConmonVersionFormat is used when the expected version format of conmon
// has changed.
ErrConmonVersionFormat = "conmon version changed format"

// ErrRemovingCtrs indicates that there was an error removing all
// containers from a pod.
ErrRemovingCtrs = errors.New("removing pod containers")
)
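ErrRemovingCtrs is a sentinel error: pod removal wraps it with %w so in-process callers can detect it, while the CLI hunk at the top matches it by substring (strings.Contains on the error text) because errors that cross the remote API boundary arrive as plain strings and lose their identity for errors.Is. A minimal sketch of both match styles:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var ErrRemovingCtrs = errors.New("removing pod containers")

func removePod() error {
	// "abc" is a made-up pod ID for illustration.
	return fmt.Errorf("not all containers could be removed from pod abc: %w", ErrRemovingCtrs)
}

func main() {
	err := removePod()
	// In-process the %w chain is intact, so errors.Is works.
	fmt.Println(errors.Is(err, ErrRemovingCtrs)) // true
	// Across an API that serializes errors to strings, only the text
	// survives, so match by substring as the CLI code does.
	fmt.Println(strings.Contains(err.Error(), ErrRemovingCtrs.Error())) // true
}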
@@ -142,6 +142,14 @@ shm_struct_t *setup_lock_shm(char *path, uint32_t num_locks, int *error_code) {
goto CLEANUP_UNMAP;
}

// Ensure that recursive locking of a mutex by the same OS thread (which may
// refer to numerous goroutines) blocks.
ret_code = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
if (ret_code != 0) {
*error_code = -1 * ret_code;
goto CLEANUP_FREEATTR;
}

// Set mutexes to pshared - multiprocess-safe
ret_code = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
if (ret_code != 0) {
@@ -356,10 +356,20 @@ func generateResourceFile(res *spec.LinuxResources) (string, []string, error) {
// If all is set, send to all PIDs in the container.
// All is only supported if the container created cgroups.
func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool) error {
if _, err := r.killContainer(ctr, signal, all, false); err != nil {
return err
}

return nil
}

// If captureStderr is requested, OCI runtime STDERR will be captured as a
// *bytes.buffer and returned; otherwise, it is set to os.Stderr.
func (r *ConmonOCIRuntime) killContainer(ctr *Container, signal uint, all, captureStderr bool) (*bytes.Buffer, error) {
logrus.Debugf("Sending signal %d to container %s", signal, ctr.ID())
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return err
return nil, err
}
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
var args []string

@@ -369,19 +379,27 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool)
} else {
args = append(args, "kill", ctr.ID(), fmt.Sprintf("%d", signal))
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil {
var (
stderr io.Writer = os.Stderr
stderrBuffer *bytes.Buffer
)
if captureStderr {
stderrBuffer = new(bytes.Buffer)
stderr = stderrBuffer
}
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, stderr, env, r.path, args...); err != nil {
// Update container state - there's a chance we failed because
// the container exited in the meantime.
if err2 := r.UpdateContainerStatus(ctr); err2 != nil {
logrus.Infof("Error updating status for container %s: %v", ctr.ID(), err2)
}
if ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
return fmt.Errorf("%w: %s", define.ErrCtrStateInvalid, ctr.state.State)
return stderrBuffer, fmt.Errorf("%w: %s", define.ErrCtrStateInvalid, ctr.state.State)
}
return fmt.Errorf("sending signal to container %s: %w", ctr.ID(), err)
return stderrBuffer, fmt.Errorf("sending signal to container %s: %w", ctr.ID(), err)
}

return nil
return stderrBuffer, nil
}

// StopContainer stops a container, first using its given stop signal (or
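The captureStderr flag above switches the OCI runtime's stderr between a pass-through os.Stderr and an in-memory buffer the caller can inspect and filter. A self-contained sketch of that io.Writer switch, using exec.Command in place of podman's ExecCmdWithStdStreams helper (the binary name and arguments are made up):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
)

// runAndMaybeCapture runs path with args, either streaming stderr to the
// terminal or capturing it. When capture is false the returned buffer is nil.
func runAndMaybeCapture(path string, capture bool, args ...string) (*bytes.Buffer, error) {
	var (
		stderr       io.Writer = os.Stderr
		stderrBuffer *bytes.Buffer
	)
	if capture {
		stderrBuffer = new(bytes.Buffer)
		stderr = stderrBuffer
	}
	cmd := exec.Command(path, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = stderr
	err := cmd.Run()
	return stderrBuffer, err
}

func main() {
	// Hypothetical invocation; "runc kill" stands in for r.path and args.
	buf, err := runAndMaybeCapture("runc", true, "kill", "myctr", "15")
	if err != nil && buf != nil {
		fmt.Printf("runtime stderr was: %q\n", buf.String())
	}
}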
@@ -400,23 +418,65 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool)
return nil
}

if timeout > 0 {
stopSignal := ctr.config.StopSignal
if stopSignal == 0 {
stopSignal = uint(syscall.SIGTERM)
killCtr := func(signal uint) (bool, error) {
stderr, err := r.killContainer(ctr, signal, all, true)

// Before handling error from KillContainer, convert STDERR to a []string
// (one string per line of output) and print it, ignoring known OCI runtime
// errors that we don't care about
stderrLines := strings.Split(stderr.String(), "\n")
for _, line := range stderrLines {
if line == "" {
continue
}
if strings.Contains(line, "container not running") || strings.Contains(line, "open pidfd: No such process") {
logrus.Debugf("Failure to kill container (already stopped?): logged %s", line)
continue
}
fmt.Fprintf(os.Stderr, "%s\n", line)
}
if err := r.KillContainer(ctr, stopSignal, all); err != nil {

if err != nil {
// There's an inherent race with the cleanup process (see
// #16142, #17142). If the container has already been marked as
// stopped or exited by the cleanup process, we can return
// immediately.
if errors.Is(err, define.ErrCtrStateInvalid) && ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
return true, nil
}

// If the PID is 0, then the container is already stopped.
if ctr.state.PID == 0 {
return true, nil
}

// Is the container gone?
// If so, it probably died between the first check and
// our sending the signal
// The container is stopped, so exit cleanly
err := unix.Kill(ctr.state.PID, 0)
if err == unix.ESRCH {
return nil
return true, nil
}

return false, err
}
return false, nil
}

if timeout > 0 {
stopSignal := ctr.config.StopSignal
if stopSignal == 0 {
stopSignal = uint(syscall.SIGTERM)
}

stopped, err := killCtr(stopSignal)
if err != nil {
return err
}
if stopped {
return nil
}

if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil {
logrus.Debugf("Timed out stopping container %s with %s, resorting to SIGKILL: %v", ctr.ID(), unix.SignalName(syscall.Signal(stopSignal)), err)

@@ -427,27 +487,13 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool)
}
}

// If the timeout was set to 0 or if stopping the container with the
// specified signal did not work, use the big hammer with SIGKILL.
if err := r.KillContainer(ctr, uint(unix.SIGKILL), all); err != nil {
// There's an inherent race with the cleanup process (see
// #16142, #17142). If the container has already been marked as
// stopped or exited by the cleanup process, we can return
// immediately.
if errors.Is(err, define.ErrCtrStateInvalid) && ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
return nil
}

// If the PID is 0, then the container is already stopped.
if ctr.state.PID == 0 {
return nil
}
// Again, check if the container is gone. If it is, exit cleanly.
if aliveErr := unix.Kill(ctr.state.PID, 0); errors.Is(aliveErr, unix.ESRCH) {
return nil
}
stopped, err := killCtr(uint(unix.SIGKILL))
if err != nil {
return fmt.Errorf("sending SIGKILL to container %s: %w", ctr.ID(), err)
}
if stopped {
return nil
}

// Give runtime a few seconds to make it happen
if err := waitContainerStop(ctr, killContainerTimeout); err != nil {
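The refactor above factors the "signal, check whether the container already stopped, escalate" logic into one killCtr closure used for both the stop signal and SIGKILL. Stripped of podman's state handling, the control flow looks roughly like this sketch (kill and wait are stand-in callbacks, not podman APIs):

package main

import (
	"fmt"
	"syscall"
	"time"
)

func stopWithEscalation(
	kill func(sig uint) (stopped bool, err error),
	wait func(timeout time.Duration) error,
	stopSignal uint, timeout uint,
) error {
	if timeout > 0 {
		if stopSignal == 0 {
			stopSignal = uint(syscall.SIGTERM)
		}
		stopped, err := kill(stopSignal)
		if err != nil {
			return err
		}
		if stopped {
			return nil
		}
		if err := wait(time.Duration(timeout) * time.Second); err == nil {
			return nil // stopped gracefully within the timeout
		}
	}
	// Timeout of 0, or the graceful signal did not work: escalate.
	stopped, err := kill(uint(syscall.SIGKILL))
	if err != nil {
		return fmt.Errorf("sending SIGKILL: %w", err)
	}
	if stopped {
		return nil
	}
	return wait(5 * time.Second) // give the runtime a few seconds
}

func main() {
	err := stopWithEscalation(
		func(sig uint) (bool, error) { fmt.Println("kill with signal", sig); return false, nil },
		func(d time.Duration) error { return nil },
		0, 10)
	fmt.Println("stopped:", err == nil)
}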
@@ -40,7 +40,12 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
icLock := initCon.lock
icLock.Lock()
var time *uint
if err := p.runtime.removeContainer(ctx, initCon, false, false, true, false, time); err != nil {
opts := ctrRmOpts{
RemovePod: true,
Timeout: time,
}

if _, _, err := p.runtime.removeContainer(ctx, initCon, opts); err != nil {
icLock.Unlock()
return fmt.Errorf("failed to remove once init container %s: %w", initCon.ID(), err)
}

@@ -97,10 +97,15 @@ func (r *Runtime) reset(ctx context.Context) error {
return err
}
for _, p := range pods {
if err := r.RemovePod(ctx, p, true, true, timeout); err != nil {
if ctrs, err := r.RemovePod(ctx, p, true, true, timeout); err != nil {
if errors.Is(err, define.ErrNoSuchPod) {
continue
}
for ctr, err := range ctrs {
if err != nil {
logrus.Errorf("Error removing pod %s container %s: %v", p.ID(), ctr, err)
}
}
logrus.Errorf("Removing Pod %s: %v", p.ID(), err)
}
}

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path"
"path/filepath"
@@ -600,14 +601,66 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
return ctr, nil
}

// RemoveContainer removes the given container
// If force is specified, the container will be stopped first
// If removeVolume is specified, named volumes used by the container will
// be removed also if and only if the container is the sole user
// Otherwise, RemoveContainer will return an error if the container is running
// RemoveContainer removes the given container. If force is true, the container
// will be stopped first (otherwise, an error will be returned if the container
// is running). If removeVolume is specified, anonymous named volumes used by the
// container will be removed also (iff the container is the sole user of the
// volumes). Timeout sets the stop timeout for the container if it is running.
func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool, timeout *uint) error {
// NOTE: container will be locked down the road.
return r.removeContainer(ctx, c, force, removeVolume, false, false, timeout)
opts := ctrRmOpts{
Force: force,
RemoveVolume: removeVolume,
Timeout: timeout,
}

// NOTE: container will be locked down the road. There is no unlocked
// version of removeContainer.
_, _, err := r.removeContainer(ctx, c, opts)
return err
}

// RemoveContainerAndDependencies removes the given container and all its
// dependencies. This may include pods (if the container or any of its
// dependencies is an infra or service container, the associated pod(s) will also
// be removed). Otherwise, it functions identically to RemoveContainer.
// Returns two arrays: containers removed, and pods removed. These arrays are
// always returned, even if error is set, and indicate any containers that were
// successfully removed prior to the error.
func (r *Runtime) RemoveContainerAndDependencies(ctx context.Context, c *Container, force bool, removeVolume bool, timeout *uint) (map[string]error, map[string]error, error) {
opts := ctrRmOpts{
Force: force,
RemoveVolume: removeVolume,
RemoveDeps: true,
Timeout: timeout,
}

// NOTE: container will be locked down the road. There is no unlocked
// version of removeContainer.
return r.removeContainer(ctx, c, opts)
}

// Options for removeContainer
type ctrRmOpts struct {
// Whether to stop running container(s)
Force bool
// Whether to remove anonymous volumes used by removing the container
RemoveVolume bool
// Only set by `removePod` as `removeContainer` is being called as part
// of removing a whole pod.
RemovePod bool
// Whether to ignore dependencies of the container when removing
// (This is *DANGEROUS* and should not be used outside of non-graph
// traversal pod removal code).
IgnoreDeps bool
// Remove all the dependencies associated with the container. Can cause
// multiple containers, and possibly one or more pods, to be removed.
RemoveDeps bool
// Do not lock the pod that the container is part of (used only by
// recursive calls of removeContainer, used when removing dependencies)
NoLockPod bool
// Timeout to use when stopping the container. Only used if `Force` is
// true.
Timeout *uint
}

// Internal function to remove a container.
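The ctrRmOpts struct replaces a run of positional booleans (force, removeVolume, removePod, ignoreDeps) whose call sites were easy to get wrong. A small illustration of the refactor, with made-up names:

package main

import "fmt"

// Before: every caller had to count booleans.
//   removeContainer(ctx, c, true, false, true, false, nil)
// After: each flag is named at the call site, and new options can be
// added without touching existing callers.
type ctrRmOpts struct {
	Force        bool
	RemoveVolume bool
	RemovePod    bool
	IgnoreDeps   bool
	Timeout      *uint
}

func removeContainer(id string, opts ctrRmOpts) {
	fmt.Printf("removing %s force=%v removePod=%v\n", id, opts.Force, opts.RemovePod)
}

func main() {
	removeContainer("abc123", ctrRmOpts{
		Force:     true,
		RemovePod: true,
	})
}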
@@ -617,15 +670,30 @@ func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool,
// remove will handle that).
// ignoreDeps is *DANGEROUS* and should not be used outside of a very specific
// context (alternate pod removal code, where graph traversal is not possible).
func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, removeVolume, removePod, ignoreDeps bool, timeout *uint) error {
// removeDeps instructs Podman to remove dependency containers (and possible
// a dependency pod if an infra container is involved). removeDeps conflicts
// with removePod - pods have their own dependency management.
// noLockPod is used for recursive removeContainer calls when the pod is already
// locked.
// TODO: At this point we should just start accepting an options struct
func (r *Runtime) removeContainer(ctx context.Context, c *Container, opts ctrRmOpts) (removedCtrs map[string]error, removedPods map[string]error, retErr error) {
removedCtrs = make(map[string]error)
removedPods = make(map[string]error)

if !c.valid {
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// Container probably already removed
// Or was never in the runtime to begin with
return nil
removedCtrs[c.ID()] = nil
return
}
}

if opts.RemovePod && opts.RemoveDeps {
retErr = fmt.Errorf("cannot remove dependencies while also removing a pod: %w", define.ErrInvalidArg)
return
}

// We need to refresh container config from the DB, to ensure that any
// changes (e.g. a rename) are picked up before we start removing.
// Since HasContainer above succeeded, we can safely assume the

@@ -634,7 +702,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
// exist once we're done.
newConf, err := r.state.GetContainerConfig(c.ID())
if err != nil {
return fmt.Errorf("retrieving container %s configuration from DB to remove: %w", c.ID(), err)
retErr = fmt.Errorf("retrieving container %s configuration from DB to remove: %w", c.ID(), err)
return
}
c.config = newConf
@@ -649,106 +718,224 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if c.config.Pod != "" {
pod, err = r.state.Pod(c.config.Pod)
if err != nil {
return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
// There's a potential race here where the pod we are in
// was already removed.
// If so, this container is also removed, as pods take
// all their containers with them.
// So if it's already gone, check if we are too.
if errors.Is(err, define.ErrNoSuchPod) {
// We could check the DB to see if we still
// exist, but that would be a serious violation
// of DB integrity.
// Mark this container as removed so there's no
// confusion, though.
removedCtrs[c.ID()] = nil
return
}

retErr = err
return
}

if !removePod {
if !opts.RemovePod {
// Lock the pod while we're removing container
if pod.config.LockID == c.config.LockID {
return fmt.Errorf("container %s and pod %s share lock ID %d: %w", c.ID(), pod.ID(), c.config.LockID, define.ErrWillDeadlock)
retErr = fmt.Errorf("container %s and pod %s share lock ID %d: %w", c.ID(), pod.ID(), c.config.LockID, define.ErrWillDeadlock)
return
}
if !opts.NoLockPod {
pod.lock.Lock()
defer pod.lock.Unlock()
}
pod.lock.Lock()
defer pod.lock.Unlock()
if err := pod.updatePod(); err != nil {
return err
// As above, there's a chance the pod was
// already removed.
if errors.Is(err, define.ErrNoSuchPod) {
removedCtrs[c.ID()] = nil
return
}

retErr = err
return
}

infraID := pod.state.InfraContainerID
if c.ID() == infraID {
return fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
if c.ID() == infraID && !opts.RemoveDeps {
retErr = fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
return
}
}
}

// For pod removal, the container is already locked by the caller
if !removePod {
locked := false
if !opts.RemovePod {
c.lock.Lock()
defer c.lock.Unlock()
defer func() {
if locked {
c.lock.Unlock()
}
}()
locked = true
}

if !r.valid {
return define.ErrRuntimeStopped
retErr = define.ErrRuntimeStopped
return
}

// Update the container to get current state
if err := c.syncContainer(); err != nil {
return err
retErr = err
return
}

serviceForPod := false
if c.IsService() {
for _, id := range c.state.Service.Pods {
if _, err := c.runtime.LookupPod(id); err != nil {
depPod, err := c.runtime.LookupPod(id)
if err != nil {
if errors.Is(err, define.ErrNoSuchPod) {
continue
}
return err
retErr = err
return
}
return fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
if !opts.RemoveDeps {
retErr = fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
return
}
// If we are the service container for the pod we are a
// member of: we need to remove that pod last, since
// this container is part of it.
if pod != nil && pod.ID() == depPod.ID() {
serviceForPod = true
continue
}
logrus.Infof("Removing pod %s as container %s is its service container", depPod.ID(), c.ID())
podRemovedCtrs, err := r.RemovePod(ctx, depPod, true, opts.Force, opts.Timeout)
for ctr, err := range podRemovedCtrs {
removedCtrs[ctr] = err
}
if err != nil && !errors.Is(err, define.ErrNoSuchPod) && !errors.Is(err, define.ErrPodRemoved) {
removedPods[depPod.ID()] = err
retErr = fmt.Errorf("error removing container %s dependency pods: %w", c.ID(), err)
return
}
removedPods[depPod.ID()] = nil
}
}
if (serviceForPod || c.config.IsInfra) && !opts.RemovePod {
// We're going to remove the pod we are a part of.
// This will get rid of us as well, so we can just return
// immediately after.
if locked {
locked = false
c.lock.Unlock()
}

logrus.Infof("Removing pod %s (dependency of container %s)", pod.ID(), c.ID())
podRemovedCtrs, err := r.removePod(ctx, pod, true, opts.Force, opts.Timeout)
for ctr, err := range podRemovedCtrs {
removedCtrs[ctr] = err
}
if err != nil && !errors.Is(err, define.ErrNoSuchPod) && !errors.Is(err, define.ErrPodRemoved) {
removedPods[pod.ID()] = err
retErr = fmt.Errorf("error removing container %s pod: %w", c.ID(), err)
return
}
removedPods[pod.ID()] = nil
return
}

// If we're not force-removing, we need to check if we're in a good
// state to remove.
if !force {
if !opts.Force {
if err := c.checkReadyForRemoval(); err != nil {
return err
retErr = err
return
}
}

if c.state.State == define.ContainerStatePaused {
isV2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
return err
retErr = err
return
}
// cgroups v1 and v2 handle signals on paused processes differently
if !isV2 {
if err := c.unpause(); err != nil {
return err
retErr = err
return
}
}
if err := c.ociRuntime.KillContainer(c, 9, false); err != nil {
return err
retErr = err
return
}
// Need to update container state to make sure we know it's stopped
if err := c.waitForExitFileAndSync(); err != nil {
return err
retErr = err
return
}
}

// Check that no other containers depend on the container.
// Only used if not removing a pod - pods guarantee that all
// deps will be evicted at the same time.
if !ignoreDeps {
if !opts.IgnoreDeps {
deps, err := r.state.ContainerInUse(c)
if err != nil {
return err
retErr = err
return
}
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
return fmt.Errorf("container %s has dependent containers which must be removed before it: %s: %w", c.ID(), depsStr, define.ErrCtrExists)
if !opts.RemoveDeps {
if len(deps) != 0 {
depsStr := strings.Join(deps, ", ")
retErr = fmt.Errorf("container %s has dependent containers which must be removed before it: %s: %w", c.ID(), depsStr, define.ErrCtrExists)
return
}
}
for _, depCtr := range deps {
dep, err := r.GetContainer(depCtr)
if err != nil {
retErr = err
return
}
logrus.Infof("Removing container %s (dependency of container %s)", dep.ID(), c.ID())
recursiveOpts := ctrRmOpts{
Force: opts.Force,
RemoveVolume: opts.RemoveVolume,
RemoveDeps: true,
NoLockPod: true,
Timeout: opts.Timeout,
}
ctrs, pods, err := r.removeContainer(ctx, dep, recursiveOpts)
for rmCtr, err := range ctrs {
removedCtrs[rmCtr] = err
}
for rmPod, err := range pods {
removedPods[rmPod] = err
}
if err != nil {
retErr = err
return
}
}
}

// Check that the container's in a good state to be removed.
if c.ensureState(define.ContainerStateRunning, define.ContainerStateStopping) {
time := c.StopTimeout()
if timeout != nil {
time = *timeout
if opts.Timeout != nil {
time = *opts.Timeout
}
// Ignore ErrConmonDead - we couldn't retrieve the container's
// exit code properly, but it's still stopped.
if err := c.stop(time); err != nil && !errors.Is(err, define.ErrConmonDead) {
return fmt.Errorf("cannot remove container %s as it could not be stopped: %w", c.ID(), err)
retErr = fmt.Errorf("cannot remove container %s as it could not be stopped: %w", c.ID(), err)
return
}

// We unlocked as part of stop() above - there's a chance someone
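The rewritten removeContainer relies on Go's named results: removedCtrs, removedPods, and retErr are assigned throughout the function, and a bare return hands back whatever was removed before a failure. A reduced sketch of the idiom:

package main

import (
	"errors"
	"fmt"
)

func removeAll(ids []string) (removed map[string]error, retErr error) {
	removed = make(map[string]error)
	for _, id := range ids {
		if id == "bad" {
			// Record the failure and stop; the partial map is still
			// returned alongside the error via the bare return.
			removed[id] = errors.New("cannot remove")
			retErr = fmt.Errorf("removing %s: %w", id, removed[id])
			return
		}
		removed[id] = nil
	}
	return
}

func main() {
	removed, err := removeAll([]string{"a", "bad", "c"})
	fmt.Println(removed, err)
}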
@@ -759,17 +946,19 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if ok, _ := r.state.HasContainer(c.ID()); !ok {
// When the container has already been removed, the OCI runtime directory remain.
if err := c.cleanupRuntime(ctx); err != nil {
return fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err)
retErr = fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err)
return
}
return nil
// Do not add to removed containers, someone else
// removed it.
return
}
}

var cleanupErr error
reportErrorf := func(msg string, args ...any) {
err := fmt.Errorf(msg, args...) // Always use fmt.Errorf instead of just logrus.Errorf(…) because the format string probably contains %w
if cleanupErr == nil {
cleanupErr = err
if retErr == nil {
retErr = err
} else {
logrus.Errorf("%s", err.Error())
}
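reportErrorf implements a common cleanup policy: keep the first error as the function's result and log every later one instead of overwriting it. A standalone sketch of the same closure:

package main

import (
	"fmt"
	"log"
)

func cleanup() (retErr error) {
	reportErrorf := func(msg string, args ...any) {
		err := fmt.Errorf(msg, args...) // fmt.Errorf so %w in msg still wraps
		if retErr == nil {
			retErr = err // first error wins
		} else {
			log.Printf("%s", err.Error()) // later errors are only logged
		}
	}
	reportErrorf("unmounting storage: %w", fmt.Errorf("device busy"))
	reportErrorf("freeing lock: %w", fmt.Errorf("lock not found"))
	return
}

func main() {
	fmt.Println(cleanup()) // prints only the first error
}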
@@ -823,9 +1012,10 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
reportErrorf("removing container %s from database: %w", c.ID(), err)
}
}
removedCtrs[c.ID()] = nil

// Deallocate the container's lock
if err := c.lock.Free(); err != nil {
if err := c.lock.Free(); err != nil && !errors.Is(err, fs.ErrNotExist) {
reportErrorf("freeing lock for container %s: %w", c.ID(), err)
}

@@ -834,8 +1024,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo

c.newContainerEvent(events.Remove)

if !removeVolume {
return cleanupErr
if !opts.RemoveVolume {
return
}

for _, v := range c.config.NamedVolumes {

@@ -843,7 +1033,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if !volume.Anonymous() {
continue
}
if err := runtime.removeVolume(ctx, volume, false, timeout, false); err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
if err := runtime.removeVolume(ctx, volume, false, opts.Timeout, false); err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
if errors.Is(err, define.ErrVolumeBeingUsed) {
// Ignore error, since podman will report original error
volumesFrom, _ := c.volumesFrom()

@@ -857,7 +1047,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
}
}

return cleanupErr
//nolint:nakedret
return
}

// EvictContainer removes the given container partial or full ID or name, and

@@ -896,7 +1087,12 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
if err == nil {
logrus.Infof("Container %s successfully retrieved from state, attempting normal removal", id)
// Assume force = true for the evict case
err = r.removeContainer(ctx, tmpCtr, true, removeVolume, false, false, timeout)
opts := ctrRmOpts{
Force: true,
RemoveVolume: removeVolume,
Timeout: timeout,
}
_, _, err = r.removeContainer(ctx, tmpCtr, opts)
if !tmpCtr.valid {
// If the container is marked invalid, remove succeeded
// in kicking it out of the state - no need to continue.

@@ -1010,58 +1206,6 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
return id, cleanupErr
}

// RemoveDepend removes all dependencies for a container.
// If the container is an infra container, the entire pod gets removed.
func (r *Runtime) RemoveDepend(ctx context.Context, rmCtr *Container, force bool, removeVolume bool, timeout *uint) ([]*reports.RmReport, error) {
logrus.Debugf("Removing container %s and all dependent containers", rmCtr.ID())
rmReports := make([]*reports.RmReport, 0)
if rmCtr.IsInfra() {
pod, err := r.GetPod(rmCtr.PodID())
if err != nil {
return nil, err
}
logrus.Debugf("Removing pod %s: depends on infra container %s", pod.ID(), rmCtr.ID())
podContainerIDS, err := pod.AllContainersByID()
if err != nil {
return nil, err
}
if err := r.RemovePod(ctx, pod, true, force, timeout); err != nil {
return nil, err
}
for _, cID := range podContainerIDS {
rmReports = append(rmReports, &reports.RmReport{Id: cID})
}
return rmReports, nil
}

deps, err := r.state.ContainerInUse(rmCtr)
if err != nil {
if err == define.ErrCtrRemoved {
return rmReports, nil
}
return rmReports, err
}
for _, cid := range deps {
ctr, err := r.state.Container(cid)
if err != nil {
if err == define.ErrNoSuchCtr {
continue
}
return rmReports, err
}

reports, err := r.RemoveDepend(ctx, ctr, force, removeVolume, timeout)
if err != nil {
return rmReports, err
}
rmReports = append(rmReports, reports...)
}

report := reports.RmReport{Id: rmCtr.ID()}
report.Err = r.removeContainer(ctx, rmCtr, force, removeVolume, false, false, timeout)
return append(rmReports, &report), nil
}

// GetContainer retrieves a container by its ID
func (r *Runtime) GetContainer(id string) (*Container, error) {
if !r.valid {
@@ -42,11 +42,15 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if err != nil {
return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", ctr.ID(), ctr.config.Pod, err)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
if _, err := r.removePod(ctx, pod, true, true, timeout); err != nil {
return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
} else {
if err := r.removeContainer(ctx, ctr, true, false, false, false, timeout); err != nil {
opts := ctrRmOpts{
Force: true,
Timeout: timeout,
}
if _, _, err := r.removeContainer(ctx, ctr, opts); err != nil {
return fmt.Errorf("removing image %s: container %s using image could not be removed: %w", imageID, ctr.ID(), err)
}
}

@@ -27,16 +27,16 @@ type PodFilter func(*Pod) bool
// If force is specified with removeCtrs, all containers will be stopped before
// being removed
// Otherwise, the pod will not be removed if any containers are running
func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error {
func (r *Runtime) RemovePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) (map[string]error, error) {
if !r.valid {
return define.ErrRuntimeStopped
return nil, define.ErrRuntimeStopped
}

if !p.valid {
if ok, _ := r.state.HasPod(p.ID()); !ok {
// Pod probably already removed
// Or was never in the runtime to begin with
return nil
return make(map[string]error), nil
}
}
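RemovePod's signature change means every caller now receives a map of container ID to removal error alongside the pod-level error. A hedged sketch of how a caller adapts (removePod here is a stand-in for the new Runtime.RemovePod):

package main

import (
	"errors"
	"fmt"
)

// removePod reports per-container outcomes even when the pod itself
// fails to go away; the IDs and errors are invented for illustration.
func removePod() (map[string]error, error) {
	return map[string]error{
		"infra": nil,
		"web":   errors.New("container is paused"),
	}, errors.New("not all containers could be removed")
}

func main() {
	ctrs, err := removePod()
	if err != nil {
		for id, ctrErr := range ctrs {
			if ctrErr != nil {
				fmt.Printf("container %s: %v\n", id, ctrErr)
			}
		}
		fmt.Println("pod removal failed:", err)
	}
}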
@@ -180,7 +180,7 @@ func (r *Runtime) PrunePods(ctx context.Context) (map[string]error, error) {
}
for _, pod := range pods {
var timeout *uint
err := r.removePod(context.TODO(), pod, true, false, timeout)
_, err := r.removePod(context.TODO(), pod, true, false, timeout)
response[pod.ID()] = err
}
return response, nil

@@ -124,8 +124,9 @@ func (r *Runtime) SavePod(pod *Pod) error {

// DO NOT USE THIS FUNCTION DIRECTLY. Use removePod(), below. It will call
// removeMalformedPod() if necessary.
func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Container, force bool, timeout *uint, ctrNamedVolumes map[string]*ContainerNamedVolume) error {
var removalErr error
func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Container, force bool, timeout *uint, ctrNamedVolumes map[string]*ContainerNamedVolume) (map[string]error, error) {
removedCtrs := make(map[string]error)
errored := false
for _, ctr := range ctrs {
err := func() error {
ctrLock := ctr.lock
@@ -142,17 +143,33 @@ func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Contai
ctrNamedVolumes[vol.Name] = vol
}

return r.removeContainer(ctx, ctr, force, false, true, true, timeout)
opts := ctrRmOpts{
Force: force,
RemovePod: true,
IgnoreDeps: true,
Timeout: timeout,
}
_, _, err := r.removeContainer(ctx, ctr, opts)
return err
}()

if removalErr == nil {
removalErr = err
} else {
logrus.Errorf("Removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
removedCtrs[ctr.ID()] = err
if err != nil {
errored = true
}
}
if removalErr != nil {
return removalErr

// So, technically, no containers have been *removed*.
// They're still in the DB.
// So just return nil for removed containers. Squash all the errors into
// a multierror so we don't lose them.
if errored {
var allErrors error
for ctr, err := range removedCtrs {
if err != nil {
allErrors = multierror.Append(allErrors, fmt.Errorf("removing container %s: %w", ctr, err))
}
}
return nil, fmt.Errorf("no containers were removed due to the following errors: %w", allErrors)
}

// Clear infra container ID before we remove the infra container.
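When this fallback path fails, nothing was actually removed from the DB, so per-container errors are squashed into one hashicorp/go-multierror value and returned with an empty result. The aggregation pattern in isolation:

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	// Invented per-container results, mirroring removedCtrs above.
	removed := map[string]error{
		"ctr1": nil,
		"ctr2": fmt.Errorf("container is running"),
		"ctr3": fmt.Errorf("dependency missing"),
	}
	var allErrors error
	for ctr, err := range removed {
		if err != nil {
			// Append collects every failure into one error value.
			allErrors = multierror.Append(allErrors, fmt.Errorf("removing container %s: %w", ctr, err))
		}
	}
	if allErrors != nil {
		fmt.Println(allErrors)
	}
}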
@@ -161,7 +178,7 @@ func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Contai
// later - we end up with a reference to a nonexistent infra container.
p.state.InfraContainerID = ""
if err := p.save(); err != nil {
return err
return nil, err
}

// Remove all containers in the pod from the state.

@@ -169,20 +186,22 @@ func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Contai
// If this fails, there isn't much more we can do.
// The containers in the pod are unusable, but they still exist,
// so pod removal will fail.
return err
return nil, err
}

return nil
return removedCtrs, nil
}

func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error {
func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) (map[string]error, error) {
removedCtrs := make(map[string]error)

if err := p.updatePod(); err != nil {
return err
return nil, err
}

ctrs, err := r.state.PodContainers(p)
if err != nil {
return err
return nil, err
}
numCtrs := len(ctrs)

@@ -193,7 +212,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
force = true
}
if !removeCtrs && numCtrs > 0 {
return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
return nil, fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
}

var removalErr error

@@ -205,14 +224,15 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
// We have to allow the pod to be removed.
// But let's only do it if force is set.
if !force {
return fmt.Errorf("cannot create container graph for pod %s: %w", p.ID(), err)
return nil, fmt.Errorf("cannot create container graph for pod %s: %w", p.ID(), err)
}

removalErr = fmt.Errorf("creating container graph for pod %s failed, fell back to loop removal: %w", p.ID(), err)

if err := r.removeMalformedPod(ctx, p, ctrs, force, timeout, ctrNamedVolumes); err != nil {
removedCtrs, err = r.removeMalformedPod(ctx, p, ctrs, force, timeout, ctrNamedVolumes)
if err != nil {
logrus.Errorf("Error creating container graph for pod %s: %v. Falling back to loop removal.", p.ID(), err)
return err
return removedCtrs, err
}
} else {
ctrErrors := make(map[string]error)

@@ -222,16 +242,13 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
removeNode(ctx, node, p, force, timeout, false, ctrErrors, ctrsVisited, ctrNamedVolumes)
}

// This is gross, but I don't want to change the signature on
// removePod - especially since any change here eventually has
// to map down to one error unless we want to make a breaking
// API change.
// Finalize the removed containers list
for ctr := range ctrsVisited {
removedCtrs[ctr] = ctrErrors[ctr]
}

if len(ctrErrors) > 0 {
var allErrs error
for id, err := range ctrErrors {
allErrs = multierror.Append(allErrs, fmt.Errorf("removing container %s from pod %s: %w", id, p.ID(), err))
}
return allErrs
return removedCtrs, fmt.Errorf("not all containers could be removed from pod %s: %w", p.ID(), define.ErrRemovingCtrs)
}
}

@@ -318,7 +335,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}

if err := p.maybeRemoveServiceContainer(); err != nil {
return err
return removedCtrs, err
}

// Remove pod from state

@@ -326,7 +343,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
if removalErr != nil {
logrus.Errorf("%v", removalErr)
}
return err
return removedCtrs, err
}

// Mark pod invalid

@@ -342,5 +359,5 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}

return removalErr
return removedCtrs, removalErr
}

@@ -388,7 +388,11 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool, timeo

logrus.Debugf("Removing container %s (depends on volume %q)", ctr.ID(), v.Name())

if err := r.removeContainer(ctx, ctr, force, false, false, false, timeout); err != nil {
opts := ctrRmOpts{
Force: force,
Timeout: timeout,
}
if _, _, err := r.removeContainer(ctx, ctr, opts); err != nil {
return fmt.Errorf("removing container %s that depends on volume %s: %w", ctr.ID(), v.Name(), err)
}
}
@@ -246,11 +246,12 @@ func PodDelete(w http.ResponseWriter, r *http.Request) {
utils.PodNotFound(w, name, err)
return
}
if err := runtime.RemovePod(r.Context(), pod, true, query.Force, query.Timeout); err != nil {
ctrs, err := runtime.RemovePod(r.Context(), pod, true, query.Force, query.Timeout)
if err != nil {
utils.Error(w, http.StatusInternalServerError, err)
return
}
report := entities.PodRmReport{Id: pod.ID()}
report := entities.PodRmReport{Id: pod.ID(), RemovedCtrs: ctrs}
utils.WriteResponse(w, http.StatusOK, report)
}

@@ -105,8 +105,9 @@ type PodRmOptions struct {
}

type PodRmReport struct {
Err error
Id string //nolint:revive,stylecheck
RemovedCtrs map[string]error
Err error
Id string //nolint:revive,stylecheck
}

// PodSpec is an abstracted version of PodSpecGen designed to eventually accept options
@@ -376,27 +376,25 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st
return reports, nil
}

func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Container, options entities.RmOptions) error {
err := ic.Libpod.RemoveContainer(ctx, ctr, options.Force, options.Volumes, options.Timeout)
//nolint:unparam
func (ic *ContainerEngine) removeContainer(ctx context.Context, ctr *libpod.Container, options entities.RmOptions) (map[string]error, map[string]error, error) {
var err error
ctrs := make(map[string]error)
pods := make(map[string]error)
if options.All || options.Depend {
ctrs, pods, err = ic.Libpod.RemoveContainerAndDependencies(ctx, ctr, options.Force, options.Volumes, options.Timeout)
} else {
err = ic.Libpod.RemoveContainer(ctx, ctr, options.Force, options.Volumes, options.Timeout)
}
ctrs[ctr.ID()] = err
if err == nil {
return nil
return ctrs, pods, nil
}
logrus.Debugf("Failed to remove container %s: %s", ctr.ID(), err.Error())
if errors.Is(err, define.ErrNoSuchCtr) {
// Ignore if the container does not exist (anymore) when either
// it has been requested by the user of if the container is a
// service one. Service containers are removed along with its
// pods which in turn are removed along with their infra
// container. Hence, there is an inherent race when removing
// infra containers with service containers in parallel.
if options.Ignore || ctr.IsService() {
logrus.Debugf("Ignoring error (--allow-missing): %v", err)
return nil
}
} else if errors.Is(err, define.ErrCtrRemoved) {
return nil
if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
return ctrs, pods, nil
}
return err
return ctrs, pods, err
}

func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string, options entities.RmOptions) ([]*reports.RmReport, error) {
@@ -435,50 +433,45 @@ func (ic *ContainerEngine) ContainerRm(ctx context.Context, namesOrIds []string,
}
}

alreadyRemoved := make(map[string]bool)
addReports := func(newReports []*reports.RmReport) {
for i := range newReports {
alreadyRemoved[newReports[i].Id] = true
rmReports = append(rmReports, newReports[i])
}
}

// First, remove dependent containers.
if options.All || options.Depend {
for _, ctr := range libpodContainers {
// When `All` is set, remove the infra containers and
// their dependencies first. Otherwise, we'd error out.
//
// TODO: All this logic should probably live in libpod.
if alreadyRemoved[ctr.ID()] || (options.All && !ctr.IsInfra()) {
continue
}
reports, err := ic.Libpod.RemoveDepend(ctx, ctr, options.Force, options.Volumes, options.Timeout)
if err != nil {
return rmReports, err
}
addReports(reports)
}
}
ctrsMap := make(map[string]error)
mapMutex := sync.Mutex{}

errMap, err := parallelctr.ContainerOp(ctx, libpodContainers, func(c *libpod.Container) error {
if alreadyRemoved[c.ID()] {
mapMutex.Lock()
if _, ok := ctrsMap[c.ID()]; ok {
return nil
}
return ic.removeContainer(ctx, c, options)
mapMutex.Unlock()

// TODO: We should report removed pods back somehow.
ctrs, _, err := ic.removeContainer(ctx, c, options)

mapMutex.Lock()
defer mapMutex.Unlock()
for ctr, err := range ctrs {
ctrsMap[ctr] = err
}

return err
})
if err != nil {
return nil, err
}

for ctr, err := range errMap {
if alreadyRemoved[ctr.ID()] {
continue
if _, ok := ctrsMap[ctr.ID()]; ok {
logrus.Debugf("Multiple results for container %s - attempted multiple removals?", ctr.ID())
}
ctrsMap[ctr.ID()] = err
}

for ctr, err := range ctrsMap {
report := new(reports.RmReport)
report.Id = ctr.ID()
report.Err = err
report.RawInput = idToRawInput[ctr.ID()]
report.Id = ctr
if !(errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved)) {
report.Err = err
}
report.RawInput = idToRawInput[ctr]
rmReports = append(rmReports, report)
}
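ContainerRm now runs removals in parallel while the workers share ctrsMap, so every read-then-write on the map happens under mapMutex. The concurrency pattern on its own, with stand-in names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	results := make(map[string]error)
	var mu sync.Mutex
	var wg sync.WaitGroup
	for _, item := range items {
		wg.Add(1)
		go func(it string) {
			defer wg.Done()
			// Skip items another worker already handled (mirrors the
			// ctrsMap check inside ContainerRm's worker closure).
			mu.Lock()
			if _, ok := results[it]; ok {
				mu.Unlock()
				return
			}
			mu.Unlock()
			var err error // perform the removal here
			mu.Lock()
			defer mu.Unlock()
			results[it] = err
		}(item)
	}
	wg.Wait()
	fmt.Println(results)
}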
@@ -972,7 +965,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
ExitCode: exitCode,
})
if ctr.AutoRemove() {
if err := ic.removeContainer(ctx, ctr.Container, entities.RmOptions{}); err != nil {
if _, _, err := ic.removeContainer(ctx, ctr.Container, entities.RmOptions{}); err != nil {
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
}
}

@@ -1008,7 +1001,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
report.Err = fmt.Errorf("unable to start container %q: %w", ctr.ID(), err)
reports = append(reports, report)
if ctr.AutoRemove() {
if err := ic.removeContainer(ctx, ctr.Container, entities.RmOptions{}); err != nil {
if _, _, err := ic.removeContainer(ctx, ctr.Container, entities.RmOptions{}); err != nil {
logrus.Errorf("Removing container %s: %v", ctr.ID(), err)
}
}

@@ -133,7 +133,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
if err != nil {
return reports, err
}
if err := ic.Libpod.RemovePod(ctx, pod, true, true, options.Timeout); err != nil {
if _, err := ic.Libpod.RemovePod(ctx, pod, true, true, options.Timeout); err != nil {
return reports, err
}
} else if err := ic.Libpod.RemoveContainer(ctx, c, true, true, options.Timeout); err != nil && !errors.Is(err, define.ErrNoSuchCtr) {

@@ -274,10 +274,11 @@ func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, optio
reports := make([]*entities.PodRmReport, 0, len(pods))
for _, p := range pods {
report := entities.PodRmReport{Id: p.ID()}
err := ic.Libpod.RemovePod(ctx, p, true, options.Force, options.Timeout)
ctrs, err := ic.Libpod.RemovePod(ctx, p, true, options.Force, options.Timeout)
if err != nil {
report.Err = err
}
report.RemovedCtrs = ctrs
reports = append(reports, &report)
}
return reports, nil

@@ -376,8 +377,11 @@ func (ic *ContainerEngine) PodClone(ctx context.Context, podClone entities.PodCl

if podClone.Destroy {
var timeout *uint
err = ic.Libpod.RemovePod(ctx, p, true, true, timeout)
_, err = ic.Libpod.RemovePod(ctx, p, true, true, timeout)
if err != nil {
// TODO: Possibly should handle case where containers
// failed to remove - maybe compact the errors into a
// multierror and return that?
return &entities.PodCloneReport{Id: pod.ID()}, err
}
}
@@ -79,7 +79,7 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
compatibleOptions := &libpod.InfraInherit{}
var infraSpec *specs.Spec
if infra != nil {
options, infraSpec, compatibleOptions, err = Inherit(*infra, s, rt)
options, infraSpec, compatibleOptions, err = Inherit(infra, s, rt)
if err != nil {
return nil, nil, nil, err
}

@@ -636,7 +636,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
return options, nil
}

func Inherit(infra libpod.Container, s *specgen.SpecGenerator, rt *libpod.Runtime) (opts []libpod.CtrCreateOption, infraS *specs.Spec, compat *libpod.InfraInherit, err error) {
func Inherit(infra *libpod.Container, s *specgen.SpecGenerator, rt *libpod.Runtime) (opts []libpod.CtrCreateOption, infraS *specs.Spec, compat *libpod.InfraInherit, err error) {
inheritSpec := &specgen.SpecGenerator{}
_, compatibleOptions, err := ConfigToSpec(rt, inheritSpec, infra.ID())
if err != nil {
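Inherit now takes *libpod.Container instead of a Container value. Passing by value copies the struct, including any lock it embeds, which "go vet" flags as copylocks and which can silently break mutual exclusion. Illustrated with a stand-in type:

package main

import "sync"

type Container struct {
	mu sync.Mutex
	id string
}

// Bad: func Inherit(c Container) string { return c.id }
// copies c, including its mutex; vet reports "passes lock by value".

// Good: the pointer shares the one lock with every other user.
func Inherit(c *Container) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.id
}

func main() {
	c := &Container{id: "infra"}
	_ = Inherit(c)
}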
@@ -22,7 +22,7 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (_ *libpod.Pod, finalErr e
var createdPod *libpod.Pod
defer func() {
if finalErr != nil && createdPod != nil {
if err := rt.RemovePod(context.Background(), createdPod, true, true, nil); err != nil {
if _, err := rt.RemovePod(context.Background(), createdPod, true, true, nil); err != nil {
logrus.Errorf("Removing pod: %v", err)
}
}

@@ -652,12 +652,7 @@ func (p *PodmanTestIntegration) Cleanup() {
// An error would cause it to stop and return early otherwise.
Expect(stop).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", stop.Command.Args, stop.OutputToString(), stop.ErrorToString())
Expect(podrm).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", podrm.Command.Args, podrm.OutputToString(), podrm.ErrorToString())

// FIXME: Remove this special case when the issue is fixed.
// Special case rm -fa is not working correctly with dependencies, https://github.com/containers/podman/issues/18180
if !strings.Contains(rmall.ErrorToString(), "has dependent containers which must be removed before it") {
Expect(rmall).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", rmall.Command.Args, rmall.OutputToString(), rmall.ErrorToString())
}
Expect(rmall).To(Exit(0), "command: %v\nstdout: %s\nstderr: %s", rmall.Command.Args, rmall.OutputToString(), rmall.ErrorToString())
}

// CleanupVolume cleans up the volumes and containers.

@@ -123,7 +123,7 @@ var _ = Describe("Podman pod rm", func() {
result := podmanTest.Podman([]string{"pod", "rm", "-a"})
result.WaitWithDefaultTimeout()
Expect(result).To(ExitWithError())
Expect(result.ErrorToString()).To(ContainSubstring("cannot be removed"))
Expect(result.ErrorToString()).To(ContainSubstring("not all containers could be removed from pod"))

numPods = podmanTest.NumberOfPods()
ps = podmanTest.Podman([]string{"pod", "ps"})

@@ -322,4 +322,25 @@ var _ = Describe("Podman rm", func() {
Expect(session1).Should(Exit(0))
Expect(session1.OutputToString()).To(BeEquivalentTo(cid4))
})

It("podman rm -fa with dependencies", func() {
ctr1Name := "ctr1"
ctr1 := podmanTest.RunTopContainer(ctr1Name)
ctr1.WaitWithDefaultTimeout()
Expect(ctr1).Should(Exit(0))
cid1 := ctr1.OutputToString()

ctr2 := podmanTest.Podman([]string{"run", "-d", "--network", fmt.Sprintf("container:%s", ctr1Name), ALPINE, "top"})
ctr2.WaitWithDefaultTimeout()
Expect(ctr2).Should(Exit(0))
cid2 := ctr2.OutputToString()

rm := podmanTest.Podman([]string{"rm", "-fa"})
rm.WaitWithDefaultTimeout()
Expect(rm).Should(Exit(0))
Expect(rm.ErrorToString()).To(BeEmpty(), "rm -fa error logged")
Expect(rm.OutputToStringArray()).Should(ConsistOf(cid1, cid2))

Expect(podmanTest.NumberOfContainers()).To(Equal(0))
})
})

@@ -63,7 +63,7 @@ var _ = Describe("Podman run ns", func() {
})

It("podman run ipcns ipcmk container test", func() {
setup := podmanTest.Podman([]string{"run", "-d", "--name", "test1", fedoraMinimal, "sleep", "30"})
setup := podmanTest.Podman([]string{"run", "-d", "--name", "test1", fedoraMinimal, "sleep", "999"})
setup.WaitWithDefaultTimeout()
Expect(setup).Should(Exit(0))

@@ -72,12 +72,7 @@ var _ = Describe("Podman run ns", func() {
Expect(session).Should(Exit(0))
output := strings.Split(session.OutputToString(), " ")
ipc := output[len(output)-1]
session = podmanTest.Podman([]string{"run", "--name=t2", "--ipc=container:test1", fedoraMinimal, "ipcs", "-m", "-i", ipc})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))

// We have to remove the dependency container first, rm --all fails in the cleanup because of the unknown ordering.
session = podmanTest.Podman([]string{"rm", "t2"})
session = podmanTest.Podman([]string{"run", "--ipc=container:test1", fedoraMinimal, "ipcs", "-m", "-i", ipc})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
})