Mirror of https://github.com/containers/podman.git (synced 2025-05-20 08:36:23 +08:00)
Add podman container cleanup to CLI
When we run containers in detach mode, nothing cleans up the network stack or the mount points. This patch tells conmon to execute the cleanup code when the container exits. The command can also be invoked manually to clean up previously running containers.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Closes: #942
Approved by: mheon
Committed by: Atomic Bot
Parent: 41bd607c12
Commit: 7fc1a329bd
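As a rough illustration of the manual path this commit adds (a sketch only: it assumes a libpod Runtime already obtained via libpodruntime.GetRuntime as in the new cleanup.go, and the helper name cleanupOne is made up), the per-container work boils down to looking the container up and calling its Cleanup method:

package example

import (
	"fmt"

	"github.com/pkg/errors"
	"github.com/projectatomic/libpod/libpod"
)

// cleanupOne is a hypothetical helper mirroring the per-container path of
// cleanupCmd in cmd/podman/cleanup.go below: look the container up by name
// or ID, then release its mount points and network stack.
func cleanupOne(runtime *libpod.Runtime, nameOrID string) error {
	ctr, err := runtime.LookupContainer(nameOrID)
	if err != nil {
		return errors.Wrapf(err, "unable to find container %s", nameOrID)
	}
	if err := ctr.Cleanup(); err != nil {
		return errors.Wrapf(err, "failed to cleanup container %s", ctr.ID())
	}
	fmt.Println(ctr.ID())
	return nil
}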
@@ -58,7 +58,7 @@ RUN set -x \
 	&& rm -rf "$GOPATH"
 
 # Install conmon
-ENV CRIO_COMMIT 66788a10e57f42faf741c2f149d0ee6635063014
+ENV CRIO_COMMIT f9ae39e395880507d52295ca58e3683f22524777
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/kubernetes-incubator/cri-o.git "$GOPATH/src/github.com/kubernetes-incubator/cri-o.git" \
@@ -57,7 +57,7 @@ RUN set -x \
 	&& go get github.com/onsi/gomega/...
 
 # Install conmon
-ENV CRIO_COMMIT 66788a10e57f42faf741c2f149d0ee6635063014
+ENV CRIO_COMMIT f9ae39e395880507d52295ca58e3683f22524777
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/kubernetes-incubator/cri-o.git "$GOPATH/src/github.com/kubernetes-incubator/cri-o.git" \
@@ -59,7 +59,7 @@ RUN set -x \
 	&& go get github.com/onsi/gomega/...
 
 # Install conmon
-ENV CRIO_COMMIT 66788a10e57f42faf741c2f149d0ee6635063014
+ENV CRIO_COMMIT f9ae39e395880507d52295ca58e3683f22524777
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/kubernetes-incubator/cri-o.git "$GOPATH/src/github.com/kubernetes-incubator/cri-o.git" \
cmd/podman/cleanup.go (new file, 92 lines)
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
	"github.com/projectatomic/libpod/cmd/podman/libpodruntime"
	"github.com/projectatomic/libpod/libpod"
	"github.com/urfave/cli"
)

var (
	cleanupFlags = []cli.Flag{
		cli.BoolFlag{
			Name:  "all, a",
			Usage: "Cleans up all containers",
		},
		LatestFlag,
	}
	cleanupDescription = `
   podman container cleanup

   Cleans up mount points and network stacks on one or more containers from the host. The container name or ID can be used. This command is used internally when running containers, but can also be used if container cleanup has failed when a container exits.
`
	cleanupCommand = cli.Command{
		Name:        "cleanup",
		Usage:       "Cleanup network and mountpoints of one or more containers",
		Description: cleanupDescription,
		Flags:       cleanupFlags,
		Action:      cleanupCmd,
		ArgsUsage:   "CONTAINER-NAME [CONTAINER-NAME ...]",
	}
)

// cleanupCmd cleans up the mount points and network stacks of the requested
// containers: all of them, the latest one, or the names/IDs given as arguments.
func cleanupCmd(c *cli.Context) error {
	if err := validateFlags(c, cleanupFlags); err != nil {
		return err
	}
	runtime, err := libpodruntime.GetRuntime(c)
	if err != nil {
		return errors.Wrapf(err, "could not get runtime")
	}
	defer runtime.Shutdown(false)

	args := c.Args()

	var lastError error
	var cleanupContainers []*libpod.Container
	if c.Bool("all") {
		if c.Bool("latest") {
			return errors.New("--all and --latest cannot be used together")
		}
		if len(args) != 0 {
			return errors.New("--all and explicit container IDs cannot be used together")
		}
		cleanupContainers, err = runtime.GetContainers()
		if err != nil {
			return errors.Wrapf(err, "unable to get container list")
		}
	} else if c.Bool("latest") {
		if len(args) != 0 {
			return errors.New("--latest and explicit container IDs cannot be used together")
		}
		lastCtr, err := runtime.GetLatestContainer()
		if err != nil {
			return errors.Wrapf(err, "unable to get latest container")
		}
		cleanupContainers = append(cleanupContainers, lastCtr)
	} else {
		for _, i := range args {
			container, err := runtime.LookupContainer(i)
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				lastError = errors.Wrapf(err, "unable to find container %s", i)
				continue
			}
			cleanupContainers = append(cleanupContainers, container)
		}
	}
	for _, ctr := range cleanupContainers {
		if err = ctr.Cleanup(); err != nil {
			if lastError != nil {
				fmt.Fprintln(os.Stderr, lastError)
			}
			lastError = errors.Wrapf(err, "failed to cleanup container %v", ctr.ID())
		} else {
			fmt.Println(ctr.ID())
		}
	}
	return lastError
}
@@ -7,6 +7,7 @@ import (
 var (
 	subCommands = []cli.Command{
 		attachCommand,
+		cleanupCommand,
 		commitCommand,
 		createCommand,
 		diffCommand,
@@ -116,7 +116,7 @@ func createCmd(c *cli.Context) error {
 		return err
 	}
 
-	options, err := createConfig.GetContainerCreateOptions()
+	options, err := createConfig.GetContainerCreateOptions(runtime)
 	if err != nil {
 		return err
 	}
@@ -110,7 +110,7 @@ func runCmd(c *cli.Context) error {
 		return err
 	}
 
-	options, err := createConfig.GetContainerCreateOptions()
+	options, err := createConfig.GetContainerCreateOptions(runtime)
 	if err != nil {
 		return err
 	}
docs/podman-container-cleanup.1.md (new file, 38 lines)
% podman-container-cleanup "1"

## NAME
podman\-container\-cleanup - Cleanup container storage and networks

## SYNOPSIS
**podman container cleanup [OPTIONS] CONTAINER**

## DESCRIPTION
`podman container cleanup` cleans up exited containers by removing all mountpoints and network configuration from the host. The container name or ID can be used. The cleanup command does not remove the containers themselves, and running containers will not be cleaned up.
Sometimes a container's mount points and network stacks remain if the podman command was killed or the container ran in daemon mode. When containers run in daemon mode, this command is executed automatically by the conmon process when the container exits.

## OPTIONS

**--all, -a**

Clean up all containers.

**--latest, -l**

Instead of providing the container name or ID, use the last created container. If you run containers with tools other than Podman, such as CRI-O, the last started container could come from either tool.

## EXAMPLE

`podman container cleanup mywebserver`

`podman container cleanup mywebserver myflaskserver 860a4b23`

`podman container cleanup 860a4b23`

`podman container cleanup -a`

`podman container cleanup --latest`

## SEE ALSO
podman(1), podman-container(1)

## HISTORY
Jun 2018, Originally compiled by Dan Walsh <dwalsh@redhat.com>
@@ -14,6 +14,7 @@ The container command allows you to manage containers
 | Command | Man Page                                                      | Description                                            |
 | ------- | ------------------------------------------------------------- | ------------------------------------------------------ |
 | attach  | [podman-attach(1)](podman-attach.1.md)                        | Attach to a running container.                         |
+| cleanup | [podman-container-cleanup(1)](podman-container-cleanup.1.md) | Clean up a container's network and mountpoints.        |
 | commit  | [podman-commit(1)](podman-commit.1.md)                        | Create new image based on the changed container.       |
 | create  | [podman-create(1)](podman-create.1.md)                        | Create a new container.                                 |
 | diff    | [podman-diff(1)](podman-diff.1.md)                            | Inspect changes on a container or image's filesystem.  |
@@ -314,6 +314,10 @@ type ContainerConfig struct {
 	// TODO log options for log drivers
 
 	PostConfigureNetNS bool `json:"postConfigureNetNS"`
+
+	// ExitCommand is the container's exit command.
+	// This Command will be executed when the container exits
+	ExitCommand []string `json:"exitCommand,omitempty"`
 }
 
 // ContainerStatus returns a string representation for users
@@ -71,6 +71,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *inspect.Data)
 		},
 		ImageID:         config.RootfsImageID,
 		ImageName:       config.RootfsImageName,
+		ExitCommand:     config.ExitCommand,
 		Rootfs:          config.Rootfs,
 		ResolvConfPath:  resolvPath,
 		HostnamePath:    hostnamePath,
@@ -695,7 +695,8 @@ func (c *Container) stop(timeout uint) error {
 		return err
 	}
 
-	return c.cleanup()
+	// Container should clean itself up
+	return nil
 }
 
 // Internal, non-locking function to pause a container
@@ -928,11 +929,17 @@ func (c *Container) cleanup() error {
 	}
 
 	if err := c.cleanupCgroups(); err != nil {
-		if lastError != nil {
-			logrus.Errorf("Error cleaning up container %s CGroups: %v", c.ID(), err)
-		} else {
-			lastError = err
-		}
+		/*
+			if lastError != nil {
+				logrus.Errorf("Error cleaning up container %s CGroups: %v", c.ID(), err)
+			} else {
+				lastError = err
+			}
+		*/
+		// For now we are going to only warn on failures to clean up cgroups
+		// We have a conflict with running podman containers cleanup in same cgroup as container
+		logrus.Warnf("Ignoring Error cleaning up container %s CGroups: %v", c.ID(), err)
+
 	}
 
 	// Unmount storage
@@ -268,6 +268,12 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string) (er
 	if ctr.config.ConmonPidFile != "" {
 		args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile)
 	}
+	if len(ctr.config.ExitCommand) > 0 {
+		args = append(args, "--exit-command", ctr.config.ExitCommand[0])
+		for _, arg := range ctr.config.ExitCommand[1:] {
+			args = append(args, []string{"--exit-command-arg", arg}...)
+		}
+	}
 	args = append(args, "--socket-dir-path", r.socketsDir)
 	if ctr.config.Spec.Process.Terminal {
 		args = append(args, "-t")
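To make the flag translation above concrete, here is a minimal, self-contained sketch (the podman path and container ID are made up) of how an exit command is expanded into conmon's --exit-command/--exit-command-arg flags:

package main

import "fmt"

func main() {
	// Hypothetical exit command; the real one is built by createExitCommand
	// below and gets the container ID appended by WithExitCommand.
	exitCommand := []string{"/usr/bin/podman", "container", "cleanup", "860a4b23"}

	var args []string
	args = append(args, "--exit-command", exitCommand[0])
	for _, arg := range exitCommand[1:] {
		args = append(args, "--exit-command-arg", arg)
	}

	fmt.Println(args)
	// [--exit-command /usr/bin/podman --exit-command-arg container
	//  --exit-command-arg cleanup --exit-command-arg 860a4b23]
}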
@@ -485,6 +485,18 @@ func WithIDMappings(idmappings storage.IDMappingOptions) CtrCreateOption {
 	}
 }
 
+// WithExitCommand sets the ExitCommand for the container, appending on the ctr.ID() to the end
+func WithExitCommand(exitCommand []string) CtrCreateOption {
+	return func(ctr *Container) error {
+		if ctr.valid {
+			return ErrCtrFinalized
+		}
+
+		ctr.config.ExitCommand = append(exitCommand, ctr.ID())
+		return nil
+	}
+}
+
 // WithIPCNSFrom indicates the the container should join the IPC namespace of
 // the given container.
 // If the container has joined a pod, it can only join the namespaces of
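A small sketch of the append behaviour noted in the comment above (the container ID below is invented): the configured exit command always receives the container's own ID as its final argument, so conmon ultimately invokes podman ... container cleanup <ID>.

package main

import "fmt"

func main() {
	// Roughly what WithExitCommand stores in ctr.config.ExitCommand:
	exitCommand := []string{"/usr/bin/podman", "container", "cleanup"}
	ctrID := "860a4b231279abcd" // hypothetical container ID

	final := append(exitCommand, ctrID)
	fmt.Println(final) // [/usr/bin/podman container cleanup 860a4b231279abcd]
}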
@@ -167,6 +167,7 @@ type ContainerInspectData struct {
 	Mounts          []specs.Mount    `json:"Mounts"`
 	Dependencies    []string         `json:"Dependencies"`
 	NetworkSettings *NetworkSettings `json:"NetworkSettings"` //TODO
+	ExitCommand     []string         `json:"ExitCommand"`
 }
 
 // ContainerInspectState represents the state of a container.
@@ -316,8 +316,25 @@ func (c *CreateConfig) GetTmpfsMounts() []spec.Mount {
 	return m
 }
 
+func createExitCommand(runtime *libpod.Runtime) []string {
+	config := runtime.GetConfig()
+
+	cmd, _ := os.Executable()
+	command := []string{cmd,
+		"--root", config.StorageConfig.GraphRoot,
+		"--runroot", config.StorageConfig.RunRoot,
+		"--log-level", logrus.GetLevel().String(),
+		"--cgroup-manager", config.CgroupManager,
+		"--tmpdir", config.TmpDir,
+	}
+	if config.StorageConfig.GraphDriverName != "" {
+		command = append(command, []string{"--storage-driver", config.StorageConfig.GraphDriverName}...)
+	}
+	return append(command, []string{"container", "cleanup"}...)
+}
+
 // GetContainerCreateOptions takes a CreateConfig and returns a slice of CtrCreateOptions
-func (c *CreateConfig) GetContainerCreateOptions() ([]libpod.CtrCreateOption, error) {
+func (c *CreateConfig) GetContainerCreateOptions(runtime *libpod.Runtime) ([]libpod.CtrCreateOption, error) {
 	var options []libpod.CtrCreateOption
 	var portBindings []ocicni.PortMapping
 	var err error
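For orientation, a sketch of the argv that createExitCommand assembles. The storage, cgroup-manager, and tmpdir values below are illustrative defaults only, not what runtime.GetConfig() necessarily returns on a given host:

package main

import "fmt"

func main() {
	cmd := "/usr/bin/podman" // os.Executable() in the real code

	command := []string{cmd,
		"--root", "/var/lib/containers/storage", // config.StorageConfig.GraphRoot (assumed)
		"--runroot", "/var/run/containers/storage", // config.StorageConfig.RunRoot (assumed)
		"--log-level", "error", // logrus.GetLevel().String()
		"--cgroup-manager", "systemd", // config.CgroupManager (assumed)
		"--tmpdir", "/var/run/libpod", // config.TmpDir (assumed)
	}
	// Only added when a storage driver is explicitly configured.
	command = append(command, "--storage-driver", "overlay")
	command = append(command, "container", "cleanup")

	fmt.Println(command)
}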
@@ -434,6 +451,9 @@ func (c *CreateConfig) GetContainerCreateOptions() ([]libpod.CtrCreateOption, er
 	if c.CgroupParent != "" {
 		options = append(options, libpod.WithCgroupParent(c.CgroupParent))
 	}
+	if c.Detach {
+		options = append(options, libpod.WithExitCommand(createExitCommand(runtime)))
+	}
 
 	return options, nil
 }
@@ -47,7 +47,7 @@ func (i *LibpodAPI) CreateContainer(call ioprojectatomicpodman.VarlinkCall, conf
 		return call.ReplyErrorOccurred(err.Error())
 	}
 
-	options, err := createConfig.GetContainerCreateOptions()
+	options, err := createConfig.GetContainerCreateOptions(runtime)
 	if err != nil {
 		return call.ReplyErrorOccurred(err.Error())
 	}
test/e2e/run_cleanup_test.go (new file, 47 lines)
package integration

import (
	"os"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Podman run exit", func() {
	var (
		tempdir    string
		err        error
		podmanTest PodmanTest
	)

	BeforeEach(func() {
		tempdir, err = CreateTempDirInTempDir()
		if err != nil {
			os.Exit(1)
		}
		podmanTest = PodmanCreate(tempdir)
		podmanTest.RestoreAllArtifacts()
	})

	AfterEach(func() {
		podmanTest.Cleanup()
	})

	// A detached container that exits on its own should leave the host mount
	// table unchanged, because conmon runs the cleanup exit command for us.
	It("podman run -d mount cleanup test", func() {
		mount := podmanTest.SystemExec("mount", nil)
		mount.WaitWithDefaultTimeout()
		out1 := mount.OutputToString()
		result := podmanTest.Podman([]string{"run", "-d", ALPINE, "echo", "hello"})
		result.WaitWithDefaultTimeout()
		Expect(result.ExitCode()).To(Equal(0))

		result = podmanTest.SystemExec("sleep", []string{"5"})
		result.WaitWithDefaultTimeout()

		mount = podmanTest.SystemExec("mount", nil)
		mount.WaitWithDefaultTimeout()
		out2 := mount.OutputToString()
		Expect(out1).To(Equal(out2))
	})
})