Do not store the exit command in container config
There is a problem with creating and storing the exit command at container creation time: the stored command only contains the options the container was created with, but NOT the options it is later started with. One example is the CNI network config. If I start a container once, then change the CNI config dir with `--cni-config-dir` and start it a second time, the container starts successfully, but the exit command still contains the old `--cni-config-dir` because it was never updated.

To fix this, do not store the exit command at all. Instead, create it every time the conmon process for the container is started. This guarantees that the container cleanup process is started with the correct settings.

[NO NEW TESTS NEEDED]

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
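The gist of the change, as a minimal standalone sketch (the `RuntimeConfig` type and `buildCleanupCommand` helper here are illustrative, not Podman's actual code): a command captured at create time goes stale, while one built at start time always reflects the current settings.

```go
package main

import "fmt"

// RuntimeConfig stands in for the settings Podman resolves on every
// invocation (storage paths, CNI config dir, log level, and so on).
type RuntimeConfig struct {
	CNIConfigDir string
}

// buildCleanupCommand derives the exit command from the configuration
// that is current right now; calling it at conmon start time is the fix.
func buildCleanupCommand(cfg RuntimeConfig, ctrID string) []string {
	return []string{
		"podman", "--cni-config-dir", cfg.CNIConfigDir,
		"container", "cleanup", ctrID,
	}
}

func main() {
	const ctrID = "99f66530fe9c"

	// Before the fix: the command is frozen with create-time settings.
	stored := buildCleanupCommand(RuntimeConfig{CNIConfigDir: "/etc/cni/net.d"}, ctrID)

	// The container is later started with a different CNI config dir.
	current := RuntimeConfig{CNIConfigDir: "/home/user/cni"}

	fmt.Println("stale (stored at create):", stored)
	fmt.Println("fresh (built at start):  ", buildCleanupCommand(current, ctrID))
}
```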
@@ -133,28 +133,6 @@ $ podman container inspect foobar
             "Ports": {},
             "SandboxKey": ""
         },
-        "ExitCommand": [
-            "/usr/bin/podman",
-            "--root",
-            "/home/dwalsh/.local/share/containers/storage",
-            "--runroot",
-            "/run/user/3267/containers",
-            "--log-level",
-            "warning",
-            "--cgroup-manager",
-            "systemd",
-            "--tmpdir",
-            "/run/user/3267/libpod/tmp",
-            "--runtime",
-            "crun",
-            "--storage-driver",
-            "overlay",
-            "--events-backend",
-            "journald",
-            "container",
-            "cleanup",
-            "99f66530fe9c7249f7cf29f78e8661669d5831cbe4ee80ea757d5e922dd6a8a6"
-        ],
         "Namespace": "",
         "IsInfra": false,
         "Config": {
@@ -364,13 +364,6 @@ type ContainerMiscConfig struct {
 	PostConfigureNetNS bool `json:"postConfigureNetNS"`
 	// OCIRuntime used to create the container
 	OCIRuntime string `json:"runtime,omitempty"`
-	// ExitCommand is the container's exit command.
-	// This Command will be executed when the container exits by Conmon.
-	// It is usually used to invoke post-run cleanup - for example, in
-	// Podman, it invokes `podman container cleanup`, which in turn calls
-	// Libpod's Cleanup() API to unmount the container and clean up its
-	// network.
-	ExitCommand []string `json:"exitCommand,omitempty"`
 	// IsInfra is a bool indicating whether this container is an infra container used for
 	// sharing kernel namespaces in a pod
 	IsInfra bool `json:"pause"`
@@ -119,7 +119,6 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
 		},
 		Image:       config.RootfsImageID,
 		ImageName:   config.RootfsImageName,
-		ExitCommand: config.ExitCommand,
 		Namespace:   config.Namespace,
 		Rootfs:      config.Rootfs,
 		Pod:         config.Pod,
@@ -654,7 +654,6 @@ type InspectContainerData struct {
 	Mounts          []InspectMount          `json:"Mounts"`
 	Dependencies    []string                `json:"Dependencies"`
 	NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"` //TODO
-	ExitCommand     []string                `json:"ExitCommand"`
 	Namespace       string                  `json:"Namespace"`
 	IsInfra         bool                    `json:"IsInfra"`
 	Config          *InspectContainerConfig `json:"Config"`
@@ -30,6 +30,7 @@ import (
 	"github.com/containers/podman/v3/pkg/checkpoint/crutils"
 	"github.com/containers/podman/v3/pkg/errorhandling"
 	"github.com/containers/podman/v3/pkg/rootless"
+	"github.com/containers/podman/v3/pkg/specgenutil"
 	"github.com/containers/podman/v3/pkg/util"
 	"github.com/containers/podman/v3/utils"
 	"github.com/containers/storage/pkg/homedir"
@@ -1071,11 +1072,15 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
 		args = append(args, "--no-pivot")
 	}
 
-	if len(ctr.config.ExitCommand) > 0 {
-		args = append(args, "--exit-command", ctr.config.ExitCommand[0])
-		for _, arg := range ctr.config.ExitCommand[1:] {
-			args = append(args, []string{"--exit-command-arg", arg}...)
-		}
+	exitCommand, err := specgenutil.CreateExitCommandArgs(ctr.runtime.storageConfig, ctr.runtime.config, logrus.IsLevelEnabled(logrus.DebugLevel), ctr.AutoRemove(), false)
+	if err != nil {
+		return 0, err
+	}
+	exitCommand = append(exitCommand, ctr.config.ID)
+
+	args = append(args, "--exit-command", exitCommand[0])
+	for _, arg := range exitCommand[1:] {
+		args = append(args, []string{"--exit-command-arg", arg}...)
 	}
 
 	// Pass down the LISTEN_* environment (see #10443).
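For context, conmon takes the cleanup command as one `--exit-command` flag for the binary plus a repeated `--exit-command-arg` flag for each argument, which is exactly what the loop above produces. A small sketch of that expansion (the sample command values are made up):

```go
package main

import "fmt"

// toConmonArgs expands an exit command into the flag form conmon expects:
// the binary via --exit-command, then one --exit-command-arg per element,
// mirroring the loop in createOCIContainer above.
func toConmonArgs(exitCommand []string) []string {
	args := []string{"--exit-command", exitCommand[0]}
	for _, arg := range exitCommand[1:] {
		args = append(args, "--exit-command-arg", arg)
	}
	return args
}

func main() {
	// Illustrative values; the real slice comes from CreateExitCommandArgs
	// with the container ID appended.
	cmd := []string{"/usr/bin/podman", "--root", "/var/lib/containers/storage",
		"container", "cleanup", "99f66530fe9c"}
	fmt.Println(toConmonArgs(cmd))
}
```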
@@ -835,20 +835,6 @@ func WithIDMappings(idmappings storage.IDMappingOptions) CtrCreateOption {
 	}
 }
 
-// WithExitCommand sets the ExitCommand for the container, appending on the ctr.ID() to the end
-func WithExitCommand(exitCommand []string) CtrCreateOption {
-	return func(ctr *Container) error {
-		if ctr.valid {
-			return define.ErrCtrFinalized
-		}
-
-		ctr.config.ExitCommand = exitCommand
-		ctr.config.ExitCommand = append(ctr.config.ExitCommand, ctr.ID())
-
-		return nil
-	}
-}
-
 // WithUTSNSFromPod indicates the the container should join the UTS namespace of
 // its pod
 func WithUTSNSFromPod(p *Pod) CtrCreateOption {
@@ -186,8 +186,6 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
 	// If the ID is empty a new name for the restored container was requested
 	if ctr.config.ID == "" {
 		ctr.config.ID = stringid.GenerateNonCryptoID()
-		// Fixup ExitCommand with new ID
-		ctr.config.ExitCommand[len(ctr.config.ExitCommand)-1] = ctr.config.ID
 	}
 	// Reset the log path to point to the default
 	ctr.config.LogPath = ""
@@ -12,7 +12,7 @@ import (
 	"github.com/containers/podman/v3/pkg/api/handlers/utils"
 	"github.com/containers/podman/v3/pkg/api/server/idle"
 	api "github.com/containers/podman/v3/pkg/api/types"
-	"github.com/containers/podman/v3/pkg/specgen/generate"
+	"github.com/containers/podman/v3/pkg/specgenutil"
 	"github.com/gorilla/mux"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -65,7 +65,7 @@ func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	// Automatically log to syslog if the server has log-level=debug set
-	exitCommandArgs, err := generate.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), true, true)
+	exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), true, true)
 	if err != nil {
 		utils.InternalServerError(w, err)
 		return
@@ -239,11 +239,6 @@ func CRImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, restoreOpt
 		}
 	}
 
-	// Check if the ExitCommand points to the correct container ID
-	if containerConfig.ExitCommand[len(containerConfig.ExitCommand)-1] != containerConfig.ID {
-		return nil, errors.Errorf("'ExitCommandID' uses ID %s instead of container ID %s", containerConfig.ExitCommand[len(containerConfig.ExitCommand)-1], containerConfig.ID)
-	}
-
 	containers = append(containers, container)
 	return containers, nil
 }
@@ -29,6 +29,7 @@ import (
 	"github.com/containers/podman/v3/pkg/signal"
 	"github.com/containers/podman/v3/pkg/specgen"
 	"github.com/containers/podman/v3/pkg/specgen/generate"
+	"github.com/containers/podman/v3/pkg/specgenutil"
 	"github.com/containers/podman/v3/pkg/util"
 	"github.com/containers/storage"
 	"github.com/pkg/errors"
@@ -656,7 +657,7 @@ func makeExecConfig(options entities.ExecOptions, rt *libpod.Runtime) (*libpod.E
 		return nil, errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command")
 	}
 	// TODO: Add some ability to toggle syslog
-	exitCommandArgs, err := generate.CreateExitCommandArgs(storageConfig, runtimeConfig, false, false, true)
+	exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), false, true)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error constructing exit command for exec session")
 	}
@@ -3,17 +3,14 @@ package generate
 import (
 	"context"
 	"fmt"
-	"os"
 	"path/filepath"
 	"strings"
 
 	cdi "github.com/container-orchestrated-devices/container-device-interface/pkg"
 	"github.com/containers/common/libimage"
-	"github.com/containers/common/pkg/config"
 	"github.com/containers/podman/v3/libpod"
 	"github.com/containers/podman/v3/pkg/specgen"
 	"github.com/containers/podman/v3/pkg/util"
-	"github.com/containers/storage/types"
 	spec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
@@ -163,15 +160,6 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
 	}
 	options = append(options, opts...)
 
-	var exitCommandArgs []string
-
-	exitCommandArgs, err = CreateExitCommandArgs(rt.StorageConfig(), rtc, logrus.IsLevelEnabled(logrus.DebugLevel), s.Remove, false)
-	if err != nil {
-		return nil, nil, nil, err
-	}
-
-	options = append(options, libpod.WithExitCommand(exitCommandArgs))
-
 	if len(s.Aliases) > 0 {
 		options = append(options, libpod.WithNetworkAliases(s.Aliases))
 	}
@@ -500,54 +488,3 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.
 	}
 	return options, nil
 }
-
-func CreateExitCommandArgs(storageConfig types.StoreOptions, config *config.Config, syslog, rm, exec bool) ([]string, error) {
-	// We need a cleanup process for containers in the current model.
-	// But we can't assume that the caller is Podman - it could be another
-	// user of the API.
-	// As such, provide a way to specify a path to Podman, so we can
-	// still invoke a cleanup process.
-
-	podmanPath, err := os.Executable()
-	if err != nil {
-		return nil, err
-	}
-
-	command := []string{podmanPath,
-		"--root", storageConfig.GraphRoot,
-		"--runroot", storageConfig.RunRoot,
-		"--log-level", logrus.GetLevel().String(),
-		"--cgroup-manager", config.Engine.CgroupManager,
-		"--tmpdir", config.Engine.TmpDir,
-		"--cni-config-dir", config.Network.NetworkConfigDir,
-	}
-	if config.Engine.OCIRuntime != "" {
-		command = append(command, []string{"--runtime", config.Engine.OCIRuntime}...)
-	}
-	if storageConfig.GraphDriverName != "" {
-		command = append(command, []string{"--storage-driver", storageConfig.GraphDriverName}...)
-	}
-	for _, opt := range storageConfig.GraphDriverOptions {
-		command = append(command, []string{"--storage-opt", opt}...)
-	}
-	if config.Engine.EventsLogger != "" {
-		command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...)
-	}
-
-	if syslog {
-		command = append(command, "--syslog")
-	}
-	command = append(command, []string{"container", "cleanup"}...)
-
-	if rm {
-		command = append(command, "--rm")
-	}
-
-	// This has to be absolutely last, to ensure that the exec session ID
-	// will be added after it by Libpod.
-	if exec {
-		command = append(command, "--exec")
-	}
-
-	return command, nil
-}
@@ -3,10 +3,13 @@ package specgenutil
 import (
 	"io/ioutil"
 	"net"
+	"os"
 	"strconv"
 	"strings"
 
+	"github.com/containers/common/pkg/config"
 	"github.com/containers/podman/v3/libpod/network/types"
+	storageTypes "github.com/containers/storage/types"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -272,3 +275,54 @@ func parseAndValidatePort(port string) (uint16, error) {
 	}
 	return uint16(num), nil
 }
+
+func CreateExitCommandArgs(storageConfig storageTypes.StoreOptions, config *config.Config, syslog, rm, exec bool) ([]string, error) {
+	// We need a cleanup process for containers in the current model.
+	// But we can't assume that the caller is Podman - it could be another
+	// user of the API.
+	// As such, provide a way to specify a path to Podman, so we can
+	// still invoke a cleanup process.
+
+	podmanPath, err := os.Executable()
+	if err != nil {
+		return nil, err
+	}
+
+	command := []string{podmanPath,
+		"--root", storageConfig.GraphRoot,
+		"--runroot", storageConfig.RunRoot,
+		"--log-level", logrus.GetLevel().String(),
+		"--cgroup-manager", config.Engine.CgroupManager,
+		"--tmpdir", config.Engine.TmpDir,
+		"--cni-config-dir", config.Network.NetworkConfigDir,
+	}
+	if config.Engine.OCIRuntime != "" {
+		command = append(command, []string{"--runtime", config.Engine.OCIRuntime}...)
+	}
+	if storageConfig.GraphDriverName != "" {
+		command = append(command, []string{"--storage-driver", storageConfig.GraphDriverName}...)
+	}
+	for _, opt := range storageConfig.GraphDriverOptions {
+		command = append(command, []string{"--storage-opt", opt}...)
+	}
+	if config.Engine.EventsLogger != "" {
+		command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...)
+	}
+
+	if syslog {
+		command = append(command, "--syslog")
+	}
+	command = append(command, []string{"container", "cleanup"}...)
+
+	if rm {
+		command = append(command, "--rm")
+	}
+
+	// This has to be absolutely last, to ensure that the exec session ID
+	// will be added after it by Libpod.
+	if exec {
+		command = append(command, "--exec")
+	}
+
+	return command, nil
+}
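The flag ordering at the tail of `CreateExitCommandArgs` is deliberate: `--syslog` is a global flag so it precedes the subcommand, `--rm` follows `container cleanup`, and `--exec` comes absolutely last so that Libpod can append the exec session ID after it. A standalone sketch of just that tail logic (global flags such as `--root` omitted):

```go
package main

import "fmt"

// assembleTail mirrors the ordering rules at the end of CreateExitCommandArgs:
// --syslog (a global flag) before the subcommand, --rm after "container
// cleanup", and --exec absolutely last so Libpod can append the exec
// session ID behind it.
func assembleTail(syslog, rm, exec bool) []string {
	var tail []string
	if syslog {
		tail = append(tail, "--syslog")
	}
	tail = append(tail, "container", "cleanup")
	if rm {
		tail = append(tail, "--rm")
	}
	if exec {
		tail = append(tail, "--exec")
	}
	return tail
}

func main() {
	fmt.Println(assembleTail(false, true, false)) // [container cleanup --rm]
	fmt.Println(assembleTail(true, false, true))  // [--syslog container cleanup --exec]
}
```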