Merge pull request #6270 from mheon/detached_exec

Implement detached exec
This commit is contained in:
OpenShift Merge Robot
2020-05-21 16:02:52 +02:00
committed by GitHub
17 changed files with 898 additions and 581 deletions

View File

@@ -7,6 +7,8 @@ import (
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/cmd/podman/utils"
"github.com/containers/libpod/pkg/domain/entities"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -43,6 +45,7 @@ func init() {
flags := cleanupCommand.Flags()
flags.BoolVarP(&cleanupOptions.All, "all", "a", false, "Cleans up all containers")
flags.BoolVarP(&cleanupOptions.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.StringVar(&cleanupOptions.Exec, "exec", "", "Clean up the given exec session instead of the container")
flags.BoolVar(&cleanupOptions.Remove, "rm", false, "After cleanup, remove the container entirely")
flags.BoolVar(&cleanupOptions.RemoveImage, "rmi", false, "After cleanup, remove the image entirely")
@@ -52,8 +55,26 @@ func cleanup(cmd *cobra.Command, args []string) error {
var (
errs utils.OutputErrors
)
if cleanupOptions.Exec != "" {
switch {
case cleanupOptions.All:
return errors.Errorf("exec and all options conflict")
case len(args) > 1:
return errors.Errorf("cannot use exec option when more than one container is given")
case cleanupOptions.RemoveImage:
return errors.Errorf("exec and rmi options conflict")
}
}
responses, err := registry.ContainerEngine().ContainerCleanup(registry.GetContext(), args, cleanupOptions)
if err != nil {
// `podman container cleanup` is almost always run in the
// background. Our only way of relaying information to the user
// is via syslog.
// As such, we need to logrus.Errorf our errors to ensure they
// are properly printed if --syslog is set.
logrus.Errorf("Error running container cleanup: %v", err)
return err
}
for _, r := range responses {
@@ -62,12 +83,15 @@ func cleanup(cmd *cobra.Command, args []string) error {
continue
}
if r.RmErr != nil {
logrus.Errorf("Error removing container: %v", r.RmErr)
errs = append(errs, r.RmErr)
}
if r.RmiErr != nil {
logrus.Errorf("Error removing image: %v", r.RmiErr)
errs = append(errs, r.RmiErr)
}
if r.CleanErr != nil {
logrus.Errorf("Error cleaning up container: %v", r.CleanErr)
errs = append(errs, r.CleanErr)
}
}

View File

@@ -2,9 +2,11 @@ package containers
import (
"bufio"
"fmt"
"os"
"github.com/containers/libpod/cmd/podman/registry"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/domain/entities"
envLib "github.com/containers/libpod/pkg/env"
"github.com/pkg/errors"
@@ -41,10 +43,12 @@ var (
var (
envInput, envFile []string
execOpts entities.ExecOptions
execDetach bool
)
func execFlags(flags *pflag.FlagSet) {
flags.SetInterspersed(false)
flags.BoolVarP(&execDetach, "detach", "d", false, "Run the exec session in detached mode (backgrounded)")
flags.StringVar(&execOpts.DetachKeys, "detach-keys", containerConfig.DetachKeys(), "Select the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _")
flags.StringArrayVarP(&envInput, "env", "e", []string{}, "Set environment variables")
flags.StringSliceVar(&envFile, "env-file", []string{}, "Read in a file of environment variables")
@@ -106,16 +110,27 @@ func exec(cmd *cobra.Command, args []string) error {
}
execOpts.Envs = envLib.Join(execOpts.Envs, cliEnv)
execOpts.Streams.OutputStream = os.Stdout
execOpts.Streams.ErrorStream = os.Stderr
if execOpts.Interactive {
execOpts.Streams.InputStream = bufio.NewReader(os.Stdin)
execOpts.Streams.AttachInput = true
}
execOpts.Streams.AttachOutput = true
execOpts.Streams.AttachError = true
exitCode, err := registry.ContainerEngine().ContainerExec(registry.GetContext(), nameOrId, execOpts)
if !execDetach {
streams := define.AttachStreams{}
streams.OutputStream = os.Stdout
streams.ErrorStream = os.Stderr
if execOpts.Interactive {
streams.InputStream = bufio.NewReader(os.Stdin)
streams.AttachInput = true
}
streams.AttachOutput = true
streams.AttachError = true
exitCode, err := registry.ContainerEngine().ContainerExec(registry.GetContext(), nameOrId, execOpts, streams)
registry.SetExitCode(exitCode)
return err
}
id, err := registry.ContainerEngine().ContainerExecDetached(registry.GetContext(), nameOrId, execOpts)
if err != nil {
return err
}
fmt.Println(id)
return nil
}

View File

@@ -16,6 +16,13 @@ Sometimes container's mount points and network stacks can remain if the podman c
Cleanup all containers.
**--exec**=_session_
Clean up an exec session for a single container.
Can only be specified if a single container is being cleaned up (conflicts with **--all** as such).
If **--rm** is not specified, temporary files for the exec session will be cleaned up; if it is, the exec session will be removed from the container.
Conflicts with **--rmi** as the container is not being cleaned up so the image cannot be removed.
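For example, **podman container cleanup --exec** *sessionID* *containerID* (an illustrative invocation) cleans up just that session; the session ID is the value printed by **podman exec --detach**.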
**--latest**, **-l**
Instead of providing the container name or ID, use the last created container. If you use methods other than Podman
to run containers such as CRI-O, the last started container could be from either of those methods.

View File

@@ -13,6 +13,10 @@ podman\-exec - Execute a command in a running container
## OPTIONS
**--detach**
Start the exec session, but do not attach to it. The command will run in the background and the exec session will be automatically removed when it completes. The **podman exec** command will print the ID of the exec session and exit immediately after it starts.
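For example, **podman exec --detach mycontainer top** (an illustrative invocation) prints the new exec session's ID and returns immediately while **top** keeps running inside **mycontainer**.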
**--detach-keys**=*sequence*
Specify the key sequence for detaching a container. Format is a single character `[a-Z]` or one or more `ctrl-<value>` characters where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Specifying "" will disable this feature. The default is *ctrl-p,ctrl-q*.

View File

@@ -62,6 +62,13 @@ type ExecConfig struct {
// given is the number that will be passed into the exec session,
// starting at 3.
PreserveFDs uint `json:"preserveFds,omitempty"`
// ExitCommand is the exec session's exit command.
// This command will be executed when the exec session exits.
// If unset, no command will be executed.
// Two arguments will be appended to the exit command by Libpod:
// The ID of the exec session, and the ID of the container the exec
// session is a part of (in that order).
ExitCommand []string `json:"exitCommand,omitempty"`
}
// ExecSession contains information on a single exec session attached to a given
@@ -191,6 +198,10 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
return "", errors.Wrapf(err, "error copying exec configuration into exec session")
}
if len(session.Config.ExitCommand) > 0 {
session.Config.ExitCommand = append(session.Config.ExitCommand, []string{session.ID(), c.ID()}...)
}
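To make the appended arguments concrete, here is a hedged, standalone illustration with hypothetical values (the real exit command for detached exec is assembled later, in the domain layer):

```go
package main

import "fmt"

func main() {
	// Hypothetical values, for illustration only.
	exitCommand := []string{"podman", "container", "cleanup", "--exec"}
	sessionID, ctrID := "abc123", "def456"
	// Mirrors the append above: session ID first, then container ID.
	exitCommand = append(exitCommand, []string{sessionID, ctrID}...)
	fmt.Println(exitCommand)
	// Output: [podman container cleanup --exec abc123 def456]
}
```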
if c.state.ExecSessions == nil {
c.state.ExecSessions = make(map[string]*ExecSession)
}
@@ -210,11 +221,52 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
}
// ExecStart starts an exec session in the container, but does not attach to it.
// Returns immediately upon starting the exec session.
// Returns immediately upon starting the exec session, unlike other ExecStart
// functions, which will only return when the exec session exits.
func (c *Container) ExecStart(sessionID string) error {
// Will be implemented in part 2, migrating Start and implementing
// detached Start.
return define.ErrNotImplemented
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
}
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
if session.State != define.ExecStateCreated {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
opts, err := prepareForExec(c, session)
if err != nil {
return err
}
pid, err := c.ociRuntime.ExecContainerDetached(c, session.ID(), opts, session.Config.AttachStdin)
if err != nil {
return err
}
c.newContainerEvent(events.Exec)
logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
// Update and save session to reflect PID/running
session.PID = pid
session.State = define.ExecStateRunning
return c.save()
} }
// ExecStartAndAttach starts and attaches to an exec session in a container.
@@ -511,9 +563,29 @@ func (c *Container) ExecCleanup(sessionID string) error {
}
if session.State == define.ExecStateRunning {
// Check if the exec session is still running.
alive, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
if err != nil {
return err
}
if alive {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
}
exitCode, err := c.readExecExitCode(session.ID())
if err != nil {
return err
}
session.ExitCode = exitCode
session.PID = 0
session.State = define.ExecStateStopped
if err := c.save(); err != nil {
return err
}
}
logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID()) logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID())
return c.cleanupExecBundle(session.ID()) return c.cleanupExecBundle(session.ID())
@ -541,11 +613,11 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
// Update status of exec session if running, so we cna check if it // Update status of exec session if running, so we cna check if it
// stopped in the meantime. // stopped in the meantime.
if session.State == define.ExecStateRunning { if session.State == define.ExecStateRunning {
stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID()) running, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
if err != nil { if err != nil {
return err return err
} }
if stopped { if !running {
session.State = define.ExecStateStopped session.State = define.ExecStateStopped
// TODO: should we retrieve exit code here? // TODO: should we retrieve exit code here?
// TODO: Might be worth saving state here. // TODO: Might be worth saving state here.
@@ -800,13 +872,6 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
continue
}
if !alive {
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
_, isLegacy := c.state.LegacyExecSessions[id]
if isLegacy {
delete(c.state.LegacyExecSessions, id)
@@ -826,6 +891,12 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
needSave = true
}
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
} else {
activeSessions = append(activeSessions, id)
}
@@ -846,6 +917,8 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
func (c *Container) removeAllExecSessions() error {
knownSessions := c.getKnownExecSessions()
logrus.Debugf("Removing all exec sessions for container %s", c.ID())
var lastErr error
for _, id := range knownSessions {
if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil {
@@ -910,6 +983,7 @@ func prepareForExec(c *Container, session *ExecSession) (*ExecOptions, error) {
opts.User = user
opts.PreserveFDs = session.Config.PreserveFDs
opts.DetachKeys = session.Config.DetachKeys
opts.ExitCommand = session.Config.ExitCommand
return opts, nil
}

View File

@@ -68,10 +68,10 @@ type OCIRuntime interface {
AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error
// ExecContainer executes a command in a running container.
// Returns an int (exit code), error channel (errors from attach), and
// error (errors that occurred attempting to start the exec session).
// This returns once the exec session is running - not once it has
// completed, as one might expect. The attach session will remain
// Returns an int (PID of exec session), error channel (errors from
// attach), and error (errors that occurred attempting to start the exec
// session). This returns once the exec session is running - not once it
// has completed, as one might expect. The attach session will remain
// running, in a goroutine that will return via the chan error in the
// return signature.
ExecContainer(ctr *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error)
@@ -81,6 +81,10 @@ type OCIRuntime interface {
// start, with a goroutine running in the background to handle attach).
// The HTTP attach itself maintains the same invariants as HTTPAttach.
ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error)
// ExecContainerDetached executes a command in a running container, but
// does not attach to it. Returns the PID of the exec session and an
// error (if starting the exec session failed)
ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error)
// ExecAttachResize resizes the terminal of a running exec session. Only
// allowed with sessions that were created with a TTY.
ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error
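The revised contract (PID plus attach error channel) is easy to misread, so here is a hedged sketch of a conforming caller (assumed to live in the libpod package; runtime, ctr, opts, and streams are stand-ins, not code from this PR):

```go
// Sketch only: consume the ExecContainer contract described above.
func runAndWait(runtime OCIRuntime, ctr *Container, sessionID string, opts *ExecOptions, streams *define.AttachStreams) error {
	pid, attachChan, err := runtime.ExecContainer(ctr, sessionID, opts, streams)
	if err != nil {
		return err // the session never started
	}
	logrus.Debugf("exec session %s running as PID %d", sessionID, pid)
	// ExecContainer returned as soon as the session was running; the
	// attach goroutine reports its result on attachChan when the
	// session ends.
	return <-attachChan
}
```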
@@ -165,6 +169,9 @@ type ExecOptions struct {
// If provided but set to "", detaching from the container will be
// disabled.
DetachKeys *string
// ExitCommand is a command that will be run after the exec session
// exits.
ExitCommand []string
}
// HTTPAttachStreams informs the HTTPAttach endpoint which of the container's

View File

@@ -0,0 +1,599 @@
package libpod
import (
"bufio"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"syscall"
"time"
"github.com/containers/common/pkg/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"k8s.io/client-go/tools/remotecommand"
)
// ExecContainer executes a command in a running container
func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) {
if options == nil {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
}
if len(options.Cmd) == 0 {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
}
if sessionID == "" {
return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
}
// TODO: Should we default this to false?
// Or maybe make streams mandatory?
attachStdin := true
if streams != nil {
attachStdin = streams.AttachInput
}
var ociLog string
if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
ociLog = c.execOCILog(sessionID)
}
execCmd, pipes, err := r.startExec(c, sessionID, options, attachStdin, ociLog)
if err != nil {
return -1, nil, err
}
// Only close sync pipe. Start and attach are consumed in the attach
// goroutine.
defer func() {
if pipes.syncPipe != nil && !pipes.syncClosed {
errorhandling.CloseQuiet(pipes.syncPipe)
pipes.syncClosed = true
}
}()
// TODO Only create if !detach
// Attach to the container before starting it
attachChan := make(chan error)
go func() {
// attachToExec is responsible for closing pipes
attachChan <- c.attachToExec(streams, options.DetachKeys, sessionID, pipes.startPipe, pipes.attachPipe)
close(attachChan)
}()
if err := execCmd.Wait(); err != nil {
return -1, nil, errors.Wrapf(err, "cannot run conmon")
}
pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
return pid, attachChan, err
}
// ExecContainerHTTP executes a new command in an existing container and
// forwards its standard streams over an attach
func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
}
}
if options == nil {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
}
detachString := config.DefaultDetachKeys
if options.DetachKeys != nil {
detachString = *options.DetachKeys
}
detachKeys, err := processDetachKeys(detachString)
if err != nil {
return -1, nil, err
}
// TODO: Should we default this to false?
// Or maybe make streams mandatory?
attachStdin := true
if streams != nil {
attachStdin = streams.Stdin
}
var ociLog string
if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
ociLog = ctr.execOCILog(sessionID)
}
execCmd, pipes, err := r.startExec(ctr, sessionID, options, attachStdin, ociLog)
if err != nil {
return -1, nil, err
}
// Only close sync pipe. Start and attach are consumed in the attach
// goroutine.
defer func() {
if pipes.syncPipe != nil && !pipes.syncClosed {
errorhandling.CloseQuiet(pipes.syncPipe)
pipes.syncClosed = true
}
}()
attachChan := make(chan error)
go func() {
// attachToExec is responsible for closing pipes
attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel)
close(attachChan)
}()
// Wait for conmon to succeed, then return.
if err := execCmd.Wait(); err != nil {
return -1, nil, errors.Wrapf(err, "cannot run conmon")
}
pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
return pid, attachChan, err
}
// ExecContainerDetached executes a command in a running container, but does
// not attach to it.
func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
if options == nil {
return -1, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerDetached")
}
var ociLog string
if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
ociLog = ctr.execOCILog(sessionID)
}
execCmd, pipes, err := r.startExec(ctr, sessionID, options, stdin, ociLog)
if err != nil {
return -1, err
}
defer func() {
pipes.cleanup()
}()
// Wait for Conmon to tell us we're ready to attach.
// We aren't actually *going* to attach, but this means that we're good
// to proceed.
if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
return -1, err
}
// Start the exec session
if err := writeConmonPipeData(pipes.startPipe); err != nil {
return -1, err
}
// Wait for conmon to succeed, then return.
if err := execCmd.Wait(); err != nil {
return -1, errors.Wrapf(err, "cannot run conmon")
}
pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
return pid, err
}
// ExecAttachResize resizes the TTY of the given exec session.
func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
if err != nil {
return err
}
defer controlFile.Close()
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
}
return nil
}
// ExecStopContainer stops a given exec session in a running container.
func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
pid, err := ctr.getExecSessionPID(sessionID)
if err != nil {
return err
}
logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
if timeout > 0 {
// Use SIGTERM by default, then SIGKILL after timeout.
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
if err := unix.Kill(pid, unix.SIGTERM); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
} else {
// No error, container is dead
return nil
}
}
// SIGTERM did not work. On to SIGKILL.
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
if err := unix.Kill(pid, unix.SIGKILL); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
}
return nil
}
// ExecUpdateStatus checks if the given exec session is still running.
func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
pid, err := ctr.getExecSessionPID(sessionID)
if err != nil {
return false, err
}
logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return false, nil
}
return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
return true, nil
}
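ExecStopContainer and ExecUpdateStatus both lean on the classic kill(pid, 0) probe. A standalone illustration of the idiom (not part of the diff):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// pidAlive reports whether a process with the given PID exists. Signal 0
// performs kill(2)'s error checking without delivering a signal.
func pidAlive(pid int) (bool, error) {
	if err := unix.Kill(pid, 0); err != nil {
		if err == unix.ESRCH {
			// No such process: it has exited (or never existed).
			return false, nil
		}
		// e.g. EPERM: the process exists but belongs to someone else.
		return false, err
	}
	return true, nil
}

func main() {
	alive, err := pidAlive(os.Getpid())
	fmt.Println(alive, err) // true <nil>
}
```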
// ExecContainerCleanup cleans up files created when a command is run via
// ExecContainer. This includes the attach socket for the exec session.
func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error {
// Clean up the sockets dir. Issue #3962
// Also ignore if it doesn't exist for some reason; hence the conditional return below
if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// ExecAttachSocketPath is the path to a container's exec session attach socket.
func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
// We don't even use container, so don't validity-check it
if sessionID == "" {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
}
return filepath.Join(r.socketsDir, sessionID, "attach"), nil
}
// This contains pipes used by the exec API.
type execPipes struct {
syncPipe *os.File
syncClosed bool
startPipe *os.File
startClosed bool
attachPipe *os.File
attachClosed bool
}
func (p *execPipes) cleanup() {
if p.syncPipe != nil && !p.syncClosed {
errorhandling.CloseQuiet(p.syncPipe)
p.syncClosed = true
}
if p.startPipe != nil && !p.startClosed {
errorhandling.CloseQuiet(p.startPipe)
p.startClosed = true
}
if p.attachPipe != nil && !p.attachClosed {
errorhandling.CloseQuiet(p.attachPipe)
p.attachClosed = true
}
}
// Start an exec session's conmon parent from the given options.
func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *ExecOptions, attachStdin bool, ociLog string) (_ *exec.Cmd, _ *execPipes, deferredErr error) {
pipes := new(execPipes)
if options == nil {
return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
}
if len(options.Cmd) == 0 {
return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
}
if sessionID == "" {
return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
}
// create sync pipe to receive the pid
parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating socket pair")
}
pipes.syncPipe = parentSyncPipe
defer func() {
if deferredErr != nil {
pipes.cleanup()
}
}()
// create start pipe to set the cgroup before running
// attachToExec is responsible for closing parentStartPipe
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating socket pair")
}
pipes.startPipe = parentStartPipe
// create the attach pipe to allow attach socket to be created before
// $RUNTIME exec starts running. This is to make sure we can capture all output
// from the process through that socket, rather than half reading the log, half attaching to the socket
// attachToExec is responsible for closing parentAttachPipe
parentAttachPipe, childAttachPipe, err := newPipe()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating socket pair")
}
pipes.attachPipe = parentAttachPipe
childrenClosed := false
defer func() {
if !childrenClosed {
errorhandling.CloseQuiet(childSyncPipe)
errorhandling.CloseQuiet(childAttachPipe)
errorhandling.CloseQuiet(childStartPipe)
}
}()
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return nil, nil, err
}
finalEnv := make([]string, 0, len(options.Env))
for k, v := range options.Env {
finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v))
}
processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID)
if err != nil {
return nil, nil, err
}
args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "")
if options.PreserveFDs > 0 {
args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...)
}
for _, capability := range options.CapAdd {
args = append(args, formatRuntimeOpts("--cap", capability)...)
}
if options.Terminal {
args = append(args, "-t")
}
if attachStdin {
args = append(args, "-i")
}
// Append container ID and command
args = append(args, "-e")
// TODO make this optional when we can detach
args = append(args, "--exec-attach")
args = append(args, "--exec-process-spec", processFile.Name())
if len(options.ExitCommand) > 0 {
args = append(args, "--exit-command", options.ExitCommand[0])
for _, arg := range options.ExitCommand[1:] {
args = append(args, []string{"--exit-command-arg", arg}...)
}
}
logrus.WithFields(logrus.Fields{
"args": args,
}).Debugf("running conmon: %s", r.conmonPath)
// TODO: Need to pass this back so we can wait on it.
execCmd := exec.Command(r.conmonPath, args...)
// TODO: This is commented because it doesn't make much sense in HTTP
// attach, and I'm not certain it does for non-HTTP attach as well.
// if streams != nil {
// // Don't add the InputStream to the execCmd. Instead, the data should be passed
// // through CopyDetachable
// if streams.AttachOutput {
// execCmd.Stdout = options.Streams.OutputStream
// }
// if streams.AttachError {
// execCmd.Stderr = options.Streams.ErrorStream
// }
// }
conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir)
if err != nil {
return nil, nil, err
}
if options.PreserveFDs > 0 {
for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
}
}
// we don't want to step on the fds users asked us to preserve
// Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
execCmd.Env = r.conmonEnv
execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5))
execCmd.Env = append(execCmd.Env, conmonEnv...)
execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe)
execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...)
execCmd.Dir = c.execBundlePath(sessionID)
execCmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
err = startCommandGivenSelinux(execCmd)
// We don't need children pipes on the parent side
errorhandling.CloseQuiet(childSyncPipe)
errorhandling.CloseQuiet(childAttachPipe)
errorhandling.CloseQuiet(childStartPipe)
childrenClosed = true
if err != nil {
return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
}
if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
return nil, nil, err
}
if options.PreserveFDs > 0 {
for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
// These fds were passed down to the runtime. Close them
// so they do not interfere
if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
logrus.Debugf("unable to close file fd-%d", fd)
}
}
}
return execCmd, pipes, nil
}
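The fd bookkeeping above is subtle: entry i of ExtraFiles becomes fd 3+i in the child, and the preserved fds are appended first, so the three conmon pipes land immediately after them, matching the _OCI_* environment variables. A small standalone check of that arithmetic (illustrative only):

```go
package main

import "fmt"

func main() {
	// With PreserveFDs = 2, the user's fds occupy 3 and 4 in the child,
	// so the pipes appended to ExtraFiles afterwards land at:
	preserveFDs := uint(2)
	fmt.Println("_OCI_SYNCPIPE =", preserveFDs+3)   // 5
	fmt.Println("_OCI_STARTPIPE =", preserveFDs+4)  // 6
	fmt.Println("_OCI_ATTACHPIPE =", preserveFDs+5) // 7
}
```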
// Attach to a container over HTTP
func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error {
if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
}
defer func() {
if !pipes.startClosed {
errorhandling.CloseQuiet(pipes.startPipe)
pipes.startClosed = true
}
if !pipes.attachClosed {
errorhandling.CloseQuiet(pipes.attachPipe)
pipes.attachClosed = true
}
}()
logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID)
// set up the socket path, such that it is the correct length and location for exec
sockPath, err := c.execAttachSocketPath(sessionID)
if err != nil {
return err
}
socketPath := buildSocketPath(sockPath)
// 2: read from attachFd that the parent process has set up the console socket
if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
return err
}
// 2: then attach
conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
if err != nil {
return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
}
defer func() {
if err := conn.Close(); err != nil {
logrus.Errorf("unable to close socket: %q", err)
}
}()
// Make a channel to pass errors back
errChan := make(chan error)
attachStdout := true
attachStderr := true
attachStdin := true
if streams != nil {
attachStdout = streams.Stdout
attachStderr = streams.Stderr
attachStdin = streams.Stdin
}
// Next, STDIN. Avoid entirely if attachStdin unset.
if attachStdin {
go func() {
logrus.Debugf("Beginning STDIN copy")
_, err := utils.CopyDetachable(conn, httpBuf, detachKeys)
logrus.Debugf("STDIN copy completed")
errChan <- err
}()
}
// 4: send start message to child
if err := writeConmonPipeData(pipes.startPipe); err != nil {
return err
}
// Handle STDOUT/STDERR *after* start message is sent
go func() {
var err error
if isTerminal {
// Hack: return immediately if attachStdout not set to
// emulate Docker.
// Basically, when terminal is set, STDERR goes nowhere.
// Everything goes over STDOUT.
// Therefore, if not attaching STDOUT - we'll never copy
// anything from here.
logrus.Debugf("Performing terminal HTTP attach for container %s", c.ID())
if attachStdout {
err = httpAttachTerminalCopy(conn, httpBuf, c.ID())
}
} else {
logrus.Debugf("Performing non-terminal HTTP attach for container %s", c.ID())
err = httpAttachNonTerminalCopy(conn, httpBuf, c.ID(), attachStdin, attachStdout, attachStderr)
}
errChan <- err
logrus.Debugf("STDOUT/ERR copy completed")
}()
if cancel != nil {
select {
case err := <-errChan:
return err
case <-cancel:
return nil
}
} else {
var connErr error = <-errChan
return connErr
}
}

View File

@@ -635,229 +635,6 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.Te
return nil
}
// ExecContainer executes a command in a running container
func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams) (int, chan error, error) {
if options == nil {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
}
if len(options.Cmd) == 0 {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
}
if sessionID == "" {
return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
}
// TODO: Should we default this to false?
// Or maybe make streams mandatory?
attachStdin := true
if streams != nil {
attachStdin = streams.AttachInput
}
var ociLog string
if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
ociLog = c.execOCILog(sessionID)
}
execCmd, pipes, err := r.startExec(c, sessionID, options, attachStdin, ociLog)
if err != nil {
return -1, nil, err
}
// Only close sync pipe. Start and attach are consumed in the attach
// goroutine.
defer func() {
if pipes.syncPipe != nil && !pipes.syncClosed {
errorhandling.CloseQuiet(pipes.syncPipe)
pipes.syncClosed = true
}
}()
// TODO Only create if !detach
// Attach to the container before starting it
attachChan := make(chan error)
go func() {
// attachToExec is responsible for closing pipes
attachChan <- c.attachToExec(streams, options.DetachKeys, sessionID, pipes.startPipe, pipes.attachPipe)
close(attachChan)
}()
if err := execCmd.Wait(); err != nil {
return -1, nil, errors.Wrapf(err, "cannot run conmon")
}
pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
return pid, attachChan, err
}
// ExecContainerHTTP executes a new command in an existing container and
// forwards its standard streams over an attach
func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, options *ExecOptions, httpConn net.Conn, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, cancel <-chan bool) (int, chan error, error) {
if streams != nil {
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
}
}
if options == nil {
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
}
detachString := config.DefaultDetachKeys
if options.DetachKeys != nil {
detachString = *options.DetachKeys
}
detachKeys, err := processDetachKeys(detachString)
if err != nil {
return -1, nil, err
}
// TODO: Should we default this to false?
// Or maybe make streams mandatory?
attachStdin := true
if streams != nil {
attachStdin = streams.Stdin
}
var ociLog string
if logrus.GetLevel() != logrus.DebugLevel && r.supportsJSON {
ociLog = ctr.execOCILog(sessionID)
}
execCmd, pipes, err := r.startExec(ctr, sessionID, options, attachStdin, ociLog)
if err != nil {
return -1, nil, err
}
// Only close sync pipe. Start and attach are consumed in the attach
// goroutine.
defer func() {
if pipes.syncPipe != nil && !pipes.syncClosed {
errorhandling.CloseQuiet(pipes.syncPipe)
pipes.syncClosed = true
}
}()
attachChan := make(chan error)
go func() {
// attachToExec is responsible for closing pipes
attachChan <- attachExecHTTP(ctr, sessionID, httpBuf, streams, pipes, detachKeys, options.Terminal, cancel)
close(attachChan)
}()
// Wait for conmon to succeed, then return.
if err := execCmd.Wait(); err != nil {
return -1, nil, errors.Wrapf(err, "cannot run conmon")
}
pid, err := readConmonPipeData(pipes.syncPipe, ociLog)
return pid, attachChan, err
}
// ExecAttachResize resizes the TTY of the given exec session.
func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
if err != nil {
return err
}
defer controlFile.Close()
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
}
return nil
}
// ExecStopContainer stops a given exec session in a running container.
func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
pid, err := ctr.getExecSessionPID(sessionID)
if err != nil {
return err
}
logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
if timeout > 0 {
// Use SIGTERM by default, then SIGKILL after timeout.
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
if err := unix.Kill(pid, unix.SIGTERM); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
} else {
// No error, container is dead
return nil
}
}
// SIGTERM did not work. On to SIGKILL.
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
if err := unix.Kill(pid, unix.SIGKILL); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
}
return nil
}
// ExecUpdateStatus checks if the given exec session is still running.
func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
pid, err := ctr.getExecSessionPID(sessionID)
if err != nil {
return false, err
}
logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return false, nil
}
return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
return true, nil
}
// ExecContainerCleanup cleans up files created when a command is run via
// ExecContainer. This includes the attach socket for the exec session.
func (r *ConmonOCIRuntime) ExecContainerCleanup(ctr *Container, sessionID string) error {
// Clean up the sockets dir. Issue #3962
// Also ignore if it doesn't exist for some reason; hence the conditional return below
if err := os.RemoveAll(filepath.Join(r.socketsDir, sessionID)); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// CheckpointContainer checkpoints the given container.
func (r *ConmonOCIRuntime) CheckpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
@@ -934,16 +711,6 @@ func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
return filepath.Join(r.socketsDir, ctr.ID(), "attach"), nil
}
// ExecAttachSocketPath is the path to a container's exec session attach socket.
func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
// We don't even use container, so don't validity-check it
if sessionID == "" {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
}
return filepath.Join(r.socketsDir, sessionID, "attach"), nil
}
// ExitFilePath is the path to a container's exit file.
func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) {
if ctr == nil {
@@ -1765,297 +1532,3 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
}
}
// This contains pipes used by the exec API.
type execPipes struct {
syncPipe *os.File
syncClosed bool
startPipe *os.File
startClosed bool
attachPipe *os.File
attachClosed bool
}
func (p *execPipes) cleanup() {
if p.syncPipe != nil && !p.syncClosed {
errorhandling.CloseQuiet(p.syncPipe)
p.syncClosed = true
}
if p.startPipe != nil && !p.startClosed {
errorhandling.CloseQuiet(p.startPipe)
p.startClosed = true
}
if p.attachPipe != nil && !p.attachClosed {
errorhandling.CloseQuiet(p.attachPipe)
p.attachClosed = true
}
}
// Start an exec session's conmon parent from the given options.
func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *ExecOptions, attachStdin bool, ociLog string) (_ *exec.Cmd, _ *execPipes, deferredErr error) {
pipes := new(execPipes)
if options == nil {
return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
}
if len(options.Cmd) == 0 {
return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
}
if sessionID == "" {
return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
}
// create sync pipe to receive the pid
parentSyncPipe, childSyncPipe, err := newPipe()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating socket pair")
}
pipes.syncPipe = parentSyncPipe
defer func() {
if deferredErr != nil {
pipes.cleanup()
}
}()
// create start pipe to set the cgroup before running
// attachToExec is responsible for closing parentStartPipe
childStartPipe, parentStartPipe, err := newPipe()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating socket pair")
}
pipes.startPipe = parentStartPipe
// create the attach pipe to allow attach socket to be created before
// $RUNTIME exec starts running. This is to make sure we can capture all output
// from the process through that socket, rather than half reading the log, half attaching to the socket
// attachToExec is responsible for closing parentAttachPipe
parentAttachPipe, childAttachPipe, err := newPipe()
if err != nil {
return nil, nil, errors.Wrapf(err, "error creating socket pair")
}
pipes.attachPipe = parentAttachPipe
childrenClosed := false
defer func() {
if !childrenClosed {
errorhandling.CloseQuiet(childSyncPipe)
errorhandling.CloseQuiet(childAttachPipe)
errorhandling.CloseQuiet(childStartPipe)
}
}()
runtimeDir, err := util.GetRuntimeDir()
if err != nil {
return nil, nil, err
}
finalEnv := make([]string, 0, len(options.Env))
for k, v := range options.Env {
finalEnv = append(finalEnv, fmt.Sprintf("%s=%s", k, v))
}
processFile, err := prepareProcessExec(c, options.Cmd, finalEnv, options.Terminal, options.Cwd, options.User, sessionID)
if err != nil {
return nil, nil, err
}
args := r.sharedConmonArgs(c, sessionID, c.execBundlePath(sessionID), c.execPidPath(sessionID), c.execLogPath(sessionID), c.execExitFileDir(sessionID), ociLog, "")
if options.PreserveFDs > 0 {
args = append(args, formatRuntimeOpts("--preserve-fds", fmt.Sprintf("%d", options.PreserveFDs))...)
}
for _, capability := range options.CapAdd {
args = append(args, formatRuntimeOpts("--cap", capability)...)
}
if options.Terminal {
args = append(args, "-t")
}
if attachStdin {
args = append(args, "-i")
}
// Append container ID and command
args = append(args, "-e")
// TODO make this optional when we can detach
args = append(args, "--exec-attach")
args = append(args, "--exec-process-spec", processFile.Name())
logrus.WithFields(logrus.Fields{
"args": args,
}).Debugf("running conmon: %s", r.conmonPath)
// TODO: Need to pass this back so we can wait on it.
execCmd := exec.Command(r.conmonPath, args...)
// TODO: This is commented because it doesn't make much sense in HTTP
// attach, and I'm not certain it does for non-HTTP attach as well.
// if streams != nil {
// // Don't add the InputStream to the execCmd. Instead, the data should be passed
// // through CopyDetachable
// if streams.AttachOutput {
// execCmd.Stdout = options.Streams.OutputStream
// }
// if streams.AttachError {
// execCmd.Stderr = options.Streams.ErrorStream
// }
// }
conmonEnv, extraFiles, err := r.configureConmonEnv(runtimeDir)
if err != nil {
return nil, nil, err
}
if options.PreserveFDs > 0 {
for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
execCmd.ExtraFiles = append(execCmd.ExtraFiles, os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)))
}
}
// we don't want to step on the fds users asked us to preserve
// Since 0-2 are used for stdio, start the fds we pass in at preserveFDs+3
execCmd.Env = r.conmonEnv
execCmd.Env = append(execCmd.Env, fmt.Sprintf("_OCI_SYNCPIPE=%d", options.PreserveFDs+3), fmt.Sprintf("_OCI_STARTPIPE=%d", options.PreserveFDs+4), fmt.Sprintf("_OCI_ATTACHPIPE=%d", options.PreserveFDs+5))
execCmd.Env = append(execCmd.Env, conmonEnv...)
execCmd.ExtraFiles = append(execCmd.ExtraFiles, childSyncPipe, childStartPipe, childAttachPipe)
execCmd.ExtraFiles = append(execCmd.ExtraFiles, extraFiles...)
execCmd.Dir = c.execBundlePath(sessionID)
execCmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
err = startCommandGivenSelinux(execCmd)
// We don't need children pipes on the parent side
errorhandling.CloseQuiet(childSyncPipe)
errorhandling.CloseQuiet(childAttachPipe)
errorhandling.CloseQuiet(childStartPipe)
childrenClosed = true
if err != nil {
return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
}
if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
return nil, nil, err
}
if options.PreserveFDs > 0 {
for fd := 3; fd < int(3+options.PreserveFDs); fd++ {
// These fds were passed down to the runtime. Close them
// so they do not interfere
if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
logrus.Debugf("unable to close file fd-%d", fd)
}
}
}
return execCmd, pipes, nil
}
// Attach to a container over HTTP
func attachExecHTTP(c *Container, sessionID string, httpBuf *bufio.ReadWriter, streams *HTTPAttachStreams, pipes *execPipes, detachKeys []byte, isTerminal bool, cancel <-chan bool) error {
if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
return errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
}
defer func() {
if !pipes.startClosed {
errorhandling.CloseQuiet(pipes.startPipe)
pipes.startClosed = true
}
if !pipes.attachClosed {
errorhandling.CloseQuiet(pipes.attachPipe)
pipes.attachClosed = true
}
}()
logrus.Debugf("Attaching to container %s exec session %s", c.ID(), sessionID)
// set up the socket path, such that it is the correct length and location for exec
sockPath, err := c.execAttachSocketPath(sessionID)
if err != nil {
return err
}
socketPath := buildSocketPath(sockPath)
// 2: read from attachFd that the parent process has set up the console socket
if _, err := readConmonPipeData(pipes.attachPipe, ""); err != nil {
return err
}
// 2: then attach
conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: socketPath, Net: "unixpacket"})
if err != nil {
return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
}
defer func() {
if err := conn.Close(); err != nil {
logrus.Errorf("unable to close socket: %q", err)
}
}()
// Make a channel to pass errors back
errChan := make(chan error)
attachStdout := true
attachStderr := true
attachStdin := true
if streams != nil {
attachStdout = streams.Stdout
attachStderr = streams.Stderr
attachStdin = streams.Stdin
}
// Next, STDIN. Avoid entirely if attachStdin unset.
if attachStdin {
go func() {
logrus.Debugf("Beginning STDIN copy")
_, err := utils.CopyDetachable(conn, httpBuf, detachKeys)
logrus.Debugf("STDIN copy completed")
errChan <- err
}()
}
// 4: send start message to child
if err := writeConmonPipeData(pipes.startPipe); err != nil {
return err
}
// Handle STDOUT/STDERR *after* start message is sent
go func() {
var err error
if isTerminal {
// Hack: return immediately if attachStdout not set to
// emulate Docker.
// Basically, when terminal is set, STDERR goes nowhere.
// Everything goes over STDOUT.
// Therefore, if not attaching STDOUT - we'll never copy
// anything from here.
logrus.Debugf("Performing terminal HTTP attach for container %s", c.ID())
if attachStdout {
err = httpAttachTerminalCopy(conn, httpBuf, c.ID())
}
} else {
logrus.Debugf("Performing non-terminal HTTP attach for container %s", c.ID())
err = httpAttachNonTerminalCopy(conn, httpBuf, c.ID(), attachStdin, attachStdout, attachStderr)
}
errChan <- err
logrus.Debugf("STDOUT/ERR copy completed")
}()
if cancel != nil {
select {
case err := <-errChan:
return err
case <-cancel:
return nil
}
} else {
var connErr error = <-errChan
return connErr
}
}

View File

@@ -130,6 +130,11 @@ func (r *MissingRuntime) ExecContainerHTTP(ctr *Container, sessionID string, opt
return -1, nil, r.printError()
}
// ExecContainerDetached is not available as the runtime is missing
func (r *MissingRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
return -1, r.printError()
}
// ExecAttachResize is not available as the runtime is missing.
func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
return r.printError()

View File

@@ -390,6 +390,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
logrus.Debugf("Removing container %s", c.ID())
// We need to lock the pod before we lock the container.
// To avoid races around removing a container and the pod it is in.
// Don't need to do this in pod removal case - we're evicting the entire

View File

@@ -242,7 +242,6 @@ type ExecOptions struct {
Latest bool
PreserveFDs uint
Privileged bool
Streams define.AttachStreams
Tty bool
User string
WorkDir string
@@ -311,6 +310,7 @@ type ContainerRunReport struct {
// cleanup command
type ContainerCleanupOptions struct {
All bool
Exec string
Latest bool
Remove bool
RemoveImage bool

View File

@ -19,7 +19,8 @@ type ContainerEngine interface {
ContainerCp(ctx context.Context, source, dest string, options ContainerCpOptions) (*ContainerCpReport, error)
ContainerCreate(ctx context.Context, s *specgen.SpecGenerator) (*ContainerCreateReport, error)
ContainerDiff(ctx context.Context, nameOrId string, options DiffOptions) (*DiffReport, error)
-ContainerExec(ctx context.Context, nameOrId string, options ExecOptions) (int, error)
+ContainerExec(ctx context.Context, nameOrId string, options ExecOptions, streams define.AttachStreams) (int, error)
+ContainerExecDetached(ctx context.Context, nameOrID string, options ExecOptions) (string, error)
ContainerExists(ctx context.Context, nameOrId string) (*BoolReport, error)
ContainerExport(ctx context.Context, nameOrId string, options ContainerExportOptions) error
ContainerInit(ctx context.Context, namesOrIds []string, options ContainerInitOptions) ([]*ContainerInitReport, error)
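
Attached exec now takes its streams explicitly, while detached exec returns only the new session's ID. A rough caller sketch against this interface (the helper name, container name, and wiring are assumptions):

package example

import (
	"context"
	"fmt"

	"github.com/containers/libpod/pkg/domain/entities"
)

// startDetachedTop launches `top` in a detached exec session and
// returns the session ID that `podman container cleanup --exec`
// will later receive.
func startDetachedTop(ctx context.Context, engine entities.ContainerEngine, ctrName string) (string, error) {
	id, err := engine.ContainerExecDetached(ctx, ctrName, entities.ExecOptions{
		Cmd: []string{"top"},
		Tty: true,
	})
	if err != nil {
		return "", err
	}
	fmt.Println("detached exec session:", id)
	return id, nil
}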

View File

@ -536,7 +536,22 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string,
return nil
}

-func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) {
+func makeExecConfig(options entities.ExecOptions) *libpod.ExecConfig {
+execConfig := new(libpod.ExecConfig)
+execConfig.Command = options.Cmd
+execConfig.Terminal = options.Tty
+execConfig.Privileged = options.Privileged
+execConfig.Environment = options.Envs
+execConfig.User = options.User
+execConfig.WorkDir = options.WorkDir
+execConfig.DetachKeys = &options.DetachKeys
+execConfig.PreserveFDs = options.PreserveFDs
+execConfig.AttachStdin = options.Interactive
+return execConfig
+}
+
+func checkExecPreserveFDs(options entities.ExecOptions) (int, error) {
ec := define.ExecErrorCodeGeneric
if options.PreserveFDs > 0 {
entries, err := ioutil.ReadDir("/proc/self/fd")
@ -559,15 +574,66 @@ func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, o
}
}
}
+return ec, nil
+}
+
+func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
+ec, err := checkExecPreserveFDs(options)
+if err != nil {
+return ec, err
+}
ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
if err != nil {
return ec, err
}
ctr := ctrs[0]
-ec, err = terminal.ExecAttachCtr(ctx, ctr, options.Tty, options.Privileged, options.Envs, options.Cmd, options.User, options.WorkDir, &options.Streams, options.PreserveFDs, options.DetachKeys)
+execConfig := makeExecConfig(options)
+ec, err = terminal.ExecAttachCtr(ctx, ctr, execConfig, &streams)
return define.TranslateExecErrorToExitCode(ec, err), err
}
+
+func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrId string, options entities.ExecOptions) (string, error) {
+_, err := checkExecPreserveFDs(options)
+if err != nil {
+return "", err
+}
+ctrs, err := getContainersByContext(false, options.Latest, []string{nameOrId}, ic.Libpod)
+if err != nil {
+return "", err
+}
+ctr := ctrs[0]
+
+execConfig := makeExecConfig(options)
+
+// Make an exit command
+storageConfig := ic.Libpod.StorageConfig()
+runtimeConfig, err := ic.Libpod.GetConfig()
+if err != nil {
+return "", errors.Wrapf(err, "error retrieving Libpod configuration to build exec exit command")
+}
+podmanPath, err := os.Executable()
+if err != nil {
+return "", errors.Wrapf(err, "error retrieving executable to build exec exit command")
+}
+// TODO: Add some ability to toggle syslog
+exitCommandArgs := generate.CreateExitCommandArgs(storageConfig, runtimeConfig, podmanPath, false, true, true)
+execConfig.ExitCommand = exitCommandArgs
+
+// Create and start the exec session
+id, err := ctr.ExecCreate(execConfig)
+if err != nil {
+return "", err
+}
+// TODO: we should try and retrieve exit code if this fails.
+if err := ctr.ExecStart(id); err != nil {
+return "", err
+}
+return id, nil
+}
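
The TODO above leaves the failed-start path unhandled: a session that never starts lingers in the database until something removes it. One possible shape for that cleanup, as a sketch using only the session APIs this patch already exposes (whether force-removal is appropriate here is an assumption):

package example

import (
	"github.com/containers/libpod/libpod"
	"github.com/sirupsen/logrus"
)

// startOrRemove starts a freshly created exec session and, on failure,
// removes the session so it does not leak into container state.
func startOrRemove(ctr *libpod.Container, id string) error {
	if err := ctr.ExecStart(id); err != nil {
		if rmErr := ctr.ExecRemove(id, true); rmErr != nil {
			logrus.Errorf("Error removing failed exec session %s: %v", id, rmErr)
		}
		return err
	}
	return nil
}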

func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []string, options entities.ContainerStartOptions) ([]*entities.ContainerStartReport, error) {
var reports []*entities.ContainerStartReport
var exitCode = define.ExecErrorCodeGeneric
@ -836,6 +902,20 @@ func (ic *ContainerEngine) ContainerCleanup(ctx context.Context, namesOrIds []st
for _, ctr := range ctrs {
var err error
report := entities.ContainerCleanupReport{Id: ctr.ID()}
+
+if options.Exec != "" {
+if options.Remove {
+if err := ctr.ExecRemove(options.Exec, false); err != nil {
+return nil, err
+}
+} else {
+if err := ctr.ExecCleanup(options.Exec); err != nil {
+return nil, err
+}
+}
+
+return []*entities.ContainerCleanupReport{}, nil
+}
+
if options.Remove {
err = ic.Libpod.RemoveContainer(ctx, ctr, false, true)
if err != nil {

View File

@ -15,13 +15,13 @@ import (
)

// ExecAttachCtr execs and attaches to a container
-func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *define.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
+func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpod.ExecConfig, streams *define.AttachStreams) (int, error) {
resize := make(chan remotecommand.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))

// Check if we are attached to a terminal. If we are, generate resize
// events, and set the terminal to raw mode
-if haveTerminal && tty {
+if haveTerminal && execConfig.Terminal {
cancel, oldTermState, err := handleTerminalAttach(ctx, resize)
if err != nil {
return -1, err
@ -34,16 +34,6 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged b
}()
}

-execConfig := new(libpod.ExecConfig)
-execConfig.Command = cmd
-execConfig.Terminal = tty
-execConfig.Privileged = privileged
-execConfig.Environment = env
-execConfig.User = user
-execConfig.WorkDir = workDir
-execConfig.DetachKeys = &detachKeys
-execConfig.PreserveFDs = preserveFDs
-
return ctr.Exec(execConfig, streams, resize)
}
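
handleTerminalAttach is defined elsewhere in this package; conceptually it does something like the following simplified sketch (using x/term and SIGWINCH rather than the actual implementation, with an illustrative resize payload instead of remotecommand.TerminalSize):

package example

import (
	"os"
	"os/signal"
	"syscall"

	"golang.org/x/term"
)

// rawWithResize puts stdin into raw mode and forwards terminal size
// changes over a channel until stop is closed. It returns a restore
// function that undoes raw mode.
func rawWithResize(resize chan<- [2]int, stop <-chan struct{}) (restore func() error, err error) {
	fd := int(os.Stdin.Fd())
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		return nil, err
	}
	winch := make(chan os.Signal, 1)
	signal.Notify(winch, syscall.SIGWINCH)
	go func() {
		for {
			select {
			case <-stop:
				signal.Stop(winch)
				return
			case <-winch:
				if w, h, err := term.GetSize(fd); err == nil {
					resize <- [2]int{w, h}
				}
			}
		}
	}()
	return func() error { return term.Restore(fd, oldState) }, nil
}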

View File

@ -330,10 +330,14 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrId string,
return containers.Attach(ic.ClientCxt, nameOrId, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil) return containers.Attach(ic.ClientCxt, nameOrId, &options.DetachKeys, nil, bindings.PTrue, options.Stdin, options.Stdout, options.Stderr, nil)
} }
func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions) (int, error) { func (ic *ContainerEngine) ContainerExec(ctx context.Context, nameOrId string, options entities.ExecOptions, streams define.AttachStreams) (int, error) {
return 125, errors.New("not implemented") return 125, errors.New("not implemented")
} }
func (ic *ContainerEngine) ContainerExecDetached(ctx context.Context, nameOrID string, options entities.ExecOptions) (string, error) {
return "", errors.New("not implemented")
}
func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, output, errput *os.File) error { //nolint func startAndAttach(ic *ContainerEngine, name string, detachKeys *string, input, output, errput *os.File) error { //nolint
attachErr := make(chan error) attachErr := make(chan error)
attachReady := make(chan bool) attachReady := make(chan bool)

View File

@ -111,7 +111,8 @@ func MakeContainer(ctx context.Context, rt *libpod.Runtime, s *specgen.SpecGener
if err != nil {
return nil, err
}

-options = append(options, createExitCommandOption(s, rt.StorageConfig(), rtc, podmanPath))
+// TODO: Enable syslog support - we'll need to put this in SpecGen.
+options = append(options, libpod.WithExitCommand(CreateExitCommandArgs(rt.StorageConfig(), rtc, podmanPath, false, s.Remove, false)))

runtimeSpec, err := SpecGenToOCI(ctx, s, rt, rtc, newImage, finalMounts)
if err != nil {
@ -228,7 +229,7 @@ func createContainerOptions(ctx context.Context, rt *libpod.Runtime, s *specgen.
return options, nil
}

-func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.StoreOptions, config *config.Config, podmanPath string) libpod.CtrCreateOption {
+func CreateExitCommandArgs(storageConfig storage.StoreOptions, config *config.Config, podmanPath string, syslog, rm bool, exec bool) []string {
// We need a cleanup process for containers in the current model.
// But we can't assume that the caller is Podman - it could be another
// user of the API.
@ -255,14 +256,18 @@ func createExitCommandOption(s *specgen.SpecGenerator, storageConfig storage.Sto
command = append(command, []string{"--events-backend", config.Engine.EventsLogger}...)
}

-// TODO Mheon wants to leave this for now
-//if s.sys {
-//	command = append(command, "--syslog", "true")
-//}
+if syslog {
+command = append(command, "--syslog", "true")
+}

command = append(command, []string{"container", "cleanup"}...)

-if s.Remove {
+if rm {
command = append(command, "--rm")
}

-return libpod.WithExitCommand(command)
+if exec {
+command = append(command, "--exec")
+}
+
+return command
}
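
For a detached exec session, ContainerExecDetached above calls this with syslog=false, rm=true, exec=true, so the generated exit command looks roughly like the sketch below. The storage paths are assumed values from a typical root install, and the global flags actually emitted depend on the storage and engine configuration; libpod appends the exec-session ID and the container ID when the command runs, completing the trailing --exec flag and the positional container argument.

package example

// exitCommand sketches the argv CreateExitCommandArgs might return for
// a detached exec session. At exit time this becomes, roughly:
//
//   podman ... container cleanup --rm --exec <session ID> <container ID>
var exitCommand = []string{
	"/usr/bin/podman",
	"--root", "/var/lib/containers/storage",
	"--runroot", "/run/containers/storage",
	"container", "cleanup",
	"--rm",
	"--exec",
}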

View File

@ -283,4 +283,31 @@ var _ = Describe("Podman exec", func() {
Expect(exec.ExitCode()).To(Equal(0))
Expect(strings.Contains(exec.OutputToString(), fmt.Sprintf("%s(%s)", gid, groupName))).To(BeTrue())
})
It("podman exec --detach", func() {
ctrName := "testctr"
ctr := podmanTest.Podman([]string{"run", "-t", "-i", "-d", "--name", ctrName, ALPINE, "top"})
ctr.WaitWithDefaultTimeout()
Expect(ctr.ExitCode()).To(Equal(0))
exec1 := podmanTest.Podman([]string{"exec", "-t", "-i", "-d", ctrName, "top"})
exec1.WaitWithDefaultTimeout()
Expect(ctr.ExitCode()).To(Equal(0))
data := podmanTest.InspectContainer(ctrName)
Expect(len(data)).To(Equal(1))
Expect(len(data[0].ExecIDs)).To(Equal(1))
Expect(strings.Contains(exec1.OutputToString(), data[0].ExecIDs[0])).To(BeTrue())
exec2 := podmanTest.Podman([]string{"exec", "-t", "-i", ctrName, "ps", "-a"})
exec2.WaitWithDefaultTimeout()
Expect(ctr.ExitCode()).To(Equal(0))
Expect(strings.Count(exec2.OutputToString(), "top")).To(Equal(2))
// Ensure that stop with a running detached exec session is
// clean.
stop := podmanTest.Podman([]string{"stop", ctrName})
stop.WaitWithDefaultTimeout()
Expect(stop.ExitCode()).To(Equal(0))
})
}) })