Add structure for new exec session tracking to DB

As part of the rework of exec sessions, we need to track them
independently of containers. In the new API, we need to be able
to fetch them by their ID, regardless of what container they are
associated with. Unfortunately, our existing exec sessions are
tied to individual containers; there's no way to tell which
container a session belongs to, so retrieving a single session
means walking every exec session of every container.

This adds to the database a mapping from each exec session ID to
the container it is associated with. The sessions themselves are
still stored in the container's state.

Exec-related APIs have been restructured to work with the new
database representation. The originally monolithic API has been
split into a number of smaller calls to allow more fine-grained
control of the session lifecycle. Support for legacy exec sessions
has been retained, but in a deprecated fashion; we should remove
it in a few releases.

Signed-off-by: Matthew Heon <matthew.heon@pm.me>
Author: Matthew Heon
Date: 2020-02-04 16:56:07 -05:00
Parent: f138405b46
Commit: 118e78c5d6
19 changed files with 1430 additions and 489 deletions

View File

@ -41,6 +41,8 @@ type BoltState struct {
// containing the path to the container's network namespace, a dependencies
// bucket containing the container's dependencies, and an optional pod key
// containing the ID of the pod the container is joined to.
// After updates to include exec sessions, it may also include an exec bucket
// with the IDs of exec sessions currently in use by the container.
// - allCtrsBkt: Map of ID to name containing only containers. Used for
// container lookup operations.
// - podBkt: Contains a sub-bucket for each pod in the state.
@ -49,6 +51,10 @@ type BoltState struct {
// containers in the pod.
// - allPodsBkt: Map of ID to name containing only pods. Used for pod lookup
// operations.
// - execBkt: Map of exec session ID to the ID of the container the session
// belongs to. Used to resolve exec session IDs to containers; the sessions
// themselves are still stored in the owning container's bucket.
// - runtimeConfigBkt: Contains configuration of the libpod instance that
// initially created the database. This must match for any further instances
// that access the database, to ensure that state mismatches with
@ -86,6 +92,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
allPodsBkt,
volBkt,
allVolsBkt,
execBkt,
runtimeConfigBkt,
}
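// Sketch: with the layout above, resolving a session ID to its owning
// container is a single Get in the exec bucket, mirroring GetExecSession
// below. Hypothetical standalone helper, not part of this commit; assumes
// an open *bolt.DB on the state database:
func exampleLookupExecSessionCtr(db *bolt.DB, sessionID string) (string, error) {
	ctrID := ""
	err := db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket([]byte("exec"))
		if bkt == nil {
			return errors.New("exec bucket not found")
		}
		// Values in the exec bucket are the owning container's ID.
		ctr := bkt.Get([]byte(sessionID))
		if ctr == nil {
			return errors.New("no such exec session")
		}
		ctrID = string(ctr)
		return nil
	})
	return ctrID, err
}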
@ -171,6 +178,11 @@ func (s *BoltState) Refresh() error {
return err
}
execBucket, err := getExecBucket(tx)
if err != nil {
return err
}
// Iterate through all IDs. Check if they are containers.
// If they are, unmarshal their state, and then clear
// PID, mountpoint, and state for all of them
@ -245,6 +257,26 @@ func (s *BoltState) Refresh() error {
return errors.Wrapf(err, "error updating state for container %s in DB", string(id))
}
// Delete all exec sessions, if there are any
ctrExecBkt := ctrBkt.Bucket(execBkt)
if ctrExecBkt != nil {
// Can't delete in a ForEach, so build a list of
// what to remove then remove.
toRemove := []string{}
err = ctrExecBkt.ForEach(func(id, unused []byte) error {
toRemove = append(toRemove, string(id))
return nil
})
if err != nil {
return err
}
for _, execId := range toRemove {
if err := ctrExecBkt.Delete([]byte(execId)); err != nil {
return errors.Wrapf(err, "error removing exec session %s from container %s", execId, string(id))
}
}
}
return nil
})
if err != nil {
@ -285,7 +317,30 @@ func (s *BoltState) Refresh() error {
return nil
})
if err != nil {
return err
}
// Now refresh exec sessions.
// We want to remove them all, but ForEach can't modify buckets,
// so we have to make a list of what to operate on, then do the
// work.
toRemoveExec := []string{}
err = execBucket.ForEach(func(id, unused []byte) error {
toRemoveExec = append(toRemoveExec, string(id))
return nil
})
if err != nil {
return err
}
for _, execSession := range toRemoveExec {
if err := execBucket.Delete([]byte(execSession)); err != nil {
return errors.Wrapf(err, "error deleting exec session %s registry from database", execSession)
}
}
return nil
})
return err
}
@ -895,6 +950,287 @@ func (s *BoltState) GetContainerConfig(id string) (*ContainerConfig, error) {
return config, nil
}
// AddExecSession adds an exec session to the state.
func (s *BoltState) AddExecSession(ctr *Container, session *ExecSession) error {
if !s.valid {
return define.ErrDBClosed
}
if !ctr.valid {
return define.ErrCtrRemoved
}
db, err := s.getDBCon()
if err != nil {
return err
}
defer s.deferredCloseDBCon(db)
ctrID := []byte(ctr.ID())
sessionID := []byte(session.ID())
err = db.Update(func(tx *bolt.Tx) error {
execBucket, err := getExecBucket(tx)
if err != nil {
return err
}
ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
}
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not present in the database", ctr.ID())
}
ctrExecSessionBucket, err := dbCtr.CreateBucketIfNotExists(execBkt)
if err != nil {
return errors.Wrapf(err, "error creating exec sessions bucket for container %s", ctr.ID())
}
execExists := execBucket.Get(sessionID)
if execExists != nil {
return errors.Wrapf(define.ErrExecSessionExists, "an exec session with ID %s already exists", session.ID())
}
if err := execBucket.Put(sessionID, ctrID); err != nil {
return errors.Wrapf(err, "error adding exec session %s to DB", session.ID())
}
if err := ctrExecSessionBucket.Put(sessionID, ctrID); err != nil {
return errors.Wrapf(err, "error adding exec session %s to container %s in DB", session.ID(), ctr.ID())
}
return nil
})
return err
}
// GetExecSession returns the ID of the container an exec session is associated
// with.
func (s *BoltState) GetExecSession(id string) (string, error) {
if !s.valid {
return "", define.ErrDBClosed
}
if id == "" {
return "", define.ErrEmptyID
}
db, err := s.getDBCon()
if err != nil {
return "", err
}
defer s.deferredCloseDBCon(db)
ctrID := ""
err = db.View(func(tx *bolt.Tx) error {
execBucket, err := getExecBucket(tx)
if err != nil {
return err
}
ctr := execBucket.Get([]byte(id))
if ctr == nil {
return errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found", id)
}
ctrID = string(ctr)
return nil
})
return ctrID, err
}
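// Sketch: a caller holding only a session ID can resolve it to the full
// session struct by chaining GetExecSession with Container.ExecSession.
// Hypothetical runtime-level helper, not part of this commit:
func (r *Runtime) exampleLookupExecSession(sessionID string) (*ExecSession, error) {
	ctrID, err := r.state.GetExecSession(sessionID)
	if err != nil {
		return nil, err
	}
	ctr, err := r.state.Container(ctrID)
	if err != nil {
		return nil, err
	}
	return ctr.ExecSession(sessionID)
}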
// RemoveExecSession removes references to the given exec session in the
// database.
func (s *BoltState) RemoveExecSession(session *ExecSession) error {
if !s.valid {
return define.ErrDBClosed
}
db, err := s.getDBCon()
if err != nil {
return err
}
defer s.deferredCloseDBCon(db)
sessionID := []byte(session.ID())
containerID := []byte(session.ContainerID())
err = db.Update(func(tx *bolt.Tx) error {
execBucket, err := getExecBucket(tx)
if err != nil {
return err
}
ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
}
sessionExists := execBucket.Get(sessionID)
if sessionExists == nil {
return define.ErrNoSuchExecSession
}
// Check that container ID matches
if string(sessionExists) != session.ContainerID() {
return errors.Wrapf(define.ErrInternal, "database inconsistency: exec session %s points to container %s in state but %s in database", session.ID(), session.ContainerID(), string(sessionExists))
}
if err := execBucket.Delete(sessionID); err != nil {
return errors.Wrapf(err, "error removing exec session %s from database", session.ID())
}
dbCtr := ctrBucket.Bucket(containerID)
if dbCtr == nil {
// State is inconsistent. We refer to a container that
// is no longer in the state.
// Return without error, to attempt to recover.
return nil
}
ctrExecBucket := dbCtr.Bucket(execBkt)
if ctrExecBucket == nil {
// Again, state is inconsistent. We should have an exec
// bucket, and it should have this session.
// Again, nothing we can do, so proceed and try to
// recover.
return nil
}
ctrSessionExists := ctrExecBucket.Get(sessionID)
if ctrSessionExists != nil {
if err := ctrExecBucket.Delete(sessionID); err != nil {
return errors.Wrapf(err, "error removing exec session %s from container %s in database", session.ID(), session.ContainerID())
}
}
return nil
})
return err
}
// GetContainerExecSessions retrieves the IDs of all exec sessions running in a
// container that the database is aware of (i.e., those added via AddExecSession).
func (s *BoltState) GetContainerExecSessions(ctr *Container) ([]string, error) {
if !s.valid {
return nil, define.ErrDBClosed
}
if !ctr.valid {
return nil, define.ErrCtrRemoved
}
db, err := s.getDBCon()
if err != nil {
return nil, err
}
defer s.deferredCloseDBCon(db)
ctrID := []byte(ctr.ID())
sessions := []string{}
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
}
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
return define.ErrNoSuchCtr
}
ctrExecSessions := dbCtr.Bucket(execBkt)
if ctrExecSessions == nil {
return nil
}
return ctrExecSessions.ForEach(func(id, unused []byte) error {
sessions = append(sessions, string(id))
return nil
})
})
if err != nil {
return nil, err
}
return sessions, nil
}
// RemoveContainerExecSessions removes all exec sessions attached to a given
// container.
func (s *BoltState) RemoveContainerExecSessions(ctr *Container) error {
if !s.valid {
return define.ErrDBClosed
}
if !ctr.valid {
return define.ErrCtrRemoved
}
db, err := s.getDBCon()
if err != nil {
return err
}
defer s.deferredCloseDBCon(db)
ctrID := []byte(ctr.ID())
sessions := []string{}
err = db.Update(func(tx *bolt.Tx) error {
execBucket, err := getExecBucket(tx)
if err != nil {
return err
}
ctrBucket, err := getCtrBucket(tx)
if err != nil {
return err
}
dbCtr := ctrBucket.Bucket(ctrID)
if dbCtr == nil {
ctr.valid = false
return define.ErrNoSuchCtr
}
ctrExecSessions := dbCtr.Bucket(execBkt)
if ctrExecSessions == nil {
return nil
}
err = ctrExecSessions.ForEach(func(id, unused []byte) error {
sessions = append(sessions, string(id))
return nil
})
if err != nil {
return err
}
for _, session := range sessions {
if err := ctrExecSessions.Delete([]byte(session)); err != nil {
return errors.Wrapf(err, "error removing container %s exec session %s from database", ctr.ID(), session)
}
// Check if the session exists in the global table
// before removing. It should, but in cases where the DB
// has become inconsistent, we should try and proceed
// so we can recover.
sessionExists := execBucket.Get([]byte(session))
if sessionExists == nil {
continue
}
if string(sessionExists) != ctr.ID() {
return errors.Wrapf(define.ErrInternal, "database mismatch: exec session %s is associated with containers %s and %s", session, ctr.ID(), string(sessionExists))
}
if err := execBucket.Delete([]byte(session)); err != nil {
return errors.Wrapf(err, "error removing container %s exec session %s from exec sessions", ctr.ID(), session)
}
}
return nil
})
return err
}
// RewriteContainerConfig rewrites a container's configuration.
// WARNING: This function is DANGEROUS. Do not use without reading the full
// comment on this function in state.go.

View File

@ -24,6 +24,7 @@ const (
allPodsName = "allPods"
volName = "vol"
allVolsName = "allVolumes"
execName = "exec"
runtimeConfigName = "runtime-config"
configName = "config"
@ -54,6 +55,7 @@ var (
allPodsBkt = []byte(allPodsName)
volBkt = []byte(volName)
allVolsBkt = []byte(allVolsName)
execBkt = []byte(execName)
runtimeConfigBkt = []byte(runtimeConfigName)
configKey = []byte(configName)
@ -339,6 +341,14 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
return bkt, nil
}
func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(execBkt)
if bkt == nil {
return nil, errors.Wrapf(define.ErrDBBadConfig, "exec bucket not found in DB")
}
return bkt, nil
}
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
bkt := tx.Bucket(runtimeConfigBkt)
if bkt == nil {
@ -787,6 +797,23 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
}
}
// Does the container have exec sessions?
ctrExecSessionsBkt := ctrExists.Bucket(execBkt)
if ctrExecSessionsBkt != nil {
sessions := []string{}
err = ctrExecSessionsBkt.ForEach(func(id, value []byte) error {
sessions = append(sessions, string(id))
return nil
})
if err != nil {
return err
}
if len(sessions) > 0 {
return errors.Wrapf(define.ErrExecSessionExists, "container %s has active exec sessions: %s", ctr.ID(), strings.Join(sessions, ", "))
}
}
// Does the container have dependencies?
ctrDepsBkt := ctrExists.Bucket(dependenciesBkt)
if ctrDepsBkt == nil {

View File

@ -58,13 +58,11 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
PID: 1234,
ExecSessions: map[string]*ExecSession{
"abcd": {
ID: "1",
Command: []string{"2", "3"},
Id: "1",
PID: 9876,
},
"ef01": {
ID: "5",
Command: []string{"hello", "world"},
Id: "5",
PID: 46765,
},
},

View File

@ -181,9 +181,13 @@ type ContainerState struct {
PID int `json:"pid,omitempty"`
// ConmonPID is the PID of the container's conmon
ConmonPID int `json:"conmonPid,omitempty"`
// ExecSessions contains active exec sessions for container
// Exec session ID is mapped to PID of exec process
ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
// ExecSessions contains all exec sessions that are associated with this
// container.
ExecSessions map[string]*ExecSession `json:"newExecSessions,omitempty"`
// LegacyExecSessions are legacy exec sessions from older versions of
// Podman.
// These are DEPRECATED and will be removed in a future release.
LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"`
// NetworkStatus contains the configuration results for all networks
// the pod is attached to. Only populated if we created a network
// namespace for the container, and the network namespace is currently
@ -214,54 +218,6 @@ type ContainerState struct {
containerPlatformState
}
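// Note the JSON tags above: new sessions serialize under "newExecSessions",
// while the deprecated field keeps the old "execSessions" key, so states
// written by older Podman versions unmarshal into LegacyExecSessions without
// migration. A sketch of that behavior (hypothetical example, assumes
// encoding/json):
func exampleLegacyStateDecode() error {
	// State written by an older Podman: sessions under "execSessions".
	old := []byte(`{"execSessions": {"abcd": {"id": "1", "pid": 9876}}}`)
	state := new(ContainerState)
	if err := json.Unmarshal(old, state); err != nil {
		return err
	}
	// The old key populates the legacy map, not the new one:
	// len(state.LegacyExecSessions) == 1, len(state.ExecSessions) == 0.
	return nil
}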
// ExecSession contains information on an active exec session
type ExecSession struct {
// ID is the ID of the exec session.
ID string `json:"id"`
// Command is the command that will be invoked in the exec session.
Command []string `json:"command"`
// State is the state of the exec session.
State define.ContainerExecStatus `json:"state"`
// PID is the PID of the process created by the exec session.
PID int `json:"pid,omitempty"`
// Terminal is whether the exec session will allocate a pseudoterminal.
Terminal bool `json:"terminal,omitempty"`
// AttachStdin is whether the STDIN stream will be forwarded to the exec
// session's first process when attaching. Only available if Terminal is
// false.
AttachStdin bool `json:"attachStdin,omitempty"`
// AttachStdout is whether the STDOUT stream will be forwarded to the
// exec session's first process when attaching. Only available if
// Terminal is false.
AttachStdout bool `json:"attachStdout,omitempty"`
// AttachStderr is whether the STDERR stream will be forwarded to the
// exec session's first process when attaching. Only available if
// Terminal is false.
AttachStderr bool `json:"attachStderr,omitempty"`
// DetachKeys are keys that will be used to detach from the exec
// session. Here, nil will use the default detach keys, where a pointer
// to the empty string ("") will disable detaching via detach keys.
DetachKeys *string `json:"detachKeys,omitempty"`
// Environment is a set of environment variables that will be set for
// the first process started by the exec session.
Environment map[string]string `json:"environment,omitempty"`
// Privileged is whether the exec session will be privileged - that is,
// will be granted additional capabilities.
Privileged bool `json:"privileged,omitempty"`
// User is the user the exec session will be run as.
User string `json:"user,omitempty"`
// WorkDir is the working directory for the first process that will be
// launched by the exec session.
WorkDir string `json:"workDir,omitempty"`
// PreserveFDs indicates that a number of extra FDs from the process
// running libpod will be passed into the container. These are assumed
// to begin at 3 (immediately after the standard streams). The number
// given is the number that will be passed into the exec session,
// starting at 3.
PreserveFDs uint `json:"preserveFds,omitempty"`
}
// ContainerConfig contains all information that was used to create the
// container. It may not be changed once created.
// It is stored, read-only, on disk
@ -973,13 +929,13 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) {
session, ok := c.state.ExecSessions[id]
if !ok {
return nil, errors.Wrapf(define.ErrNoSuchCtr, "no exec session with ID %s found in container %s", id, c.ID())
return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID())
}
returnSession := new(ExecSession)
returnSession.ID = session.ID
returnSession.Command = session.Command
returnSession.PID = session.PID
if err := JSONDeepCopy(session, returnSession); err != nil {
return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID())
}
return returnSession, nil
}

View File

@ -9,10 +9,8 @@ import (
"os"
"time"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/storage/pkg/stringid"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -215,142 +213,6 @@ func (c *Container) Kill(signal uint) error {
return c.save()
}
// Exec starts a new process inside the container
// Returns an exit code and an error. If Exec was not able to exec in the container before a failure, an exit code of define.ExecErrorCodeCannotInvoke is returned.
// If another generic error happens, an exit code of define.ExecErrorCodeGeneric is returned.
// Sometimes, the $RUNTIME exec call errors, and if that is the case, the exit code is the exit code of the call.
// Otherwise, the exit code will be the exit code of the executed call inside of the container.
// TODO investigate allowing exec without attaching
func (c *Container) Exec(tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *AttachStreams, preserveFDs uint, resize chan remotecommand.TerminalSize, detachKeys string) (int, error) {
var capList []string
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return define.ExecErrorCodeCannotInvoke, err
}
}
if c.state.State != define.ContainerStateRunning {
return define.ExecErrorCodeCannotInvoke, errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
}
if privileged || c.config.Privileged {
capList = capabilities.AllCapabilities()
}
// Generate exec session ID
// Ensure we don't conflict with an existing session ID
sessionID := stringid.GenerateNonCryptoID()
found := true
// This really ought to be a do-while, but Go doesn't have those...
for found {
found = false
for id := range c.state.ExecSessions {
if id == sessionID {
found = true
break
}
}
if found {
sessionID = stringid.GenerateNonCryptoID()
}
}
logrus.Debugf("Creating new exec session in container %s with session id %s", c.ID(), sessionID)
if err := c.createExecBundle(sessionID); err != nil {
return define.ExecErrorCodeCannotInvoke, err
}
defer func() {
// cleanup exec bundle
if err := c.cleanupExecBundle(sessionID); err != nil {
logrus.Errorf("Error removing exec session %s bundle path for container %s: %v", sessionID, c.ID(), err)
}
}()
opts := new(ExecOptions)
opts.Cmd = cmd
opts.CapAdd = capList
opts.Env = env
opts.Terminal = tty
opts.Cwd = workDir
opts.User = user
opts.Streams = streams
opts.PreserveFDs = preserveFDs
opts.Resize = resize
opts.DetachKeys = detachKeys
pid, attachChan, err := c.ociRuntime.ExecContainer(c, sessionID, opts)
if err != nil {
ec := define.ExecErrorCodeGeneric
// Conmon will pass a non-zero exit code from the runtime as a pid here.
// we differentiate a pid with an exit code by sending it as negative, so reverse
// that change and return the exit code the runtime failed with.
if pid < 0 {
ec = -1 * pid
}
return ec, err
}
// We have the PID, add it to state
if c.state.ExecSessions == nil {
c.state.ExecSessions = make(map[string]*ExecSession)
}
session := new(ExecSession)
session.ID = sessionID
session.Command = cmd
session.PID = pid
c.state.ExecSessions[sessionID] = session
if err := c.save(); err != nil {
// Now we have a PID but we can't save it in the DB
// TODO handle this better
return define.ExecErrorCodeGeneric, errors.Wrapf(err, "error saving exec sessions %s for container %s", sessionID, c.ID())
}
c.newContainerEvent(events.Exec)
logrus.Debugf("Successfully started exec session %s in container %s", sessionID, c.ID())
// Unlock so other processes can use the container
if !c.batched {
c.lock.Unlock()
}
lastErr := <-attachChan
exitCode, err := c.readExecExitCode(sessionID)
if err != nil {
if lastErr != nil {
logrus.Errorf(lastErr.Error())
}
lastErr = err
}
if exitCode != 0 {
if lastErr != nil {
logrus.Errorf(lastErr.Error())
}
lastErr = errors.Wrapf(define.ErrOCIRuntime, "non zero exit code: %d", exitCode)
}
// Lock again
if !c.batched {
c.lock.Lock()
}
// Sync the container again to pick up changes in state
if err := c.syncContainer(); err != nil {
logrus.Errorf("error syncing container %s state to remove exec session %s", c.ID(), sessionID)
return exitCode, lastErr
}
// Remove the exec session from state
delete(c.state.ExecSessions, sessionID)
if err := c.save(); err != nil {
logrus.Errorf("Error removing exec session %s from container %s state: %v", sessionID, c.ID(), err)
}
return exitCode, lastErr
}
// AttachStreams contains streams that will be attached to the container
type AttachStreams struct {
// OutputStream will be attached to container's STDOUT
@ -493,7 +355,11 @@ func (c *Container) Unmount(force bool) error {
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
}
if len(c.state.ExecSessions) != 0 {
execSessions, err := c.getActiveExecSessions()
if err != nil {
return err
}
if len(execSessions) != 0 {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
}
return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
@ -674,15 +540,15 @@ func (c *Container) Cleanup(ctx context.Context) error {
// If we didn't restart, we perform a normal cleanup
// Reap exec sessions first.
if err := c.reapExecSessions(); err != nil {
// Check for running exec sessions
sessions, err := c.getActiveExecSessions()
if err != nil {
return err
}
// Check if we have active exec sessions after reaping.
if len(c.state.ExecSessions) != 0 {
if len(sessions) > 0 {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
}
defer c.newContainerEvent(events.Cleanup)
return c.cleanup(ctx)
}
@ -757,114 +623,11 @@ func (c *Container) Sync() error {
return nil
}
// Refresh refreshes a container's state in the database, restarting the
// container if it is running
// Refresh is DEPRECATED and REMOVED.
func (c *Container) Refresh(ctx context.Context) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
}
if c.state.State == define.ContainerStateRemoving {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot refresh containers that are being removed")
}
wasCreated := false
if c.state.State == define.ContainerStateCreated {
wasCreated = true
}
wasRunning := false
if c.state.State == define.ContainerStateRunning {
wasRunning = true
}
wasPaused := false
if c.state.State == define.ContainerStatePaused {
wasPaused = true
}
// First, unpause the container if it's paused
if c.state.State == define.ContainerStatePaused {
if err := c.unpause(); err != nil {
return err
}
}
// Next, if the container is running, stop it
if c.state.State == define.ContainerStateRunning {
if err := c.stop(c.config.StopTimeout); err != nil {
return err
}
}
// If there are active exec sessions, we need to kill them
if len(c.state.ExecSessions) > 0 {
logrus.Infof("Killing %d exec sessions in container %s. They will not be restored after refresh.",
len(c.state.ExecSessions), c.ID())
}
for _, session := range c.state.ExecSessions {
if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil {
return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID())
}
}
// If the container is in ContainerStateStopped, we need to delete it
// from the runtime and clear conmon state
if c.state.State == define.ContainerStateStopped {
if err := c.delete(ctx); err != nil {
return err
}
if err := c.removeConmonFiles(); err != nil {
return err
}
}
// Fire cleanup code one more time unconditionally to ensure we are good
// to refresh
if err := c.cleanup(ctx); err != nil {
return err
}
logrus.Debugf("Resetting state of container %s", c.ID())
// We've finished unwinding the container back to its initial state
// Now safe to refresh container state
if err := resetState(c.state); err != nil {
return errors.Wrapf(err, "error resetting state of container %s", c.ID())
}
if err := c.refresh(); err != nil {
return err
}
logrus.Debugf("Successfully refresh container %s state", c.ID())
// Initialize the container if it was created in runc
if wasCreated || wasRunning || wasPaused {
if err := c.prepare(); err != nil {
return err
}
if err := c.init(ctx, false); err != nil {
return err
}
}
// If the container was running before, start it
if wasRunning || wasPaused {
if err := c.start(); err != nil {
return err
}
}
// If the container was paused before, re-pause it
if wasPaused {
if err := c.pause(); err != nil {
return err
}
}
return nil
// This has been deprecated for a long while, and is in the process of
// being removed.
return define.ErrNotImplemented
}
// ContainerCheckpointOptions is a struct used to pass the parameters

libpod/container_exec.go (new file, 769 lines)
View File

@ -0,0 +1,769 @@
package libpod
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"time"
"github.com/containers/common/pkg/capabilities"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/client-go/tools/remotecommand"
)
// ExecConfig contains the configuration of an exec session
type ExecConfig struct {
// Command is the command that will be invoked in the exec session.
// Must not be empty.
Command []string `json:"command"`
// Terminal is whether the exec session will allocate a pseudoterminal.
Terminal bool `json:"terminal,omitempty"`
// AttachStdin is whether the STDIN stream will be forwarded to the exec
// session's first process when attaching. Only available if Terminal is
// false.
AttachStdin bool `json:"attachStdin,omitempty"`
// AttachStdout is whether the STDOUT stream will be forwarded to the
// exec session's first process when attaching. Only available if
// Terminal is false.
AttachStdout bool `json:"attachStdout,omitempty"`
// AttachStderr is whether the STDERR stream will be forwarded to the
// exec session's first process when attaching. Only available if
// Terminal is false.
AttachStderr bool `json:"attachStderr,omitempty"`
// DetachKeys are keys that will be used to detach from the exec
// session. Here, nil will use the default detach keys, where a pointer
// to the empty string ("") will disable detaching via detach keys.
DetachKeys *string `json:"detachKeys,omitempty"`
// Environment is a set of environment variables that will be set for
// the first process started by the exec session.
Environment map[string]string `json:"environment,omitempty"`
// Privileged is whether the exec session will be privileged - that is,
// will be granted additional capabilities.
Privileged bool `json:"privileged,omitempty"`
// User is the user the exec session will be run as.
// If set to "" the exec session will be started as the same user the
// container was started as.
User string `json:"user,omitempty"`
// WorkDir is the working directory for the first process that will be
// launched by the exec session.
// If set to "" the exec session will be started in / within the
// container.
WorkDir string `json:"workDir,omitempty"`
// PreserveFDs indicates that a number of extra FDs from the process
// running libpod will be passed into the container. These are assumed
// to begin at 3 (immediately after the standard streams). The number
// given is the number that will be passed into the exec session,
// starting at 3.
PreserveFDs uint `json:"preserveFds,omitempty"`
}
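// For reference, a sketch of the configuration an interactive exec (along
// the lines of podman exec -it) might build from the fields above
// (hypothetical example, not part of this commit):
var exampleInteractiveExecConfig = &ExecConfig{
	Command:  []string{"/bin/sh"},
	Terminal: true,
	// AttachStdin/Stdout/Stderr must stay false when Terminal is true;
	// ExecCreate rejects that combination.
	Environment: map[string]string{"TERM": "xterm"},
	User:        "root",
	WorkDir:     "/",
}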
// ExecSession contains information on a single exec session attached to a given
// container.
type ExecSession struct {
// Id is the ID of the exec session.
// Named somewhat strangely to not conflict with ID().
Id string `json:"id"`
// ContainerId is the ID of the container this exec session belongs to.
// Named somewhat strangely to not conflict with ContainerID().
ContainerId string `json:"containerId"`
// State is the state of the exec session.
State define.ContainerExecStatus `json:"state"`
// PID is the PID of the process created by the exec session.
PID int `json:"pid,omitempty"`
// ExitCode is the exit code of the exec session, if it has exited.
ExitCode int `json:"exitCode,omitempty"`
// Config is the configuration of this exec session.
// Cannot be empty.
Config *ExecConfig `json:"config"`
}
// ID returns the ID of an exec session.
func (e *ExecSession) ID() string {
return e.Id
}
// ContainerID returns the ID of the container this exec session was started in.
func (e *ExecSession) ContainerID() string {
return e.ContainerId
}
// legacyExecSession contains information on an active exec session. It is a
// holdover from a previous Podman version and is DEPRECATED.
type legacyExecSession struct {
ID string `json:"id"`
Command []string `json:"command"`
PID int `json:"pid"`
}
// ExecCreate creates a new exec session for the container.
// The session is not started. The ID of the new exec session will be returned.
func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return "", err
}
}
// Verify our config
if config == nil {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a configuration to ExecCreate")
}
if len(config.Command) == 0 {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session")
}
if config.Terminal && (config.AttachStdin || config.AttachStdout || config.AttachStderr) {
return "", errors.Wrapf(define.ErrInvalidArg, "cannot specify streams to attach to when exec session has a pseudoterminal")
}
// Generate an ID for our new exec session
sessionID := stringid.GenerateNonCryptoID()
found := true
// This really ought to be a do-while, but Go doesn't have those...
for found {
found = false
for id := range c.state.ExecSessions {
if id == sessionID {
found = true
break
}
}
if found {
sessionID = stringid.GenerateNonCryptoID()
}
}
// Make our new exec session
session := new(ExecSession)
session.Id = sessionID
session.ContainerId = c.ID()
session.State = define.ExecStateCreated
session.Config = new(ExecConfig)
if err := JSONDeepCopy(config, session.Config); err != nil {
return "", errors.Wrapf(err, "error copying exec configuration into exec session")
}
if c.state.ExecSessions == nil {
c.state.ExecSessions = make(map[string]*ExecSession)
}
// Need to add to container state and exec session registry
c.state.ExecSessions[session.ID()] = session
if err := c.save(); err != nil {
return "", err
}
if err := c.runtime.state.AddExecSession(c, session); err != nil {
return "", err
}
logrus.Infof("Created exec session %s in container %s", session.ID(), c.ID())
return sessionID, nil
}
// ExecStart starts an exec session in the container, but does not attach to it.
// Returns immediately upon starting the exec session.
func (c *Container) ExecStart(sessionID string) error {
// Will be implemented in part 2, migrating Start and implementing
// detached Start.
return define.ErrNotImplemented
}
// ExecStartAndAttach starts and attaches to an exec session in a container.
// TODO: Should we include detach keys in the signature to allow override?
// TODO: How do we handle AttachStdin/AttachStdout/AttachStderr?
func (c *Container) ExecStartAndAttach(sessionID string, streams *AttachStreams) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
if session.State != define.ExecStateCreated {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
// TODO: check logic here - should we set Privileged if the container is
// privileged?
var capList []string
if session.Config.Privileged || c.config.Privileged {
capList = capabilities.AllCapabilities()
}
user := c.config.User
if session.Config.User != "" {
user = session.Config.User
}
if err := c.createExecBundle(session.ID()); err != nil {
return err
}
opts := new(ExecOptions)
opts.Cmd = session.Config.Command
opts.CapAdd = capList
opts.Env = session.Config.Environment
opts.Terminal = session.Config.Terminal
opts.Cwd = session.Config.WorkDir
opts.User = user
opts.Streams = streams
opts.PreserveFDs = session.Config.PreserveFDs
opts.DetachKeys = session.Config.DetachKeys
pid, attachChan, err := c.ociRuntime.ExecContainer(c, session.ID(), opts)
if err != nil {
return err
}
c.newContainerEvent(events.Exec)
logrus.Debugf("Successfully started exec session %s in container %s", session.ID(), c.ID())
var lastErr error
// Update and save session to reflect PID/running
session.PID = pid
session.State = define.ExecStateRunning
if err := c.save(); err != nil {
lastErr = err
}
// Unlock so other processes can use the container
if !c.batched {
c.lock.Unlock()
}
tmpErr := <-attachChan
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
lastErr = tmpErr
exitCode, err := c.readExecExitCode(session.ID())
if err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
lastErr = err
}
logrus.Debugf("Container %s exec session %s completed with exit code %d", c.ID(), session.ID(), exitCode)
// Lock again
if !c.batched {
c.lock.Lock()
}
// Sync the container to pick up state changes
if err := c.syncContainer(); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), session.ID())
}
// Update status
// Since we did a syncContainer, the old session has been overwritten.
// Grab a fresh one from the database.
session, ok = c.state.ExecSessions[sessionID]
if !ok {
// Exec session already removed.
logrus.Infof("Container %s exec session %s already removed from database", c.ID(), sessionID)
return nil
}
session.State = define.ExecStateStopped
session.ExitCode = exitCode
session.PID = 0
if err := c.save(); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
lastErr = err
}
// Clean up after ourselves
if err := c.cleanupExecBundle(session.ID()); err != nil {
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
lastErr = err
}
return lastErr
}
// ExecHTTPStartAndAttach starts and performs an HTTP attach to an exec session.
func (c *Container) ExecHTTPStartAndAttach(sessionID string) error {
// Will be implemented in part 2, migrating Start.
return define.ErrNotImplemented
}
// ExecStop stops an exec session in the container.
// If a timeout is provided, it will be used; otherwise, the timeout will
// default to the stop timeout of the container.
// Cleanup will be invoked automatically once the session is stopped.
func (c *Container) ExecStop(sessionID string, timeout *uint) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
if session.State != define.ExecStateRunning {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is %q, can only stop running sessions", c.ID(), session.ID(), session.State.String())
}
logrus.Infof("Stopping container %s exec session %s", c.ID(), session.ID())
finalTimeout := c.StopTimeout()
if timeout != nil {
finalTimeout = *timeout
}
// Stop the session
if err := c.ociRuntime.ExecStopContainer(c, session.ID(), finalTimeout); err != nil {
return err
}
var cleanupErr error
// Retrieve exit code and update status
exitCode, err := c.readExecExitCode(session.ID())
if err != nil {
cleanupErr = err
}
session.ExitCode = exitCode
session.PID = 0
session.State = define.ExecStateStopped
if err := c.save(); err != nil {
if cleanupErr != nil {
logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr)
}
cleanupErr = err
}
if err := c.cleanupExecBundle(session.ID()); err != nil {
if cleanupErr != nil {
logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr)
}
cleanupErr = err
}
return cleanupErr
}
// ExecCleanup cleans up an exec session in the container, removing temporary
// files associated with it.
func (c *Container) ExecCleanup(sessionID string) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
if session.State == define.ExecStateRunning {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
}
logrus.Infof("Cleaning up container %s exec session %s", c.ID(), session.ID())
return c.cleanupExecBundle(session.ID())
}
// ExecRemove removes an exec session in the container.
// If force is given, the session will be stopped first if it is running.
func (c *Container) ExecRemove(sessionID string, force bool) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID())
// Update status of exec session if running, so we can check if it
// stopped in the meantime.
if session.State == define.ExecStateRunning {
stopped, err := c.ociRuntime.ExecUpdateStatus(c, session.ID())
if err != nil {
return err
}
if stopped {
session.State = define.ExecStateStopped
// TODO: should we retrieve exit code here?
// TODO: Might be worth saving state here.
}
}
if session.State == define.ExecStateRunning {
if !force {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is still running, cannot remove", c.ID(), session.ID())
}
// Stop the session
if err := c.ociRuntime.ExecStopContainer(c, session.ID(), c.StopTimeout()); err != nil {
return err
}
if err := c.cleanupExecBundle(session.ID()); err != nil {
return err
}
}
// First remove exec session from DB.
if err := c.runtime.state.RemoveExecSession(session); err != nil {
return err
}
// Next, remove it from the container and save state
delete(c.state.ExecSessions, sessionID)
if err := c.save(); err != nil {
return err
}
logrus.Debugf("Successfully removed container %s exec session %s", c.ID(), session.ID())
return nil
}
// ExecResize resizes the TTY of the given exec session. Only available if the
// exec session created a TTY.
func (c *Container) ExecResize(sessionID string, newSize remotecommand.TerminalSize) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
}
logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID())
if session.State != define.ExecStateRunning {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it is not running", c.ID(), session.ID())
}
return c.ociRuntime.ExecAttachResize(c, sessionID, newSize)
}
// Exec emulates the old Libpod exec API, providing a single call to create,
// run, and remove an exec session. Returns exit code and error. Exit code is
// not guaranteed to be set sanely if error is not nil.
func (c *Container) Exec(config *ExecConfig, streams *AttachStreams, resize <-chan remotecommand.TerminalSize) (int, error) {
sessionID, err := c.ExecCreate(config)
if err != nil {
return -1, err
}
if err := c.ExecStartAndAttach(sessionID, streams); err != nil {
return -1, err
}
// Start resizing if we have a resize channel.
// This goroutine is likely to leak, given that we cannot close it here.
// Not a big deal, since it should run for as long as the Podman process
// does. Could be a big deal for `podman service` but we don't need this
// API there.
// TODO: Refactor so this is closed here, before we remove the exec
// session.
if resize != nil {
go func() {
for resizeRequest := range resize {
if err := c.ExecResize(sessionID, resizeRequest); err != nil {
// Assume the exec session went down.
logrus.Warnf("Error resizing exec session %s: %v", sessionID, err)
return
}
}
}()
}
session, err := c.ExecSession(sessionID)
if err != nil {
return -1, err
}
exitCode := session.ExitCode
if err := c.ExecRemove(sessionID, false); err != nil {
return -1, err
}
if exitCode != 0 {
return exitCode, errors.Wrapf(define.ErrOCIRuntime, "exec session exited with non-zero exit code %d", exitCode)
}
return exitCode, nil
}
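// Sketch of a migrated call site (hypothetical; the execPS and healthcheck
// hunks below show the real ones). streams is an *AttachStreams; a nil
// resize channel means no TTY resizing is requested:
func exampleExecEcho(c *Container, streams *AttachStreams) error {
	config := new(ExecConfig)
	config.Command = []string{"echo", "hello"}
	exitCode, err := c.Exec(config, streams, nil)
	if err != nil {
		return err
	}
	logrus.Debugf("example exec exited with code %d", exitCode)
	return nil
}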
// cleanup an exec session after it's done
func (c *Container) cleanupExecBundle(sessionID string) error {
if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) {
return err
}
return c.ociRuntime.ExecContainerCleanup(c, sessionID)
}
// the path to a container's exec session bundle
func (c *Container) execBundlePath(sessionID string) string {
return filepath.Join(c.bundlePath(), sessionID)
}
// Get PID file path for a container's exec session
func (c *Container) execPidPath(sessionID string) string {
return filepath.Join(c.execBundlePath(sessionID), "exec_pid")
}
// the log path for an exec session
func (c *Container) execLogPath(sessionID string) string {
return filepath.Join(c.execBundlePath(sessionID), "exec_log")
}
// the socket conmon creates for an exec session
func (c *Container) execAttachSocketPath(sessionID string) (string, error) {
return c.ociRuntime.ExecAttachSocketPath(c, sessionID)
}
// execExitFileDir gets the path to the container's exit file
func (c *Container) execExitFileDir(sessionID string) string {
return filepath.Join(c.execBundlePath(sessionID), "exit")
}
// execOCILog returns the file path for the exec session's OCI log
func (c *Container) execOCILog(sessionID string) string {
if !c.ociRuntime.SupportsJSONErrors() {
return ""
}
return filepath.Join(c.execBundlePath(sessionID), "oci-log")
}
// create a bundle path and associated files for an exec session
func (c *Container) createExecBundle(sessionID string) (err error) {
bundlePath := c.execBundlePath(sessionID)
if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil {
return createErr
}
defer func() {
if err != nil {
if err2 := os.RemoveAll(bundlePath); err2 != nil {
logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2)
}
}
}()
if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil {
// The directory is allowed to exist
if !os.IsExist(err2) {
err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
}
}
return
}
// readExecExitCode reads the exit file for an exec session and returns
// the exit code
func (c *Container) readExecExitCode(sessionID string) (int, error) {
exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID())
chWait := make(chan error)
defer close(chWait)
_, err := WaitForFile(exitFile, chWait, time.Second*5)
if err != nil {
return -1, err
}
ec, err := ioutil.ReadFile(exitFile)
if err != nil {
return -1, err
}
ecInt, err := strconv.Atoi(string(ec))
if err != nil {
return -1, err
}
return ecInt, nil
}
// getExecSessionPID gets the PID of an active exec session
func (c *Container) getExecSessionPID(sessionID string) (int, error) {
session, ok := c.state.ExecSessions[sessionID]
if ok {
return session.PID, nil
}
oldSession, ok := c.state.LegacyExecSessions[sessionID]
if ok {
return oldSession.PID, nil
}
return -1, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", sessionID, c.ID())
}
// getKnownExecSessions gets a list of all exec sessions we think are running,
// but does not verify their current state.
// Outside of container_exec.go, please use getActiveExecSessions(), which
// performs further checks to return an accurate list.
func (c *Container) getKnownExecSessions() []string {
knownSessions := []string{}
// First check legacy sessions.
// TODO: This is DEPRECATED and will be removed in a future major
// release.
for sessionID := range c.state.LegacyExecSessions {
knownSessions = append(knownSessions, sessionID)
}
// Next check new exec sessions, but only if in running state
for sessionID, session := range c.state.ExecSessions {
if session.State == define.ExecStateRunning {
knownSessions = append(knownSessions, sessionID)
}
}
return knownSessions
}
// getActiveExecSessions checks if there are any active exec sessions in the
// current container. Returns an array of active exec sessions.
// Will continue through errors where possible.
// Currently handles both new and legacy, deprecated exec sessions.
func (c *Container) getActiveExecSessions() ([]string, error) {
activeSessions := []string{}
knownSessions := c.getKnownExecSessions()
// Instead of saving once per iteration, do it once at the end.
var lastErr error
needSave := false
for _, id := range knownSessions {
alive, err := c.ociRuntime.ExecUpdateStatus(c, id)
if err != nil {
if lastErr != nil {
logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
continue
}
if !alive {
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
_, isLegacy := c.state.LegacyExecSessions[id]
if isLegacy {
delete(c.state.LegacyExecSessions, id)
needSave = true
} else {
session := c.state.ExecSessions[id]
exitCode, err := c.readExecExitCode(session.ID())
if err != nil {
if lastErr != nil {
logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
session.ExitCode = exitCode
session.PID = 0
session.State = define.ExecStateStopped
needSave = true
}
} else {
activeSessions = append(activeSessions, id)
}
}
if needSave {
if err := c.save(); err != nil {
if lastErr != nil {
logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr)
}
lastErr = err
}
}
return activeSessions, lastErr
}
// removeAllExecSessions stops and removes all the container's exec sessions
func (c *Container) removeAllExecSessions() error {
knownSessions := c.getKnownExecSessions()
var lastErr error
for _, id := range knownSessions {
if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil {
if lastErr != nil {
logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
continue
}
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
}
// Delete all exec sessions
if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
if lastErr != nil {
logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
c.state.ExecSessions = nil
c.state.LegacyExecSessions = nil
if err := c.save(); err != nil {
if lastErr != nil {
logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
return lastErr
}

View File

@ -142,92 +142,6 @@ func (c *Container) exitFilePath() (string, error) {
return c.ociRuntime.ExitFilePath(c)
}
// create a bundle path and associated files for an exec session
func (c *Container) createExecBundle(sessionID string) (err error) {
bundlePath := c.execBundlePath(sessionID)
if createErr := os.MkdirAll(bundlePath, execDirPermission); createErr != nil {
return createErr
}
defer func() {
if err != nil {
if err2 := os.RemoveAll(bundlePath); err2 != nil {
logrus.Warnf("error removing exec bundle after creation caused another error: %v", err2)
}
}
}()
if err2 := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err2 != nil {
// The directory is allowed to exist
if !os.IsExist(err2) {
err = errors.Wrapf(err2, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
}
}
return
}
// cleanup an exec session after it's done
func (c *Container) cleanupExecBundle(sessionID string) error {
if err := os.RemoveAll(c.execBundlePath(sessionID)); err != nil && !os.IsNotExist(err) {
return err
}
return c.ociRuntime.ExecContainerCleanup(c, sessionID)
}
// the path to a container's exec session bundle
func (c *Container) execBundlePath(sessionID string) string {
return filepath.Join(c.bundlePath(), sessionID)
}
// Get PID file path for a container's exec session
func (c *Container) execPidPath(sessionID string) string {
return filepath.Join(c.execBundlePath(sessionID), "exec_pid")
}
// the log path for an exec session
func (c *Container) execLogPath(sessionID string) string {
return filepath.Join(c.execBundlePath(sessionID), "exec_log")
}
// the socket conmon creates for an exec session
func (c *Container) execAttachSocketPath(sessionID string) (string, error) {
return c.ociRuntime.ExecAttachSocketPath(c, sessionID)
}
// execExitFileDir gets the path to the container's exit file
func (c *Container) execExitFileDir(sessionID string) string {
return filepath.Join(c.execBundlePath(sessionID), "exit")
}
// execOCILog returns the file path for the exec session's OCI log
func (c *Container) execOCILog(sessionID string) string {
if !c.ociRuntime.SupportsJSONErrors() {
return ""
}
return filepath.Join(c.execBundlePath(sessionID), "oci-log")
}
// readExecExitCode reads the exit file for an exec session and returns
// the exit code
func (c *Container) readExecExitCode(sessionID string) (int, error) {
exitFile := filepath.Join(c.execExitFileDir(sessionID), c.ID())
chWait := make(chan error)
defer close(chWait)
_, err := WaitForFile(exitFile, chWait, time.Second*5)
if err != nil {
return -1, err
}
ec, err := ioutil.ReadFile(exitFile)
if err != nil {
return -1, err
}
ecInt, err := strconv.Atoi(string(ec))
if err != nil {
return -1, err
}
return ecInt, nil
}
// Wait for the container's exit file to appear.
// When it does, update our state based on it.
func (c *Container) waitForExitFileAndSync() error {
@ -568,6 +482,7 @@ func resetState(state *ContainerState) error {
state.State = define.ContainerStateConfigured
}
state.ExecSessions = make(map[string]*ExecSession)
state.LegacyExecSessions = nil
state.NetworkStatus = nil
state.BindMounts = make(map[string]string)
state.StoppedByUser = false
@ -1814,12 +1729,12 @@ func (c *Container) checkReadyForRemoval() error {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String())
}
// Reap exec sessions
if err := c.reapExecSessions(); err != nil {
// Check exec sessions
sessions, err := c.getActiveExecSessions()
if err != nil {
return err
}
if len(c.state.ExecSessions) != 0 {
if len(sessions) != 0 {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
}
@ -1926,41 +1841,6 @@ func (c *Container) checkExitFile() error {
return c.handleExitFile(exitFile, info)
}
// Reap dead exec sessions
func (c *Container) reapExecSessions() error {
// Instead of saving once per iteration, use a defer to do it once at
// the end.
var lastErr error
needSave := false
for id := range c.state.ExecSessions {
alive, err := c.ociRuntime.ExecUpdateStatus(c, id)
if err != nil {
if lastErr != nil {
logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr)
}
lastErr = err
continue
}
if !alive {
// Clean up lingering files and remove the exec session
if err := c.ociRuntime.ExecContainerCleanup(c, id); err != nil {
return errors.Wrapf(err, "error cleaning up container %s exec session %s files", c.ID(), id)
}
delete(c.state.ExecSessions, id)
needSave = true
}
}
if needSave {
if err := c.save(); err != nil {
if lastErr != nil {
logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr)
}
lastErr = err
}
}
return lastErr
}
func (c *Container) hasNamespace(namespace spec.LinuxNamespaceType) bool {
if c.config.Spec == nil || c.config.Spec.Linux == nil {
return false

View File

@ -134,7 +134,9 @@ func (c *Container) execPS(args []string) ([]string, error) {
}()
cmd := append([]string{"ps"}, args...)
ec, err := c.Exec(false, false, map[string]string{}, cmd, "", "", streams, 0, nil, "")
config := new(ExecConfig)
config.Command = cmd
ec, err := c.Exec(config, streams, nil)
if err != nil {
return nil, err
} else if ec != 0 {

View File

@ -20,6 +20,10 @@ var (
// ErrNoSuchVolume indicates the requested volume does not exist
ErrNoSuchVolume = errors.New("no such volume")
// ErrNoSuchExecSession indicates that the requested exec session does
// not exist.
ErrNoSuchExecSession = errors.New("no such exec session")
// ErrCtrExists indicates a container with the same name or ID already
// exists
ErrCtrExists = errors.New("container already exists")
@ -29,10 +33,16 @@ var (
ErrImageExists = errors.New("image already exists")
// ErrVolumeExists indicates a volume with the same name already exists
ErrVolumeExists = errors.New("volume already exists")
// ErrExecSessionExists indicates an exec session with the same ID
// already exists.
ErrExecSessionExists = errors.New("exec session already exists")
// ErrCtrStateInvalid indicates a container is in an improper state for
// the requested operation
ErrCtrStateInvalid = errors.New("container state improper")
// ErrExecSessionStateInvalid indicates that an exec session is in an
// improper state for the requested operation
ErrExecSessionStateInvalid = errors.New("exec session state improper")
// ErrVolumeBeingUsed indicates that a volume is being used by at least one container
ErrVolumeBeingUsed = errors.New("volume is being used")
@ -90,6 +100,9 @@ var (
// ErrVolumeRemoved indicates that the volume has already been removed and
// no further operations can be performed on it
ErrVolumeRemoved = errors.New("volume has already been removed")
// ErrExecSessionRemoved indicates that the exec session has already
// been removed and no further operations can be performed on it.
ErrExecSessionRemoved = errors.New("exec session has already been removed")
// ErrDBClosed indicates that the connection to the state database has
// already been closed

View File

@ -143,7 +143,9 @@ func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
timeStart := time.Now()
hcResult := HealthCheckSuccess
_, hcErr := c.Exec(false, false, map[string]string{}, newCommand, "", "", streams, 0, nil, "")
config := new(ExecConfig)
config.Command = newCommand
_, hcErr := c.Exec(config, streams, nil)
if hcErr != nil {
errCause := errors.Cause(hcErr)
hcResult = HealthCheckFailure

View File

@ -20,10 +20,16 @@ type InMemoryState struct {
pods map[string]*Pod
// Maps container ID to container struct.
containers map[string]*Container
// Maps volume ID to volume struct
volumes map[string]*Volume
// Maps exec session ID to ID of associated container
execSessions map[string]string
// Maps container ID to a list of IDs of dependencies.
ctrDepends map[string][]string
// Maps volume ID to IDs of dependencies
volumeDepends map[string][]string
// Maps container ID to IDs of associated exec sessions.
ctrExecSessions map[string][]string
// Maps pod ID to a map of container ID to container struct.
podContainers map[string]map[string]*Container
// Global name registry - ensures name uniqueness and performs lookups.
@ -51,10 +57,13 @@ func NewInMemoryState() (State, error) {
state.pods = make(map[string]*Pod)
state.containers = make(map[string]*Container)
state.volumes = make(map[string]*Volume)
state.execSessions = make(map[string]string)
state.ctrDepends = make(map[string][]string)
state.volumeDepends = make(map[string][]string)
state.ctrExecSessions = make(map[string][]string)
state.podContainers = make(map[string]map[string]*Container)
state.nameIndex = registrar.NewRegistrar()
@ -316,6 +325,13 @@ func (s *InMemoryState) RemoveContainer(ctr *Container) error {
return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr)
}
// Ensure we don't have active exec sessions
ctrSessions := s.ctrExecSessions[ctr.ID()]
if len(ctrSessions) > 0 {
sessStr := strings.Join(ctrSessions, ", ")
return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr)
}
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
return errors.Wrapf(define.ErrNoSuchCtr, "no container exists in state with ID %s", ctr.ID())
@ -437,6 +453,117 @@ func (s *InMemoryState) GetContainerConfig(id string) (*ContainerConfig, error)
return ctr.Config(), nil
}
// AddExecSession adds an exec session to the database
func (s *InMemoryState) AddExecSession(ctr *Container, session *ExecSession) error {
if !ctr.valid {
return define.ErrCtrRemoved
}
if session.ContainerID() != ctr.ID() {
return errors.Wrapf(define.ErrInvalidArg, "container ID and exec session ID must match")
}
if _, ok := s.containers[ctr.ID()]; !ok {
return define.ErrNoSuchCtr
}
if _, ok := s.execSessions[session.ID()]; ok {
return define.ErrExecSessionExists
}
s.execSessions[session.ID()] = ctr.ID()
ctrSessions, ok := s.ctrExecSessions[ctr.ID()]
if !ok {
ctrSessions = []string{}
}
ctrSessions = append(ctrSessions, session.ID())
s.ctrExecSessions[ctr.ID()] = ctrSessions
return nil
}
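Editor's note: AddExecSession and RemoveExecSession maintain two maps that must stay in sync. A sketch of the invariant, as a hypothetical in-package debug helper:
// checkExecInvariant sketches the invariant the two maps maintain:
// every session ID in execSessions must also appear in the owning
// container's entry in ctrExecSessions.
func checkExecInvariant(s *InMemoryState) bool {
for sessID, ctrID := range s.execSessions {
found := false
for _, id := range s.ctrExecSessions[ctrID] {
if id == sessID {
found = true
break
}
}
if !found {
return false
}
}
return true
}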
// GetExecSession returns the ID of the container an exec session is
// associated with, looked up by the session's full ID.
func (s *InMemoryState) GetExecSession(id string) (string, error) {
if id == "" {
return "", define.ErrEmptyID
}
ctrID, ok := s.execSessions[id]
if !ok {
return "", define.ErrNoSuchExecSession
}
return ctrID, nil
}
// RemoveExecSession removes an exec session from the database.
func (s *InMemoryState) RemoveExecSession(session *ExecSession) error {
if _, ok := s.execSessions[session.ID()]; !ok {
return define.ErrNoSuchExecSession
}
ctrSessions, ok := s.ctrExecSessions[session.ContainerID()]
// If !ok, internal state is inconsistent, but the reference we
// wanted to remove is already gone, so continue.
if ok {
newSessions := []string{}
for _, sess := range ctrSessions {
if sess != session.ID() {
newSessions = append(newSessions, sess)
}
}
s.ctrExecSessions[session.ContainerID()] = newSessions
}
delete(s.execSessions, session.ID())
return nil
}
// GetContainerExecSessions retrieves the IDs of all exec sessions for the given container.
func (s *InMemoryState) GetContainerExecSessions(ctr *Container) ([]string, error) {
if !ctr.valid {
return nil, define.ErrCtrRemoved
}
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
return nil, define.ErrNoSuchCtr
}
ctrSessions := s.ctrExecSessions[ctr.ID()]
return ctrSessions, nil
}
// RemoveContainerExecSessions removes all exec sessions for the given
// container.
func (s *InMemoryState) RemoveContainerExecSessions(ctr *Container) error {
if !ctr.valid {
return define.ErrCtrRemoved
}
if _, ok := s.containers[ctr.ID()]; !ok {
ctr.valid = false
return define.ErrNoSuchCtr
}
ctrSessions, ok := s.ctrExecSessions[ctr.ID()]
if !ok {
return nil
}
for _, sess := range ctrSessions {
if _, ok := s.execSessions[sess]; !ok {
// We have an internal state inconsistency
// Error out
return errors.Wrapf(define.ErrInternal, "inconsistent database state: exec session %s is missing", sess)
}
delete(s.execSessions, sess)
}
delete(s.ctrExecSessions, ctr.ID())
return nil
}
// RewriteContainerConfig rewrites a container's configuration.
// This function is DANGEROUS, even with an in-memory state.
// Please read the full comment on it in state.go before using it.
@ -1056,6 +1183,13 @@ func (s *InMemoryState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
return errors.Wrapf(define.ErrCtrExists, "the following containers depend on container %s: %s", ctr.ID(), depsStr)
}
// Ensure we don't have active exec sessions
ctrSessions := s.ctrExecSessions[ctr.ID()]
if len(ctrSessions) > 0 {
sessStr := strings.Join(ctrSessions, ", ")
return errors.Wrapf(define.ErrCtrExists, "the following exec sessions are running for container %s: %s", ctr.ID(), sessStr)
}
// Retrieve pod containers
podCtrs, ok := s.podContainers[pod.ID()]
if !ok {

View File

@ -71,6 +71,9 @@ type OCIRuntime interface {
// Returns an int (exit code), error channel (errors from attach), and
// error (errors that occurred attempting to start the exec session).
ExecContainer(ctr *Container, sessionID string, options *ExecOptions) (int, chan error, error)
// ExecAttachResize resizes the terminal of a running exec session. Only
// allowed with sessions that were created with a TTY.
ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error
// ExecStopContainer stops a given exec session in a running container.
// SIGTERM will be sent initially, then SIGKILL after the given timeout.
// If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will
@ -143,12 +146,12 @@ type ExecOptions struct {
// to 0, 1, 2) that will be passed to the executed process. The total FDs
// passed will be 3 + PreserveFDs.
PreserveFDs uint
// Resize is a channel where terminal resize events are sent to be
// handled.
Resize chan remotecommand.TerminalSize
// DetachKeys is a set of keys that, when pressed in sequence, will
// detach from the container.
DetachKeys string
// If not provided, the default keys will be used.
// If provided but set to "", detaching from the container will be
// disabled.
DetachKeys *string
}
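Editor's note: moving DetachKeys from string to *string gives it three distinguishable states, which a plain string could not express. A small sketch of each (the key sequence is illustrative):
var opts ExecOptions

// nil pointer: fall back to define.DefaultDetachKeys.
opts.DetachKeys = nil

// Pointer to the empty string: detaching is explicitly disabled.
disabled := ""
opts.DetachKeys = &disabled

// Pointer to a non-empty string: use the caller's sequence.
custom := "ctrl-p,ctrl-q"
opts.DetachKeys = &custom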
// HTTPAttachStreams informs the HTTPAttach endpoint which of the container's

View File

@ -93,7 +93,7 @@ func (c *Container) attach(streams *AttachStreams, keys string, resize <-chan re
// 4. attachToExec sends on startFd, signalling it has attached to the socket and child is ready to go
// 5. child receives on startFd, runs the runtime exec command
// attachToExec is responsible for closing startFd and attachFd
func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, sessionID string, startFd, attachFd *os.File) error {
func (c *Container) attachToExec(streams *AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File) error {
if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
}
@ -104,7 +104,11 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c
defer errorhandling.CloseQuiet(startFd)
defer errorhandling.CloseQuiet(attachFd)
detachKeys, err := processDetachKeys(keys)
detachString := define.DefaultDetachKeys
if keys != nil {
detachString = *keys
}
detachKeys, err := processDetachKeys(detachString)
if err != nil {
return err
}
@ -134,10 +138,6 @@ func (c *Container) attachToExec(streams *AttachStreams, keys string, resize <-c
}
}()
// Register the resize func after we've read the attach socket, as we know at this point the
// 'ctl' file has been created in conmon
registerResizeFunc(resize, c.execBundlePath(sessionID))
// start listening on stdio of the process
receiveStdoutError, stdinDone := setupStdioChannels(streams, conn, detachKeys)

View File

@ -769,7 +769,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
attachChan := make(chan error)
go func() {
// attachToExec is responsible for closing pipes
attachChan <- c.attachToExec(options.Streams, options.DetachKeys, options.Resize, sessionID, parentStartPipe, parentAttachPipe)
attachChan <- c.attachToExec(options.Streams, options.DetachKeys, sessionID, parentStartPipe, parentAttachPipe)
close(attachChan)
}()
attachToExecCalled = true
@ -783,37 +783,54 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
return pid, attachChan, err
}
// ExecAttachResize resizes the TTY of the given exec session.
func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
// TODO: probably want a dedicated function to get ctl file path?
controlPath := filepath.Join(ctr.execBundlePath(sessionID), "ctl")
controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
if err != nil {
return errors.Wrapf(err, "could not open ctl file for terminal resize for container %s exec session %s", ctr.ID(), sessionID)
}
defer controlFile.Close()
logrus.Debugf("Received a resize event for container %s exec session %s: %+v", ctr.ID(), sessionID, newSize)
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
}
return nil
}
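Editor's note: since the Resize channel was dropped from ExecOptions, the caller now owns the resize loop and turns each event into an ExecAttachResize call. A hedged sketch of that forwarding (the helper and its error policy are assumptions; assumes the libpod package context):
// forwardResize drains a caller-owned channel of terminal size events
// and relays each one to the running exec session; ExecAttachResize
// then writes the type-1 resize message to conmon's ctl FIFO as above.
func forwardResize(r OCIRuntime, ctr *Container, sessionID string, resize <-chan remotecommand.TerminalSize) {
for newSize := range resize {
if err := r.ExecAttachResize(ctr, sessionID, newSize); err != nil {
logrus.Warnf("Error resizing exec session %s: %v", sessionID, err)
return
}
}
}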
// ExecStopContainer stops a given exec session in a running container.
func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, timeout uint) error {
session, ok := ctr.state.ExecSessions[sessionID]
if !ok {
// TODO This should probably be a separate error
return errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID())
pid, err := ctr.getExecSessionPID(sessionID)
if err != nil {
return err
}
logrus.Debugf("Going to stop container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
if err := unix.Kill(session.PID, 0); err != nil {
if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID)
return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
if timeout > 0 {
// Use SIGTERM by default, then SIGKILL after timeout.
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, session.PID, ctr.ID())
if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGTERM", sessionID, pid, ctr.ID())
if err := unix.Kill(pid, unix.SIGTERM); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, session.PID)
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
if err := waitPidStop(session.PID, time.Duration(timeout)*time.Second); err != nil {
if err := waitPidStop(pid, time.Duration(timeout)*time.Second); err != nil {
logrus.Warnf("Timed out waiting for container %s exec session %s to stop, resorting to SIGKILL", ctr.ID(), sessionID)
} else {
// No error, the exec session is dead
@ -822,17 +839,17 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
}
// SIGTERM did not work. On to SIGKILL.
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, session.PID, ctr.ID())
if err := unix.Kill(session.PID, unix.SIGTERM); err != nil {
logrus.Debugf("Killing exec session %s (PID %d) of container %s with SIGKILL", sessionID, pid, ctr.ID())
if err := unix.Kill(pid, unix.SIGKILL); err != nil {
if err == unix.ESRCH {
return nil
}
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, session.PID)
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
}
// Wait for the PID to stop
if err := waitPidStop(session.PID, killContainerTimeout*time.Second); err != nil {
return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, session.PID)
if err := waitPidStop(pid, killContainerTimeout*time.Second); err != nil {
return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
}
return nil
@ -840,21 +857,20 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
// ExecUpdateStatus checks if the given exec session is still running.
func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (bool, error) {
session, ok := ctr.state.ExecSessions[sessionID]
if !ok {
// TODO This should probably be a separate error
return false, errors.Wrapf(define.ErrInvalidArg, "no exec session with ID %s found in container %s", sessionID, ctr.ID())
pid, err := ctr.getExecSessionPID(sessionID)
if err != nil {
return false, err
}
logrus.Debugf("Checking status of container %s exec session %s", ctr.ID(), sessionID)
// Is the session dead?
// Ping the PID with signal 0 to see if it still exists.
if err := unix.Kill(session.PID, 0); err != nil {
if err := unix.Kill(pid, 0); err != nil {
if err == unix.ESRCH {
return false, nil
}
return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, session.PID)
return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
}
return true, nil

View File

@ -125,6 +125,11 @@ func (r *MissingRuntime) ExecContainer(ctr *Container, sessionID string, options
return -1, nil, r.printError()
}
// ExecAttachResize is not available as the runtime is missing.
func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
return r.printError()
}
// ExecStopContainer is not available as the runtime is missing.
// TODO: We can also investigate using unix.Kill() on the PID of the exec
// session here if we want to make stopping containers possible. Won't be

View File

@ -462,11 +462,9 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
// Check that all of our exec sessions have finished
for _, session := range c.state.ExecSessions {
if err := c.ociRuntime.ExecStopContainer(c, session.ID, c.StopTimeout()); err != nil {
return errors.Wrapf(err, "error stopping exec session %s of container %s", session.ID, c.ID())
}
// Remove all active exec sessions
if err := c.removeAllExecSessions(); err != nil {
return err
}
// Check that no other containers depend on the container.
@ -483,9 +481,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
// Set ContainerStateRemoving and remove exec sessions
// Set ContainerStateRemoving
c.state.State = define.ContainerStateRemoving
c.state.ExecSessions = nil
if err := c.save(); err != nil {
return errors.Wrapf(err, "unable to set container %s removing state in database", c.ID())

View File

@ -72,6 +72,8 @@ type State interface {
// Removes container from state.
// Containers that are part of pods must use RemoveContainerFromPod.
// The container must be part of the set namespace.
// All dependencies must be removed first.
// All exec sessions referencing the container must be removed first.
RemoveContainer(ctr *Container) error
// UpdateContainer updates a container's state from the backing store.
// The container must be part of the set namespace.
@ -95,6 +97,30 @@ type State interface {
// Return a container config from the database by full ID
GetContainerConfig(id string) (*ContainerConfig, error)
// AddExecSession creates a reference to an exec session in the database.
// The container the exec session is attached to will be recorded.
// The container state will not be modified.
// The actual exec session itself is part of the container's state.
// We assume higher-level callers will add the session by saving the
// container's state before calling this. This only ensures that the ID
// of the exec session is associated with the ID of the container.
// Implementations may, but are not required to, verify that the state
// of the given container has an exec session with the ID given.
AddExecSession(ctr *Container, session *ExecSession) error
// GetExecSession retrieves the ID of the container a given exec session is attached to.
GetExecSession(id string) (string, error)
// RemoveExecSession removes a reference to an exec session from the database.
// This will not modify container state to remove the exec session there
// and instead only removes the session ID -> container ID reference
// added by AddExecSession.
RemoveExecSession(session *ExecSession) error
// GetContainerExecSessions retrieves the IDs of all exec sessions attached to a given container.
GetContainerExecSessions(ctr *Container) ([]string, error)
// RemoveContainerExecSessions removes all exec sessions for a single
// container, usually as part of removing the container.
// As with RemoveExecSession, container state will not be modified.
RemoveContainerExecSessions(ctr *Container) error
// PLEASE READ FULL DESCRIPTION BEFORE USING.
// Rewrite a container's configuration.
// This function breaks libpod's normal prohibition on a read-only

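Editor's note: taken together, the new State methods imply a simple lifecycle for the session/container reference. A hedged sketch of the expected call order (hypothetical helper; error handling trimmed; the session is assumed to already live in the container's own state):
func trackSession(s State, ctr *Container, session *ExecSession) error {
// Record the session ID -> container ID mapping.
if err := s.AddExecSession(ctr, session); err != nil {
return err
}
// Any later caller can now find the owning container by session ID.
ctrID, err := s.GetExecSession(session.ID())
if err != nil {
return err
}
logrus.Debugf("Exec session %s belongs to container %s", session.ID(), ctrID)
// On session exit, drop the reference; the copy in the container's
// state is cleaned up separately by the caller.
return s.RemoveExecSession(session)
}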
View File

@ -16,7 +16,6 @@ import (
// ExecAttachCtr execs and attaches to a container
func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged bool, env map[string]string, cmd []string, user, workDir string, streams *libpod.AttachStreams, preserveFDs uint, detachKeys string) (int, error) {
resize := make(chan remotecommand.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
// Check if we are attached to a terminal. If we are, generate resize
@ -33,7 +32,18 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, tty, privileged b
}
}()
}
return ctr.Exec(tty, privileged, env, cmd, user, workDir, streams, preserveFDs, resize, detachKeys)
execConfig := new(libpod.ExecConfig)
execConfig.Command = cmd
execConfig.Terminal = tty
execConfig.Privileged = privileged
execConfig.Environment = env
execConfig.User = user
execConfig.WorkDir = workDir
execConfig.DetachKeys = &detachKeys
execConfig.PreserveFDs = preserveFDs
return ctr.Exec(execConfig, streams, resize)
}
// StartAttachCtr starts and (if required) attaches to a container

View File

@ -846,11 +846,6 @@ func (i *LibpodAPI) ExecContainer(call iopodman.VarlinkCall, opts iopodman.ExecO
workDir = *opts.Workdir
}
var detachKeys string
if opts.DetachKeys != nil {
detachKeys = *opts.DetachKeys
}
resizeChan := make(chan remotecommand.TerminalSize)
reader, writer, _, pipeWriter, streams := setupStreams(call)
@ -870,8 +865,17 @@ func (i *LibpodAPI) ExecContainer(call iopodman.VarlinkCall, opts iopodman.ExecO
}
}()
execConfig := new(libpod.ExecConfig)
execConfig.Command = opts.Cmd
execConfig.Terminal = opts.Tty
execConfig.Privileged = opts.Privileged
execConfig.Environment = envs
execConfig.User = user
execConfig.WorkDir = workDir
execConfig.DetachKeys = opts.DetachKeys
go func() {
ec, err := ctr.Exec(opts.Tty, opts.Privileged, envs, opts.Cmd, user, workDir, streams, 0, resizeChan, detachKeys)
ec, err := ctr.Exec(execConfig, streams, resizeChan)
if err != nil {
logrus.Errorf("%v", err)
}