Merge pull request #3483 from mheon/get_conmon_pid

Store and print PID of conmon
Committed by OpenShift Merge Robot (via GitHub), 2019-07-03 12:45:48 +02:00
5 changed files with 29 additions and 1 deletion
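
In short: the PID of the container's conmon monitor process is now recorded in container state alongside the container's own PID, persisted and cleared with the rest of that state, and printed via the container inspect data.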


@@ -168,6 +168,8 @@ type ContainerState struct {
     OOMKilled bool `json:"oomKilled,omitempty"`
     // PID is the PID of a running container
     PID int `json:"pid,omitempty"`
+    // ConmonPID is the PID of the container's conmon
+    ConmonPID int `json:"conmonPid,omitempty"`
     // ExecSessions contains active exec sessions for container
     // Exec session ID is mapped to PID of exec process
     ExecSessions map[string]*ExecSession `json:"execSessions,omitempty"`
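
The new field carries `omitempty` like PID before it, so a stopped container (both PIDs zero) serializes without either key. A minimal standalone sketch of that behavior, with the struct trimmed to the two PID fields and made-up PID values:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // state is a trimmed stand-in for ContainerState; the tags match the
    // fields shown in the hunk above.
    type state struct {
        PID       int `json:"pid,omitempty"`
        ConmonPID int `json:"conmonPid,omitempty"`
    }

    func main() {
        running, _ := json.Marshal(state{PID: 4821, ConmonPID: 4807})
        fmt.Println(string(running)) // {"pid":4821,"conmonPid":4807}

        stopped, _ := json.Marshal(state{}) // both zero, so both omitted
        fmt.Println(string(stopped))        // {}
    }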
@@ -849,7 +851,7 @@ func (c *Container) OOMKilled() (bool, error) {
     return c.state.OOMKilled, nil
 }
 
-// PID returns the PID of the container
+// PID returns the PID of the container.
 // If the container is not running, a pid of 0 will be returned. No error will
 // occur.
 func (c *Container) PID() (int, error) {
@@ -865,6 +867,22 @@ func (c *Container) PID() (int, error) {
     return c.state.PID, nil
 }
 
+// ConmonPID Returns the PID of the container's conmon process.
+// If the container is not running, a PID of 0 will be returned. No error will
+// occur.
+func (c *Container) ConmonPID() (int, error) {
+    if !c.batched {
+        c.lock.Lock()
+        defer c.lock.Unlock()
+
+        if err := c.syncContainer(); err != nil {
+            return -1, err
+        }
+    }
+
+    return c.state.ConmonPID, nil
+}
+
 // ExecSessions retrieves active exec sessions running in the container
 func (c *Container) ExecSessions() ([]string, error) {
     if !c.batched {
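
A sketch of how a caller might use the new accessor alongside the existing PID(); it assumes a *libpod.Runtime obtained elsewhere, a hypothetical container name, and the import path as it was in this repository at the time:

    package example

    import (
        "fmt"

        "github.com/containers/libpod/libpod"
    )

    // printPIDs looks up a container and prints both PIDs. The name
    // "myctr" is a placeholder; rt must come from libpod.NewRuntime.
    func printPIDs(rt *libpod.Runtime) error {
        ctr, err := rt.LookupContainer("myctr")
        if err != nil {
            return err
        }
        pid, err := ctr.PID() // container's init process; 0 if not running
        if err != nil {
            return err
        }
        conmonPID, err := ctr.ConmonPID() // conmon monitor; 0 if not running
        if err != nil {
            return err
        }
        fmt.Printf("container PID=%d, conmon PID=%d\n", pid, conmonPID)
        return nil
    }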


@@ -145,6 +145,7 @@ type InspectContainerState struct {
     OOMKilled bool `json:"OOMKilled"`
     Dead bool `json:"Dead"`
     Pid int `json:"Pid"`
+    ConmonPid int `json:"ConmonPid,omitempty"`
     ExitCode int32 `json:"ExitCode"`
     Error string `json:"Error"` // TODO
     StartedAt time.Time `json:"StartedAt"`
@@ -261,6 +262,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
         OOMKilled: runtimeInfo.OOMKilled,
         Dead: runtimeInfo.State.String() == "bad state",
         Pid: runtimeInfo.PID,
+        ConmonPid: runtimeInfo.ConmonPID,
         ExitCode: runtimeInfo.ExitCode,
         Error: "", // can't get yet
         StartedAt: runtimeInfo.StartedTime,
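
With the field wired into the inspect data, the conmon PID should be retrievable from the CLI as well, e.g. with something like `podman inspect --format '{{.State.ConmonPid}}' <container>` (assuming the usual Go-template access to the State block). Since the tag carries `omitempty`, the field only appears while the container is running.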


@@ -452,6 +452,7 @@ func (c *Container) teardownStorage() error {
 // It does not save the results - assumes the database will do that for us
 func resetState(state *ContainerState) error {
     state.PID = 0
+    state.ConmonPID = 0
     state.Mountpoint = ""
     state.Mounted = false
     if state.State != define.ContainerStateExited {
@@ -1043,6 +1044,8 @@ func (c *Container) stop(timeout uint) error {
         return err
     }
     c.state.PID = 0
+    c.state.ConmonPID = 0
     c.state.StoppedByUser = true
     if err := c.save(); err != nil {
         return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
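
Clearing ConmonPID together with PID in both resetState() and stop() keeps the stored value from going stale: once the container is down, conmon exits as well, and its PID may be reused by an unrelated process, so callers should only trust a non-zero value.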


@@ -234,6 +234,8 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
         // Alright, it exists. Transition to Stopped state.
         ctr.state.State = define.ContainerStateStopped
         ctr.state.PID = 0
+        ctr.state.ConmonPID = 0
+
         // Read the exit file to get our stopped time and exit code.
         return ctr.handleExitFile(exitFile, info)


@@ -446,6 +446,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
             return errors.Wrapf(define.ErrInternal, "container create failed")
         }
         ctr.state.PID = ss.si.Pid
+        if cmd.Process != nil {
+            ctr.state.ConmonPID = cmd.Process.Pid
+        }
     case <-time.After(ContainerCreateTimeout):
         return errors.Wrapf(define.ErrInternal, "container creation timeout")
     }
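
The nil guard above matters because conmon is launched through os/exec, and an exec.Cmd only has its Process field populated after a successful Start(). A standalone sketch of the same pattern, using a sleep command as a stand-in for the real conmon invocation:

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        cmd := exec.Command("sleep", "1") // stand-in for the conmon command line
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        // Process is set once Start() succeeds; guard before dereferencing,
        // mirroring the check in the diff above.
        if cmd.Process != nil {
            fmt.Println("monitor PID:", cmd.Process.Pid)
        }
        _ = cmd.Wait() // reap the child so it does not linger as a zombie
    }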