
This started off as an attempt to make `podman stop` on a container started with `--rm` actually remove the container, instead of just cleaning it up and waiting for the cleanup process to finish the removal.

In the process, I realized that `podman run --rmi` was rather broken. It was only handled by the Podman CLI, not the cleanup process (meaning it only worked with attached containers), and the way it was wired meant I was fairly confident it wouldn't work if I did a `podman stop` on an attached container run with `--rmi`. I rewired it to use the same mechanism that `podman run --rm` uses, so it should be a lot more durable now, and I also wired it into `podman inspect` so you can tell that a container will remove its image.

Tests have been added for the changes to `podman run --rmi`. No tests for `stop` on a `run --rm` container, as that would be racy.

Fixes #22852
Fixes RHEL-39513

Signed-off-by: Matt Heon <mheon@redhat.com>

//go:build !remote

package compat

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"

	"github.com/containers/common/pkg/resize"
	"github.com/containers/podman/v5/libpod"
	"github.com/containers/podman/v5/libpod/define"
	"github.com/containers/podman/v5/pkg/api/handlers"
	"github.com/containers/podman/v5/pkg/api/handlers/utils"
	"github.com/containers/podman/v5/pkg/api/server/idle"
	api "github.com/containers/podman/v5/pkg/api/types"
	"github.com/containers/podman/v5/pkg/domain/entities"
	"github.com/containers/podman/v5/pkg/specgenutil"
	"github.com/containers/podman/v5/pkg/util"
	"github.com/gorilla/mux"
	"github.com/sirupsen/logrus"
)

// ExecCreateHandler creates an exec session for a given container.
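//
// It backs the Docker-compatible "exec create" endpoint
// (POST /containers/{name}/exec in the Docker API). An illustrative
// request body, limited to the fields this handler actually reads
// (a sketch, not the full schema):
//
//	{
//	    "Cmd": ["sh", "-c", "echo hello"],
//	    "AttachStdin": false,
//	    "AttachStdout": true,
//	    "AttachStderr": true,
//	    "Tty": false,
//	    "Env": ["FOO=bar"],
//	    "WorkingDir": "/",
//	    "Privileged": false,
//	    "User": "",
//	    "DetachKeys": "ctrl-p,ctrl-q"
//	}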
func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
	runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)

	input := new(handlers.ExecCreateConfig)
	if err := json.NewDecoder(r.Body).Decode(input); err != nil {
		// A malformed body is a client error, so return 400 rather
		// than 500, matching the other handlers in this file.
		utils.Error(w, http.StatusBadRequest, fmt.Errorf("decoding request body as JSON: %w", err))
		return
	}

	ctrName := utils.GetName(r)
	ctr, err := runtime.LookupContainer(ctrName)
	if err != nil {
		utils.ContainerNotFound(w, ctrName, err)
		return
	}

	libpodConfig := new(libpod.ExecConfig)
	libpodConfig.Command = input.Cmd
	libpodConfig.Terminal = input.Tty
	libpodConfig.AttachStdin = input.AttachStdin
	libpodConfig.AttachStderr = input.AttachStderr
	libpodConfig.AttachStdout = input.AttachStdout
	if input.DetachKeys != "" {
		libpodConfig.DetachKeys = &input.DetachKeys
	}
	// The compat API sends the environment as KEY=VALUE strings; libpod
	// wants a map.
	libpodConfig.Environment = make(map[string]string)
	for _, envStr := range input.Env {
		key, val, hasVal := strings.Cut(envStr, "=")
		if !hasVal {
			utils.Error(w, http.StatusBadRequest, fmt.Errorf("environment variable %q badly formed, must be key=value", envStr))
			return
		}
		libpodConfig.Environment[key] = val
	}
	libpodConfig.WorkDir = input.WorkingDir
	libpodConfig.Privileged = input.Privileged
	libpodConfig.User = input.User

	// Make sure the session has a TERM variable when a TTY was requested.
	if input.Tty {
		util.ExecAddTERM(ctr.Env(), libpodConfig.Environment)
	}
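
	// Build the exit command for the exec session. The exit command is
	// what allows a detached session to be cleaned up after it finishes:
	// libpod invokes the command generated by CreateExitCommandArgs (a
	// re-invocation of podman for cleanup) once the session exits.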
	storageConfig := runtime.StorageConfig()
	runtimeConfig, err := runtime.GetConfig()
	if err != nil {
		utils.InternalServerError(w, err)
		return
	}
	// Automatically log to syslog if the server has log-level=debug set.
	exitCommandArgs, err := specgenutil.CreateExitCommandArgs(storageConfig, runtimeConfig, logrus.IsLevelEnabled(logrus.DebugLevel), true, false, true)
	if err != nil {
		utils.InternalServerError(w, err)
		return
	}
	libpodConfig.ExitCommand = exitCommandArgs

	// Run the exit command after a delay (5 minutes by default) to mimic
	// Docker's exec cleanup behavior. The delay comes from the engine's
	// exit_command_delay configuration.
	libpodConfig.ExitCommandDelay = runtimeConfig.Engine.ExitCommandDelay

	sessID, err := ctr.ExecCreate(libpodConfig)
	if err != nil {
		if errors.Is(err, define.ErrCtrStateInvalid) {
			// Check if the container is paused. If so, return a 409.
			state, err := ctr.State()
			if err == nil {
				// Ignore the err != nil case: we're already
				// returning an InternalServerError below.
				if state == define.ContainerStatePaused {
					utils.Error(w, http.StatusConflict, fmt.Errorf("cannot create exec session as container %s is paused", ctr.ID()))
					return
				}
			}
		}
		utils.InternalServerError(w, err)
		return
	}

	utils.WriteResponse(w, http.StatusCreated, entities.IDResponse{ID: sessID})
}

// ExecInspectHandler inspects a given exec session.
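//
// It backs the Docker-compatible GET /exec/{id}/json endpoint: the
// session is looked up by ID, and its inspect data is returned as JSON.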
func ExecInspectHandler(w http.ResponseWriter, r *http.Request) {
	runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)

	sessionID := mux.Vars(r)["id"]
	sessionCtr, err := runtime.GetExecSessionContainer(sessionID)
	if err != nil {
		utils.Error(w, http.StatusNotFound, err)
		return
	}

	logrus.Debugf("Inspecting exec session %s of container %s", sessionID, sessionCtr.ID())

	session, err := sessionCtr.ExecSession(sessionID)
	if err != nil {
		utils.InternalServerError(w, fmt.Errorf("retrieving exec session %s from container %s: %w", sessionID, sessionCtr.ID(), err))
		return
	}

	inspectOut, err := session.Inspect()
	if err != nil {
		utils.InternalServerError(w, err)
		return
	}

	utils.WriteResponse(w, http.StatusOK, inspectOut)
}

// ExecStartHandler runs a given exec session.
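//
// It backs the Docker-compatible POST /exec/{id}/start endpoint. An
// illustrative request body, limited to the fields read below (a sketch):
//
//	{
//	    "Detach": false,
//	    "Tty": true,
//	    "Height": 24,
//	    "Width": 80
//	}
//
// A detached start returns 200 with no content once the session is
// running; an attached start hijacks the connection and streams the
// session's stdio over it.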
func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
	runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)

	sessionID := mux.Vars(r)["id"]

	// TODO: We should read/support Tty from here.
	bodyParams := new(handlers.ExecStartConfig)

	if err := json.NewDecoder(r.Body).Decode(bodyParams); err != nil {
		utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to decode parameters for %s: %w", r.URL.String(), err))
		return
	}
	// TODO: Verify the TTY setting against what the exec session was
	// created with.

	sessionCtr, err := runtime.GetExecSessionContainer(sessionID)
	if err != nil {
		utils.Error(w, http.StatusNotFound, err)
		return
	}

	logrus.Debugf("Starting exec session %s of container %s", sessionID, sessionCtr.ID())

	state, err := sessionCtr.State()
	if err != nil {
		utils.InternalServerError(w, err)
		return
	}
	if state != define.ContainerStateRunning {
		utils.Error(w, http.StatusConflict, fmt.Errorf("cannot exec in a container that is not running; container %s is %s", sessionCtr.ID(), state.String()))
		return
	}

	if bodyParams.Detach {
		// If we are detaching, we do NOT want to hijack. Instead, we
		// perform a detached start and return 200 if successful.
		if err := sessionCtr.ExecStart(sessionID); err != nil {
			utils.InternalServerError(w, err)
			return
		}
		// This is a 200 despite having no content.
		utils.WriteResponse(w, http.StatusOK, "")
		return
	}

	logErr := func(e error) {
		logrus.Error(fmt.Errorf("attaching to container %s exec session %s: %w", sessionCtr.ID(), sessionID, e))
	}

	var size *resize.TerminalSize
	if bodyParams.Tty && (bodyParams.Height > 0 || bodyParams.Width > 0) {
		size = &resize.TerminalSize{
			Height: bodyParams.Height,
			Width:  bodyParams.Width,
		}
	}
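
	// ExecHTTPStartAndAttach hijacks the HTTP connection to stream the
	// session's stdio. hijackChan reports whether the hijack happened;
	// once it has, the status is already 101 (Switching Protocols), so
	// later errors can only be logged, not returned to the client.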
	hijackChan := make(chan bool, 1)
	err = sessionCtr.ExecHTTPStartAndAttach(sessionID, r, w, nil, nil, nil, hijackChan, size)

	if <-hijackChan {
		// The connection was hijacked; we have to signal when it is
		// being closed.
		t := r.Context().Value(api.IdleTrackerKey).(*idle.Tracker)
		defer t.Close()

		if err != nil {
			// Cannot report the error to the client as a 500, as
			// the Upgrade already set the status to 101.
			logErr(err)
			return
		}
	} else {
		// The hijack failed, so we assume we can still inform the
		// client of the failure.
		utils.InternalServerError(w, err)
		logErr(err)
		return
	}
	logrus.Debugf("Attach for container %s exec session %s completed successfully", sessionCtr.ID(), sessionID)
}

// ExecRemoveHandler removes an exec session.
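//
// Docker's API has no exec-remove endpoint, so this appears to be a
// Podman extension. An illustrative request body (a sketch; only Force
// is read below):
//
//	{
//	    "Force": false
//	}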
func ExecRemoveHandler(w http.ResponseWriter, r *http.Request) {
	runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)

	sessionID := mux.Vars(r)["id"]

	bodyParams := new(handlers.ExecRemoveConfig)

	if err := json.NewDecoder(r.Body).Decode(bodyParams); err != nil {
		utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to decode parameters for %s: %w", r.URL.String(), err))
		return
	}

	sessionCtr, err := runtime.GetExecSessionContainer(sessionID)
	if err != nil {
		utils.Error(w, http.StatusNotFound, err)
		return
	}

	logrus.Debugf("Removing exec session %s of container %s", sessionID, sessionCtr.ID())
	if err := sessionCtr.ExecRemove(sessionID, bodyParams.Force); err != nil {
		utils.InternalServerError(w, err)
		return
	}
	logrus.Debugf("Removing exec session %s of container %s completed successfully", sessionID, sessionCtr.ID())
}