podman/pkg/api/handlers/utils/containers.go
Paul Holzinger 768ad8653a rework event code to improve API errors
One of the problems with the Events() API was that you had to call it in
a new goroutine. This meant the error returned by it had to be read
back via a second channel. This caused other bugs in the past, but here
the biggest problem is that basic errors such as invalid since/until
options were not returned directly to the caller.
It meant that in the API we were not able to write HTTP code 200 quickly,
because we always waited for the first event or error from the
channels. This in turn made some clients unhappy, as they assume the
server hangs and time out if no such events are generated.

To fix this we restructure the entire event flow. First, we spawn the
goroutine inside the eventer's Read() function so that not all the callers
have to. Then we can return basic errors quickly, without the goroutine.
The caller then checks the error like any normal function call, and the
API can use it to decide which status code to return.
Second, we now return errors/events on one channel, so the callers can
decide to ignore or log them, which makes the flow a bit clearer (see the
sketch after the commit message).

Fixes c46884aa93 ("podman events: check for an error after we finish reading events")
Fixes #23712

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
2024-11-01 18:54:13 +01:00
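
For reference, here is a minimal sketch (not part of the commit or of the file below) of how a caller consumes the reworked Events() API, mirroring waitNextExit() further down. The helper name waitForDied and its log messages are illustrative only, and the sketch assumes the same imports as the file:

func waitForDied(ctx context.Context, containerEngine entities.ContainerEngine, containerName string) error {
	eventChannel := make(chan events.ReadResult)
	opts := entities.EventsOptions{
		EventChan: eventChannel,
		Filter:    []string{"event=died", fmt.Sprintf("container=%s", containerName)},
		Stream:    true,
	}
	// Basic errors (e.g. an invalid since/until option) are returned here,
	// before any event is read; the reading goroutine is spawned internally.
	if err := containerEngine.Events(ctx, opts); err != nil {
		return err
	}
	// Events and read errors arrive on the same channel; the caller decides
	// whether to log or ignore the errors.
	for evt := range eventChannel {
		if evt.Error != nil {
			logrus.Errorf("Reading event: %v", evt.Error)
			continue
		}
		if evt.Event.ContainerExitCode != nil {
			logrus.Infof("Container %s exited with code %d", containerName, *evt.Event.ContainerExitCode)
		}
	}
	return nil
}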

//go:build !remote

package utils

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/containers/podman/v5/libpod"
	"github.com/containers/podman/v5/libpod/define"
	"github.com/containers/podman/v5/libpod/events"
	"github.com/containers/podman/v5/pkg/api/handlers"
	api "github.com/containers/podman/v5/pkg/api/types"
	"github.com/containers/podman/v5/pkg/domain/entities"
	"github.com/containers/podman/v5/pkg/domain/infra/abi"
	"github.com/gorilla/schema"
	"github.com/sirupsen/logrus"
)

type waitQueryDocker struct {
	Condition string `schema:"condition"`
}

type waitQueryLibpod struct {
	Interval   string   `schema:"interval"`
	Conditions []string `schema:"condition"`
}

func WaitContainerDocker(w http.ResponseWriter, r *http.Request) {
	var err error
	ctx := r.Context()

	query := waitQueryDocker{}

	decoder := ctx.Value(api.DecoderKey).(*schema.Decoder)
	if err = decoder.Decode(&query, r.URL.Query()); err != nil {
		Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
		return
	}

	interval := time.Millisecond * 250

	condition := "not-running"
	if _, found := r.URL.Query()["condition"]; found {
		condition = query.Condition
		if !isValidDockerCondition(query.Condition) {
			BadRequest(w, "condition", condition, errors.New("not a valid docker condition"))
			return
		}
	}

	name := GetName(r)
	exists, err := containerExists(ctx, name)
	if err != nil {
		InternalServerError(w, err)
		return
	}
	if !exists {
		ContainerNotFound(w, name, define.ErrNoSuchCtr)
		return
	}

	// In docker compatibility mode we have to send headers in advance,
	// otherwise the docker client would freeze.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	if flusher, ok := w.(http.Flusher); ok {
		flusher.Flush()
	}

	exitCode, err := waitDockerCondition(ctx, name, interval, condition)
	var errStruct *struct{ Message string }
	if err != nil {
		logrus.Errorf("While waiting on condition: %q", err)
		errStruct = &struct {
			Message string
		}{
			Message: err.Error(),
		}
	}

	responseData := handlers.ContainerWaitOKBody{
		StatusCode: int(exitCode),
		Error:      errStruct,
	}

	enc := json.NewEncoder(w)
	enc.SetEscapeHTML(true)
	err = enc.Encode(&responseData)
	if err != nil {
		logrus.Errorf("Unable to write json: %q", err)
	}
}

func WaitContainerLibpod(w http.ResponseWriter, r *http.Request) {
	var (
		err      error
		interval = time.Millisecond * 250
	)
	decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
	query := waitQueryLibpod{}
	if err := decoder.Decode(&query, r.URL.Query()); err != nil {
		Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
		return
	}

	if _, found := r.URL.Query()["interval"]; found {
		interval, err = time.ParseDuration(query.Interval)
		if err != nil {
			InternalServerError(w, err)
			return
		}
	}

	runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
	containerEngine := &abi.ContainerEngine{Libpod: runtime}

	opts := entities.WaitOptions{
		Conditions: query.Conditions,
		Interval:   interval,
	}
	name := GetName(r)
	reports, err := containerEngine.ContainerWait(r.Context(), []string{name}, opts)
	if err != nil {
		if errors.Is(err, define.ErrNoSuchCtr) {
			// Special case: In the common scenario of podman-remote run --rm
			// the API is required to attach + start + wait to get the exit code.
			// This has the problem that the wait call races against the container
			// removal from the cleanup process, so it may not get the exit code back.
			// However, we keep the exit code around for longer than the container, so
			// we can just look it up here. Of course this only works when we get a
			// full id as the param, but podman-remote will do that.
			if len(opts.Conditions) == 0 {
				if code, err := runtime.GetContainerExitCode(name); err == nil {
					WriteResponse(w, http.StatusOK, strconv.Itoa(int(code)))
					return
				}
			}
			ContainerNotFound(w, name, err)
			return
		}
		InternalServerError(w, err)
		return
	}

	if len(reports) != 1 {
		Error(w, http.StatusInternalServerError, fmt.Errorf("the ContainerWait() function returned unexpected count of reports: %d", len(reports)))
		return
	}

	WriteResponse(w, http.StatusOK, strconv.Itoa(int(reports[0].ExitCode)))
}

type containerWaitFn func(conditions ...define.ContainerStatus) (int32, error)

func createContainerWaitFn(ctx context.Context, containerName string, interval time.Duration) containerWaitFn {
	runtime := ctx.Value(api.RuntimeKey).(*libpod.Runtime)
	var containerEngine entities.ContainerEngine = &abi.ContainerEngine{Libpod: runtime}

	return func(conditions ...define.ContainerStatus) (int32, error) {
		var rawConditions []string
		for _, con := range conditions {
			rawConditions = append(rawConditions, con.String())
		}
		opts := entities.WaitOptions{
			Conditions: rawConditions,
			Interval:   interval,
		}
		ctrWaitReport, err := containerEngine.ContainerWait(ctx, []string{containerName}, opts)
		if err != nil {
			return -1, err
		}
		if len(ctrWaitReport) != 1 {
			return -1, fmt.Errorf("the ContainerWait() function returned unexpected count of reports: %d", len(ctrWaitReport))
		}
		return ctrWaitReport[0].ExitCode, ctrWaitReport[0].Error
	}
}

func isValidDockerCondition(cond string) bool {
	switch cond {
	case "next-exit", "removed", "not-running", "":
		return true
	}
	return false
}

func waitDockerCondition(ctx context.Context, containerName string, interval time.Duration, dockerCondition string) (int32, error) {
	containerWait := createContainerWaitFn(ctx, containerName, interval)

	var err error
	var code int32
	switch dockerCondition {
	case "next-exit":
		code, err = waitNextExit(ctx, containerName)
	case "removed":
		code, err = waitRemoved(containerWait)
	case "not-running", "":
		code, err = waitNotRunning(containerWait)
	default:
		panic("not a valid docker condition")
	}
	return code, err
}

var notRunningStates = []define.ContainerStatus{
	define.ContainerStateCreated,
	define.ContainerStateRemoving,
	define.ContainerStateExited,
	define.ContainerStateConfigured,
}

func waitRemoved(ctrWait containerWaitFn) (int32, error) {
	var code int32
	for {
		c, err := ctrWait(define.ContainerStateExited)
		if errors.Is(err, define.ErrNoSuchCtr) {
			// Make sure to wait until the container has been removed.
			break
		}
		if err != nil {
			return code, err
		}
		// If the container doesn't exist, the return code is -1, so
		// only set it in case of success.
		code = c
	}
	return code, nil
}

func waitNextExit(ctx context.Context, containerName string) (int32, error) {
	runtime := ctx.Value(api.RuntimeKey).(*libpod.Runtime)
	containerEngine := &abi.ContainerEngine{Libpod: runtime}
	eventChannel := make(chan events.ReadResult)
	opts := entities.EventsOptions{
		EventChan: eventChannel,
		Filter:    []string{"event=died", fmt.Sprintf("container=%s", containerName)},
		Stream:    true,
	}

	// ctx is used to cancel the event watching goroutine.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	err := containerEngine.Events(ctx, opts)
	if err != nil {
		return -1, err
	}

	for evt := range eventChannel {
		if evt.Error == nil {
			if evt.Event.ContainerExitCode != nil {
				return int32(*evt.Event.ContainerExitCode), nil
			}
		}
	}
	// If we are here then containerEngine.Events() has exited. This may happen
	// if the request was canceled (e.g. the client closed the connection
	// prematurely) or the server is in the process of shutting down.
	return -1, nil
}

func waitNotRunning(ctrWait containerWaitFn) (int32, error) {
	return ctrWait(notRunningStates...)
}

func containerExists(ctx context.Context, name string) (bool, error) {
	runtime := ctx.Value(api.RuntimeKey).(*libpod.Runtime)
	var containerEngine entities.ContainerEngine = &abi.ContainerEngine{Libpod: runtime}

	var ctrExistsOpts entities.ContainerExistsOptions
	ctrExistRep, err := containerEngine.ContainerExists(ctx, name, ctrExistsOpts)
	if err != nil {
		return false, err
	}
	return ctrExistRep.Value, nil
}

// PSTitles merges CAPS headers from ps output. All ps headers are single words,
// except for the CAPS headers; this function combines those into a single field
// separated by a space.
func PSTitles(output string) []string {
	var titles []string

	for _, title := range strings.Fields(output) {
		switch title {
		case "AMBIENT", "INHERITED", "PERMITTED", "EFFECTIVE", "BOUNDING":
			titles = append(titles, title+" CAPS")
		case "CAPS":
			continue
		default:
			titles = append(titles, title)
		}
	}
	return titles
}