remote API: restore v4 payload in container inspect
The v5 API made a breaking change for podman inspect, which means an old client could no longer parse the result from a new 5.X server. The other direction, new client against old server, already worked. As it turned out, several users ran into this; one way to hit it is an old 4.X podman machine which now pulls a newer coreos with podman 5.0, but other users hit it as well. In order to keep the API working we now have a version check and return the old v4-compatible payload, so an old remote client can still work against a newer server, removing any major breaking change for an old client.

Fixes #22657

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
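Concretely, the incompatibility handled here is in two Config fields: v4 serialized Entrypoint as a single space-joined string and StopSignal as a number (e.g. "Entrypoint": "sh -c", "StopSignal": 15), while v5 returns them structured ("Entrypoint": ["sh", "-c"], "StopSignal": "SIGTERM"). The changes below make the server fall back to the flat v4 shape whenever the request path announces an API version below 5.0.0.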
@@ -89,6 +89,10 @@ type InspectContainerConfig struct {
 	SdNotifyMode string `json:"sdNotifyMode,omitempty"`
 	// SdNotifySocket is the NOTIFY_SOCKET in use by/configured for the container.
 	SdNotifySocket string `json:"sdNotifySocket,omitempty"`
+
+	// V4PodmanCompatMarshal indicates that the json marshaller should
+	// use the old v4 inspect format to keep API compatibility.
+	V4PodmanCompatMarshal bool `json:"-"`
 }

 // UnmarshalJSON allow compatibility with podman V4 API
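Note the json:"-" tag: the new flag is never part of the payload itself, it only tells the server-side marshaller which wire format to produce.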
@@ -136,6 +140,28 @@ func (insp *InspectContainerConfig) UnmarshalJSON(data []byte) error {
 	return nil
 }

+func (insp *InspectContainerConfig) MarshalJSON() ([]byte, error) {
+	// the alias is needed otherwise MarshalJSON would call itself recursively
+	type Alias InspectContainerConfig
+	conf := (*Alias)(insp)
+	if !insp.V4PodmanCompatMarshal {
+		return json.Marshal(conf)
+	}
+
+	type v4InspectContainerConfig struct {
+		Entrypoint string `json:"Entrypoint"`
+		StopSignal uint   `json:"StopSignal"`
+		*Alias
+	}
+	stopSignal, _ := signal.ParseSignal(insp.StopSignal)
+	newConf := &v4InspectContainerConfig{
+		Entrypoint: strings.Join(insp.Entrypoint, " "),
+		StopSignal: uint(stopSignal),
+		Alias:      conf,
+	}
+	return json.Marshal(newConf)
+}
+
 // InspectRestartPolicy holds information about the container's restart policy.
 type InspectRestartPolicy struct {
 	// Name contains the container's restart policy.
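The Alias trick above is the standard Go pattern for customizing MarshalJSON without infinite recursion: a defined type copies the struct's fields but none of its methods, so marshalling the alias uses the default struct encoder. A minimal standalone sketch of the same pattern (simplified types and names, not podman's actual ones):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type Config struct {
	Entrypoint []string `json:"Entrypoint"`
	StopSignal string   `json:"StopSignal"`
	V4Compat   bool     `json:"-"` // server-internal, never serialized
}

func (c *Config) MarshalJSON() ([]byte, error) {
	// Alias has Config's fields but not its MarshalJSON method,
	// so the default struct encoder handles it without recursing.
	type Alias Config
	if !c.V4Compat {
		return json.Marshal((*Alias)(c))
	}
	// Shadow the field whose type changed between the formats;
	// the outer field wins over the one promoted from *Alias.
	return json.Marshal(struct {
		Entrypoint string `json:"Entrypoint"`
		*Alias
	}{
		Entrypoint: strings.Join(c.Entrypoint, " "),
		Alias:      (*Alias)(c),
	})
}

func main() {
	c := &Config{Entrypoint: []string{"sh", "-c"}, StopSignal: "SIGTERM"}
	v5, _ := json.Marshal(c)
	c.V4Compat = true
	v4, _ := json.Marshal(c)
	fmt.Println(string(v5)) // {"Entrypoint":["sh","-c"],"StopSignal":"SIGTERM"}
	fmt.Println(string(v4)) // {"Entrypoint":"sh -c","StopSignal":"SIGTERM"}
}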
@@ -144,6 +144,11 @@ func GetContainer(w http.ResponseWriter, r *http.Request) {
 		utils.InternalServerError(w, err)
 		return
 	}
+	// if the client requests the old v4 payload we should return v4 compatible json
+	if _, err := utils.SupportedVersion(r, ">=5.0.0"); err != nil {
+		data.Config.V4PodmanCompatMarshal = true
+	}
+
 	utils.WriteResponse(w, http.StatusOK, data)
 }
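SupportedVersion compares the version segment of the request path against a semver constraint, so a path like /v4.0.0/libpod/containers/foo/json fails the >=5.0.0 check and flips the compat flag. A minimal dependency-free sketch of the idea, reduced to a major-version check (the helper name here is a hypothetical stand-in, not podman's actual implementation):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// versionRe matches the version prefix clients put in the request
// path, e.g. /v4.0.0/libpod/containers/foo/json.
var versionRe = regexp.MustCompile(`^/v(\d+)\.(\d+)\.(\d+)/`)

// wantsV4Payload is a hypothetical stand-in for the >=5.0.0 check:
// it reports whether the path announces a client major version < 5.
func wantsV4Payload(path string) bool {
	m := versionRe.FindStringSubmatch(path)
	if m == nil {
		return false // no version in the path: serve the current format
	}
	major, _ := strconv.Atoi(m[1])
	return major < 5
}

func main() {
	fmt.Println(wantsV4Payload("/v4.0.0/libpod/containers/foo/json")) // true
	fmt.Println(wantsV4Payload("/v5.0.0/libpod/containers/foo/json")) // false
}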
@@ -8,7 +8,7 @@ podman pull $IMAGE &>/dev/null
 # Ensure clean slate
 podman rm -a -f &>/dev/null

-podman run -d --name foo $IMAGE top
+podman run -d --name foo --entrypoint='["sh","-c"]' $IMAGE top

 # Check exists for none such
 t GET libpod/containers/nonesuch/exists 404
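The test container now gets an explicit two-element entrypoint (--entrypoint accepts the JSON-array form) so that both wire shapes are observable: the array ["sh", "-c"] in v5 and the space-joined string "sh -c" in v4.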
@@ -44,7 +44,15 @@ t GET libpod/containers/foo/json 200 \
   .State.Status=running \
   .ImageName=$IMAGE \
   .Config.Cmd[0]=top \
-  .Name=foo
+  .Name=foo \
+  .Config.StopSignal="SIGTERM" \
+  .Config.Entrypoint[0]="sh" \
+  .Config.Entrypoint[1]="-c"
+
+# now check that a v4 request returns the old compatible output
+t GET /v4.0.0/libpod/containers/foo/json 200 \
+  .Config.StopSignal=15 \
+  .Config.Entrypoint="sh -c"

 # List processes of the container
 t GET libpod/containers/foo/top 200 \
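The numeric 15 in the v4 assertion is simply SIGTERM's signal number on Linux; the marshaller derives it from the string via signal.ParseSignal, as seen above.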
@@ -305,7 +305,7 @@ function t() {
         url=http://$HOST:$PORT
         case "$path" in
             /*) url="$url$path" ;;
-            libpod/*) url="$url/v4.0.0/$path" ;;
+            libpod/*) url="$url/v5.0.0/$path" ;;
             *) url="$url/v1.41/$path" ;;
         esac
     fi
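Since the harness now prefixes bare libpod/* paths with /v5.0.0/, all existing tests run against the new format by default, and the explicit /v4.0.0/ request added above is what exercises the compat branch.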