Merge pull request #22322 from mheon/update_the_config
Make `podman update` changes persistent
@@ -1423,7 +1423,7 @@ func AutocompleteEventFilter(cmd *cobra.Command, args []string, toComplete strin
     events.PullError.String(), events.Push.String(), events.Refresh.String(), events.Remove.String(),
     events.Rename.String(), events.Renumber.String(), events.Restart.String(), events.Restore.String(),
     events.Save.String(), events.Start.String(), events.Stop.String(), events.Sync.String(), events.Tag.String(),
-    events.Unmount.String(), events.Unpause.String(), events.Untag.String(),
+    events.Unmount.String(), events.Unpause.String(), events.Untag.String(), events.Update.String(),
     }, cobra.ShellCompDirectiveNoFileComp
 }
 eventTypes := func(_ string) ([]string, cobra.ShellCompDirective) {
@@ -643,7 +643,8 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
     `If a container with the same name exists, replace it`,
 )
 }
-if mode == entities.InfraMode || (mode == entities.CreateMode) { // infra container flags, create should also pick these up
+// Restart is allowed for created, updated, and infra ctr
+if mode == entities.InfraMode || mode == entities.CreateMode || mode == entities.UpdateMode {
     restartFlagName := "restart"
     createFlags.StringVar(
         &cf.Restart,
@@ -651,7 +652,8 @@ func DefineCreateFlags(cmd *cobra.Command, cf *entities.ContainerCreateOptions,
     `Restart policy to apply when a container exits ("always"|"no"|"never"|"on-failure"|"unless-stopped")`,
 )
 _ = cmd.RegisterFlagCompletionFunc(restartFlagName, AutocompleteRestartOption)
+}
+
+if mode == entities.InfraMode || (mode == entities.CreateMode) { // infra container flags, create should also pick these up
     shmSizeFlagName := "shm-size"
     createFlags.String(
         shmSizeFlagName, shmSize(),
@@ -7,9 +7,11 @@ import (

     "github.com/containers/podman/v5/cmd/podman/common"
     "github.com/containers/podman/v5/cmd/podman/registry"
+    "github.com/containers/podman/v5/libpod/define"
     "github.com/containers/podman/v5/pkg/domain/entities"
     "github.com/containers/podman/v5/pkg/specgen"
     "github.com/containers/podman/v5/pkg/specgenutil"
+    "github.com/containers/podman/v5/pkg/util"
     "github.com/opencontainers/runtime-spec/specs-go"
     "github.com/spf13/cobra"
 )
@@ -70,6 +72,17 @@ func update(cmd *cobra.Command, args []string) error {
         return err
     }

+    if updateOpts.Restart != "" {
+        policy, retries, err := util.ParseRestartPolicy(updateOpts.Restart)
+        if err != nil {
+            return err
+        }
+        s.RestartPolicy = policy
+        if policy == define.RestartPolicyOnFailure {
+            s.RestartRetries = &retries
+        }
+    }
+
     // we need to pass the whole specgen since throttle devices are parsed later due to cross compat.
     s.ResourceLimits, err = specgenutil.GetResources(s, &updateOpts)
     if err != nil {
@@ -1,5 +1,5 @@
 ####> This option file is used in:
-####> podman create, pod clone, pod create, run
+####> podman create, pod clone, pod create, run, update
 ####> If file is edited, make sure the changes
 ####> are applicable to all of those.
 #### **--restart**=*policy*
@@ -47,6 +47,7 @@ The *container* event type reports the follow statuses:
 * sync
 * unmount
 * unpause
+* update

 The *pod* event type reports the follow statuses:
 * create
@@ -1,7 +1,7 @@
 % podman-update 1

 ## NAME
-podman\-update - Update the cgroup configuration of a given container
+podman\-update - Update the configuration of a given container

 ## SYNOPSIS
 **podman update** [*options*] *container*
@@ -10,10 +10,8 @@ podman\-update - Update the cgroup configuration of a given container

 ## DESCRIPTION

-Updates the cgroup configuration of an already existing container. The currently supported options are a subset of the
-podman create/run resource limits options. These new options are non-persistent and only last for the current execution of the container; the configuration is honored on its next run.
-This means that this command can only be executed on an already running container and the changes made is erased the next time the container is stopped and restarted, this is to ensure immutability.
-This command takes one argument, a container name or ID, alongside the resource flags to modify the cgroup.
+Updates the configuration of an already existing container, allowing different resource limits to be set.
+The currently supported options are a subset of the podman create/run resource limit options.

 ## OPTIONS

@@ -55,6 +53,8 @@ This command takes one argument, a container name or ID, alongside the resource

 @@option pids-limit

+@@option restart
+
 ## EXAMPLEs

@@ -384,7 +384,7 @@ the exit codes follow the `chroot` standard, see below:
 | [podman-unpause(1)](podman-unpause.1.md) | Unpause one or more containers. |
 | [podman-unshare(1)](podman-unshare.1.md) | Run a command inside of a modified user namespace. |
 | [podman-untag(1)](podman-untag.1.md) | Remove one or more names from a locally-stored image. |
-| [podman-update(1)](podman-update.1.md) | Update the cgroup configuration of a given container. |
+| [podman-update(1)](podman-update.1.md) | Update the configuration of a given container. |
 | [podman-version(1)](podman-version.1.md) | Display the Podman version information. |
 | [podman-volume(1)](podman-volume.1.md) | Simple management tool for volumes. |
 | [podman-wait(1)](podman-wait.1.md) | Wait on one or more containers to stop and print their exit codes. |
@@ -119,12 +119,24 @@ func (c *Container) Start(ctx context.Context, recursive bool) (finalErr error)
 }

 // Update updates the given container.
-// only the cgroup config can be updated and therefore only a linux resource spec is passed.
-func (c *Container) Update(res *spec.LinuxResources) error {
-    if err := c.syncContainer(); err != nil {
-        return err
+// Either resource limits or restart policy can be updated.
+// Either resourcs or restartPolicy must not be nil.
+// If restartRetries is not nil, restartPolicy must be set and must be "on-failure".
+func (c *Container) Update(resources *spec.LinuxResources, restartPolicy *string, restartRetries *uint) error {
+    if !c.batched {
+        c.lock.Lock()
+        defer c.lock.Unlock()
+
+        if err := c.syncContainer(); err != nil {
+            return err
+        }
     }
-    return c.update(res)
+
+    if c.ensureState(define.ContainerStateRemoving) {
+        return fmt.Errorf("container %s is being removed, cannot update: %w", c.ID(), define.ErrCtrStateInvalid)
+    }
+
+    return c.update(resources, restartPolicy, restartRetries)
 }

 // StartAndAttach starts a container and attaches to it.
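The doc comment above pins down the new contract: at least one of resources or restartPolicy must be non-nil, and retries are only accepted together with the on-failure policy. A minimal, hypothetical sketch of a caller inside the podman module follows; the container variable, the 256 MiB figure, and the retry count are illustrative, not part of this change:

```go
package example

import (
	"github.com/containers/podman/v5/libpod"
	"github.com/containers/podman/v5/libpod/define"
	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// applyUpdate is an illustrative caller of the extended Update signature;
// ctr is assumed to be a *libpod.Container already looked up from the runtime.
func applyUpdate(ctr *libpod.Container) error {
	limit := int64(256 * 1024 * 1024) // assumed 256 MiB memory limit
	resources := &spec.LinuxResources{
		Memory: &spec.LinuxMemory{Limit: &limit},
	}

	// Retries are only valid together with the on-failure policy.
	policy := define.RestartPolicyOnFailure
	retries := uint(3)

	return ctr.Update(resources, &policy, &retries)
}
```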
@@ -467,6 +467,9 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named

 restartPolicy := new(define.InspectRestartPolicy)
 restartPolicy.Name = c.config.RestartPolicy
+if restartPolicy.Name == "" {
+    restartPolicy.Name = define.RestartPolicyNo
+}
 restartPolicy.MaximumRetryCount = c.config.RestartRetries
 hostConfig.RestartPolicy = restartPolicy
 if c.config.NoCgroups {
@@ -2514,11 +2514,77 @@ func (c *Container) extractSecretToCtrStorage(secr *ContainerSecret) error {
     return nil
 }

-// update calls the ociRuntime update function to modify a cgroup config after container creation
-func (c *Container) update(resources *spec.LinuxResources) error {
-    if err := c.ociRuntime.UpdateContainer(c, resources); err != nil {
+// Update a container's resources or restart policy after creation.
+// At least one of resources or restartPolicy must not be nil.
+func (c *Container) update(resources *spec.LinuxResources, restartPolicy *string, restartRetries *uint) error {
+    if resources == nil && restartPolicy == nil {
+        return fmt.Errorf("must provide at least one of resources and restartPolicy to update a container: %w", define.ErrInvalidArg)
+    }
+    if restartRetries != nil && restartPolicy == nil {
+        return fmt.Errorf("must provide restart policy if updating restart retries: %w", define.ErrInvalidArg)
+    }
+
+    oldResources := c.config.Spec.Linux.Resources
+    oldRestart := c.config.RestartPolicy
+    oldRetries := c.config.RestartRetries
+
+    if restartPolicy != nil {
+        if err := define.ValidateRestartPolicy(*restartPolicy); err != nil {
+            return err
+        }
+
+        if restartRetries != nil {
+            if *restartPolicy != define.RestartPolicyOnFailure {
+                return fmt.Errorf("cannot set restart policy retries unless policy is on-failure: %w", define.ErrInvalidArg)
+            }
+        }
+
+        c.config.RestartPolicy = *restartPolicy
+        if restartRetries != nil {
+            c.config.RestartRetries = *restartRetries
+        } else {
+            c.config.RestartRetries = 0
+        }
+    }
+
+    if resources != nil {
+        if c.config.Spec.Linux == nil {
+            c.config.Spec.Linux = new(spec.Linux)
+        }
+        c.config.Spec.Linux.Resources = resources
+    }
+
+    if err := c.runtime.state.SafeRewriteContainerConfig(c, "", "", c.config); err != nil {
+        // Assume DB write failed, revert to old resources block
+        c.config.Spec.Linux.Resources = oldResources
+        c.config.RestartPolicy = oldRestart
+        c.config.RestartRetries = oldRetries
         return err
     }
+
+    if c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStatePaused) && resources != nil {
+        // So `podman inspect` on running containers sources its OCI spec from disk.
+        // To keep inspect accurate we need to update the on-disk OCI spec.
+        onDiskSpec, err := c.specFromState()
+        if err != nil {
+            return fmt.Errorf("retrieving on-disk OCI spec to update: %w", err)
+        }
+        if onDiskSpec.Linux == nil {
+            onDiskSpec.Linux = new(spec.Linux)
+        }
+        onDiskSpec.Linux.Resources = resources
+        if err := c.saveSpec(onDiskSpec); err != nil {
+            logrus.Errorf("Unable to update container %s OCI spec - `podman inspect` may not be accurate until container is restarted: %v", c.ID(), err)
+        }
+
+        if err := c.ociRuntime.UpdateContainer(c, resources); err != nil {
+            return err
+        }
+    }
+
     logrus.Debugf("updated container %s", c.ID())
+
+    c.newContainerEvent(events.Update)
+
     return nil
 }
@@ -1,5 +1,9 @@
 package define

+import (
+    "fmt"
+)
+
 // Valid restart policy types.
 const (
     // RestartPolicyNone indicates that no restart policy has been requested
@@ -27,6 +31,16 @@ var RestartPolicyMap = map[string]string{
     RestartPolicyUnlessStopped: RestartPolicyUnlessStopped,
 }

+// Validate that the given string is a valid restart policy.
+func ValidateRestartPolicy(policy string) error {
+    switch policy {
+    case RestartPolicyNone, RestartPolicyNo, RestartPolicyOnFailure, RestartPolicyAlways, RestartPolicyUnlessStopped:
+        return nil
+    default:
+        return fmt.Errorf("%q is not a valid restart policy: %w", policy, ErrInvalidArg)
+    }
+}
+
 // InitContainerTypes
 const (
     // AlwaysInitContainer is an init container that runs on each
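As a quick illustration of the new helper (the wrapper function and its error message below are hypothetical): the empty string, "no", "always", "on-failure", and "unless-stopped" pass, anything else wraps ErrInvalidArg.

```go
package example

import (
	"fmt"

	"github.com/containers/podman/v5/libpod/define"
)

// rejectBadPolicy is a hypothetical wrapper showing how callers such as
// WithRestartPolicy can now delegate validation to define.ValidateRestartPolicy.
func rejectBadPolicy(policy string) error {
	if err := define.ValidateRestartPolicy(policy); err != nil {
		return fmt.Errorf("refusing to apply restart policy: %w", err)
	}
	return nil
}
```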
@@ -208,6 +208,8 @@ const (
     Unpause Status = "unpause"
     // Untag ...
     Untag Status = "untag"
+    // Update indicates that a container's configuration has been modified.
+    Update Status = "update"
 )

 // EventFilter for filtering events
@@ -231,6 +231,8 @@ func StringToStatus(name string) (Status, error) {
         return Unpause, nil
     case Untag.String():
         return Untag, nil
+    case Update.String():
+        return Update, nil
     }
     return "", fmt.Errorf("unknown event status %q", name)
 }
@@ -1392,13 +1392,12 @@ func WithRestartPolicy(policy string) CtrCreateOption {
         return define.ErrCtrFinalized
     }

-    switch policy {
-    case define.RestartPolicyNone, define.RestartPolicyNo, define.RestartPolicyOnFailure, define.RestartPolicyAlways, define.RestartPolicyUnlessStopped:
-        ctr.config.RestartPolicy = policy
-    default:
-        return fmt.Errorf("%q is not a valid restart policy: %w", policy, define.ErrInvalidArg)
+    if err := define.ValidateRestartPolicy(policy); err != nil {
+        return err
     }
+
+    ctr.config.RestartPolicy = policy

     return nil
 }
 }
@@ -28,6 +28,7 @@ import (
     "github.com/docker/docker/api/types/network"
     "github.com/docker/go-connections/nat"
     "github.com/docker/go-units"
+    spec "github.com/opencontainers/runtime-spec/specs-go"
     "github.com/sirupsen/logrus"
 )

@@ -661,3 +662,133 @@ func RenameContainer(w http.ResponseWriter, r *http.Request) {

     utils.WriteResponse(w, http.StatusNoContent, nil)
 }
+
+func UpdateContainer(w http.ResponseWriter, r *http.Request) {
+    runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
+    name := utils.GetName(r)
+
+    ctr, err := runtime.LookupContainer(name)
+    if err != nil {
+        utils.ContainerNotFound(w, name, err)
+        return
+    }
+
+    options := new(container.UpdateConfig)
+    if err := json.NewDecoder(r.Body).Decode(options); err != nil {
+        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decoding request body: %w", err))
+        return
+    }
+
+    // Only handle the bits of update that Docker uses as examples.
+    // For example, the update API claims to be able to update devices for
+    // existing containers... Which I am very dubious about.
+    // Ignore bits like that unless someone asks us for them.
+
+    // We're going to be editing this, so we have to deep-copy to not affect
+    // the container's own resources
+    resources := new(spec.LinuxResources)
+    oldResources := ctr.LinuxResources()
+    if oldResources != nil {
+        if err := libpod.JSONDeepCopy(oldResources, resources); err != nil {
+            utils.Error(w, http.StatusInternalServerError, fmt.Errorf("copying old resource limits: %w", err))
+            return
+        }
+    }
+
+    // CPU limits
+    cpu := resources.CPU
+    if cpu == nil {
+        cpu = new(spec.LinuxCPU)
+    }
+    useCPU := false
+    if options.CPUShares != 0 {
+        shares := uint64(options.CPUShares)
+        cpu.Shares = &shares
+        useCPU = true
+    }
+    if options.CPUPeriod != 0 {
+        period := uint64(options.CPUPeriod)
+        cpu.Period = &period
+        useCPU = true
+    }
+    if options.CPUQuota != 0 {
+        cpu.Quota = &options.CPUQuota
+        useCPU = true
+    }
+    if options.CPURealtimeRuntime != 0 {
+        cpu.RealtimeRuntime = &options.CPURealtimeRuntime
+        useCPU = true
+    }
+    if options.CPURealtimePeriod != 0 {
+        period := uint64(options.CPURealtimePeriod)
+        cpu.RealtimePeriod = &period
+        useCPU = true
+    }
+    if options.CpusetCpus != "" {
+        cpu.Cpus = options.CpusetCpus
+        useCPU = true
+    }
+    if options.CpusetMems != "" {
+        cpu.Mems = options.CpusetMems
+        useCPU = true
+    }
+    if useCPU {
+        resources.CPU = cpu
+    }
+
+    // Memory limits
+    mem := resources.Memory
+    if mem == nil {
+        mem = new(spec.LinuxMemory)
+    }
+    useMem := false
+    if options.Memory != 0 {
+        mem.Limit = &options.Memory
+        useMem = true
+    }
+    if options.MemorySwap != 0 {
+        mem.Swap = &options.MemorySwap
+        useMem = true
+    }
+    if options.MemoryReservation != 0 {
+        mem.Reservation = &options.MemoryReservation
+        useMem = true
+    }
+    if useMem {
+        resources.Memory = mem
+    }
+
+    // PIDs limit
+    if options.PidsLimit != nil {
+        if resources.Pids == nil {
+            resources.Pids = new(spec.LinuxPids)
+        }
+        resources.Pids.Limit = *options.PidsLimit
+    }
+
+    // Blkio Weight
+    if options.BlkioWeight != 0 {
+        if resources.BlockIO == nil {
+            resources.BlockIO = new(spec.LinuxBlockIO)
+        }
+        resources.BlockIO.Weight = &options.BlkioWeight
+    }
+
+    // Restart policy
+    localPolicy := string(options.RestartPolicy.Name)
+    restartPolicy := &localPolicy
+
+    var restartRetries *uint
+    if options.RestartPolicy.MaximumRetryCount != 0 {
+        localRetries := uint(options.RestartPolicy.MaximumRetryCount)
+        restartRetries = &localRetries
+    }
+
+    if err := ctr.Update(resources, restartPolicy, restartRetries); err != nil {
+        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("updating container: %w", err))
+        return
+    }
+
+    responseStruct := container.ContainerUpdateOKBody{}
+    utils.WriteResponse(w, http.StatusOK, responseStruct)
+}
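For reference, a hedged sketch of what a Docker-compatible client might send to this new handler; the daemon address is illustrative (Podman normally listens on a unix socket), and only the fields the handler above reads (CPU, memory, pids, blkio weight, restart policy) have any effect:

```go
package example

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/docker/docker/api/types/container"
)

// postCompatUpdate sketches a request against POST /containers/{name}/update;
// the base URL and the chosen limits are assumptions for illustration only.
func postCompatUpdate(name string) error {
	cfg := container.UpdateConfig{
		Resources: container.Resources{
			Memory:    512 * 1024 * 1024, // bytes
			CPUShares: 512,
		},
		RestartPolicy: container.RestartPolicy{
			Name:              "on-failure",
			MaximumRetryCount: 3,
		},
	}
	body, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	resp, err := http.Post("http://localhost:8080/containers/"+name+"/update",
		"application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
	return nil
}
```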
@@ -402,18 +402,46 @@ func InitContainer(w http.ResponseWriter, r *http.Request) {
 func UpdateContainer(w http.ResponseWriter, r *http.Request) {
     name := utils.GetName(r)
     runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
+    decoder := utils.GetDecoder(r)
+    query := struct {
+        RestartPolicy string `schema:"restartPolicy"`
+        RestartRetries uint `schema:"restartRetries"`
+    }{
+        // override any golang type defaults
+    }
+
+    if err := decoder.Decode(&query, r.URL.Query()); err != nil {
+        utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
+        return
+    }
+
     ctr, err := runtime.LookupContainer(name)
     if err != nil {
         utils.ContainerNotFound(w, name, err)
         return
     }
+
+    var restartPolicy *string
+    var restartRetries *uint
+    if query.RestartPolicy != "" {
+        restartPolicy = &query.RestartPolicy
+        if query.RestartPolicy == define.RestartPolicyOnFailure {
+            restartRetries = &query.RestartRetries
+        } else if query.RestartRetries != 0 {
+            utils.Error(w, http.StatusBadRequest, errors.New("cannot set restart retries unless restart policy is on-failure"))
+            return
+        }
+    } else if query.RestartRetries != 0 {
+        utils.Error(w, http.StatusBadRequest, errors.New("cannot set restart retries unless restart policy is set"))
+        return
+    }
+
     options := &handlers.UpdateEntities{Resources: &specs.LinuxResources{}}
     if err := json.NewDecoder(r.Body).Decode(&options.Resources); err != nil {
         utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decode(): %w", err))
         return
     }
-    err = ctr.Update(options.Resources)
+    err = ctr.Update(options.Resources, restartPolicy, restartRetries)
     if err != nil {
         utils.InternalServerError(w, err)
         return
@@ -4,6 +4,7 @@ package swagger
 import (
     "github.com/containers/podman/v5/pkg/domain/entities"
     "github.com/docker/docker/api/types"
+    "github.com/docker/docker/api/types/container"
 )

 // Details for creating a volume
@@ -48,3 +49,7 @@ type networkConnectRequestLibpod entities.NetworkConnectOptions
 // Network update
 // swagger:model
 type networkUpdateRequestLibpod entities.NetworkUpdateOptions
+
+// Container update
+// swagger:model
+type containerUpdateRequest container.UpdateConfig
@@ -675,6 +675,35 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
     // $ref: "#/responses/internalError"
     r.HandleFunc(VersionedPath("/containers/{name}/rename"), s.APIHandler(compat.RenameContainer)).Methods(http.MethodPost)
     r.HandleFunc("/containers/{name}/rename", s.APIHandler(compat.RenameContainer)).Methods(http.MethodPost)
+    // swagger:operation POST /containers/{name}/update compat ContainerUpdate
+    // ---
+    // tags:
+    // - containers (compat)
+    // summary: Update configuration of an existing container
+    // description: Change configuration settings for an existing container without requiring recreation.
+    // parameters:
+    // - in: path
+    //   name: name
+    //   type: string
+    //   required: true
+    //   description: Full or partial ID or full name of the container to rename
+    // - in: body
+    //   name: resources
+    //   required: false
+    //   description: attributes for updating the container
+    //   schema:
+    //     $ref: "#/definitions/containerUpdateRequest"
+    // produces:
+    // - application/json
+    // responses:
+    //   200:
+    //     description: no error
+    //   404:
+    //     $ref: "#/responses/containerNotFound"
+    //   500:
+    //     $ref: "#/responses/internalError"
+    r.HandleFunc(VersionedPath("/containers/{name}/update"), s.APIHandler(compat.UpdateContainer)).Methods(http.MethodPost)
+    r.HandleFunc("/containers/{name}/update", s.APIHandler(compat.UpdateContainer)).Methods(http.MethodPost)

     /*
         libpod endpoints
@@ -1755,8 +1784,18 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
     //   type: string
     //   required: true
     //   description: Full or partial ID or full name of the container to update
+    // - in: query
+    //   name: restartPolicy
+    //   type: string
+    //   required: false
+    //   description: New restart policy for the container.
+    // - in: query
+    //   name: restartRetries
+    //   type: integer
+    //   required: false
+    //   description: New amount of retries for the container's restart policy. Only allowed if restartPolicy is set to on-failure
     // - in: body
-    //   name: resources
+    //   name: config
     //   description: attributes for updating the container
     //   schema:
     //     $ref: "#/definitions/UpdateEntities"
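A small sketch of how these new query parameters could be encoded by a client of the libpod endpoint; the container name and the values are placeholders, and the resource limits still travel as the JSON request body documented above:

```go
package example

import "net/url"

// updateQuery builds the query string for the libpod container update
// endpoint; only the two new parameters documented above are covered here.
func updateQuery() string {
	params := url.Values{}
	params.Set("restartPolicy", "on-failure")
	params.Set("restartRetries", "3") // only valid together with on-failure
	return "/libpod/containers/myctr/update?" + params.Encode()
}
```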
@@ -1766,6 +1805,8 @@ func (s *APIServer) registerContainersHandlers(r *mux.Router) error {
     // responses:
     //   201:
     //     $ref: "#/responses/containerUpdateResponse"
+    //   400:
+    //     $ref: "#/responses/badParamError"
     //   404:
     //     $ref: "#/responses/containerNotFound"
     //   500:
@@ -3,6 +3,8 @@ package containers
 import (
     "context"
     "net/http"
+    "net/url"
+    "strconv"
     "strings"

     "github.com/containers/podman/v5/pkg/bindings"
@@ -16,12 +18,20 @@ func Update(ctx context.Context, options *types.ContainerUpdateOptions) (string,
         return "", err
     }

+    params := url.Values{}
+    if options.Specgen.RestartPolicy != "" {
+        params.Set("restartPolicy", options.Specgen.RestartPolicy)
+        if options.Specgen.RestartRetries != nil {
+            params.Set("restartRetries", strconv.Itoa(int(*options.Specgen.RestartRetries)))
+        }
+    }
+
     resources, err := jsoniter.MarshalToString(options.Specgen.ResourceLimits)
     if err != nil {
         return "", err
     }
     stringReader := strings.NewReader(resources)
-    response, err := conn.DoRequest(ctx, stringReader, http.MethodPost, "/containers/%s/update", nil, nil, options.NameOrID)
+    response, err := conn.DoRequest(ctx, stringReader, http.MethodPost, "/containers/%s/update", params, nil, options.NameOrID)
     if err != nil {
         return "", err
     }
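A hedged sketch of driving this binding from a remote client; the socket URI, container name, and retry count are illustrative, the import path of the `types` alias is assumed to match this file's imports, and `Specgen` is assumed to be the *specgen.SpecGenerator field this binding reads. Resource limits are omitted for brevity:

```go
package example

import (
	"context"

	"github.com/containers/podman/v5/pkg/bindings"
	"github.com/containers/podman/v5/pkg/bindings/containers"
	"github.com/containers/podman/v5/pkg/domain/entities/types" // assumed path of the types alias
	"github.com/containers/podman/v5/pkg/specgen"
)

// updateRestartPolicy asks the service to persist a new restart policy for
// an existing container via the updated binding.
func updateRestartPolicy(name string) error {
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		return err
	}

	retries := uint(3)
	opts := &types.ContainerUpdateOptions{
		NameOrID: name,
		Specgen: &specgen.SpecGenerator{
			ContainerBasicConfig: specgen.ContainerBasicConfig{
				RestartPolicy:  "on-failure",
				RestartRetries: &retries,
			},
		},
	}
	_, err = containers.Update(ctx, opts)
	return err
}
```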
@@ -1767,7 +1767,12 @@ func (ic *ContainerEngine) ContainerUpdate(ctx context.Context, updateOptions *e
         return "", fmt.Errorf("container not found")
     }

-    if err = containers[0].Update(updateOptions.Specgen.ResourceLimits); err != nil {
+    var restartPolicy *string
+    if updateOptions.Specgen.RestartPolicy != "" {
+        restartPolicy = &updateOptions.Specgen.RestartPolicy
+    }
+
+    if err = containers[0].Update(updateOptions.Specgen.ResourceLimits, restartPolicy, updateOptions.Specgen.RestartRetries); err != nil {
         return "", err
     }
     return containers[0].ID(), nil
@@ -619,7 +619,12 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l
     }
     restartPolicy = s.RestartPolicy
 }
-options = append(options, libpod.WithRestartRetries(retries), libpod.WithRestartPolicy(restartPolicy))
+if restartPolicy != "" {
+    options = append(options, libpod.WithRestartPolicy(restartPolicy))
+}
+if retries != 0 {
+    options = append(options, libpod.WithRestartRetries(retries))
+}
+
 healthCheckSet := false
 if s.ContainerHealthCheckConfig.HealthConfig != nil {
@@ -730,6 +730,12 @@ if root; then
     eid=$(jq -r '.Id' <<<"$output")
     t POST exec/$eid/start 200 $cpu_weight_expect

+    # Now use the compat API
+    echo '{ "Memory": 536870912 }' >${TMPD}/compatupdate.json
+    t POST containers/updateCtr/update ${TMPD}/compatupdate.json 200
+    t GET libpod/containers/updateCtr/json 200 \
+        .HostConfig.Memory=536870912
+
     podman rm -f updateCtr
 fi

@@ -454,6 +454,31 @@ func (p *PodmanTestIntegration) InspectContainer(name string) []define.InspectCo
     return session.InspectContainerToJSON()
 }

+// Pull a single field from a container using `podman inspect --format {{ field }}`,
+// and verify it against the given expected value.
+func (p *PodmanTestIntegration) CheckContainerSingleField(name, field, expected string) {
+    inspect := p.Podman([]string{"inspect", "--format", fmt.Sprintf("{{ %s }}", field), name})
+    inspect.WaitWithDefaultTimeout()
+    ExpectWithOffset(1, inspect).Should(Exit(0))
+    ExpectWithOffset(1, inspect.OutputToString()).To(Equal(expected))
+}
+
+// Check that the contents of a single file in the given container matches the expected value.
+func (p *PodmanTestIntegration) CheckFileInContainer(name, filepath, expected string) {
+    exec := p.Podman([]string{"exec", name, "cat", filepath})
+    exec.WaitWithDefaultTimeout()
+    ExpectWithOffset(1, exec).Should(Exit(0))
+    ExpectWithOffset(1, exec.OutputToString()).To(Equal(expected))
+}
+
+// Check that the contents of a single file in the given container containers the given value.
+func (p *PodmanTestIntegration) CheckFileInContainerSubstring(name, filepath, expected string) {
+    exec := p.Podman([]string{"exec", name, "cat", filepath})
+    exec.WaitWithDefaultTimeout()
+    ExpectWithOffset(1, exec).Should(Exit(0))
+    ExpectWithOffset(1, exec.OutputToString()).To(ContainSubstring(expected))
+}
+
 // StopContainer stops a container with no timeout, ensuring a fast test.
 func (p *PodmanTestIntegration) StopContainer(nameOrID string) {
     stop := p.Podman([]string{"stop", "-t0", nameOrID})
@@ -35,47 +35,25 @@ var _ = Describe("Podman update", func() {
         Expect(session).Should(ExitCleanly())

         // checking cpu quota from --cpus
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("500000"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu/cpu.cfs_quota_us", "500000")

         // checking cpuset-cpus
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset/cpuset.cpus"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(Equal("0"))
+        podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset/cpuset.cpus", "0")

         // checking cpuset-mems
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset/cpuset.mems"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(Equal("0"))
+        podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset/cpuset.mems", "0")

         // checking memory limit
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory/memory.limit_in_bytes"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("1073741824"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory/memory.limit_in_bytes", "1073741824")

         // checking memory-swap
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("2147483648"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes", "2147483648")

         // checking cpu-shares
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu/cpu.shares"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("123"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu/cpu.shares", "123")

         // checking pids-limit
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/pids/pids.max"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("123"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/pids/pids.max", "123")

     })

     It("podman update container unspecified pid limit", func() {
@@ -99,10 +77,7 @@ var _ = Describe("Podman update", func() {
         ctrID = session.OutputToString()

         // checking pids-limit was not changed after update when not specified as an option
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/pids.max"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("max"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/pids.max", "max")
     })

     It("podman update container all options v2", func() {
@@ -138,58 +113,31 @@ var _ = Describe("Podman update", func() {
         ctrID = session.OutputToString()

         // checking cpu quota and period
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu.max"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("500000"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu.max", "500000")

         // checking blkio weight
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/io.bfq.weight"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("123"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/io.bfq.weight", "123")

         // checking device-read/write-bps/iops
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/io.max"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("rbps=10485760 wbps=10485760 riops=1000 wiops=1000"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/io.max", "rbps=10485760 wbps=10485760 riops=1000 wiops=1000")

         // checking cpuset-cpus
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset.cpus"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(Equal("0"))
+        podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset.cpus", "0")

         // checking cpuset-mems
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpuset.mems"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(Equal("0"))
+        podmanTest.CheckFileInContainer(ctrID, "/sys/fs/cgroup/cpuset.mems", "0")

         // checking memory limit
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory.max"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("1073741824"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory.max", "1073741824")

         // checking memory-swap
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/memory.swap.max"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("1073741824"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/memory.swap.max", "1073741824")

         // checking cpu-shares
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu.weight"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("5"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/cpu.weight", "5")

         // checking pids-limit
-        session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/pids.max"})
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("123"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, "/sys/fs/cgroup/pids.max", "123")
     })

     It("podman update keep original resources if not overridden", func() {
@@ -209,13 +157,85 @@ var _ = Describe("Podman update", func() {

         ctrID := session.OutputToString()

+        path := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
         if v2, _ := cgroupv2.Enabled(); v2 {
-            session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu.max"})
-        } else {
-            session = podmanTest.Podman([]string{"exec", ctrID, "cat", "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"})
+            path = "/sys/fs/cgroup/cpu.max"
         }
-        session.WaitWithDefaultTimeout()
-        Expect(session).Should(ExitCleanly())
-        Expect(session.OutputToString()).Should(ContainSubstring("500000"))
+        podmanTest.CheckFileInContainerSubstring(ctrID, path, "500000")
+    })
+
+    It("podman update persists changes", func() {
+        SkipIfCgroupV1("testing flags that only work in cgroup v2")
+        SkipIfRootless("many of these handlers are not enabled while rootless in CI")
+
+        memoryInspect := ".HostConfig.Memory"
+        memoryCgroup := "/sys/fs/cgroup/memory.max"
+        mem512m := "536870912"
+        mem256m := "268435456"
+
+        testCtr := "test-ctr-name"
+        ctr1 := podmanTest.Podman([]string{"run", "-d", "--name", testCtr, "-m", "512m", ALPINE, "top"})
+        ctr1.WaitWithDefaultTimeout()
+        Expect(ctr1).Should(ExitCleanly())
+
+        podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem512m)
+        podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem512m)
+
+        update := podmanTest.Podman([]string{"update", "-m", "256m", testCtr})
+        update.WaitWithDefaultTimeout()
+        Expect(update).Should(ExitCleanly())
+
+        podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem256m)
+        podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem256m)
+
+        restart := podmanTest.Podman([]string{"restart", testCtr})
+        restart.WaitWithDefaultTimeout()
+        Expect(restart).Should(ExitCleanly())
+
+        podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem256m)
+        podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem256m)
+
+        pause := podmanTest.Podman([]string{"pause", testCtr})
+        pause.WaitWithDefaultTimeout()
+        Expect(pause).Should(ExitCleanly())
+
+        update2 := podmanTest.Podman([]string{"update", "-m", "512m", testCtr})
+        update2.WaitWithDefaultTimeout()
+        Expect(update2).Should(ExitCleanly())
+
+        unpause := podmanTest.Podman([]string{"unpause", testCtr})
+        unpause.WaitWithDefaultTimeout()
+        Expect(unpause).Should(ExitCleanly())
+
+        podmanTest.CheckContainerSingleField(testCtr, memoryInspect, mem512m)
+        podmanTest.CheckFileInContainer(testCtr, memoryCgroup, mem512m)
+    })
+
+    It("podman update sets restart policy", func() {
+        restartPolicyName := ".HostConfig.RestartPolicy.Name"
+        restartPolicyRetries := ".HostConfig.RestartPolicy.MaximumRetryCount"
+
+        testCtr := "test-ctr-name"
+        ctr1 := podmanTest.Podman([]string{"run", "-dt", "--name", testCtr, ALPINE, "top"})
+        ctr1.WaitWithDefaultTimeout()
+        Expect(ctr1).Should(ExitCleanly())
+
+        podmanTest.CheckContainerSingleField(testCtr, restartPolicyName, "no")
+        podmanTest.CheckContainerSingleField(testCtr, restartPolicyRetries, "0")
+
+        update1 := podmanTest.Podman([]string{"update", "--restart", "on-failure:5", testCtr})
+        update1.WaitWithDefaultTimeout()
+        Expect(update1).Should(ExitCleanly())
+
+        podmanTest.CheckContainerSingleField(testCtr, restartPolicyName, "on-failure")
+        podmanTest.CheckContainerSingleField(testCtr, restartPolicyRetries, "5")
+
+        update2 := podmanTest.Podman([]string{"update", "--restart", "always", testCtr})
+        update2.WaitWithDefaultTimeout()
+        Expect(update2).Should(ExitCleanly())
+
+        podmanTest.CheckContainerSingleField(testCtr, restartPolicyName, "always")
+        podmanTest.CheckContainerSingleField(testCtr, restartPolicyRetries, "0")
     })
 })
@@ -127,4 +127,26 @@ device-write-iops = /dev/zero:4000 | - | -
     fi
 }

+@test "podman update - set restart policy" {
+    touch ${PODMAN_TMPDIR}/sentinel
+    run_podman run --security-opt label=disable --name testctr -v ${PODMAN_TMPDIR}:/testdir -d $IMAGE sh -c "touch /testdir/alive; while test -e /testdir/sentinel; do sleep 0.1; done;"
+
+    run_podman container inspect testctr --format "{{ .HostConfig.RestartPolicy.Name }}"
+    is "$output" "no"
+
+    run_podman update --restart always testctr
+
+    run_podman container inspect testctr --format "{{ .HostConfig.RestartPolicy.Name }}"
+    is "$output" "always"
+
+    # Ensure the container is alive
+    wait_for_file ${PODMAN_TMPDIR}/alive
+
+    rm -f ${PODMAN_TMPDIR}/alive
+    rm -f ${PODMAN_TMPDIR}/sentinel
+
+    # Restart should ensure that the container comes back up and recreates the file
+    wait_for_file ${PODMAN_TMPDIR}/alive
+}
+
 # vim: filetype=sh