mirror of
https://github.com/containers/podman.git
synced 2025-05-22 01:27:07 +08:00
Address review comments on restart policy
Signed-off-by: Matthew Heon <matthew.heon@pm.me>
This commit is contained in:
@ -42,7 +42,7 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
|
||||
defer span.Finish()
|
||||
}
|
||||
if c.Bool("rm") && c.String("restart") != "" && c.String("restart") != "no" {
|
||||
return nil, nil, errors.Errorf("the --rm flag conflicts with --restart")
|
||||
return nil, nil, errors.Errorf("the --rm option conflicts with --restart")
|
||||
}
|
||||
|
||||
rtc, err := runtime.GetConfig()
|
||||
|
@ -576,8 +576,8 @@ Valid values are:
|
||||
- `always` : Restart containers when they exit, regardless of status, retrying indefinitely
|
||||
|
||||
Please note that restart will not restart containers after a system reboot.
|
||||
If you require this functionality, please add your Podman commands to a systemd unit file, or create an init script for your init system of choice.
|
||||
There is an example of restarting a container with systemd below.
|
||||
If this functionality is required in your environment, you can invoke Podman from a systemd unit file, or create an init script for whichever init system is in use.
|
||||
To generate systemd unit files, please see *podman generate systemd*
|
||||
|
||||
**--rm**=*true*|*false*
|
||||
|
||||
@ -865,21 +865,6 @@ the uids and gids from the host.
|
||||
$ podman create --uidmap 0:30000:7000 --gidmap 0:30000:7000 fedora echo hello
|
||||
```
|
||||
|
||||
### Running a podman container to restart inside of a systemd unit file
|
||||
|
||||
|
||||
```
|
||||
[Unit]
|
||||
Description=My App
|
||||
[Service]
|
||||
Restart=always
|
||||
ExecStart=/usr/bin/podman start -a my_app
|
||||
ExecStop=/usr/bin/podman stop -t 10 my_app
|
||||
KillMode=process
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
### Rootless Containers
|
||||
|
||||
Podman runs as a non root user on most systems. This feature requires that a new enough version of shadow-utils
|
||||
|
@ -598,8 +598,8 @@ Valid values are:
|
||||
- `always` : Restart containers when they exit, regardless of status, retrying indefinitely
|
||||
|
||||
Please note that restart will not restart containers after a system reboot.
|
||||
If you require this functionality, please add your Podman commands to a systemd unit file, or create an init script for your init system of choice.
|
||||
To manage container restart via systemd, see *podman generate systemd*.
|
||||
If this functionality is required in your environment, you can invoke Podman from a systemd unit file, or create an init script for whichever init system is in use.
|
||||
To generate systemd unit files, please see *podman generate systemd*
|
||||
|
||||
**--rm**=*true*|*false*
|
||||
|
||||
|
@ -601,23 +601,18 @@ func (c *Container) Cleanup(ctx context.Context) error {
|
||||
return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
|
||||
}
|
||||
|
||||
// If we have a restart policy match when updating the state, we need to
|
||||
// restart the container.
|
||||
// However, perform a full validation of restart policy first.
|
||||
if c.state.RestartPolicyMatch {
|
||||
if c.config.RestartPolicy == RestartPolicyOnFailure && c.state.ExitCode != 0 {
|
||||
logrus.Debugf("Container %s restart policy trigger: on retry %d (of %d)",
|
||||
c.ID(), c.state.RestartCount, c.config.RestartRetries)
|
||||
}
|
||||
if (c.config.RestartPolicy == RestartPolicyOnFailure && c.state.ExitCode != 0 &&
|
||||
(c.config.RestartRetries > 0 && c.state.RestartCount < c.config.RestartRetries)) ||
|
||||
c.config.RestartPolicy == RestartPolicyAlways {
|
||||
// The container stopped. We need to restart it.
|
||||
return c.handleRestartPolicy(ctx)
|
||||
// Handle restart policy.
|
||||
// Returns a bool indicating whether we actually restarted.
|
||||
// If we did, don't proceed to cleanup - just exit.
|
||||
didRestart, err := c.handleRestartPolicy(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if didRestart {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we aren't hitting restart policy, we perform a normal cleanup
|
||||
// If we didn't restart, we perform a normal cleanup
|
||||
|
||||
// Check if we have active exec sessions
|
||||
if len(c.state.ExecSessions) != 0 {
|
||||
|
@ -213,20 +213,47 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
|
||||
// Handle container restart policy.
|
||||
// This is called when a container has exited, and was not explicitly stopped by
|
||||
// an API call to stop the container or pod it is in.
|
||||
func (c *Container) handleRestartPolicy(ctx context.Context) (err error) {
|
||||
func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, err error) {
|
||||
// If we did not get a restart policy match, exit immediately.
|
||||
// Do the same if we're not a policy that restarts.
|
||||
if !c.state.RestartPolicyMatch ||
|
||||
c.config.RestartPolicy == RestartPolicyNo ||
|
||||
c.config.RestartPolicy == RestartPolicyNone {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If we're RestartPolicyOnFailure, we need to check retries and exit
|
||||
// code.
|
||||
if c.config.RestartPolicy == RestartPolicyOnFailure {
|
||||
if c.state.ExitCode == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If we don't have a max retries set, continue
|
||||
if c.config.RestartRetries > 0 {
|
||||
if c.state.RestartCount < c.config.RestartRetries {
|
||||
logrus.Debugf("Container %s restart policy trigger: on retry %d (of %d)",
|
||||
c.ID(), c.state.RestartCount, c.config.RestartRetries)
|
||||
} else {
|
||||
logrus.Debugf("Container %s restart policy trigger: retries exhausted")
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("Restarting container %s due to restart policy %s", c.ID(), c.config.RestartPolicy)
|
||||
|
||||
// Need to check if dependencies are alive.
|
||||
if err = c.checkDependenciesAndHandleError(ctx); err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Is the container running again?
|
||||
// If so, we don't have to do anything
|
||||
if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
|
||||
return nil
|
||||
return false, nil
|
||||
} else if c.state.State == ContainerStateUnknown {
|
||||
return errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!")
|
||||
return false, errors.Wrapf(ErrInternal, "invalid container state encountered in restart attempt!")
|
||||
}
|
||||
|
||||
c.newContainerEvent(events.Restart)
|
||||
@ -235,7 +262,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (err error) {
|
||||
c.state.RestartCount = c.state.RestartCount + 1
|
||||
logrus.Debugf("Container %s now on retry %d", c.ID(), c.state.RestartCount)
|
||||
if err := c.save(); err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
@ -246,22 +273,25 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (err error) {
|
||||
}
|
||||
}()
|
||||
if err := c.prepare(); err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
if c.state.State == ContainerStateStopped {
|
||||
// Reinitialize the container if we need to
|
||||
if err := c.reinit(ctx, true); err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
} else if c.state.State == ContainerStateConfigured ||
|
||||
c.state.State == ContainerStateExited {
|
||||
// Initialize the container
|
||||
if err := c.init(ctx, true); err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return c.start()
|
||||
if err := c.start(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Sync this container with on-disk state and runtime status
|
||||
|
Reference in New Issue
Block a user