Merge pull request #21226 from mheon/force_remove_on_system_reset
Fix `podman system reset` with external containers
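Read together, the hunks below retire the separate SystemEngine code path: `podman system reset`, `system renumber`, and `system migrate` now go through the regular ContainerEngine; libpod's reset, renumber, and migrate entry points become public methods (`Reset`, `RenumberLocks`, `Migrate`) that take the runtime alive lock themselves; and `system reset` force-removes external (storage-only) containers during image removal rather than in a separate remove pass.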
@@ -64,6 +64,14 @@ func ContainerEngine() entities.ContainerEngine {
 func NewContainerEngine(cmd *cobra.Command, args []string) (entities.ContainerEngine, error) {
 	if containerEngine == nil {
 		podmanOptions.FlagSet = cmd.Flags()
+		if cmd.Name() == "reset" && cmd.Parent().Name() == "system" {
+			logrus.Debugf("Performing system reset, runtime validation checks will be relaxed")
+			podmanOptions.IsReset = true
+		}
+		if cmd.Name() == "renumber" && cmd.Parent().Name() == "system" {
+			logrus.Debugf("Performing system renumber, runtime validation checks will be relaxed")
+			podmanOptions.IsRenumber = true
+		}
		engine, err := infra.NewContainerEngine(&podmanOptions)
 		if err != nil {
 			return nil, err
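
The check above keys off cobra's command tree rather than a new flag: a command can discover that it is `system reset` by asking for its own and its parent's names. A standalone sketch of that mechanism (toy commands, not the Podman CLI):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "podman"}
	system := &cobra.Command{Use: "system"}
	reset := &cobra.Command{
		Use: "reset",
		Run: func(cmd *cobra.Command, args []string) {
			// Same test as the hunk above: only `system reset` relaxes checks.
			if cmd.Name() == "reset" && cmd.Parent().Name() == "system" {
				fmt.Println("system reset detected; relaxing runtime validation")
			}
		},
	}
	system.AddCommand(reset)
	root.AddCommand(system)

	root.SetArgs([]string{"system", "reset"})
	_ = root.Execute()
}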
@@ -11,7 +11,6 @@ import (
 	"github.com/containers/podman/v4/cmd/podman/validate"
 	"github.com/containers/podman/v4/libpod/define"
 	"github.com/containers/podman/v4/pkg/domain/entities"
-	"github.com/containers/podman/v4/pkg/domain/infra"
 	"github.com/spf13/cobra"
 )
@@ -54,19 +53,7 @@ func init() {
 }
 
 func migrate(cmd *cobra.Command, args []string) {
-	// Shutdown all running engines, `renumber` will hijack repository
-	registry.ContainerEngine().Shutdown(registry.Context())
-	registry.ImageEngine().Shutdown(registry.Context())
-
-	engine, err := infra.NewSystemEngine(entities.MigrateMode, registry.PodmanConfig())
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(define.ExecErrorCodeGeneric)
-	}
-	defer engine.Shutdown(registry.Context())
-
-	err = engine.Migrate(registry.Context(), cmd.Flags(), registry.PodmanConfig(), migrateOptions)
-	if err != nil {
+	if err := registry.ContainerEngine().Migrate(registry.Context(), migrateOptions); err != nil {
 		fmt.Println(err)
 
 		// FIXME change this to return the error like other commands
@@ -10,8 +10,6 @@ import (
 	"github.com/containers/podman/v4/cmd/podman/registry"
 	"github.com/containers/podman/v4/cmd/podman/validate"
 	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/domain/entities"
-	"github.com/containers/podman/v4/pkg/domain/infra"
 	"github.com/spf13/cobra"
 )
@@ -41,19 +39,7 @@ func init() {
 	})
 }
 func renumber(cmd *cobra.Command, args []string) {
-	// Shutdown all running engines, `renumber` will hijack all methods
-	registry.ContainerEngine().Shutdown(registry.Context())
-	registry.ImageEngine().Shutdown(registry.Context())
-
-	engine, err := infra.NewSystemEngine(entities.RenumberMode, registry.PodmanConfig())
-	if err != nil {
-		fmt.Println(err)
-		os.Exit(define.ExecErrorCodeGeneric)
-	}
-	defer engine.Shutdown(registry.Context())
-
-	err = engine.Renumber(registry.Context(), cmd.Flags(), registry.PodmanConfig())
-	if err != nil {
+	if err := registry.ContainerEngine().Renumber(registry.Context()); err != nil {
 		fmt.Println(err)
 		// FIXME change this to return the error like other commands
 		// defer will never run on os.Exit()
@@ -12,9 +12,6 @@ import (
 	"github.com/containers/common/pkg/completion"
 	"github.com/containers/podman/v4/cmd/podman/registry"
 	"github.com/containers/podman/v4/cmd/podman/validate"
 	"github.com/containers/podman/v4/libpod/define"
-	"github.com/containers/podman/v4/pkg/domain/entities"
-	"github.com/containers/podman/v4/pkg/domain/infra"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
@@ -48,10 +45,9 @@ func init() {
 
 func reset(cmd *cobra.Command, args []string) {
 	// Get all the external containers in use
-	listCtn, _ := registry.ContainerEngine().ContainerListExternal(registry.Context())
-	listCtnIds := make([]string, 0, len(listCtn))
-	for _, externalCtn := range listCtn {
-		listCtnIds = append(listCtnIds, externalCtn.ID)
+	listCtn, err := registry.ContainerEngine().ContainerListExternal(registry.Context())
+	if err != nil {
+		logrus.Error(err)
 	}
 	// Prompt for confirmation if --force is not set
 	if !forceFlag {
@@ -90,25 +86,14 @@ func reset(cmd *cobra.Command, args []string) {
 		}
 	}
 
-	// Purge all the external containers with storage
-	_, err := registry.ContainerEngine().ContainerRm(registry.Context(), listCtnIds, entities.RmOptions{Force: true, All: true, Ignore: true, Volumes: true})
-	if err != nil {
-		logrus.Error(err)
-	}
 	// Clean build cache if any
-	err = volumes.CleanCacheMount()
-	if err != nil {
+	if err := volumes.CleanCacheMount(); err != nil {
 		logrus.Error(err)
 	}
-	// Shutdown all running engines, `reset` will hijack repository
-	registry.ContainerEngine().Shutdown(registry.Context())
-	registry.ImageEngine().Shutdown(registry.Context())
-
-	// Do not try to shut the engine down, as a Reset engine is not valid
-	// after its creation.
-	if _, err := infra.NewSystemEngine(entities.ResetMode, registry.PodmanConfig()); err != nil {
+	// ContainerEngine() is unusable and shut down after this.
+	if err := registry.ContainerEngine().Reset(registry.Context()); err != nil {
 		logrus.Error(err)
 		os.Exit(define.ExecErrorCodeGeneric)
 	}
 
 	// Shutdown podman-machine and delete all machine files
@@ -454,9 +454,10 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
 	}
 }
 
-// WithReset instructs libpod to reset all storage to factory defaults.
-// All containers, pods, volumes, images, and networks will be removed.
-// All directories created by Libpod will be removed.
+// WithReset tells Libpod that the runtime will be used to perform a system
+// reset. A number of checks at initialization are relaxed as the runtime is
+// going to be used to remove all containers, pods, volumes, images, and
+// networks.
 func WithReset() RuntimeOption {
 	return func(rt *Runtime) error {
 		if rt.valid {
@@ -469,10 +470,8 @@ func WithReset() RuntimeOption {
 	}
 }
 
-// WithRenumber instructs libpod to perform a lock renumbering while
-// initializing. This will handle migrations from early versions of libpod with
-// file locks to newer versions with SHM locking, as well as changes in the
-// number of configured locks.
+// WithRenumber tells Libpod that the runtime will be used to perform a system
+// renumber. A number of checks on initialization related to locks are relaxed.
 func WithRenumber() RuntimeOption {
 	return func(rt *Runtime) error {
 		if rt.valid {
@@ -485,43 +484,6 @@ func WithRenumber() RuntimeOption {
 	}
 }
 
-// WithMigrate instructs libpod to migrate container configurations to account
-// for changes between Engine versions. All running containers will be stopped
-// during a migration, then restarted after the migration is complete.
-func WithMigrate() RuntimeOption {
-	return func(rt *Runtime) error {
-		if rt.valid {
-			return define.ErrRuntimeFinalized
-		}
-
-		rt.doMigrate = true
-
-		return nil
-	}
-}
-
-// WithMigrateRuntime instructs Engine to change the default OCI runtime on all
-// containers during a migration. This is not used if `MigrateRuntime()` is not
-// also passed.
-// Engine makes no promises that your containers continue to work with the new
-// runtime - migrations between dissimilar runtimes may well break things.
-// Use with caution.
-func WithMigrateRuntime(requestedRuntime string) RuntimeOption {
-	return func(rt *Runtime) error {
-		if rt.valid {
-			return define.ErrRuntimeFinalized
-		}
-
-		if requestedRuntime == "" {
-			return fmt.Errorf("must provide a non-empty name for new runtime: %w", define.ErrInvalidArg)
-		}
-
-		rt.migrateRuntime = requestedRuntime
-
-		return nil
-	}
-}
-
 // WithEventsLogger sets the events backend to use.
 // Currently supported values are "file" for file backend and "journald" for
 // journald backend.
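
WithReset and WithRenumber follow libpod's functional-option pattern: each returns a closure that flags the runtime before it is finalized and refuses to run afterwards. A self-contained sketch of the pattern (illustrative names, not the libpod API):

package main

import (
	"errors"
	"fmt"
)

var errRuntimeFinalized = errors.New("runtime already finalized")

type runtime struct {
	valid   bool
	doReset bool
}

// option mirrors libpod's RuntimeOption shape: a closure applied at build time.
type option func(*runtime) error

func withReset() option {
	return func(rt *runtime) error {
		if rt.valid {
			// Options may only run before the runtime is finalized.
			return errRuntimeFinalized
		}
		rt.doReset = true
		return nil
	}
}

func newRuntime(opts ...option) (*runtime, error) {
	rt := &runtime{}
	for _, o := range opts {
		if err := o(rt); err != nil {
			return nil, err
		}
	}
	rt.valid = true // finalize; later options would now be rejected
	return rt, nil
}

func main() {
	rt, err := newRuntime(withReset())
	fmt.Println(rt.doReset, err) // true <nil>
}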
@@ -90,8 +90,24 @@ func (r *Runtime) removeAllDirs() error {
 	return lastErr
 }
 
-// Reset removes all storage
-func (r *Runtime) reset(ctx context.Context) error {
+// Reset removes all Libpod files.
+// All containers, images, volumes, pods, and networks will be removed.
+// Calls Shutdown(), rendering the runtime unusable after this is run.
+func (r *Runtime) Reset(ctx context.Context) error {
+	// Acquire the alive lock and hold it.
+	// Ensures that we don't let other Podman commands run while we are
+	// removing everything.
+	aliveLock, err := r.getRuntimeAliveLock()
+	if err != nil {
+		return fmt.Errorf("retrieving alive lock: %w", err)
+	}
+	aliveLock.Lock()
+	defer aliveLock.Unlock()
+
+	if !r.valid {
+		return define.ErrRuntimeStopped
+	}
+
 	var timeout *uint
 	pods, err := r.GetAllPods()
 	if err != nil {
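
Reset's first act above is to take the runtime "alive" lock, so no other Podman command can run while everything is being deleted. A minimal standalone sketch of the same pattern with the containers/storage lockfile package (the path here is illustrative):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	// One lock file per runtime tmp dir, as in getRuntimeAliveLock below.
	lk, err := lockfile.GetLockFile(filepath.Join("/tmp", "alive.lck"))
	if err != nil {
		fmt.Println("retrieving alive lock:", err)
		return
	}
	lk.Lock()
	defer lk.Unlock()

	// Destructive work happens here while the lock excludes other processes.
	fmt.Println("holding alive lock")
}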
@@ -117,13 +133,23 @@ func (r *Runtime) reset(ctx context.Context) error {
 	}
 
 	for _, c := range ctrs {
-		if err := r.RemoveContainer(ctx, c, true, true, timeout); err != nil {
-			if err := r.RemoveStorageContainer(c.ID(), true); err != nil {
-				if errors.Is(err, define.ErrNoSuchCtr) {
-					continue
-				}
-				logrus.Errorf("Removing container %s: %v", c.ID(), err)
-			}
+		if ctrs, _, err := r.RemoveContainerAndDependencies(ctx, c, true, true, timeout); err != nil {
+			for ctr, err := range ctrs {
+				logrus.Errorf("Error removing container %s: %v", ctr, err)
+			}
+
+			if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
+				continue
+			}
+
+			logrus.Errorf("Removing container %s: %v", c.ID(), err)
+
+			// There's no point trying a storage container removal
+			// here. High likelihood it doesn't work
+			// (RemoveStorageContainer will refuse to remove if a
+			// container exists in the Libpod database) and, even if
+			// it would, we'll get the container later on during
+			// image removal.
 		}
 	}
 
@@ -131,11 +157,7 @@ func (r *Runtime) reset(ctx context.Context) error {
 		logrus.Errorf("Stopping pause process: %v", err)
 	}
 
-	rmiOptions := &libimage.RemoveImagesOptions{Filters: []string{"readonly=false"}}
-	if _, rmiErrors := r.LibimageRuntime().RemoveImages(ctx, nil, rmiOptions); rmiErrors != nil {
-		return errorhandling.JoinErrors(rmiErrors)
-	}
-
+	// Volumes before images, as volumes can mount images.
 	volumes, err := r.state.AllVolumes()
 	if err != nil {
 		return err
@@ -149,6 +171,19 @@ func (r *Runtime) reset(ctx context.Context) error {
 		}
 	}
 
+	// Set force and ignore.
+	// Ignore shouldn't be necessary, but it seems safer. We want everything
+	// gone anyways...
+	rmiOptions := &libimage.RemoveImagesOptions{
+		Force:               true,
+		Ignore:              true,
+		RemoveContainerFunc: r.RemoveContainersForImageCallback(ctx),
+		Filters:             []string{"readonly=false"},
+	}
+	if _, rmiErrors := r.LibimageRuntime().RemoveImages(ctx, nil, rmiOptions); rmiErrors != nil {
+		return errorhandling.JoinErrors(rmiErrors)
+	}
+
 	// remove all networks
 	nets, err := r.network.NetworkList()
 	if err != nil {
@@ -239,5 +274,13 @@ func (r *Runtime) reset(ctx context.Context) error {
 		prevError = err
 	}
 
+	// Shut down the runtime, it's no longer usable after mass-deletion.
+	if err := r.Shutdown(false); err != nil {
+		if prevError != nil {
+			logrus.Error(prevError)
+		}
+		prevError = err
+	}
+
 	return prevError
 }
@@ -89,22 +89,21 @@ type Runtime struct {
 	// This bool is just needed so that we can set it for netavark interface.
 	syslog bool
 
-	// doReset indicates that the runtime should perform a system reset.
-	// All Podman files will be removed.
+	// doReset indicates that the runtime will perform a system reset.
+	// A reset will remove all containers, pods, volumes, networks, etc.
+	// A number of validation checks are relaxed, or replaced with logic to
+	// remove as much of the runtime as possible if they fail. This ensures
+	// that even a broken Libpod can still be removed via `system reset`.
+	// This does not actually perform a `system reset`. That is done by
+	// calling "Reset()" on the returned runtime.
 	doReset bool
 
-	// doRenumber indicates that the runtime should perform a lock renumber
-	// during initialization.
-	// Once the runtime has been initialized and returned, this variable is
-	// unused.
+	// doRenumber indicates that the runtime will perform a system renumber.
+	// A renumber will reassign lock numbers for all containers, pods, etc.
+	// This will not perform the renumber itself, but will ignore some
+	// errors related to lock initialization so a renumber can be performed
+	// if something has gone wrong.
 	doRenumber bool
 
-	doMigrate bool
-
-	// System migrate can move containers to a new runtime.
-	// We make no promises that these migrated containers work on the new
-	// runtime, though.
-	migrateRuntime string
-
 	// valid indicates whether the runtime is ready to use.
 	// valid is set to true when a runtime is returned from GetRuntime(),
 	// and remains true until the runtime is shut down (rendering its
@@ -230,11 +229,6 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
 
 	runtime.config.CheckCgroupsAndAdjustConfig()
 
-	// If resetting storage, do *not* return a runtime.
-	if runtime.doReset {
-		return nil, nil
-	}
-
 	return runtime, nil
 }
 
@@ -347,10 +341,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	}
 	runtime.conmonPath = cPath
 
-	if runtime.doReset && runtime.doRenumber {
-		return fmt.Errorf("cannot perform system reset while renumbering locks: %w", define.ErrInvalidArg)
-	}
-
 	if runtime.config.Engine.StaticDir == "" {
 		runtime.config.Engine.StaticDir = filepath.Join(runtime.storageConfig.GraphRoot, "libpod")
 		runtime.storageSet.StaticDirSet = true
@@ -555,9 +545,8 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	// We now need to see if the system has restarted
 	// We check for the presence of a file in our tmp directory to verify this
 	// This check must be locked to prevent races
-	runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck")
 	runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
-	aliveLock, err := lockfile.GetLockFile(runtimeAliveLock)
+	aliveLock, err := runtime.getRuntimeAliveLock()
 	if err != nil {
 		return fmt.Errorf("acquiring runtime init lock: %w", err)
 	}
@@ -631,27 +620,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 		return err
 	}
 
-	// If we're resetting storage, do it now.
-	// We will not return a valid runtime.
-	// TODO: Plumb this context out so it can be set.
-	if runtime.doReset {
-		// Mark the runtime as valid, so normal functionality "mostly"
-		// works and we can use regular functions to remove
-		// ctrs/pods/etc
-		runtime.valid = true
-
-		return runtime.reset(context.Background())
-	}
-
-	// If we're renumbering locks, do it now.
-	// It breaks out of normal runtime init, and will not return a valid
-	// runtime.
-	if runtime.doRenumber {
-		if err := runtime.renumberLocks(); err != nil {
-			return err
-		}
-	}
-
 	// If we need to refresh the state, do it now - things are guaranteed to
 	// be set up by now.
 	if doRefresh {
@@ -673,12 +641,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	// further
 	runtime.valid = true
 
-	if runtime.doMigrate {
-		if err := runtime.migrate(); err != nil {
-			return err
-		}
-	}
-
 	return nil
 }
 
@@ -1187,6 +1149,11 @@ func (r *Runtime) graphRootMountedFlag(mounts []spec.Mount) string {
 	return ""
 }
 
+// Returns a copy of the runtime alive lock
+func (r *Runtime) getRuntimeAliveLock() (*lockfile.LockFile, error) {
+	return lockfile.GetLockFile(filepath.Join(r.config.Engine.TmpDir, "alive.lck"))
+}
+
 // Network returns the network interface which is used by the runtime
 func (r *Runtime) Network() nettypes.ContainerNetwork {
 	return r.network
@@ -42,7 +42,24 @@ func (r *Runtime) stopPauseProcess() error {
 	return nil
 }
 
-func (r *Runtime) migrate() error {
+// Migrate stops the rootless pause process and performs any necessary database
+// migrations that are required. It can also migrate all containers to a new OCI
+// runtime, if requested.
+func (r *Runtime) Migrate(newRuntime string) error {
+	// Acquire the alive lock and hold it.
+	// Ensures that we don't let other Podman commands run while we are
+	// rewriting things in the DB.
+	aliveLock, err := r.getRuntimeAliveLock()
+	if err != nil {
+		return fmt.Errorf("retrieving alive lock: %w", err)
+	}
+	aliveLock.Lock()
+	defer aliveLock.Unlock()
+
+	if !r.valid {
+		return define.ErrRuntimeStopped
+	}
+
 	runningContainers, err := r.GetRunningContainers()
 	if err != nil {
 		return err
@@ -62,10 +79,14 @@ func (r *Runtime) migrate() error {
 	}
 
 	// Did the user request a new runtime?
-	runtimeChangeRequested := r.migrateRuntime != ""
-	requestedRuntime, runtimeExists := r.ociRuntimes[r.migrateRuntime]
-	if !runtimeExists && runtimeChangeRequested {
-		return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", r.migrateRuntime, define.ErrInvalidArg)
+	runtimeChangeRequested := newRuntime != ""
+	var requestedRuntime OCIRuntime
+	if runtimeChangeRequested {
+		runtime, exists := r.ociRuntimes[newRuntime]
+		if !exists {
+			return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", newRuntime, define.ErrInvalidArg)
+		}
+		requestedRuntime = runtime
 	}
 
 	for _, ctr := range allCtrs {
|
||||
}
|
||||
|
||||
// Reset runtime
|
||||
if runtimeChangeRequested {
|
||||
logrus.Infof("Resetting container %s runtime to runtime %s", ctr.ID(), r.migrateRuntime)
|
||||
ctr.config.OCIRuntime = r.migrateRuntime
|
||||
if runtimeChangeRequested && ctr.config.OCIRuntime != newRuntime {
|
||||
logrus.Infof("Resetting container %s runtime to runtime %s", ctr.ID(), newRuntime)
|
||||
ctr.config.OCIRuntime = newRuntime
|
||||
ctr.ociRuntime = requestedRuntime
|
||||
|
||||
needsWrite = true
|
||||
|
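
The Migrate hunks above validate the requested runtime with the comma-ok map idiom and now skip containers that already use the target runtime. A toy sketch of that control flow (plain maps standing in for libpod's runtime and container types):

package main

import (
	"errors"
	"fmt"
)

var errInvalidArg = errors.New("invalid argument")

func migrate(ociRuntimes map[string]string, containers map[string]string, newRuntime string) error {
	runtimeChangeRequested := newRuntime != ""
	if runtimeChangeRequested {
		// Comma-ok lookup: fail early if the requested runtime is unknown.
		if _, exists := ociRuntimes[newRuntime]; !exists {
			return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", newRuntime, errInvalidArg)
		}
	}
	for id, rt := range containers {
		// Only rewrite containers whose runtime actually differs.
		if runtimeChangeRequested && rt != newRuntime {
			containers[id] = newRuntime
		}
	}
	return nil
}

func main() {
	runtimes := map[string]string{"crun": "/usr/bin/crun"}
	ctrs := map[string]string{"abc123": "runc"}
	fmt.Println(migrate(runtimes, ctrs, "crun"), ctrs["abc123"]) // <nil> crun
}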
@@ -10,6 +10,6 @@ func (r *Runtime) stopPauseProcess() error {
 	return errors.New("not implemented (*Runtime) stopPauseProcess")
 }
 
-func (r *Runtime) migrate() error {
+func (r *Runtime) Migrate(newRuntime string) error {
 	return errors.New("not implemented (*Runtime) migrate")
 }
@@ -5,18 +5,34 @@ package libpod
 import (
+	"fmt"
+
+	"github.com/containers/podman/v4/libpod/define"
 	"github.com/containers/podman/v4/libpod/events"
 )
 
-// renumberLocks reassigns lock numbers for all containers and pods in the
-// state.
-// TODO: It would be desirable to make it impossible to call this until all
-// other libpod sessions are dead.
-// Possibly use a read-write file lock, with all non-renumber podmans owning the
-// lock as read, renumber attempting to take a write lock?
-// The alternative is some sort of session tracking, and I don't know how
-// reliable that can be.
-func (r *Runtime) renumberLocks() error {
+// RenumberLocks reassigns lock numbers for all containers and pods in the
+// state. This should NOT be run while there are other Libpod
+func (r *Runtime) RenumberLocks() error {
+	// TODO: It would be desirable to make it impossible to call this until all
+	// other libpod sessions are dead.
+	// Possibly use a read-write file lock, with all non-renumber podmans owning the
+	// lock as read, renumber attempting to take a write lock?
+	// The alternative is some sort of session tracking, and I don't know how
+	// reliable that can be.
+
+	// Acquire the alive lock and hold it.
+	// Ensures that we don't let other Podman commands run while we are
+	// changing around lock numbers.
+	aliveLock, err := r.getRuntimeAliveLock()
+	if err != nil {
+		return fmt.Errorf("retrieving alive lock: %w", err)
+	}
+	aliveLock.Lock()
+	defer aliveLock.Unlock()
+
+	if !r.valid {
+		return define.ErrRuntimeStopped
+	}
+
 	// Start off by deallocating all locks
 	if err := r.lockManager.FreeAllLocks(); err != nil {
 		return err
@@ -76,5 +92,5 @@ func (r *Runtime) renumberLocks() error {
 
 	r.NewSystemEvent(events.Renumber)
 
-	return nil
+	return r.Shutdown(false)
 }
@@ -14,12 +14,6 @@ type EngineSetup string
 const (
 	ABIMode    = EngineMode("abi")
 	TunnelMode = EngineMode("tunnel")
-
-	MigrateMode  = EngineSetup("migrate")
-	NoFDsMode    = EngineSetup("disablefds")
-	NormalMode   = EngineSetup("normal")
-	RenumberMode = EngineSetup("renumber")
-	ResetMode    = EngineSetup("reset")
 )
 
 // Convert EngineMode to String
@@ -42,6 +36,8 @@ type PodmanConfig struct {
 	EngineMode     EngineMode // ABI or Tunneling mode
 	HooksDir       []string
 	Identity       string // ssh identity for connecting to server
+	IsRenumber     bool   // Is this a system renumber command? If so, a number of checks will be relaxed
+	IsReset        bool   // Is this a system reset command? If so, a number of checks will be skipped/omitted
 	MaxWorks       int    // maximum number of parallel threads
 	MemoryProfile  string // Hidden: Should memory profile be taken
 	RegistriesConf string // allows for specifying a custom registries.conf
@@ -63,6 +63,7 @@ type ContainerEngine interface { //nolint:interfacebloat
 	Info(ctx context.Context) (*define.Info, error)
 	KubeApply(ctx context.Context, body io.Reader, opts ApplyOptions) error
 	Locks(ctx context.Context) (*LocksReport, error)
+	Migrate(ctx context.Context, options SystemMigrateOptions) error
 	NetworkConnect(ctx context.Context, networkname string, options NetworkConnectOptions) error
 	NetworkCreate(ctx context.Context, network types.Network, createOptions *types.NetworkCreateOptions) (*types.Network, error)
 	NetworkUpdate(ctx context.Context, networkname string, options NetworkUpdateOptions) error
@@ -91,6 +92,8 @@ type ContainerEngine interface { //nolint:interfacebloat
 	PodStop(ctx context.Context, namesOrIds []string, options PodStopOptions) ([]*PodStopReport, error)
 	PodTop(ctx context.Context, options PodTopOptions) (*StringSliceReport, error)
 	PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error)
+	Renumber(ctx context.Context) error
+	Reset(ctx context.Context) error
 	SetupRootless(ctx context.Context, noMoveProcess bool) error
 	SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) (*SecretCreateReport, error)
 	SecretInspect(ctx context.Context, nameOrIDs []string, options SecretInspectOptions) ([]*SecretInfoReport, []error, error)
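
Hoisting Migrate, Renumber, and Reset onto ContainerEngine lets each implementation choose its behavior: the ABI engine forwards to libpod, while the tunnel engine rejects the call (see the tunnel/system hunk further below). A compact sketch of that shape (illustrative names):

package main

import (
	"context"
	"errors"
	"fmt"
)

type containerEngine interface {
	Reset(ctx context.Context) error
}

type abiEngine struct{}

func (abiEngine) Reset(ctx context.Context) error {
	return nil // would forward to libpod's Runtime.Reset
}

type tunnelEngine struct{}

func (tunnelEngine) Reset(ctx context.Context) error {
	return errors.New("system reset is not supported on remote clients")
}

func main() {
	// Callers hold the interface; local and remote behavior diverge cleanly.
	for _, e := range []containerEngine{abiEngine{}, tunnelEngine{}} {
		fmt.Println(e.Reset(context.Background()))
	}
}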
@@ -1,14 +0,0 @@
-package entities
-
-import (
-	"context"
-
-	"github.com/spf13/pflag"
-)
-
-type SystemEngine interface {
-	Renumber(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig) error
-	Migrate(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig, options SystemMigrateOptions) error
-	Reset(ctx context.Context) error
-	Shutdown(ctx context.Context)
-}
@@ -16,7 +16,6 @@ import (
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/sirupsen/logrus"
-	"github.com/spf13/pflag"
 )
 
 func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
@@ -277,16 +276,16 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
 	}, nil
 }
 
-func (se *SystemEngine) Reset(ctx context.Context) error {
-	return nil
+func (ic *ContainerEngine) Reset(ctx context.Context) error {
+	return ic.Libpod.Reset(ctx)
 }
 
-func (se *SystemEngine) Renumber(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig) error {
-	return nil
+func (ic *ContainerEngine) Renumber(ctx context.Context) error {
+	return ic.Libpod.RenumberLocks()
 }
 
-func (se SystemEngine) Migrate(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig, options entities.SystemMigrateOptions) error {
-	return nil
+func (ic *ContainerEngine) Migrate(ctx context.Context, options entities.SystemMigrateOptions) error {
+	return ic.Libpod.Migrate(options.NewRuntime)
 }
 
 func (se SystemEngine) Shutdown(ctx context.Context) {
@@ -6,10 +6,8 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/containers/podman/v4/libpod"
 	"github.com/containers/podman/v4/pkg/bindings"
 	"github.com/containers/podman/v4/pkg/domain/entities"
 	"github.com/containers/podman/v4/pkg/domain/infra/abi"
 	"github.com/containers/podman/v4/pkg/domain/infra/tunnel"
 )
@@ -42,32 +40,3 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
 	}
 	return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
 }
-
-// NewSystemEngine factory provides a libpod runtime for specialized system operations
-func NewSystemEngine(setup entities.EngineSetup, facts *entities.PodmanConfig) (entities.SystemEngine, error) {
-	switch facts.EngineMode {
-	case entities.ABIMode:
-		var r *libpod.Runtime
-		var err error
-		switch setup {
-		case entities.NormalMode:
-			r, err = GetRuntime(context.Background(), facts.FlagSet, facts)
-		case entities.RenumberMode:
-			r, err = GetRuntimeRenumber(context.Background(), facts.FlagSet, facts)
-		case entities.ResetMode:
-			r, err = GetRuntimeReset(context.Background(), facts.FlagSet, facts)
-		case entities.MigrateMode:
-			name, flagErr := facts.FlagSet.GetString("new-runtime")
-			if flagErr != nil {
-				return nil, flagErr
-			}
-			r, err = GetRuntimeMigrate(context.Background(), facts.FlagSet, facts, name)
-		case entities.NoFDsMode:
-			r, err = GetRuntimeDisableFDs(context.Background(), facts.FlagSet, facts)
-		}
-		return &abi.SystemEngine{Libpod: r}, err
-	case entities.TunnelMode:
-		return nil, fmt.Errorf("tunnel system runtime not supported")
-	}
-	return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
-}
@@ -1,14 +0,0 @@
-//go:build remote
-
-package infra
-
-import (
-	"errors"
-
-	"github.com/containers/podman/v4/pkg/domain/entities"
-)
-
-// NewSystemEngine factory provides a libpod runtime for specialized system operations
-func NewSystemEngine(setup entities.EngineSetup, facts *entities.PodmanConfig) (entities.SystemEngine, error) {
-	return nil, errors.New("not implemented")
-}
@@ -33,72 +33,25 @@
 )
 
 type engineOpts struct {
-	name     string
-	renumber bool
-	migrate  bool
 	withFDS  bool
 	reset    bool
+	renumber bool
 	config   *entities.PodmanConfig
 }
 
-// GetRuntimeMigrate gets a libpod runtime that will perform a migration of existing containers
-func GetRuntimeMigrate(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig, newRuntime string) (*libpod.Runtime, error) {
-	return getRuntime(ctx, fs, &engineOpts{
-		name:     newRuntime,
-		renumber: false,
-		migrate:  true,
-		withFDS:  true,
-		reset:    false,
-		config:   cfg,
-	})
-}
-
-// GetRuntimeDisableFDs gets a libpod runtime that will disable sd notify
-func GetRuntimeDisableFDs(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
-	return getRuntime(ctx, fs, &engineOpts{
-		renumber: false,
-		migrate:  false,
-		withFDS:  false,
-		reset:    false,
-		config:   cfg,
-	})
-}
-
-// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber
-func GetRuntimeRenumber(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
-	return getRuntime(ctx, fs, &engineOpts{
-		renumber: true,
-		migrate:  false,
-		withFDS:  true,
-		reset:    false,
-		config:   cfg,
-	})
-}
-
 // GetRuntime generates a new libpod runtime configured by command line options
 func GetRuntime(ctx context.Context, flags *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
 	runtimeSync.Do(func() {
 		runtimeLib, runtimeErr = getRuntime(ctx, flags, &engineOpts{
-			renumber: false,
-			migrate:  false,
 			withFDS:  true,
-			reset:    false,
+			reset:    cfg.IsReset,
+			renumber: cfg.IsRenumber,
 			config:   cfg,
 		})
 	})
 	return runtimeLib, runtimeErr
 }
 
-func GetRuntimeReset(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
-	return getRuntime(ctx, fs, &engineOpts{
-		renumber: false,
-		migrate:  false,
-		withFDS:  true,
-		reset:    true,
-		config:   cfg,
-	})
-}
-
 func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpod.Runtime, error) {
 	options := []libpod.RuntimeOption{}
 	storageOpts := types.StoreOptions{}
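
GetRuntime above builds the runtime inside runtimeSync.Do, which in this pattern is a sync.Once: the first caller constructs the runtime and every later call returns the same memoized instance, which is why IsReset and IsRenumber must be set on the config before the first call. A standalone sketch of the memoization (illustrative names):

package main

import (
	"fmt"
	"sync"
)

var (
	runtimeSync sync.Once
	runtimeLib  string // stands in for *libpod.Runtime
	runtimeErr  error
)

func getRuntime() (string, error) {
	runtimeSync.Do(func() {
		// Constructed exactly once; options chosen here are permanent.
		runtimeLib, runtimeErr = "runtime-instance", nil
	})
	return runtimeLib, runtimeErr
}

func main() {
	a, _ := getRuntime()
	b, _ := getRuntime()
	fmt.Println(a == b) // true: both calls return the same memoized value
}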
@@ -161,17 +114,10 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo
 	if fs.Changed("transient-store") {
 		options = append(options, libpod.WithTransientStore(cfg.TransientStore))
 	}
-	if opts.migrate {
-		options = append(options, libpod.WithMigrate())
-		if opts.name != "" {
-			options = append(options, libpod.WithMigrateRuntime(opts.name))
-		}
-	}
-
 	if opts.reset {
 		options = append(options, libpod.WithReset())
 	}
 
 	if opts.renumber {
 		options = append(options, libpod.WithRenumber())
 	}
@@ -23,6 +23,18 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, opts entities.System
 	return system.Prune(ic.ClientCtx, options)
 }
 
+func (ic *ContainerEngine) Migrate(ctx context.Context, options entities.SystemMigrateOptions) error {
+	return errors.New("runtime migration is not supported on remote clients")
+}
+
+func (ic *ContainerEngine) Renumber(ctx context.Context) error {
+	return errors.New("lock renumbering is not supported on remote clients")
+}
+
+func (ic *ContainerEngine) Reset(ctx context.Context) error {
+	return errors.New("system reset is not supported on remote clients")
+}
+
 func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.SystemDfOptions) (*entities.SystemDfReport, error) {
 	return system.DiskUsage(ic.ClientCtx, nil)
 }
@@ -1,6 +1,8 @@
 package integration
 
 import (
+	"fmt"
+
 	. "github.com/containers/podman/v4/test/utils"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -74,4 +76,32 @@ var _ = Describe("podman system reset", Serial, func() {
 			Expect(session.OutputToStringArray()).To(BeEmpty())
 		}
 	})
+
+	It("system reset completely removes container", func() {
+		SkipIfRemote("system reset not supported on podman --remote")
+		useCustomNetworkDir(podmanTest, tempdir)
+
+		rmi := podmanTest.Podman([]string{"rmi", "--force", "--all"})
+		rmi.WaitWithDefaultTimeout()
+		Expect(rmi).Should(ExitCleanly())
+		podmanTest.AddImageToRWStore(ALPINE)
+
+		// The name ensures that we have a Libpod resource that we'll
+		// hit if we recreate the container after a reset and it still
+		// exists. The port does the same for a system-level resource.
+		ctrName := "testctr"
+		port1 := GetPort()
+		port2 := GetPort()
+		session := podmanTest.Podman([]string{"run", "--name", ctrName, "-p", fmt.Sprintf("%d:%d", port1, port2), "-d", ALPINE, "top"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(ExitCleanly())
+
+		reset := podmanTest.Podman([]string{"system", "reset", "--force"})
+		reset.WaitWithDefaultTimeout()
+		Expect(reset).Should(ExitCleanly())
+
+		session2 := podmanTest.Podman([]string{"run", "--name", ctrName, "-p", fmt.Sprintf("%d:%d", port1, port2), "-d", ALPINE, "top"})
+		session2.WaitWithDefaultTimeout()
+		Expect(session2).Should(ExitCleanly())
+	})
 })