mirror of
https://github.com/containers/podman.git
synced 2025-05-17 23:26:08 +08:00
Remove Libpod special-init conditions
Before this, for some special Podman commands (system reset, system migrate, system renumber), Podman would create a first Libpod runtime to do initialization and flag parsing, then stop that runtime and create an entirely new runtime to perform the actual task. This is an artifact of the pre-Podman 2.0 days, when there was almost no indirection between Libpod and the CLI, and we only used one runtime because we didn't need a second runtime for flag parsing and basic init. This system was clunky, and apparently, very buggy. When we migrated to SQLite, some logic was introduced where we'd select a different database location based on whether or not Libpod's StaticDir was manually set - which differed between the first invocation of Libpod and the second. So we'd get a different database for some commands (like `system reset`) and they would not be able to see existing containers, meaning they would not function properly. The immediate cause is obviously the SQLite behavior, but I'm certain there's a lot more baggage hiding behind this multiple Libpod runtime logic, so let's just refactor it out. It doesn't make sense, and complicates the code. Instead, make Reset, Renumber, and Migrate methods of the libpod Runtime. For Reset and Renumber, we can shut the runtime down afterwards to achieve the desired effect (no valid runtime after). Then pipe all of them through the ContainerEngine so cmd/podman can access them. As part of this, remove the SystemEngine part of pkg/domain. This was supposed to encompass these "special" commands, but every command in SystemEngine is actually a ContainerEngine command. Reset, Renumber, Migrate - they all need a full Libpod and access to all containers. There's no point to a separate engine if it just wraps Libpod in the exact same way as ContainerEngine. This consolidation saves us a bit more code and complexity. 
Signed-off-by: Matt Heon <mheon@redhat.com> <MH: Fixed cherry-pick conflicts for v4.8 branch> Signed-off-by: Matt Heon <mheon@redhat.com>
This commit is contained in:
@ -64,6 +64,14 @@ func ContainerEngine() entities.ContainerEngine {
|
||||
func NewContainerEngine(cmd *cobra.Command, args []string) (entities.ContainerEngine, error) {
|
||||
if containerEngine == nil {
|
||||
podmanOptions.FlagSet = cmd.Flags()
|
||||
if cmd.Name() == "reset" && cmd.Parent().Name() == "system" {
|
||||
logrus.Debugf("Performing system reset, runtime validation checks will be relaxed")
|
||||
podmanOptions.IsReset = true
|
||||
}
|
||||
if cmd.Name() == "renumber" && cmd.Parent().Name() == "system" {
|
||||
logrus.Debugf("Performing system renumber, runtime validation checks will be relaxed")
|
||||
podmanOptions.IsRenumber = true
|
||||
}
|
||||
engine, err := infra.NewContainerEngine(&podmanOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
"github.com/containers/podman/v4/cmd/podman/validate"
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/containers/podman/v4/pkg/domain/entities"
|
||||
"github.com/containers/podman/v4/pkg/domain/infra"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@ -55,19 +54,7 @@ func init() {
|
||||
}
|
||||
|
||||
func migrate(cmd *cobra.Command, args []string) {
|
||||
// Shutdown all running engines, `migrate` will hijack repository
|
||||
registry.ContainerEngine().Shutdown(registry.Context())
|
||||
registry.ImageEngine().Shutdown(registry.Context())
|
||||
|
||||
engine, err := infra.NewSystemEngine(entities.MigrateMode, registry.PodmanConfig())
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(define.ExecErrorCodeGeneric)
|
||||
}
|
||||
defer engine.Shutdown(registry.Context())
|
||||
|
||||
err = engine.Migrate(registry.Context(), cmd.Flags(), registry.PodmanConfig(), migrateOptions)
|
||||
if err != nil {
|
||||
if err := registry.ContainerEngine().Migrate(registry.Context(), migrateOptions); err != nil {
|
||||
fmt.Println(err)
|
||||
|
||||
// FIXME change this to return the error like other commands
|
||||
|
@ -11,8 +11,6 @@ import (
|
||||
"github.com/containers/podman/v4/cmd/podman/registry"
|
||||
"github.com/containers/podman/v4/cmd/podman/validate"
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/containers/podman/v4/pkg/domain/entities"
|
||||
"github.com/containers/podman/v4/pkg/domain/infra"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@ -42,19 +40,7 @@ func init() {
|
||||
})
|
||||
}
|
||||
func renumber(cmd *cobra.Command, args []string) {
|
||||
// Shutdown all running engines, `renumber` will hijack all methods
|
||||
registry.ContainerEngine().Shutdown(registry.Context())
|
||||
registry.ImageEngine().Shutdown(registry.Context())
|
||||
|
||||
engine, err := infra.NewSystemEngine(entities.RenumberMode, registry.PodmanConfig())
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(define.ExecErrorCodeGeneric)
|
||||
}
|
||||
defer engine.Shutdown(registry.Context())
|
||||
|
||||
err = engine.Renumber(registry.Context(), cmd.Flags(), registry.PodmanConfig())
|
||||
if err != nil {
|
||||
if err := registry.ContainerEngine().Renumber(registry.Context()); err != nil {
|
||||
fmt.Println(err)
|
||||
// FIXME change this to return the error like other commands
|
||||
// defer will never run on os.Exit()
|
||||
|
@ -13,9 +13,6 @@ import (
|
||||
"github.com/containers/common/pkg/completion"
|
||||
"github.com/containers/podman/v4/cmd/podman/registry"
|
||||
"github.com/containers/podman/v4/cmd/podman/validate"
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/containers/podman/v4/pkg/domain/entities"
|
||||
"github.com/containers/podman/v4/pkg/domain/infra"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@ -94,15 +91,10 @@ func reset(cmd *cobra.Command, args []string) {
|
||||
if err := volumes.CleanCacheMount(); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
// Shutdown all running engines, `reset` will hijack repository
|
||||
registry.ContainerEngine().Shutdown(registry.Context())
|
||||
registry.ImageEngine().Shutdown(registry.Context())
|
||||
|
||||
// Do not try to shut the engine down, as a Reset engine is not valid
|
||||
// after its creation.
|
||||
if _, err := infra.NewSystemEngine(entities.ResetMode, registry.PodmanConfig()); err != nil {
|
||||
// ContainerEngine() is unusable and shut down after this.
|
||||
if err := registry.ContainerEngine().Reset(registry.Context()); err != nil {
|
||||
logrus.Error(err)
|
||||
os.Exit(define.ExecErrorCodeGeneric)
|
||||
}
|
||||
|
||||
// Shutdown podman-machine and delete all machine files
|
||||
|
@ -465,9 +465,10 @@ func WithDefaultInfraCommand(cmd string) RuntimeOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithReset instructs libpod to reset all storage to factory defaults.
|
||||
// All containers, pods, volumes, images, and networks will be removed.
|
||||
// All directories created by Libpod will be removed.
|
||||
// WithReset tells Libpod that the runtime will be used to perform a system
|
||||
// reset. A number of checks at initialization are relaxed as the runtime is
|
||||
// going to be used to remove all containers, pods, volumes, images, and
|
||||
// networks.
|
||||
func WithReset() RuntimeOption {
|
||||
return func(rt *Runtime) error {
|
||||
if rt.valid {
|
||||
@ -480,10 +481,8 @@ func WithReset() RuntimeOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithRenumber instructs libpod to perform a lock renumbering while
|
||||
// initializing. This will handle migrations from early versions of libpod with
|
||||
// file locks to newer versions with SHM locking, as well as changes in the
|
||||
// number of configured locks.
|
||||
// WithRenumber tells Libpod that the runtime will be used to perform a system
|
||||
// renumber. A number of checks on initialization related to locks are relaxed.
|
||||
func WithRenumber() RuntimeOption {
|
||||
return func(rt *Runtime) error {
|
||||
if rt.valid {
|
||||
@ -496,43 +495,6 @@ func WithRenumber() RuntimeOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithMigrate instructs libpod to migrate container configurations to account
|
||||
// for changes between Engine versions. All running containers will be stopped
|
||||
// during a migration, then restarted after the migration is complete.
|
||||
func WithMigrate() RuntimeOption {
|
||||
return func(rt *Runtime) error {
|
||||
if rt.valid {
|
||||
return define.ErrRuntimeFinalized
|
||||
}
|
||||
|
||||
rt.doMigrate = true
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMigrateRuntime instructs Engine to change the default OCI runtime on all
|
||||
// containers during a migration. This is not used if `MigrateRuntime()` is not
|
||||
// also passed.
|
||||
// Engine makes no promises that your containers continue to work with the new
|
||||
// runtime - migrations between dissimilar runtimes may well break things.
|
||||
// Use with caution.
|
||||
func WithMigrateRuntime(requestedRuntime string) RuntimeOption {
|
||||
return func(rt *Runtime) error {
|
||||
if rt.valid {
|
||||
return define.ErrRuntimeFinalized
|
||||
}
|
||||
|
||||
if requestedRuntime == "" {
|
||||
return fmt.Errorf("must provide a non-empty name for new runtime: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
rt.migrateRuntime = requestedRuntime
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithEventsLogger sets the events backend to use.
|
||||
// Currently supported values are "file" for file backend and "journald" for
|
||||
// journald backend.
|
||||
|
@ -92,8 +92,24 @@ func (r *Runtime) removeAllDirs() error {
|
||||
return lastErr
|
||||
}
|
||||
|
||||
// Reset removes all storage
|
||||
func (r *Runtime) reset(ctx context.Context) error {
|
||||
// Reset removes all Libpod files.
|
||||
// All containers, images, volumes, pods, and networks will be removed.
|
||||
// Calls Shutdown(), rendering the runtime unusable after this is run.
|
||||
func (r *Runtime) Reset(ctx context.Context) error {
|
||||
// Acquire the alive lock and hold it.
|
||||
// Ensures that we don't let other Podman commands run while we are
|
||||
// removing everything.
|
||||
aliveLock, err := r.getRuntimeAliveLock()
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving alive lock: %w", err)
|
||||
}
|
||||
aliveLock.Lock()
|
||||
defer aliveLock.Unlock()
|
||||
|
||||
if !r.valid {
|
||||
return define.ErrRuntimeStopped
|
||||
}
|
||||
|
||||
var timeout *uint
|
||||
pods, err := r.GetAllPods()
|
||||
if err != nil {
|
||||
@ -260,5 +276,13 @@ func (r *Runtime) reset(ctx context.Context) error {
|
||||
prevError = err
|
||||
}
|
||||
|
||||
// Shut down the runtime, it's no longer usable after mass-deletion.
|
||||
if err := r.Shutdown(false); err != nil {
|
||||
if prevError != nil {
|
||||
logrus.Error(prevError)
|
||||
}
|
||||
prevError = err
|
||||
}
|
||||
|
||||
return prevError
|
||||
}
|
||||
|
@ -90,22 +90,21 @@ type Runtime struct {
|
||||
// This bool is just needed so that we can set it for netavark interface.
|
||||
syslog bool
|
||||
|
||||
// doReset indicates that the runtime should perform a system reset.
|
||||
// All Podman files will be removed.
|
||||
// doReset indicates that the runtime will perform a system reset.
|
||||
// A reset will remove all containers, pods, volumes, networks, etc.
|
||||
// A number of validation checks are relaxed, or replaced with logic to
|
||||
// remove as much of the runtime as possible if they fail. This ensures
|
||||
// that even a broken Libpod can still be removed via `system reset`.
|
||||
// This does not actually perform a `system reset`. That is done by
|
||||
// calling "Reset()" on the returned runtime.
|
||||
doReset bool
|
||||
|
||||
// doRenumber indicates that the runtime should perform a lock renumber
|
||||
// during initialization.
|
||||
// Once the runtime has been initialized and returned, this variable is
|
||||
// unused.
|
||||
// doRenumber indicates that the runtime will perform a system renumber.
|
||||
// A renumber will reassign lock numbers for all containers, pods, etc.
|
||||
// This will not perform the renumber itself, but will ignore some
|
||||
// errors related to lock initialization so a renumber can be performed
|
||||
// if something has gone wrong.
|
||||
doRenumber bool
|
||||
|
||||
doMigrate bool
|
||||
// System migrate can move containers to a new runtime.
|
||||
// We make no promises that these migrated containers work on the new
|
||||
// runtime, though.
|
||||
migrateRuntime string
|
||||
|
||||
// valid indicates whether the runtime is ready to use.
|
||||
// valid is set to true when a runtime is returned from GetRuntime(),
|
||||
// and remains true until the runtime is shut down (rendering its
|
||||
@ -233,11 +232,6 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
|
||||
|
||||
runtime.config.CheckCgroupsAndAdjustConfig()
|
||||
|
||||
// If resetting storage, do *not* return a runtime.
|
||||
if runtime.doReset {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return runtime, nil
|
||||
}
|
||||
|
||||
@ -563,9 +557,8 @@ func makeRuntime(runtime *Runtime) (retErr error) {
|
||||
// We now need to see if the system has restarted
|
||||
// We check for the presence of a file in our tmp directory to verify this
|
||||
// This check must be locked to prevent races
|
||||
runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck")
|
||||
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
|
||||
aliveLock, err := lockfile.GetLockFile(runtimeAliveLock)
|
||||
aliveLock, err := runtime.getRuntimeAliveLock()
|
||||
if err != nil {
|
||||
return fmt.Errorf("acquiring runtime init lock: %w", err)
|
||||
}
|
||||
@ -639,27 +632,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// If we're resetting storage, do it now.
|
||||
// We will not return a valid runtime.
|
||||
// TODO: Plumb this context out so it can be set.
|
||||
if runtime.doReset {
|
||||
// Mark the runtime as valid, so normal functionality "mostly"
|
||||
// works and we can use regular functions to remove
|
||||
// ctrs/pods/etc
|
||||
runtime.valid = true
|
||||
|
||||
return runtime.reset(context.Background())
|
||||
}
|
||||
|
||||
// If we're renumbering locks, do it now.
|
||||
// It breaks out of normal runtime init, and will not return a valid
|
||||
// runtime.
|
||||
if runtime.doRenumber {
|
||||
if err := runtime.renumberLocks(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If we need to refresh the state, do it now - things are guaranteed to
|
||||
// be set up by now.
|
||||
if doRefresh {
|
||||
@ -681,12 +653,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
|
||||
// further
|
||||
runtime.valid = true
|
||||
|
||||
if runtime.doMigrate {
|
||||
if err := runtime.migrate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1195,6 +1161,11 @@ func (r *Runtime) graphRootMountedFlag(mounts []spec.Mount) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns a copy of the runtime alive lock
|
||||
func (r *Runtime) getRuntimeAliveLock() (*lockfile.LockFile, error) {
|
||||
return lockfile.GetLockFile(filepath.Join(r.config.Engine.TmpDir, "alive.lck"))
|
||||
}
|
||||
|
||||
// Network returns the network interface which is used by the runtime
|
||||
func (r *Runtime) Network() nettypes.ContainerNetwork {
|
||||
return r.network
|
||||
|
@ -43,7 +43,24 @@ func (r *Runtime) stopPauseProcess() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Runtime) migrate() error {
|
||||
// Migrate stops the rootless pause process and performs any necessary database
|
||||
// migrations that are required. It can also migrate all containers to a new OCI
|
||||
// runtime, if requested.
|
||||
func (r *Runtime) Migrate(newRuntime string) error {
|
||||
// Acquire the alive lock and hold it.
|
||||
// Ensures that we don't let other Podman commands run while we are
|
||||
// rewriting things in the DB.
|
||||
aliveLock, err := r.getRuntimeAliveLock()
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving alive lock: %w", err)
|
||||
}
|
||||
aliveLock.Lock()
|
||||
defer aliveLock.Unlock()
|
||||
|
||||
if !r.valid {
|
||||
return define.ErrRuntimeStopped
|
||||
}
|
||||
|
||||
runningContainers, err := r.GetRunningContainers()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -63,10 +80,14 @@ func (r *Runtime) migrate() error {
|
||||
}
|
||||
|
||||
// Did the user request a new runtime?
|
||||
runtimeChangeRequested := r.migrateRuntime != ""
|
||||
requestedRuntime, runtimeExists := r.ociRuntimes[r.migrateRuntime]
|
||||
if !runtimeExists && runtimeChangeRequested {
|
||||
return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", r.migrateRuntime, define.ErrInvalidArg)
|
||||
runtimeChangeRequested := newRuntime != ""
|
||||
var requestedRuntime OCIRuntime
|
||||
if runtimeChangeRequested {
|
||||
runtime, exists := r.ociRuntimes[newRuntime]
|
||||
if !exists {
|
||||
return fmt.Errorf("change to runtime %q requested but no such runtime is defined: %w", newRuntime, define.ErrInvalidArg)
|
||||
}
|
||||
requestedRuntime = runtime
|
||||
}
|
||||
|
||||
for _, ctr := range allCtrs {
|
||||
@ -81,9 +102,9 @@ func (r *Runtime) migrate() error {
|
||||
}
|
||||
|
||||
// Reset runtime
|
||||
if runtimeChangeRequested {
|
||||
logrus.Infof("Resetting container %s runtime to runtime %s", ctr.ID(), r.migrateRuntime)
|
||||
ctr.config.OCIRuntime = r.migrateRuntime
|
||||
if runtimeChangeRequested && ctr.config.OCIRuntime != newRuntime {
|
||||
logrus.Infof("Resetting container %s runtime to runtime %s", ctr.ID(), newRuntime)
|
||||
ctr.config.OCIRuntime = newRuntime
|
||||
ctr.ociRuntime = requestedRuntime
|
||||
|
||||
needsWrite = true
|
||||
|
@ -11,6 +11,6 @@ func (r *Runtime) stopPauseProcess() error {
|
||||
return errors.New("not implemented (*Runtime) stopPauseProcess")
|
||||
}
|
||||
|
||||
func (r *Runtime) migrate() error {
|
||||
func (r *Runtime) Migrate(newRuntime string) error {
|
||||
return errors.New("not implemented (*Runtime) migrate")
|
||||
}
|
||||
|
@ -6,18 +6,34 @@ package libpod
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/containers/podman/v4/libpod/events"
|
||||
)
|
||||
|
||||
// renumberLocks reassigns lock numbers for all containers and pods in the
|
||||
// state.
|
||||
// TODO: It would be desirable to make it impossible to call this until all
|
||||
// other libpod sessions are dead.
|
||||
// Possibly use a read-write file lock, with all non-renumber podmans owning the
|
||||
// lock as read, renumber attempting to take a write lock?
|
||||
// The alternative is some sort of session tracking, and I don't know how
|
||||
// reliable that can be.
|
||||
func (r *Runtime) renumberLocks() error {
|
||||
// RenumberLocks reassigns lock numbers for all containers and pods in the
|
||||
// state. This should NOT be run while there are other Libpod processes running.
|
||||
func (r *Runtime) RenumberLocks() error {
|
||||
// TODO: It would be desirable to make it impossible to call this until all
|
||||
// other libpod sessions are dead.
|
||||
// Possibly use a read-write file lock, with all non-renumber podmans owning the
|
||||
// lock as read, renumber attempting to take a write lock?
|
||||
// The alternative is some sort of session tracking, and I don't know how
|
||||
// reliable that can be.
|
||||
|
||||
// Acquire the alive lock and hold it.
|
||||
// Ensures that we don't let other Podman commands run while we are
|
||||
// changing around lock numbers.
|
||||
aliveLock, err := r.getRuntimeAliveLock()
|
||||
if err != nil {
|
||||
return fmt.Errorf("retrieving alive lock: %w", err)
|
||||
}
|
||||
aliveLock.Lock()
|
||||
defer aliveLock.Unlock()
|
||||
|
||||
if !r.valid {
|
||||
return define.ErrRuntimeStopped
|
||||
}
|
||||
|
||||
// Start off by deallocating all locks
|
||||
if err := r.lockManager.FreeAllLocks(); err != nil {
|
||||
return err
|
||||
@ -77,5 +93,5 @@ func (r *Runtime) renumberLocks() error {
|
||||
|
||||
r.NewSystemEvent(events.Renumber)
|
||||
|
||||
return nil
|
||||
return r.Shutdown(false)
|
||||
}
|
||||
|
@ -14,12 +14,6 @@ type EngineSetup string
|
||||
const (
|
||||
ABIMode = EngineMode("abi")
|
||||
TunnelMode = EngineMode("tunnel")
|
||||
|
||||
MigrateMode = EngineSetup("migrate")
|
||||
NoFDsMode = EngineSetup("disablefds")
|
||||
NormalMode = EngineSetup("normal")
|
||||
RenumberMode = EngineSetup("renumber")
|
||||
ResetMode = EngineSetup("reset")
|
||||
)
|
||||
|
||||
// Convert EngineMode to String
|
||||
@ -42,6 +36,8 @@ type PodmanConfig struct {
|
||||
EngineMode EngineMode // ABI or Tunneling mode
|
||||
HooksDir []string
|
||||
Identity string // ssh identity for connecting to server
|
||||
IsRenumber bool // Is this a system renumber command? If so, a number of checks will be relaxed
|
||||
IsReset bool // Is this a system reset command? If so, a number of checks will be skipped/omitted
|
||||
MaxWorks int // maximum number of parallel threads
|
||||
MemoryProfile string // Hidden: Should memory profile be taken
|
||||
RegistriesConf string // allows for specifying a custom registries.conf
|
||||
|
@ -63,6 +63,7 @@ type ContainerEngine interface { //nolint:interfacebloat
|
||||
Info(ctx context.Context) (*define.Info, error)
|
||||
KubeApply(ctx context.Context, body io.Reader, opts ApplyOptions) error
|
||||
Locks(ctx context.Context) (*LocksReport, error)
|
||||
Migrate(ctx context.Context, options SystemMigrateOptions) error
|
||||
NetworkConnect(ctx context.Context, networkname string, options NetworkConnectOptions) error
|
||||
NetworkCreate(ctx context.Context, network types.Network, createOptions *types.NetworkCreateOptions) (*types.Network, error)
|
||||
NetworkUpdate(ctx context.Context, networkname string, options NetworkUpdateOptions) error
|
||||
@ -91,6 +92,8 @@ type ContainerEngine interface { //nolint:interfacebloat
|
||||
PodStop(ctx context.Context, namesOrIds []string, options PodStopOptions) ([]*PodStopReport, error)
|
||||
PodTop(ctx context.Context, options PodTopOptions) (*StringSliceReport, error)
|
||||
PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error)
|
||||
Renumber(ctx context.Context) error
|
||||
Reset(ctx context.Context) error
|
||||
SetupRootless(ctx context.Context, noMoveProcess bool) error
|
||||
SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) (*SecretCreateReport, error)
|
||||
SecretInspect(ctx context.Context, nameOrIDs []string, options SecretInspectOptions) ([]*SecretInfoReport, []error, error)
|
||||
|
@ -1,14 +0,0 @@
|
||||
package entities
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
type SystemEngine interface {
|
||||
Renumber(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig) error
|
||||
Migrate(ctx context.Context, flags *pflag.FlagSet, config *PodmanConfig, options SystemMigrateOptions) error
|
||||
Reset(ctx context.Context) error
|
||||
Shutdown(ctx context.Context)
|
||||
}
|
@ -21,7 +21,6 @@ import (
|
||||
"github.com/containers/storage/pkg/directory"
|
||||
"github.com/containers/storage/pkg/unshare"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
func (ic *ContainerEngine) Info(ctx context.Context) (*define.Info, error) {
|
||||
@ -370,16 +369,16 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (se *SystemEngine) Reset(ctx context.Context) error {
|
||||
return nil
|
||||
func (ic *ContainerEngine) Reset(ctx context.Context) error {
|
||||
return ic.Libpod.Reset(ctx)
|
||||
}
|
||||
|
||||
func (se *SystemEngine) Renumber(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig) error {
|
||||
return nil
|
||||
func (ic *ContainerEngine) Renumber(ctx context.Context) error {
|
||||
return ic.Libpod.RenumberLocks()
|
||||
}
|
||||
|
||||
func (se SystemEngine) Migrate(ctx context.Context, flags *pflag.FlagSet, config *entities.PodmanConfig, options entities.SystemMigrateOptions) error {
|
||||
return nil
|
||||
func (ic *ContainerEngine) Migrate(ctx context.Context, options entities.SystemMigrateOptions) error {
|
||||
return ic.Libpod.Migrate(options.NewRuntime)
|
||||
}
|
||||
|
||||
func (se SystemEngine) Shutdown(ctx context.Context) {
|
||||
|
@ -7,10 +7,8 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/podman/v4/libpod"
|
||||
"github.com/containers/podman/v4/pkg/bindings"
|
||||
"github.com/containers/podman/v4/pkg/domain/entities"
|
||||
"github.com/containers/podman/v4/pkg/domain/infra/abi"
|
||||
"github.com/containers/podman/v4/pkg/domain/infra/tunnel"
|
||||
)
|
||||
|
||||
@ -43,32 +41,3 @@ func NewImageEngine(facts *entities.PodmanConfig) (entities.ImageEngine, error)
|
||||
}
|
||||
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
|
||||
}
|
||||
|
||||
// NewSystemEngine factory provides a libpod runtime for specialized system operations
|
||||
func NewSystemEngine(setup entities.EngineSetup, facts *entities.PodmanConfig) (entities.SystemEngine, error) {
|
||||
switch facts.EngineMode {
|
||||
case entities.ABIMode:
|
||||
var r *libpod.Runtime
|
||||
var err error
|
||||
switch setup {
|
||||
case entities.NormalMode:
|
||||
r, err = GetRuntime(context.Background(), facts.FlagSet, facts)
|
||||
case entities.RenumberMode:
|
||||
r, err = GetRuntimeRenumber(context.Background(), facts.FlagSet, facts)
|
||||
case entities.ResetMode:
|
||||
r, err = GetRuntimeReset(context.Background(), facts.FlagSet, facts)
|
||||
case entities.MigrateMode:
|
||||
name, flagErr := facts.FlagSet.GetString("new-runtime")
|
||||
if flagErr != nil {
|
||||
return nil, flagErr
|
||||
}
|
||||
r, err = GetRuntimeMigrate(context.Background(), facts.FlagSet, facts, name)
|
||||
case entities.NoFDsMode:
|
||||
r, err = GetRuntimeDisableFDs(context.Background(), facts.FlagSet, facts)
|
||||
}
|
||||
return &abi.SystemEngine{Libpod: r}, err
|
||||
case entities.TunnelMode:
|
||||
return nil, fmt.Errorf("tunnel system runtime not supported")
|
||||
}
|
||||
return nil, fmt.Errorf("runtime mode '%v' is not supported", facts.EngineMode)
|
||||
}
|
||||
|
@ -1,15 +0,0 @@
|
||||
//go:build remote
|
||||
// +build remote
|
||||
|
||||
package infra
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/containers/podman/v4/pkg/domain/entities"
|
||||
)
|
||||
|
||||
// NewSystemEngine factory provides a libpod runtime for specialized system operations
|
||||
func NewSystemEngine(setup entities.EngineSetup, facts *entities.PodmanConfig) (entities.SystemEngine, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
@ -34,61 +34,21 @@ var (
|
||||
)
|
||||
|
||||
type engineOpts struct {
|
||||
name string
|
||||
renumber bool
|
||||
migrate bool
|
||||
noStore bool
|
||||
withFDS bool
|
||||
reset bool
|
||||
renumber bool
|
||||
config *entities.PodmanConfig
|
||||
}
|
||||
|
||||
// GetRuntimeMigrate gets a libpod runtime that will perform a migration of existing containers
|
||||
func GetRuntimeMigrate(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig, newRuntime string) (*libpod.Runtime, error) {
|
||||
return getRuntime(ctx, fs, &engineOpts{
|
||||
name: newRuntime,
|
||||
renumber: false,
|
||||
migrate: true,
|
||||
noStore: false,
|
||||
withFDS: true,
|
||||
reset: false,
|
||||
config: cfg,
|
||||
})
|
||||
}
|
||||
|
||||
// GetRuntimeDisableFDs gets a libpod runtime that will disable sd notify
|
||||
func GetRuntimeDisableFDs(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
|
||||
return getRuntime(ctx, fs, &engineOpts{
|
||||
renumber: false,
|
||||
migrate: false,
|
||||
noStore: false,
|
||||
withFDS: false,
|
||||
reset: false,
|
||||
config: cfg,
|
||||
})
|
||||
}
|
||||
|
||||
// GetRuntimeRenumber gets a libpod runtime that will perform a lock renumber
|
||||
func GetRuntimeRenumber(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
|
||||
return getRuntime(ctx, fs, &engineOpts{
|
||||
renumber: true,
|
||||
migrate: false,
|
||||
noStore: false,
|
||||
withFDS: true,
|
||||
reset: false,
|
||||
config: cfg,
|
||||
})
|
||||
}
|
||||
|
||||
// GetRuntime generates a new libpod runtime configured by command line options
|
||||
func GetRuntime(ctx context.Context, flags *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
|
||||
runtimeSync.Do(func() {
|
||||
runtimeLib, runtimeErr = getRuntime(ctx, flags, &engineOpts{
|
||||
renumber: false,
|
||||
migrate: false,
|
||||
noStore: false,
|
||||
withFDS: true,
|
||||
reset: false,
|
||||
reset: cfg.IsReset,
|
||||
renumber: cfg.IsRenumber,
|
||||
config: cfg,
|
||||
})
|
||||
})
|
||||
@ -98,23 +58,10 @@ func GetRuntime(ctx context.Context, flags *flag.FlagSet, cfg *entities.PodmanCo
|
||||
// GetRuntimeNoStore generates a new libpod runtime configured by command line options
|
||||
func GetRuntimeNoStore(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
|
||||
return getRuntime(ctx, fs, &engineOpts{
|
||||
renumber: false,
|
||||
migrate: false,
|
||||
noStore: true,
|
||||
withFDS: true,
|
||||
reset: false,
|
||||
config: cfg,
|
||||
})
|
||||
}
|
||||
|
||||
func GetRuntimeReset(ctx context.Context, fs *flag.FlagSet, cfg *entities.PodmanConfig) (*libpod.Runtime, error) {
|
||||
return getRuntime(ctx, fs, &engineOpts{
|
||||
renumber: false,
|
||||
migrate: false,
|
||||
noStore: false,
|
||||
withFDS: true,
|
||||
reset: true,
|
||||
config: cfg,
|
||||
noStore: true,
|
||||
withFDS: true,
|
||||
reset: false,
|
||||
config: cfg,
|
||||
})
|
||||
}
|
||||
|
||||
@ -180,17 +127,10 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo
|
||||
if fs.Changed("transient-store") {
|
||||
options = append(options, libpod.WithTransientStore(cfg.TransientStore))
|
||||
}
|
||||
if opts.migrate {
|
||||
options = append(options, libpod.WithMigrate())
|
||||
if opts.name != "" {
|
||||
options = append(options, libpod.WithMigrateRuntime(opts.name))
|
||||
}
|
||||
}
|
||||
|
||||
if opts.reset {
|
||||
options = append(options, libpod.WithReset())
|
||||
}
|
||||
|
||||
if opts.renumber {
|
||||
options = append(options, libpod.WithRenumber())
|
||||
}
|
||||
|
@ -23,6 +23,18 @@ func (ic *ContainerEngine) SystemPrune(ctx context.Context, opts entities.System
|
||||
return system.Prune(ic.ClientCtx, options)
|
||||
}
|
||||
|
||||
func (ic *ContainerEngine) Migrate(ctx context.Context, options entities.SystemMigrateOptions) error {
|
||||
return errors.New("runtime migration is not supported on remote clients")
|
||||
}
|
||||
|
||||
func (ic *ContainerEngine) Renumber(ctx context.Context) error {
|
||||
return errors.New("lock renumbering is not supported on remote clients")
|
||||
}
|
||||
|
||||
func (ic *ContainerEngine) Reset(ctx context.Context) error {
|
||||
return errors.New("system reset is not supported on remote clients")
|
||||
}
|
||||
|
||||
func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.SystemDfOptions) (*entities.SystemDfReport, error) {
|
||||
return system.DiskUsage(ic.ClientCtx, nil)
|
||||
}
|
||||
|
@ -1,6 +1,8 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/containers/podman/v4/test/utils"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -74,4 +76,32 @@ var _ = Describe("podman system reset", Serial, func() {
|
||||
Expect(session.OutputToStringArray()).To(BeEmpty())
|
||||
}
|
||||
})
|
||||
|
||||
It("system reset completely removes container", func() {
|
||||
SkipIfRemote("system reset not supported on podman --remote")
|
||||
useCustomNetworkDir(podmanTest, tempdir)
|
||||
|
||||
rmi := podmanTest.Podman([]string{"rmi", "--force", "--all"})
|
||||
rmi.WaitWithDefaultTimeout()
|
||||
Expect(rmi).Should(ExitCleanly())
|
||||
podmanTest.AddImageToRWStore(ALPINE)
|
||||
|
||||
// The name ensures that we have a Libpod resource that we'll
|
||||
// hit if we recreate the container after a reset and it still
|
||||
// exists. The port does the same for a system-level resource.
|
||||
ctrName := "testctr"
|
||||
port1 := GetPort()
|
||||
port2 := GetPort()
|
||||
session := podmanTest.Podman([]string{"run", "--name", ctrName, "-p", fmt.Sprintf("%d:%d", port1, port2), "-d", ALPINE, "top"})
|
||||
session.WaitWithDefaultTimeout()
|
||||
Expect(session).Should(ExitCleanly())
|
||||
|
||||
reset := podmanTest.Podman([]string{"system", "reset", "--force"})
|
||||
reset.WaitWithDefaultTimeout()
|
||||
Expect(reset).Should(ExitCleanly())
|
||||
|
||||
session2 := podmanTest.Podman([]string{"run", "--name", ctrName, "-p", fmt.Sprintf("%d:%d", port1, port2), "-d", ALPINE, "top"})
|
||||
session2.WaitWithDefaultTimeout()
|
||||
Expect(session2).Should(ExitCleanly())
|
||||
})
|
||||
})
|
||||
|
Reference in New Issue
Block a user