mirror of https://github.com/containers/podman.git
Remove Libpod special-init conditions
Before this, for some special Podman commands (system reset, system migrate, system renumber), Podman would create a first Libpod runtime to do initialization and flag parsing, then stop that runtime and create an entirely new runtime to perform the actual task. This is an artifact of the pre-Podman 2.0 days, when there was almost no indirection between Libpod and the CLI, and we only used one runtime because we didn't need a second runtime for flag parsing and basic init.

This system was clunky and, apparently, very buggy. When we migrated to SQLite, some logic was introduced where we'd select a different database location based on whether or not Libpod's StaticDir was manually set - which differed between the first invocation of Libpod and the second. So we'd get a different database for some commands (like `system reset`), and they would not be able to see existing containers, meaning they would not function properly.

The immediate cause is obviously the SQLite behavior, but I'm certain there's a lot more baggage hiding behind this multiple-runtime logic, so let's just refactor it out. It doesn't make sense and it complicates the code. Instead, make Reset, Renumber, and Migrate methods of the libpod Runtime. For Reset and Renumber, we can shut the runtime down afterwards to achieve the desired effect (no valid runtime after). Then pipe all of them through the ContainerEngine so cmd/podman can access them.

As part of this, remove the SystemEngine part of pkg/domain. This was supposed to encompass these "special" commands, but every command in SystemEngine is actually a ContainerEngine command: Reset, Renumber, and Migrate all need a full Libpod and access to all containers. There's no point to a separate engine if it just wraps Libpod in the exact same way as ContainerEngine. This consolidation saves us a bit more code and complexity.

Signed-off-by: Matt Heon <mheon@redhat.com>
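In rough code, the consolidation described above has the following shape. This is a minimal Go sketch, not Podman's actual source: the stub bodies and the `SystemReset` method name are illustrative, while `Reset` and `Shutdown` follow the commit message.

```go
package sketch

import "context"

// Runtime stands in for the libpod Runtime described in the message.
type Runtime struct {
	valid bool
}

// Reset removes all containers, pods, volumes, and networks, then shuts
// the runtime down, so no valid runtime remains afterwards.
func (r *Runtime) Reset(ctx context.Context) error {
	if err := r.reset(ctx); err != nil {
		return err
	}
	return r.Shutdown(false)
}

func (r *Runtime) reset(ctx context.Context) error { return nil } // remove ctrs/pods/etc.

// Shutdown renders the runtime invalid for further use.
func (r *Runtime) Shutdown(force bool) error {
	r.valid = false
	return nil
}

// ContainerEngine wraps the single Runtime; the former SystemEngine is
// gone because every "special" command needed full Libpod access anyway.
type ContainerEngine struct {
	Libpod *Runtime
}

// SystemReset pipes the Runtime method through to cmd/podman.
func (e *ContainerEngine) SystemReset(ctx context.Context) error {
	return e.Libpod.Reset(ctx)
}
```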
@@ -89,22 +89,28 @@ type Runtime struct {
 	// This bool is just needed so that we can set it for netavark interface.
 	syslog bool
 
-	// doReset indicates that the runtime should perform a system reset.
-	// All Podman files will be removed.
+	// doReset indicates that the runtime will perform a system reset.
+	// A reset will remove all containers, pods, volumes, networks, etc.
+	// A number of validation checks are relaxed, or replaced with logic to
+	// remove as much of the runtime as possible if they fail. This ensures
+	// that even a broken Libpod can still be removed via `system reset`.
+	// This does not actually perform a `system reset`. That is done by
+	// calling "Reset()" on the returned runtime.
 	doReset bool
 
-	// doRenumber indicates that the runtime should perform a lock renumber
-	// during initialization.
-	// Once the runtime has been initialized and returned, this variable is
-	// unused.
+	// doRenumber indicates that the runtime will perform a system renumber.
+	// A renumber will reassign lock numbers for all containers, pods, etc.
+	// This will not perform the renumber itself, but will ignore some
+	// errors related to lock initialization so a renumber can be performed
+	// if something has gone wrong.
 	doRenumber bool
 
 	doMigrate bool
 	// System migrate can move containers to a new runtime.
 	// We make no promises that these migrated containers work on the new
 	// runtime, though.
 	migrateRuntime string
 
 	// valid indicates whether the runtime is ready to use.
 	// valid is set to true when a runtime is returned from GetRuntime(),
 	// and remains true until the runtime is shut down (rendering its
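For context, Libpod configures flags like `doReset` through functional options applied at runtime construction. The sketch below shows that shape; `WithReset` is a hypothetical stand-in for the real option name.

```go
package sketch

// Runtime stands in for libpod's Runtime; only the flag matters here.
type Runtime struct {
	doReset bool
}

// RuntimeOption mirrors Libpod's functional-option shape.
type RuntimeOption func(*Runtime) error

// WithReset shows how doReset would be toggled at construction time;
// validation is then relaxed so even a broken store can be torn down.
func WithReset() RuntimeOption {
	return func(rt *Runtime) error {
		rt.doReset = true
		return nil
	}
}
```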
@@ -230,11 +229,6 @@ func newRuntimeFromConfig(conf *config.Config, options ...RuntimeOption) (*Runti
 
 	runtime.config.CheckCgroupsAndAdjustConfig()
 
-	// If resetting storage, do *not* return a runtime.
-	if runtime.doReset {
-		return nil, nil
-	}
-
 	return runtime, nil
 }
 
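The removed `return nil, nil` is worth a note: a constructor that signals "nothing to return" with a nil error forces every caller into an extra nil check they will inevitably forget. A small illustration (not Podman code):

```go
package sketch

import "errors"

type runtime struct{}

func (r *runtime) Shutdown() {} // field access on a nil receiver would panic

func newRuntime(reset bool) (*runtime, error) {
	if reset {
		return nil, nil // the removed pattern: nil value, nil error
	}
	return &runtime{}, nil
}

func use() error {
	rt, err := newRuntime(true)
	if err != nil {
		return err
	}
	if rt == nil { // the extra check the old API silently required
		return errors.New("no runtime returned")
	}
	rt.Shutdown()
	return nil
}
```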
@@ -347,10 +341,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	}
 	runtime.conmonPath = cPath
 
-	if runtime.doReset && runtime.doRenumber {
-		return fmt.Errorf("cannot perform system reset while renumbering locks: %w", define.ErrInvalidArg)
-	}
-
 	if runtime.config.Engine.StaticDir == "" {
 		runtime.config.Engine.StaticDir = filepath.Join(runtime.storageConfig.GraphRoot, "libpod")
 		runtime.storageSet.StaticDirSet = true
@@ -555,9 +545,8 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	// We now need to see if the system has restarted
 	// We check for the presence of a file in our tmp directory to verify this
 	// This check must be locked to prevent races
-	runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck")
 	runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
-	aliveLock, err := lockfile.GetLockFile(runtimeAliveLock)
+	aliveLock, err := runtime.getRuntimeAliveLock()
 	if err != nil {
 		return fmt.Errorf("acquiring runtime init lock: %w", err)
 	}
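The code around this hunk uses containers/storage's lockfile package to guard the restart check: take the lock, then stat/create a sentinel file whose absence means the tmpfs was cleared by a reboot. A self-contained sketch of that pattern, with illustrative paths (the real check lives in makeRuntime):

```go
package sketch

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/lockfile"
)

// checkRestarted reports whether the system rebooted since the sentinel
// file was last created, recreating the sentinel if it is missing.
func checkRestarted(tmpDir string) (bool, error) {
	lock, err := lockfile.GetLockFile(filepath.Join(tmpDir, "alive.lck"))
	if err != nil {
		return false, fmt.Errorf("acquiring runtime init lock: %w", err)
	}
	lock.Lock()
	defer lock.Unlock()

	aliveFile := filepath.Join(tmpDir, "alive")
	_, statErr := os.Stat(aliveFile)
	if errors.Is(statErr, os.ErrNotExist) {
		// tmpfs was cleared, so the system restarted; recreate the sentinel.
		f, err := os.Create(aliveFile)
		if err != nil {
			return false, err
		}
		f.Close()
		return true, nil
	}
	return false, statErr
}
```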
@@ -631,27 +620,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 		return err
 	}
 
-	// If we're resetting storage, do it now.
-	// We will not return a valid runtime.
-	// TODO: Plumb this context out so it can be set.
-	if runtime.doReset {
-		// Mark the runtime as valid, so normal functionality "mostly"
-		// works and we can use regular functions to remove
-		// ctrs/pods/etc
-		runtime.valid = true
-
-		return runtime.reset(context.Background())
-	}
-
-	// If we're renumbering locks, do it now.
-	// It breaks out of normal runtime init, and will not return a valid
-	// runtime.
-	if runtime.doRenumber {
-		if err := runtime.renumberLocks(); err != nil {
-			return err
-		}
-	}
-
 	// If we need to refresh the state, do it now - things are guaranteed to
 	// be set up by now.
 	if doRefresh {
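With these blocks gone, reset and renumber no longer hijack makeRuntime; per the commit message they become explicit methods on the fully built runtime. A hedged sketch of the caller-side flow (names follow the commit message, signatures are not verified):

```go
package sketch

import (
	"context"

	"github.com/containers/podman/v5/libpod" // module path depends on the Podman version
)

// systemReset shows the flow after this change: construction no longer
// performs the reset, so it is an explicit call on the returned runtime.
func systemReset(ctx context.Context) error {
	rt, err := libpod.NewRuntime(ctx, libpod.WithReset())
	if err != nil {
		return err
	}
	// Reset tears everything down and leaves no valid runtime behind.
	return rt.Reset(ctx)
}
```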
@@ -673,12 +641,6 @@ func makeRuntime(runtime *Runtime) (retErr error) {
 	// further
 	runtime.valid = true
 
-	if runtime.doMigrate {
-		if err := runtime.migrate(); err != nil {
-			return err
-		}
-	}
-
 	return nil
 }
 
@@ -1187,6 +1149,11 @@ func (r *Runtime) graphRootMountedFlag(mounts []spec.Mount) string {
 	return ""
 }
 
+// Returns a copy of the runtime alive lock
+func (r *Runtime) getRuntimeAliveLock() (*lockfile.LockFile, error) {
+	return lockfile.GetLockFile(filepath.Join(r.config.Engine.TmpDir, "alive.lck"))
+}
+
 // Network returns the network interface which is used by the runtime
 func (r *Runtime) Network() nettypes.ContainerNetwork {
 	return r.network