mirror of
https://github.com/containers/podman.git
synced 2025-08-24 18:12:25 +08:00
Prevent two podman machines running on darwin
As issue #25112 points out, it was possible to start a machine on one of the darwin providers and then switch providers and start another one with a different name. This PR first prevents that usage, which is a forbidden use case. Second, it performs some minor cleanup of the error messages so that the error is specific to this condition. This bug fix is for darwin only. In the case of Windows, we probably need to answer the question raised in #24067 first: whether we want to stop allowing WSL to run multiple machines. Fixes #25112 Signed-off-by: Brent Baude <bbaude@redhat.com>
This commit is contained in:
@ -8,11 +8,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoSuchVM = errors.New("VM does not exist")
|
||||
ErrWrongState = errors.New("VM in wrong state to perform action")
|
||||
ErrVMAlreadyExists = errors.New("VM already exists")
|
||||
ErrVMAlreadyRunning = errors.New("VM already running or starting")
|
||||
ErrMultipleActiveVM = errors.New("only one VM can be active at a time")
|
||||
ErrNotImplemented = errors.New("functionality not implemented")
|
||||
)
|
||||
|
||||
@ -49,3 +46,16 @@ type ErrIncompatibleMachineConfig struct {
|
||||
// Error implements the error interface, reporting a machine configuration
// file (err.Path, labeled with err.Name) that this version of Podman cannot
// use. NOTE(review): the ErrIncompatibleMachineConfig struct itself is
// declared above this hunk and is not fully visible here.
func (err *ErrIncompatibleMachineConfig) Error() string {
	return fmt.Sprintf("incompatible machine config %q (%s) for this version of Podman", err.Path, err.Name)
}
|
||||
|
||||
// ErrMultipleActiveVM is returned when starting a machine would violate the
// rule that only one VM may be active at a time. Name is the machine that is
// already starting or running; Provider, when non-empty, names the provider
// the active machine belongs to (used when it differs from the current one).
type ErrMultipleActiveVM struct {
	Name     string
	Provider string
}

// Error implements the error interface.
func (err *ErrMultipleActiveVM) Error() string {
	suffix := ""
	if err.Provider != "" {
		suffix = fmt.Sprintf(" on the %s provider", err.Provider)
	}
	return err.Name + " already starting or running" + suffix + ": only one VM can be active at a time"
}
|
||||
|
@ -61,8 +61,10 @@ var _ = Describe("podman machine start", func() {
|
||||
})
|
||||
|
||||
It("start machine already started", func() {
|
||||
name := randomString()
|
||||
i := new(initMachine)
|
||||
session, err := mb.setCmd(i.withImage(mb.imagePath)).run()
|
||||
machineTestBuilderInit := mb.setName(name).setCmd(i.withImage(mb.imagePath))
|
||||
session, err := machineTestBuilderInit.run()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(session).To(Exit(0))
|
||||
s := new(startMachine)
|
||||
@ -78,7 +80,7 @@ var _ = Describe("podman machine start", func() {
|
||||
startSession, err = mb.setCmd(s).run()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(startSession).To(Exit(125))
|
||||
Expect(startSession.errorToString()).To(ContainSubstring("VM already running or starting"))
|
||||
Expect(startSession.errorToString()).To(ContainSubstring(fmt.Sprintf("Error: unable to start %q: already running", machineTestBuilderInit.name)))
|
||||
})
|
||||
|
||||
It("start machine with conflict on SSH port", func() {
|
||||
@ -210,10 +212,10 @@ var _ = Describe("podman machine start", func() {
|
||||
Expect(startSession1).To(Or(Exit(0), Exit(125)), "start command should succeed or fail with 125")
|
||||
if startSession1.ExitCode() == 0 {
|
||||
Expect(startSession2).To(Exit(125), "first start worked, second start must fail")
|
||||
Expect(startSession2.errorToString()).To(ContainSubstring("machine %s is already running: only one VM can be active at a time", machine1))
|
||||
Expect(startSession2.errorToString()).To(ContainSubstring("%s already starting or running: only one VM can be active at a time", machine1))
|
||||
} else {
|
||||
Expect(startSession2).To(Exit(0), "first start failed, second start succeed")
|
||||
Expect(startSession1.errorToString()).To(ContainSubstring("machine %s is already running: only one VM can be active at a time", machine2))
|
||||
Expect(startSession1.errorToString()).To(ContainSubstring("%s already starting or running: only one VM can be active at a time", machine2))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
@ -265,7 +265,7 @@ func VMExists(name string, vmstubbers []vmconfigs.VMProvider) (*vmconfigs.Machin
|
||||
return nil, false, err
|
||||
}
|
||||
if mc, found := mcs[name]; found {
|
||||
return mc, true, nil
|
||||
return mc.MachineConfig, true, nil
|
||||
}
|
||||
// Check with the provider hypervisor
|
||||
for _, vmstubber := range vmstubbers {
|
||||
@ -281,31 +281,46 @@ func VMExists(name string, vmstubbers []vmconfigs.VMProvider) (*vmconfigs.Machin
|
||||
}
|
||||
|
||||
// checkExclusiveActiveVM checks if any of the machines are already running
|
||||
func checkExclusiveActiveVM(provider vmconfigs.VMProvider, mc *vmconfigs.MachineConfig) error {
|
||||
func checkExclusiveActiveVM(currentProvider vmconfigs.VMProvider, mc *vmconfigs.MachineConfig) error {
|
||||
providers := provider.GetAll()
|
||||
// Check if any other machines are running; if so, we error
|
||||
localMachines, err := getMCsOverProviders([]vmconfigs.VMProvider{provider})
|
||||
localMachines, err := getMCsOverProviders(providers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for name, localMachine := range localMachines {
|
||||
state, err := provider.State(localMachine, false)
|
||||
state, err := localMachine.Provider.State(localMachine.MachineConfig, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if state == machineDefine.Running || state == machineDefine.Starting {
|
||||
if mc.Name == name {
|
||||
return fmt.Errorf("unable to start %q: machine %s: %w", mc.Name, name, machineDefine.ErrVMAlreadyRunning)
|
||||
return fmt.Errorf("unable to start %q: already running", mc.Name)
|
||||
}
|
||||
return fmt.Errorf("unable to start %q: machine %s is already running: %w", mc.Name, name, machineDefine.ErrMultipleActiveVM)
|
||||
|
||||
// A machine is running in the current provider
|
||||
if currentProvider.VMType() == localMachine.Provider.VMType() {
|
||||
fail := machineDefine.ErrMultipleActiveVM{Name: name}
|
||||
return fmt.Errorf("unable to start %q: %w", mc.Name, &fail)
|
||||
}
|
||||
// A machine is running in an alternate provider
|
||||
fail := machineDefine.ErrMultipleActiveVM{Name: name, Provider: localMachine.Provider.VMType().String()}
|
||||
return fmt.Errorf("unable to start: %w", &fail)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// knownMachineConfig pairs an on-disk machine config with the provider it was
// loaded from, so callers (e.g. checkExclusiveActiveVM) can query the
// machine's state through the correct provider.
type knownMachineConfig struct {
	Provider      vmconfigs.VMProvider
	MachineConfig *vmconfigs.MachineConfig
}
|
||||
|
||||
// getMCsOverProviders loads machineconfigs from a config dir derived from the "provider". it returns only what is known on
|
||||
// disk so things like status may be incomplete or inaccurate
|
||||
func getMCsOverProviders(vmstubbers []vmconfigs.VMProvider) (map[string]*vmconfigs.MachineConfig, error) {
|
||||
mcs := make(map[string]*vmconfigs.MachineConfig)
|
||||
func getMCsOverProviders(vmstubbers []vmconfigs.VMProvider) (map[string]knownMachineConfig, error) {
|
||||
mcs := make(map[string]knownMachineConfig)
|
||||
for _, stubber := range vmstubbers {
|
||||
dirs, err := env.GetMachineDirs(stubber.VMType())
|
||||
if err != nil {
|
||||
@ -320,7 +335,10 @@ func getMCsOverProviders(vmstubbers []vmconfigs.VMProvider) (map[string]*vmconfi
|
||||
// iterate known mcs and add the stubbers
|
||||
for mcName, mc := range stubberMCs {
|
||||
if _, ok := mcs[mcName]; !ok {
|
||||
mcs[mcName] = mc
|
||||
mcs[mcName] = knownMachineConfig{
|
||||
Provider: stubber,
|
||||
MachineConfig: mc,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -414,7 +432,7 @@ func Start(mc *vmconfigs.MachineConfig, mp vmconfigs.VMProvider, dirs *machineDe
|
||||
}
|
||||
|
||||
if state == machineDefine.Running || state == machineDefine.Starting {
|
||||
return fmt.Errorf("machine %s: %w", mc.Name, machineDefine.ErrVMAlreadyRunning)
|
||||
return fmt.Errorf("unable to start %q: already running", mc.Name)
|
||||
}
|
||||
}
|
||||
|
||||
|
Reference in New Issue
Block a user