mirror of https://github.com/containers/podman.git, synced 2025-05-21 00:56:36 +08:00

InfraContainer should go through the same creation process as regular containers. This change reaches from the cmd level down, adding new container CLI opts and specgen-creating functions. Both container and pod CLI options are now populated in cmd and used to create a pod specgen and a container specgen. The process, sketched below, goes as follows: FillOutSpecGen (infra) -> MapSpec (podOpts -> infraOpts) -> PodCreate -> MakePod -> createPodOptions -> NewPod -> CompleteSpec (infra) -> MakeContainer -> NewContainer -> newContainer -> AddInfra (to pod state)

Signed-off-by: cdoern <cdoern@redhat.com>
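As a rough illustration of that ordering, here is a minimal, self-contained Go sketch; every type and function name in it is a simplified stand-in for the stages named above, not podman's actual API.

// Stand-in sketch of the infra container creation flow. All names below are
// simplified placeholders, not podman's real types or signatures.
package main

import "fmt"

type infraSpec struct{ name string }   // stand-in for the container specgen
type podSpec struct{ infra infraSpec } // stand-in for the pod specgen
type pod struct{ infraID string }

// fillOutSpecGen stands in for FillOutSpecGen (infra): the infra container's
// spec is filled out the same way a regular container's would be.
func fillOutSpecGen() infraSpec { return infraSpec{name: "infra"} }

// mapSpec stands in for MapSpec: pod-level options are copied onto the infra
// container's options (podOpts -> infraOpts).
func mapSpec(p *podSpec, s infraSpec) { p.infra = s }

// podCreate stands in for PodCreate -> MakePod -> createPodOptions -> NewPod.
func podCreate(p podSpec) *pod {
	created := &pod{}
	// CompleteSpec (infra) -> MakeContainer -> NewContainer -> newContainer:
	// the infra container takes the same path as any other container.
	ctrID := makeContainer(p.infra)
	// AddInfra: record the finished infra container in the pod's state.
	created.infraID = ctrID
	return created
}

// makeContainer stands in for the regular container creation chain.
func makeContainer(s infraSpec) string { return s.name + "-id" }

func main() {
	spec := fillOutSpecGen() // FillOutSpecGen (infra)
	var ps podSpec
	mapSpec(&ps, spec) // MapSpec (podOpts -> infraOpts)
	fmt.Println("infra container in pod state:", podCreate(ps).infraID)
}

The point of the refactor is visible in podCreate: the infra container is built by the same container path as any other container, then registered with the pod.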
91 lines
2.4 KiB
Go
package libpod

import (
	"fmt"
	"path/filepath"
	"time"

	"github.com/containers/common/pkg/config"
	"github.com/containers/podman/v3/libpod/define"
	"github.com/containers/podman/v3/pkg/rootless"
	"github.com/containers/storage/pkg/stringid"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Creates a new, empty pod
func newPod(runtime *Runtime) *Pod {
	pod := new(Pod)
	pod.config = new(PodConfig)
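	// Pod IDs are random 64-character hex strings; they need not be
	// cryptographically secure, hence the non-crypto generator.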
	pod.config.ID = stringid.GenerateNonCryptoID()
	pod.config.Labels = make(map[string]string)
	pod.config.CreatedTime = time.Now()
	// pod.config.InfraContainer = new(ContainerConfig)
	pod.state = new(podState)
	pod.runtime = runtime

	return pod
}

// Update pod state from database
func (p *Pod) updatePod() error {
	if err := p.runtime.state.UpdatePod(p); err != nil {
		return err
	}

	return nil
}

// Save pod state to database
func (p *Pod) save() error {
	if err := p.runtime.state.SavePod(p); err != nil {
		return errors.Wrapf(err, "error saving pod %s state", p.ID())
	}

	return nil
}

// Refresh a pod's state after restart
// This cannot lock any other pod, but may lock individual containers, as those
// will have refreshed by the time pod refresh runs.
func (p *Pod) refresh() error {
	// Need to do an update from the DB to pull potentially-missing state
	if err := p.runtime.state.UpdatePod(p); err != nil {
		return err
	}

	if !p.valid {
		return define.ErrPodRemoved
	}

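	// Locks do not persist across a restart, so the pod's lock must be
	// re-allocated under the lock ID recorded in its config.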
	// Retrieve the pod's lock
	lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
	if err != nil {
		return errors.Wrapf(err, "error retrieving lock %d for pod %s", p.config.LockID, p.ID())
	}
	p.lock = lock

	// We need to recreate the pod's cgroup
	if p.config.UsePodCgroup {
		switch p.runtime.config.Engine.CgroupManager {
		case config.SystemdCgroupsManager:
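			// Rebuild (and create) the pod's systemd slice, named
			// "libpod_pod_<ID>", under the configured cgroup parent.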
			cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()))
			if err != nil {
				logrus.Errorf("Error creating CGroup for pod %s: %v", p.ID(), err)
			}
			p.state.CgroupPath = cgroupPath
		case config.CgroupfsCgroupsManager:
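			// With cgroupfs, only rootless pods that have a rootless cgroup
			// parent set get their cgroup path recorded here.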
			if rootless.IsRootless() && isRootlessCgroupSet(p.config.CgroupParent) {
				p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())

				logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
			}
		default:
			return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.Engine.CgroupManager)
		}
	}

	// Save changes
	return p.save()
}