mirror of https://github.com/containers/podman.git
Merge pull request #16083 from dfr/freebsd-pod
Add support for 'podman pod' on FreeBSD
@@ -2,14 +2,10 @@ package libpod

import (
    "fmt"
    "path/filepath"
    "time"

    "github.com/containers/common/pkg/config"
    "github.com/containers/podman/v4/libpod/define"
    "github.com/containers/podman/v4/pkg/rootless"
    "github.com/containers/storage/pkg/stringid"
    "github.com/sirupsen/logrus"
)

// Creates a new, empty pod
@@ -64,24 +60,8 @@ func (p *Pod) refresh() error {
    }
    p.lock = lock

-    // We need to recreate the pod's cgroup
-    if p.config.UsePodCgroup {
-        switch p.runtime.config.Engine.CgroupManager {
-        case config.SystemdCgroupsManager:
-            cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()), p.ResourceLim())
-            if err != nil {
-                logrus.Errorf("Creating Cgroup for pod %s: %v", p.ID(), err)
-            }
-            p.state.CgroupPath = cgroupPath
-        case config.CgroupfsCgroupsManager:
-            if rootless.IsRootless() && isRootlessCgroupSet(p.config.CgroupParent) {
-                p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())
-
-                logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
-            }
-        default:
-            return fmt.Errorf("unknown cgroups manager %s specified: %w", p.runtime.config.Engine.CgroupManager, define.ErrInvalidArg)
-        }
-    }
+    if err := p.platformRefresh(); err != nil {
+        return err
+    }

    // Save changes
5  libpod/pod_internal_freebsd.go  Normal file
@@ -0,0 +1,5 @@
package libpod

func (p *Pod) platformRefresh() error {
    return nil
}
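The per-platform files added here lean on Go's implicit file-suffix build constraints: a file named *_freebsd.go or *_linux.go is only compiled for that GOOS, so each build sees exactly one definition of platformRefresh. A minimal sketch of the same pattern, with hypothetical names (not from this diff):

// greet_linux.go: built only when GOOS=linux
package main

func platformGreeting() string { return "hello from Linux" }

// greet_freebsd.go: built only when GOOS=freebsd
package main

func platformGreeting() string { return "hello from FreeBSD" }

// main.go: built everywhere; calls whichever definition was compiled in
package main

import "fmt"

func main() { fmt.Println(platformGreeting()) }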
34  libpod/pod_internal_linux.go  Normal file
@@ -0,0 +1,34 @@
package libpod

import (
    "fmt"
    "path/filepath"

    "github.com/containers/common/pkg/config"
    "github.com/containers/podman/v4/libpod/define"
    "github.com/containers/podman/v4/pkg/rootless"
    "github.com/sirupsen/logrus"
)

func (p *Pod) platformRefresh() error {
    // We need to recreate the pod's cgroup
    if p.config.UsePodCgroup {
        switch p.runtime.config.Engine.CgroupManager {
        case config.SystemdCgroupsManager:
            cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()), p.ResourceLim())
            if err != nil {
                logrus.Errorf("Creating Cgroup for pod %s: %v", p.ID(), err)
            }
            p.state.CgroupPath = cgroupPath
        case config.CgroupfsCgroupsManager:
            if rootless.IsRootless() && isRootlessCgroupSet(p.config.CgroupParent) {
                p.state.CgroupPath = filepath.Join(p.config.CgroupParent, p.ID())

                logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
            }
        default:
            return fmt.Errorf("unknown cgroups manager %s specified: %w", p.runtime.config.Engine.CgroupManager, define.ErrInvalidArg)
        }
    }
    return nil
}
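The two managers in the switch above produce differently shaped cgroup paths. A rough, runnable illustration; the parent values ("/libpod_parent", "machine.slice") and the systemd slice naming are assumptions for illustration, not taken from this diff:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    id := "8d0b9e0a6f52" // hypothetical pod ID

    // cgroupfs: the pod cgroup is a plain directory under the parent.
    fmt.Println(filepath.Join("/libpod_parent", id))

    // systemd: slices nest by name prefix, so a pod slice under
    // machine.slice would be named roughly like this (assumed naming):
    fmt.Printf("machine.slice/machine-libpod_pod_%s.slice\n", id)
}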
346  libpod/runtime_pod_common.go  Normal file
@@ -0,0 +1,346 @@
//go:build linux || freebsd
// +build linux freebsd

package libpod

import (
    "context"
    "errors"
    "fmt"
    "path/filepath"

    "github.com/containers/common/pkg/cgroups"
    "github.com/containers/common/pkg/config"
    "github.com/containers/podman/v4/libpod/define"
    "github.com/containers/podman/v4/libpod/events"
    "github.com/containers/podman/v4/pkg/specgen"
    "github.com/hashicorp/go-multierror"
    "github.com/sirupsen/logrus"
)

// NewPod makes a new, empty pod
func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, options ...PodCreateOption) (_ *Pod, deferredErr error) {
    if !r.valid {
        return nil, define.ErrRuntimeStopped
    }

    pod := newPod(r)

    // Set default namespace to runtime's namespace
    // Do so before options run so they can override it
    if r.config.Engine.Namespace != "" {
        pod.config.Namespace = r.config.Engine.Namespace
    }

    for _, option := range options {
        if err := option(pod); err != nil {
            return nil, fmt.Errorf("running pod create option: %w", err)
        }
    }

    // Allocate a lock for the pod
    lock, err := r.lockManager.AllocateLock()
    if err != nil {
        return nil, fmt.Errorf("allocating lock for new pod: %w", err)
    }
    pod.lock = lock
    pod.config.LockID = pod.lock.ID()

    defer func() {
        if deferredErr != nil {
            if err := pod.lock.Free(); err != nil {
                logrus.Errorf("Freeing pod lock after failed creation: %v", err)
            }
        }
    }()

    pod.valid = true

    if err := r.platformMakePod(pod, p); err != nil {
        return nil, err
    }

    if !pod.HasInfraContainer() && pod.SharesNamespaces() {
        return nil, errors.New("Pods must have an infra container to share namespaces")
    }
    if pod.HasInfraContainer() && !pod.SharesNamespaces() {
        logrus.Infof("Pod has an infra container, but shares no namespaces")
    }

    // Unless the user has specified a name, use a randomly generated one.
    // Note that name conflicts may occur (see #11735), so we need to loop.
    generateName := pod.config.Name == ""
    var addPodErr error
    for {
        if generateName {
            name, err := r.generateName()
            if err != nil {
                return nil, err
            }
            pod.config.Name = name
        }

        if p.InfraContainerSpec != nil && p.InfraContainerSpec.Hostname == "" {
            p.InfraContainerSpec.Hostname = pod.config.Name
        }
        if addPodErr = r.state.AddPod(pod); addPodErr == nil {
            return pod, nil
        }
        if !generateName || (!errors.Is(addPodErr, define.ErrPodExists) && !errors.Is(addPodErr, define.ErrCtrExists)) {
            break
        }
    }
    if addPodErr != nil {
        return nil, fmt.Errorf("adding pod to state: %w", addPodErr)
    }

    return pod, nil
}

// AddInfra adds the created infra container to the pod state
func (r *Runtime) AddInfra(ctx context.Context, pod *Pod, infraCtr *Container) (*Pod, error) {
    if !r.valid {
        return nil, define.ErrRuntimeStopped
    }
    pod.state.InfraContainerID = infraCtr.ID()
    if err := pod.save(); err != nil {
        return nil, err
    }
    pod.newPodEvent(events.Create)
    return pod, nil
}

// SavePod is a helper function to save the pod state from outside of libpod
func (r *Runtime) SavePod(pod *Pod) error {
    if !r.valid {
        return define.ErrRuntimeStopped
    }
    if err := pod.save(); err != nil {
        return err
    }
    pod.newPodEvent(events.Create)
    return nil
}

// DO NOT USE THIS FUNCTION DIRECTLY. Use removePod(), below. It will call
// removeMalformedPod() if necessary.
func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Container, force bool, timeout *uint, ctrNamedVolumes map[string]*ContainerNamedVolume) error {
    var removalErr error
    for _, ctr := range ctrs {
        err := func() error {
            ctrLock := ctr.lock
            ctrLock.Lock()
            defer func() {
                ctrLock.Unlock()
            }()

            if err := ctr.syncContainer(); err != nil {
                return err
            }

            for _, vol := range ctr.config.NamedVolumes {
                ctrNamedVolumes[vol.Name] = vol
            }

            return r.removeContainer(ctx, ctr, force, false, true, true, timeout)
        }()

        if removalErr == nil {
            removalErr = err
        } else {
            logrus.Errorf("Removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
        }
    }
    if removalErr != nil {
        return removalErr
    }

    // Clear infra container ID before we remove the infra container.
    // There is a potential issue if we don't do that, and removal is
    // interrupted between RemoveAllContainers() below and the pod's removal
    // later - we end up with a reference to a nonexistent infra container.
    p.state.InfraContainerID = ""
    if err := p.save(); err != nil {
        return err
    }

    // Remove all containers in the pod from the state.
    if err := r.state.RemovePodContainers(p); err != nil {
        // If this fails, there isn't much more we can do.
        // The containers in the pod are unusable, but they still exist,
        // so pod removal will fail.
        return err
    }

    return nil
}

func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) error {
    if err := p.updatePod(); err != nil {
        return err
    }

    ctrs, err := r.state.PodContainers(p)
    if err != nil {
        return err
    }
    numCtrs := len(ctrs)

    // If the only running container in the pod is the pause container, remove the pod and container unconditionally.
    pauseCtrID := p.state.InfraContainerID
    if numCtrs == 1 && ctrs[0].ID() == pauseCtrID {
        removeCtrs = true
        force = true
    }
    if !removeCtrs && numCtrs > 0 {
        return fmt.Errorf("pod %s contains containers and cannot be removed: %w", p.ID(), define.ErrCtrExists)
    }

    var removalErr error
    ctrNamedVolumes := make(map[string]*ContainerNamedVolume)

    // Build a graph of all containers in the pod.
    graph, err := BuildContainerGraph(ctrs)
    if err != nil {
        // We have to allow the pod to be removed.
        // But let's only do it if force is set.
        if !force {
            return fmt.Errorf("cannot create container graph for pod %s: %w", p.ID(), err)
        }

        removalErr = fmt.Errorf("creating container graph for pod %s failed, fell back to loop removal: %w", p.ID(), err)

        if err := r.removeMalformedPod(ctx, p, ctrs, force, timeout, ctrNamedVolumes); err != nil {
            logrus.Errorf("Error creating container graph for pod %s: %v. Falling back to loop removal.", p.ID(), err)
            return err
        }
    } else {
        ctrErrors := make(map[string]error)
        ctrsVisited := make(map[string]bool)

        for _, node := range graph.notDependedOnNodes {
            removeNode(ctx, node, p, force, timeout, false, ctrErrors, ctrsVisited, ctrNamedVolumes)
        }

        // This is gross, but I don't want to change the signature on
        // removePod - especially since any change here eventually has
        // to map down to one error unless we want to make a breaking
        // API change.
        if len(ctrErrors) > 0 {
            var allErrs error
            for id, err := range ctrErrors {
                allErrs = multierror.Append(allErrs, fmt.Errorf("removing container %s from pod %s: %w", id, p.ID(), err))
            }
            return allErrs
        }
    }

    for volName := range ctrNamedVolumes {
        volume, err := r.state.Volume(volName)
        if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
            logrus.Errorf("Retrieving volume %s: %v", volName, err)
            continue
        }
        if !volume.Anonymous() {
            continue
        }
        if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil {
            if errors.Is(err, define.ErrNoSuchVolume) || errors.Is(err, define.ErrVolumeRemoved) {
                continue
            }
            logrus.Errorf("Removing volume %s: %v", volName, err)
        }
    }

    // Remove pod cgroup, if present
    if p.state.CgroupPath != "" {
        logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath)

        switch p.runtime.config.Engine.CgroupManager {
        case config.SystemdCgroupsManager:
            if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil {
                if removalErr == nil {
                    removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err)
                } else {
                    logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
                }
            }
        case config.CgroupfsCgroupsManager:
            // Delete the cgroupfs cgroup
            // Make sure the conmon cgroup is deleted first
            // Since the pod is almost gone, don't bother failing
            // hard - instead, just log errors.
            conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
            conmonCgroup, err := cgroups.Load(conmonCgroupPath)
            if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
                if removalErr == nil {
                    removalErr = fmt.Errorf("retrieving pod %s conmon cgroup: %w", p.ID(), err)
                } else {
                    logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
                }
            }
            if err == nil {
                if err = conmonCgroup.Delete(); err != nil {
                    if removalErr == nil {
                        removalErr = fmt.Errorf("removing pod %s conmon cgroup: %w", p.ID(), err)
                    } else {
                        logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
                    }
                }
            }
            cgroup, err := cgroups.Load(p.state.CgroupPath)
            if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
                if removalErr == nil {
                    removalErr = fmt.Errorf("retrieving pod %s cgroup: %w", p.ID(), err)
                } else {
                    logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
                }
            }
            if err == nil {
                if err := cgroup.Delete(); err != nil {
                    if removalErr == nil {
                        removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err)
                    } else {
                        logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
                    }
                }
            }
        default:
            // This should be caught much earlier, but let's still
            // keep going so we make sure to evict the pod before
            // ending up with an inconsistent state.
            if removalErr == nil {
                removalErr = fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal)
            } else {
                logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID())
            }
        }
    }

    if err := p.maybeRemoveServiceContainer(); err != nil {
        return err
    }

    // Remove pod from state
    if err := r.state.RemovePod(p); err != nil {
        if removalErr != nil {
            logrus.Errorf("%v", removalErr)
        }
        return err
    }

    // Mark pod invalid
    p.valid = false
    p.newPodEvent(events.Remove)

    // Deallocate the pod lock
    if err := p.lock.Free(); err != nil {
        if removalErr == nil {
            removalErr = fmt.Errorf("freeing pod %s lock: %w", p.ID(), err)
        } else {
            logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
        }
    }

    return removalErr
}
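One detail of NewPod above worth noting: generated pod names can collide with concurrently created pods (see #11735), so the loop keeps generating fresh names until AddPod succeeds or returns an error other than a name/ID conflict. A stripped-down sketch of that retry shape, with hypothetical stand-ins for r.generateName and r.state.AddPod:

package main

import (
    "errors"
    "fmt"
    "math/rand"
)

var errExists = errors.New("name already in use")

// addPod stands in for r.state.AddPod; it fails on a name collision.
func addPod(taken map[string]bool, name string) error {
    if taken[name] {
        return fmt.Errorf("pod %s: %w", name, errExists)
    }
    taken[name] = true
    return nil
}

func main() {
    taken := map[string]bool{"pod_0": true, "pod_1": true}
    for {
        name := fmt.Sprintf("pod_%d", rand.Intn(4)) // stands in for r.generateName
        err := addPod(taken, name)
        if err == nil {
            fmt.Println("created", name)
            return
        }
        if !errors.Is(err, errExists) {
            fmt.Println("fatal:", err) // non-conflict errors are not retried
            return
        }
        // Name conflict: loop and try a new generated name.
    }
}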
9  libpod/runtime_pod_freebsd.go  Normal file
@@ -0,0 +1,9 @@
package libpod

import (
    "github.com/containers/podman/v4/pkg/specgen"
)

func (r *Runtime) platformMakePod(pod *Pod, p specgen.PodSpecGenerator) error {
    return nil
}
@@ -1,11 +1,6 @@
//go:build linux
// +build linux

package libpod

import (
    "context"
    "errors"
    "fmt"
    "path"
    "path/filepath"
@@ -14,51 +9,12 @@ import (
    "github.com/containers/common/pkg/cgroups"
    "github.com/containers/common/pkg/config"
    "github.com/containers/podman/v4/libpod/define"
    "github.com/containers/podman/v4/libpod/events"
    "github.com/containers/podman/v4/pkg/rootless"
    "github.com/containers/podman/v4/pkg/specgen"
    "github.com/hashicorp/go-multierror"
    "github.com/sirupsen/logrus"
)

-// NewPod makes a new, empty pod
-func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, options ...PodCreateOption) (_ *Pod, deferredErr error) {
-    if !r.valid {
-        return nil, define.ErrRuntimeStopped
-    }
-
-    pod := newPod(r)
-
-    // Set default namespace to runtime's namespace
-    // Do so before options run so they can override it
-    if r.config.Engine.Namespace != "" {
-        pod.config.Namespace = r.config.Engine.Namespace
-    }
-
-    for _, option := range options {
-        if err := option(pod); err != nil {
-            return nil, fmt.Errorf("running pod create option: %w", err)
-        }
-    }
-
-    // Allocate a lock for the pod
-    lock, err := r.lockManager.AllocateLock()
-    if err != nil {
-        return nil, fmt.Errorf("allocating lock for new pod: %w", err)
-    }
-    pod.lock = lock
-    pod.config.LockID = pod.lock.ID()
-
-    defer func() {
-        if deferredErr != nil {
-            if err := pod.lock.Free(); err != nil {
-                logrus.Errorf("Freeing pod lock after failed creation: %v", err)
-            }
-        }
-    }()
-
-    pod.valid = true
-
+func (r *Runtime) platformMakePod(pod *Pod, p specgen.PodSpecGenerator) error {
    // Check Cgroup parent sanity, and set it if it was not set
    if r.config.Cgroups() != "disabled" {
        switch r.config.Engine.CgroupManager {
@@ -69,7 +25,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
            if pod.config.CgroupParent == "" {
                pod.config.CgroupParent = CgroupfsDefaultCgroupParent
            } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
-                return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
+                return fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
            }
            // If we are set to use pod cgroups, set the cgroup parent that
            // all containers in the pod will share
@@ -81,7 +37,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
                if !rootless.IsRootless() {
                    res, err := GetLimits(p.ResourceLimits)
                    if err != nil {
-                        return nil, err
+                        return err
                    }
                    // Need to both create and update the cgroup
                    // rather than create a new path in c/common for pod cgroup creation
@@ -89,11 +45,11 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
                    // populate the resource limits on the pod level
                    cgc, err := cgroups.New(pod.state.CgroupPath, &res)
                    if err != nil {
-                        return nil, err
+                        return err
                    }
                    err = cgc.Update(&res)
                    if err != nil {
-                        return nil, err
+                        return err
                    }
                }
            }
@@ -107,14 +63,14 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
                pod.config.CgroupParent = SystemdDefaultCgroupParent
            }
        } else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") {
-            return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
+            return fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
        }
        // If we are set to use pod cgroups, set the cgroup parent that
        // all containers in the pod will share
        if pod.config.UsePodCgroup {
            cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.ResourceLimits)
            if err != nil {
-                return nil, fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
+                return fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err)
            }
            pod.state.CgroupPath = cgroupPath
            if p.InfraContainerSpec != nil {
@@ -122,7 +78,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
            }
        }
        default:
-            return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
+            return fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
        }
    }

@@ -130,287 +86,5 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
        logrus.Debugf("Got pod cgroup as %s", pod.state.CgroupPath)
    }
    [The remaining lines removed by this hunk, namely the rest of NewPod plus AddInfra, SavePod, removeMalformedPod, and removePod, are identical to the code added in libpod/runtime_pod_common.go above.]
@@ -1,5 +1,5 @@
-//go:build !linux
-// +build !linux
+//go:build !linux && !freebsd
+// +build !linux,!freebsd

package libpod

@@ -19,6 +19,11 @@ func systemdSliceFromPath(parent, name string, resources *spec.LinuxResources) (
    return "", errors.New("not implemented systemdSliceFromPath")
}

+// deleteSystemdCgroup deletes the systemd cgroup at the given location
+func deleteSystemdCgroup(path string, resources *spec.LinuxResources) error {
+    return nil
+}

// No equivalent on FreeBSD?
func LabelVolumePath(path string) error {
    return nil
@@ -49,10 +49,10 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) {
    if err != nil {
        return nil, err
    }
-    if p.PodSpecGen.InfraContainerSpec.ResourceLimits.BlockIO != nil {
+    if p.PodSpecGen.InfraContainerSpec.ResourceLimits != nil &&
+        p.PodSpecGen.InfraContainerSpec.ResourceLimits.BlockIO != nil {
        p.PodSpecGen.ResourceLimits.BlockIO = p.PodSpecGen.InfraContainerSpec.ResourceLimits.BlockIO
    }

    err = specgen.WeightDevices(p.PodSpecGen.InfraContainerSpec)
    if err != nil {
        return nil, err
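The guard added in this last hunk matters because InfraContainerSpec.ResourceLimits can itself be nil, in which case reading its BlockIO field would panic. A minimal sketch of the failure mode and the fix, using hypothetical stand-in types for the specgen structs:

package main

import "fmt"

type blockIO struct{ Weight uint16 }
type limits struct{ BlockIO *blockIO }
type spec struct{ ResourceLimits *limits }

func main() {
    s := &spec{} // ResourceLimits left nil, as in a pod spec with no limits set

    // Unguarded: s.ResourceLimits.BlockIO panics with a nil pointer dereference.
    // Guarded, mirroring the fixed condition:
    if s.ResourceLimits != nil && s.ResourceLimits.BlockIO != nil {
        fmt.Println("copy BlockIO weight:", s.ResourceLimits.BlockIO.Weight)
    } else {
        fmt.Println("no BlockIO limits to copy")
    }
}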