podman cgroup enhancement

Currently, setting any sort of resource limit on a pod does nothing. With the newly refactored creation process in c/common, podman can now set resources at the pod level,
meaning that resource-related flags can now be exposed to podman pod create.
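Roughly, those flags end up as an OCI resource block on the pod. A minimal, hypothetical sketch of that mapping (the helper name, flag values, and the CFS math here are illustrative only, not code from this change):

// Hypothetical sketch: a pod-level --cpus / --cpuset-cpus style limit expressed
// as the OCI resource spec that the pod cgroup is ultimately created from.
package main

import (
    "fmt"

    spec "github.com/opencontainers/runtime-spec/specs-go"
)

func podResourcesFromFlags(cpus float64, cpusetCpus string) *spec.LinuxResources {
    period := uint64(100000)               // standard 100ms CFS period
    quota := int64(cpus * float64(period)) // e.g. --cpus=2 -> quota of 200000
    return &spec.LinuxResources{
        CPU: &spec.LinuxCPU{
            Period: &period,
            Quota:  &quota,
            Cpus:   cpusetCpus,
        },
    }
}

func main() {
    res := podResourcesFromFlags(2, "0-1")
    fmt.Printf("quota=%d period=%d cpus=%s\n", *res.CPU.Quota, *res.CPU.Period, res.CPU.Cpus)
}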

cgroupfs and systemd are both supported, with varying degrees of completeness. cgroupfs is the much simpler process and is virtually complete for all resource types; the flags just need to be added. systemd, on the other hand,
has to be handled via the dbus API, meaning the limits need to be passed to systemd as recognized properties. The properties added so far are the ones that podman pod create already supports, plus `cpuset-mems`, since that will
be the next flag I work on.
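For reference, a rough sketch of what the dbus side looks like, assuming the go-systemd v22 bindings; the unit name, slice, PID, and the particular properties shown (MemoryMax, CPUWeight, TasksMax) are illustrative, not the exact set this change wires up:

// Hypothetical sketch: creating a transient systemd scope with resource limits
// passed as recognized unit properties over dbus.
package main

import (
    "context"
    "log"

    systemdDbus "github.com/coreos/go-systemd/v22/dbus"
    godbus "github.com/godbus/dbus/v5"
)

func main() {
    ctx := context.Background()
    conn, err := systemdDbus.NewWithContext(ctx)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    props := []systemdDbus.Property{
        {Name: "Delegate", Value: godbus.MakeVariant(true)},
        {Name: "Slice", Value: godbus.MakeVariant("machine.slice")},
        // Limits must be expressed as properties systemd recognizes,
        // not as raw cgroup file values.
        {Name: "MemoryMax", Value: godbus.MakeVariant(uint64(512 * 1024 * 1024))},
        {Name: "CPUWeight", Value: godbus.MakeVariant(uint64(200))},
        {Name: "TasksMax", Value: godbus.MakeVariant(uint64(2048))},
        // The scope needs at least one PID to adopt (value is made up).
        {Name: "PIDs", Value: godbus.MakeVariant([]uint32{1234})},
    }

    ch := make(chan string)
    if _, err := conn.StartTransientUnitContext(ctx, "libpod-example.scope", "replace", props, ch); err != nil {
        log.Fatal(err)
    }
    log.Println("job result:", <-ch)
}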

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Author:    cdoern
Date:      2022-06-13 15:35:16 -04:00
Committed: Charlie Doern
Parent:    95707a08bf
Commit:    2792e598c7
70 changed files with 5660 additions and 307 deletions

@@ -23,6 +23,9 @@ import (
    "text/template"
    "time"

    runcconfig "github.com/opencontainers/runc/libcontainer/configs"
    "github.com/opencontainers/runc/libcontainer/devices"

    "github.com/containers/common/pkg/cgroups"
    "github.com/containers/common/pkg/config"
    conmonConfig "github.com/containers/conmon/runner/config"
@@ -1451,9 +1454,14 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
    // TODO: This should be a switch - we are not guaranteed that
    // there are only 2 valid cgroup managers
    cgroupParent := ctr.CgroupParent()
    cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
    Resource := ctr.Spec().Linux.Resources
    cgroupResources, err := GetLimits(Resource)
    if err != nil {
        logrus.StandardLogger().Log(logLevel, "Could not get ctr resources")
    }
    if ctr.CgroupManager() == config.SystemdCgroupsManager {
        unitName := createUnitName("libpod-conmon", ctr.ID())
        realCgroupParent := cgroupParent
        splitParent := strings.Split(cgroupParent, "/")
        if strings.HasSuffix(cgroupParent, ".slice") && len(splitParent) > 1 {
@@ -1465,8 +1473,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
            logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to systemd sandbox cgroup: %v", err)
        }
    } else {
        cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
        control, err := cgroups.New(cgroupPath, &spec.LinuxResources{})
        control, err := cgroups.New(cgroupPath, &cgroupResources)
        if err != nil {
            logrus.StandardLogger().Logf(logLevel, "Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
        } else if err := control.AddPid(cmd.Process.Pid); err != nil {
@@ -1748,3 +1755,191 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
        }
    }
}
// GetLimits converts spec resource limits to cgroup consumable limits
func GetLimits(resource *spec.LinuxResources) (runcconfig.Resources, error) {
    if resource == nil {
        resource = &spec.LinuxResources{}
    }
    final := &runcconfig.Resources{}
    devs := []*devices.Rule{}

    // Devices
    for _, entry := range resource.Devices {
        if entry.Major == nil || entry.Minor == nil {
            continue
        }
        runeType := 'a'
        switch entry.Type {
        case "b":
            runeType = 'b'
        case "c":
            runeType = 'c'
        }

        devs = append(devs, &devices.Rule{
            Type:        devices.Type(runeType),
            Major:       *entry.Major,
            Minor:       *entry.Minor,
            Permissions: devices.Permissions(entry.Access),
            Allow:       entry.Allow,
        })
    }
    final.Devices = devs

    // HugepageLimits
    pageLimits := []*runcconfig.HugepageLimit{}
    for _, entry := range resource.HugepageLimits {
        pageLimits = append(pageLimits, &runcconfig.HugepageLimit{
            Pagesize: entry.Pagesize,
            Limit:    entry.Limit,
        })
    }
    final.HugetlbLimit = pageLimits

    // Networking
    netPriorities := []*runcconfig.IfPrioMap{}
    if resource.Network != nil {
        for _, entry := range resource.Network.Priorities {
            netPriorities = append(netPriorities, &runcconfig.IfPrioMap{
                Interface: entry.Name,
                Priority:  int64(entry.Priority),
            })
        }
    }
    final.NetPrioIfpriomap = netPriorities
    rdma := make(map[string]runcconfig.LinuxRdma)
    for name, entry := range resource.Rdma {
        rdma[name] = runcconfig.LinuxRdma{HcaHandles: entry.HcaHandles, HcaObjects: entry.HcaObjects}
    }
    final.Rdma = rdma

    // Memory
    if resource.Memory != nil {
        if resource.Memory.Limit != nil {
            final.Memory = *resource.Memory.Limit
        }
        if resource.Memory.Reservation != nil {
            final.MemoryReservation = *resource.Memory.Reservation
        }
        if resource.Memory.Swap != nil {
            final.MemorySwap = *resource.Memory.Swap
        }
        if resource.Memory.Swappiness != nil {
            final.MemorySwappiness = resource.Memory.Swappiness
        }
    }

    // CPU
    if resource.CPU != nil {
        if resource.CPU.Period != nil {
            final.CpuPeriod = *resource.CPU.Period
        }
        if resource.CPU.Quota != nil {
            final.CpuQuota = *resource.CPU.Quota
        }
        if resource.CPU.RealtimePeriod != nil {
            final.CpuRtPeriod = *resource.CPU.RealtimePeriod
        }
        if resource.CPU.RealtimeRuntime != nil {
            final.CpuRtRuntime = *resource.CPU.RealtimeRuntime
        }
        if resource.CPU.Shares != nil {
            final.CpuShares = *resource.CPU.Shares
        }
        final.CpusetCpus = resource.CPU.Cpus
        final.CpusetMems = resource.CPU.Mems
    }

    // BlkIO
    if resource.BlockIO != nil {
        if len(resource.BlockIO.ThrottleReadBpsDevice) > 0 {
            for _, entry := range resource.BlockIO.ThrottleReadBpsDevice {
                throttle := &runcconfig.ThrottleDevice{}
                dev := &runcconfig.BlockIODevice{
                    Major: entry.Major,
                    Minor: entry.Minor,
                }
                throttle.BlockIODevice = *dev
                throttle.Rate = entry.Rate
                final.BlkioThrottleReadBpsDevice = append(final.BlkioThrottleReadBpsDevice, throttle)
            }
        }
        if len(resource.BlockIO.ThrottleWriteBpsDevice) > 0 {
            for _, entry := range resource.BlockIO.ThrottleWriteBpsDevice {
                throttle := &runcconfig.ThrottleDevice{}
                dev := &runcconfig.BlockIODevice{
                    Major: entry.Major,
                    Minor: entry.Minor,
                }
                throttle.BlockIODevice = *dev
                throttle.Rate = entry.Rate
                final.BlkioThrottleWriteBpsDevice = append(final.BlkioThrottleWriteBpsDevice, throttle)
            }
        }
        if len(resource.BlockIO.ThrottleReadIOPSDevice) > 0 {
            for _, entry := range resource.BlockIO.ThrottleReadIOPSDevice {
                throttle := &runcconfig.ThrottleDevice{}
                dev := &runcconfig.BlockIODevice{
                    Major: entry.Major,
                    Minor: entry.Minor,
                }
                throttle.BlockIODevice = *dev
                throttle.Rate = entry.Rate
                final.BlkioThrottleReadIOPSDevice = append(final.BlkioThrottleReadIOPSDevice, throttle)
            }
        }
        if len(resource.BlockIO.ThrottleWriteIOPSDevice) > 0 {
            for _, entry := range resource.BlockIO.ThrottleWriteIOPSDevice {
                throttle := &runcconfig.ThrottleDevice{}
                dev := &runcconfig.BlockIODevice{
                    Major: entry.Major,
                    Minor: entry.Minor,
                }
                throttle.BlockIODevice = *dev
                throttle.Rate = entry.Rate
                final.BlkioThrottleWriteIOPSDevice = append(final.BlkioThrottleWriteIOPSDevice, throttle)
            }
        }
        if resource.BlockIO.LeafWeight != nil {
            final.BlkioLeafWeight = *resource.BlockIO.LeafWeight
        }
        if resource.BlockIO.Weight != nil {
            final.BlkioWeight = *resource.BlockIO.Weight
        }
        if len(resource.BlockIO.WeightDevice) > 0 {
            for _, entry := range resource.BlockIO.WeightDevice {
                weight := &runcconfig.WeightDevice{}
                dev := &runcconfig.BlockIODevice{
                    Major: entry.Major,
                    Minor: entry.Minor,
                }
                if entry.Weight != nil {
                    weight.Weight = *entry.Weight
                }
                if entry.LeafWeight != nil {
                    weight.LeafWeight = *entry.LeafWeight
                }
                weight.BlockIODevice = *dev
                final.BlkioWeightDevice = append(final.BlkioWeightDevice, weight)
            }
        }
    }

    // Pids
    if resource.Pids != nil {
        final.PidsLimit = resource.Pids.Limit
    }

    // Networking
    if resource.Network != nil {
        if resource.Network.ClassID != nil {
            final.NetClsClassid = *resource.Network.ClassID
        }
    }

    // Unified state
    final.Unified = resource.Unified
    return *final, nil
}
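
To make the cgroupfs flow above concrete, here is a condensed, hypothetical usage of the new helper from inside the same package, assuming the same imports as the file above; the conmon cgroup path, the PID, and the memory value are made up:

// Hypothetical usage within package libpod: OCI limits -> runc resources -> conmon cgroup.
func exampleConmonCgroup(pid int) error {
    limit := int64(512 * 1024 * 1024) // 512 MiB, made up for the example
    res := &spec.LinuxResources{
        Memory: &spec.LinuxMemory{Limit: &limit},
    }

    // Convert to the runc representation that c/common's cgroups.New expects.
    cgroupResources, err := GetLimits(res)
    if err != nil {
        return err
    }

    control, err := cgroups.New("/libpod_parent/example-pod/conmon", &cgroupResources)
    if err != nil {
        return err
    }
    return control.AddPid(pid)
}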