Merge pull request #16123 from alexlarsson/less-json-dup

Avoid unnecessary calls to Container.Config() and Container.Spec()
Daniel J Walsh (committed by GitHub), 2022-10-12 10:45:47 -04:00
10 changed files with 41 additions and 38 deletions
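
For context on why these call sites changed: in libpod, Container.Config() and Container.Spec() hand back deep copies of the container's configuration, and the copy is made via a JSON marshal/unmarshal round trip (the branch name less-json-dup alludes to this), so a caller that only needs a single field pays for a full serialization on every call. The accessors this commit switches to — the new LinuxResources() plus ConfigNoCopy(), CreatedTime(), and Image() — read the underlying struct directly. A minimal sketch of the two accessor styles, using simplified stand-in types rather than Podman's real ones:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // ContainerConfig stands in for libpod's much larger config struct.
    type ContainerConfig struct {
        RootfsImageID   string
        RootfsImageName string
    }

    type Container struct {
        config *ContainerConfig
    }

    // Config mimics the old accessor: a deep copy via a JSON round trip,
    // paid on every call even when the caller reads a single field.
    func (c *Container) Config() *ContainerConfig {
        out := new(ContainerConfig)
        data, err := json.Marshal(c.config)
        if err != nil {
            return nil
        }
        if err := json.Unmarshal(data, out); err != nil {
            return nil
        }
        return out
    }

    // ConfigNoCopy mimics the new accessor: it returns the shared struct,
    // so the caller must treat it as read-only.
    func (c *Container) ConfigNoCopy() *ContainerConfig {
        return c.config
    }

    // Image returns only the two image fields, with no copy at all.
    func (c *Container) Image() (id, name string) {
        return c.config.RootfsImageID, c.config.RootfsImageName
    }

    func main() {
        c := &Container{config: &ContainerConfig{
            RootfsImageID:   "sha256:abc123",
            RootfsImageName: "quay.io/example/img:latest",
        }}
        id, name := c.Image() // cheap: two string reads
        fmt.Println(id, name)
        fmt.Println(c.Config().RootfsImageID) // expensive: full JSON copy
    }

The diffs below apply this substitution mechanically: chained c.config.Spec.Linux nil checks collapse into the new LinuxResources() helper, and Config()/Spec() calls whose results are only read become ConfigNoCopy(), CreatedTime(), or Image(). The trade-off is the usual one for a no-copy accessor: callers must not mutate what it returns.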

View File

@@ -688,6 +688,14 @@ func (c *Container) Terminal() bool {
     return false
 }
 
+// LinuxResources return the containers Linux Resources (if any)
+func (c *Container) LinuxResources() *spec.LinuxResources {
+    if c.config.Spec != nil && c.config.Spec.Linux != nil {
+        return c.config.Spec.Linux.Resources
+    }
+    return nil
+}
+
 // State Accessors
 // Require locking

View File

@@ -700,10 +700,10 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
     kubeContainer.StdinOnce = false
     kubeContainer.TTY = c.Terminal()
 
-    if c.config.Spec.Linux != nil &&
-        c.config.Spec.Linux.Resources != nil {
-        if c.config.Spec.Linux.Resources.Memory != nil &&
-            c.config.Spec.Linux.Resources.Memory.Limit != nil {
+    resources := c.LinuxResources()
+    if resources != nil {
+        if resources.Memory != nil &&
+            resources.Memory.Limit != nil {
             if kubeContainer.Resources.Limits == nil {
                 kubeContainer.Resources.Limits = v1.ResourceList{}
             }
@@ -713,11 +713,11 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
             kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
         }
 
-        if c.config.Spec.Linux.Resources.CPU != nil &&
-            c.config.Spec.Linux.Resources.CPU.Quota != nil &&
-            c.config.Spec.Linux.Resources.CPU.Period != nil {
-            quota := *c.config.Spec.Linux.Resources.CPU.Quota
-            period := *c.config.Spec.Linux.Resources.CPU.Period
+        if resources.CPU != nil &&
+            resources.CPU.Quota != nil &&
+            resources.CPU.Period != nil {
+            quota := *resources.CPU.Quota
+            period := *resources.CPU.Period
 
             if quota > 0 && period > 0 {
                 cpuLimitMilli := int64(1000 * util.PeriodAndQuotaToCores(period, quota))

View File

@@ -133,8 +133,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
         // there are only 2 valid cgroup managers
         cgroupParent := ctr.CgroupParent()
         cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
-        Resource := ctr.Spec().Linux.Resources
-        cgroupResources, err := GetLimits(Resource)
+        cgroupResources, err := GetLimits(ctr.LinuxResources())
         if err != nil {
             logrus.StandardLogger().Log(logLevel, "Could not get ctr resources")
         }

View File

@@ -100,9 +100,9 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev
 func (c *Container) getMemLimit() uint64 {
     memLimit := uint64(math.MaxUint64)
 
-    if c.config.Spec.Linux != nil && c.config.Spec.Linux.Resources != nil &&
-        c.config.Spec.Linux.Resources.Memory != nil && c.config.Spec.Linux.Resources.Memory.Limit != nil {
-        memLimit = uint64(*c.config.Spec.Linux.Resources.Memory.Limit)
+    resources := c.LinuxResources()
+    if resources != nil && resources.Memory != nil && resources.Memory.Limit != nil {
+        memLimit = uint64(*resources.Memory.Limit)
     }
 
     mi, err := system.ReadMemInfo()

View File

@@ -86,9 +86,9 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev
 func (c *Container) getMemLimit() uint64 {
     memLimit := uint64(math.MaxUint64)
 
-    if c.config.Spec.Linux != nil && c.config.Spec.Linux.Resources != nil &&
-        c.config.Spec.Linux.Resources.Memory != nil && c.config.Spec.Linux.Resources.Memory.Limit != nil {
-        memLimit = uint64(*c.config.Spec.Linux.Resources.Memory.Limit)
+    resources := c.LinuxResources()
+    if resources != nil && resources.Memory != nil && resources.Memory.Limit != nil {
+        memLimit = uint64(*resources.Memory.Limit)
     }
 
     si := &syscall.Sysinfo_t{}

View File

@@ -134,10 +134,10 @@ streamLabel: // A label to flatten the scope
             InstanceID: "",
         }
 
-        cfg := ctnr.Config()
+        resources := ctnr.LinuxResources()
         memoryLimit := cgroupStat.MemoryStats.Usage.Limit
-        if cfg.Spec.Linux != nil && cfg.Spec.Linux.Resources != nil && cfg.Spec.Linux.Resources.Memory != nil && *cfg.Spec.Linux.Resources.Memory.Limit > 0 {
-            memoryLimit = uint64(*cfg.Spec.Linux.Resources.Memory.Limit)
+        if resources != nil && resources.Memory != nil && *resources.Memory.Limit > 0 {
+            memoryLimit = uint64(*resources.Memory.Limit)
         }
 
         memInfo, err := system.ReadMemInfo()

View File

@@ -84,19 +84,19 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
         // - ancestor=(<image-name>[:tag]|<image-id>| ⟨image@digest⟩) - containers created from an image or a descendant.
         return func(c *libpod.Container) bool {
             for _, filterValue := range filterValues {
-                containerConfig := c.Config()
+                rootfsImageID, rootfsImageName := c.Image()
                 var imageTag string
                 var imageNameWithoutTag string
                 // Compare with ImageID, ImageName
                 // Will match ImageName if running image has tag latest for other tags exact complete filter must be given
-                imageNameSlice := strings.SplitN(containerConfig.RootfsImageName, ":", 2)
+                imageNameSlice := strings.SplitN(rootfsImageName, ":", 2)
                 if len(imageNameSlice) == 2 {
                     imageNameWithoutTag = imageNameSlice[0]
                     imageTag = imageNameSlice[1]
                 }
-                if (containerConfig.RootfsImageID == filterValue) ||
-                    (containerConfig.RootfsImageName == filterValue) ||
+                if (rootfsImageID == filterValue) ||
+                    (rootfsImageName == filterValue) ||
                     (imageNameWithoutTag == filterValue && imageTag == "latest") {
                     return true
                 }
@@ -110,14 +110,12 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
             if err != nil {
                 return nil, err
             }
-            containerConfig := ctr.Config()
-            if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
-                createTime = containerConfig.CreatedTime
+            if createTime.IsZero() || createTime.After(ctr.CreatedTime()) {
+                createTime = ctr.CreatedTime()
             }
         }
         return func(c *libpod.Container) bool {
-            cc := c.Config()
-            return createTime.After(cc.CreatedTime)
+            return createTime.After(c.CreatedTime())
         }, nil
     case "since":
         var createTime time.Time
@@ -126,19 +124,17 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
             if err != nil {
                 return nil, err
             }
-            containerConfig := ctr.Config()
-            if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
-                createTime = containerConfig.CreatedTime
+            if createTime.IsZero() || createTime.After(ctr.CreatedTime()) {
+                createTime = ctr.CreatedTime()
             }
         }
         return func(c *libpod.Container) bool {
-            cc := c.Config()
-            return createTime.Before(cc.CreatedTime)
+            return createTime.Before(c.CreatedTime())
         }, nil
     case "volume":
         //- volume=(<volume-name>|<mount-point-destination>)
         return func(c *libpod.Container) bool {
-            containerConfig := c.Config()
+            containerConfig := c.ConfigNoCopy()
             var dest string
             for _, filterValue := range filterValues {
                 arr := strings.SplitN(filterValue, ":", 2)

View File

@@ -136,7 +136,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
         paths := []string{}
         for _, ctr := range ctrs {
-            paths = append(paths, ctr.Config().ConmonPidFile)
+            paths = append(paths, ctr.ConfigNoCopy().ConmonPidFile)
         }
 
         if len(paths) > 0 {

View File

@@ -474,7 +474,7 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, contaierID s
         }
     }
     specg.OverlayVolumes = overlay
-    _, mounts := c.SortUserVolumes(c.Spec())
+    _, mounts := c.SortUserVolumes(c.ConfigNoCopy().Spec)
     specg.Mounts = mounts
     specg.HostDeviceList = conf.DeviceHostSrc
     specg.Networks = conf.Networks

View File

@@ -288,7 +288,7 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s
         // Now we get the container's spec and loop through its volumes
         // and append them in if we can find them.
-        spec := ctr.Spec()
+        spec := ctr.ConfigNoCopy().Spec
         if spec == nil {
             return nil, nil, fmt.Errorf("retrieving container %s spec for volumes-from", ctr.ID())
         }