Merge pull request #16123 from alexlarsson/less-json-dup

Avoid unnecessary calls to Container.Config() and Container.Spec()
Daniel J Walsh, 2022-10-12 10:45:47 -04:00 (committed by GitHub)
10 changed files with 41 additions and 38 deletions


@@ -688,6 +688,14 @@ func (c *Container) Terminal() bool {
 	return false
 }
 
+// LinuxResources return the containers Linux Resources (if any)
+func (c *Container) LinuxResources() *spec.LinuxResources {
+	if c.config.Spec != nil && c.config.Spec.Linux != nil {
+		return c.config.Spec.Linux.Resources
+	}
+	return nil
+}
+
 // State Accessors
 // Require locking
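
The added accessor is the heart of the change: the nil checks on config.Spec and config.Spec.Linux move into one place, so every caller below only has to guard the returned pointer. A minimal, self-contained sketch of the pattern using the OCI runtime-spec types; the container struct here is a simplified stand-in for libpod's Container, not the real type:

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// container is a simplified stand-in for libpod's Container; only the field
// the accessor needs is modeled here.
type container struct {
	config struct {
		Spec *spec.Spec
	}
}

// LinuxResources mirrors the accessor added above: both nil checks live in
// one place, and callers only guard the returned pointer.
func (c *container) LinuxResources() *spec.LinuxResources {
	if c.config.Spec != nil && c.config.Spec.Linux != nil {
		return c.config.Spec.Linux.Resources
	}
	return nil
}

func main() {
	limit := int64(256 << 20) // 256 MiB
	c := &container{}
	c.config.Spec = &spec.Spec{
		Linux: &spec.Linux{
			Resources: &spec.LinuxResources{
				Memory: &spec.LinuxMemory{Limit: &limit},
			},
		},
	}
	// Callers now write one guard instead of chaining four nil checks.
	if res := c.LinuxResources(); res != nil && res.Memory != nil && res.Memory.Limit != nil {
		fmt.Println("memory limit:", *res.Memory.Limit)
	}
}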


@@ -700,10 +700,10 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
 	kubeContainer.StdinOnce = false
 	kubeContainer.TTY = c.Terminal()
 
-	if c.config.Spec.Linux != nil &&
-		c.config.Spec.Linux.Resources != nil {
-		if c.config.Spec.Linux.Resources.Memory != nil &&
-			c.config.Spec.Linux.Resources.Memory.Limit != nil {
+	resources := c.LinuxResources()
+	if resources != nil {
+		if resources.Memory != nil &&
+			resources.Memory.Limit != nil {
 			if kubeContainer.Resources.Limits == nil {
 				kubeContainer.Resources.Limits = v1.ResourceList{}
 			}
@@ -713,11 +713,11 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
 			kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
 		}
 
-		if c.config.Spec.Linux.Resources.CPU != nil &&
-			c.config.Spec.Linux.Resources.CPU.Quota != nil &&
-			c.config.Spec.Linux.Resources.CPU.Period != nil {
-			quota := *c.config.Spec.Linux.Resources.CPU.Quota
-			period := *c.config.Spec.Linux.Resources.CPU.Period
+		if resources.CPU != nil &&
+			resources.CPU.Quota != nil &&
+			resources.CPU.Period != nil {
+			quota := *resources.CPU.Quota
+			period := *resources.CPU.Period
 			if quota > 0 && period > 0 {
 				cpuLimitMilli := int64(1000 * util.PeriodAndQuotaToCores(period, quota))
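
The last line of this hunk converts the CFS quota/period pair into Kubernetes millicores. A small worked example, with periodAndQuotaToCores written as the plain quota/period ratio that util.PeriodAndQuotaToCores is assumed to compute (a hypothetical re-implementation, not podman's actual code):

package main

import "fmt"

// periodAndQuotaToCores is assumed to compute what podman's
// util.PeriodAndQuotaToCores returns: the CFS quota divided by the period,
// i.e. the number of cores the container may use.
func periodAndQuotaToCores(period uint64, quota int64) float64 {
	return float64(quota) / float64(period)
}

func main() {
	// 50000µs of quota per 100000µs period is half a core, which Kubernetes
	// writes as 500m; this is what the cpuLimitMilli line above produces.
	period := uint64(100000)
	quota := int64(50000)
	cpuLimitMilli := int64(1000 * periodAndQuotaToCores(period, quota))
	fmt.Printf("%dm\n", cpuLimitMilli) // prints: 500m
}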


@@ -133,8 +133,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec
 	// there are only 2 valid cgroup managers
 	cgroupParent := ctr.CgroupParent()
 	cgroupPath := filepath.Join(ctr.config.CgroupParent, "conmon")
-	Resource := ctr.Spec().Linux.Resources
-	cgroupResources, err := GetLimits(Resource)
+	cgroupResources, err := GetLimits(ctr.LinuxResources())
 	if err != nil {
 		logrus.StandardLogger().Log(logLevel, "Could not get ctr resources")
 	}
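
Besides skipping the copy that Spec() makes, the new call removes a latent nil-pointer dereference: the old ctr.Spec().Linux.Resources would panic if the spec had no Linux section, while LinuxResources() returns nil safely. That only works if the callee tolerates a nil pointer, roughly as in this hypothetical getLimits sketch (the shape the call site relies on, not podman's actual GetLimits):

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// getLimits is a hypothetical stand-in for GetLimits: a nil
// *spec.LinuxResources is a valid input meaning "no limits configured".
func getLimits(res *spec.LinuxResources) (map[string]int64, error) {
	limits := map[string]int64{}
	if res == nil {
		return limits, nil // no resources section: empty limits, no error
	}
	if res.Memory != nil && res.Memory.Limit != nil {
		limits["memory"] = *res.Memory.Limit
	}
	return limits, nil
}

func main() {
	l, err := getLimits(nil) // safe even when the spec has no Linux section
	fmt.Println(l, err)
}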


@@ -100,9 +100,9 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev
 func (c *Container) getMemLimit() uint64 {
 	memLimit := uint64(math.MaxUint64)
-	if c.config.Spec.Linux != nil && c.config.Spec.Linux.Resources != nil &&
-		c.config.Spec.Linux.Resources.Memory != nil && c.config.Spec.Linux.Resources.Memory.Limit != nil {
-		memLimit = uint64(*c.config.Spec.Linux.Resources.Memory.Limit)
+	resources := c.LinuxResources()
+	if resources != nil && resources.Memory != nil && resources.Memory.Limit != nil {
+		memLimit = uint64(*resources.Memory.Limit)
 	}
 	mi, err := system.ReadMemInfo()


@@ -86,9 +86,9 @@ func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, prev
 func (c *Container) getMemLimit() uint64 {
 	memLimit := uint64(math.MaxUint64)
-	if c.config.Spec.Linux != nil && c.config.Spec.Linux.Resources != nil &&
-		c.config.Spec.Linux.Resources.Memory != nil && c.config.Spec.Linux.Resources.Memory.Limit != nil {
-		memLimit = uint64(*c.config.Spec.Linux.Resources.Memory.Limit)
+	resources := c.LinuxResources()
+	if resources != nil && resources.Memory != nil && resources.Memory.Limit != nil {
+		memLimit = uint64(*resources.Memory.Limit)
 	}
 	si := &syscall.Sysinfo_t{}
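
Both platform variants of getMemLimit (the ReadMemInfo one for Linux, the Sysinfo one for FreeBSD) share one shape: start from "unlimited", apply the configured limit if set, then cap at physical memory in the unshown remainder of the function (assumed from the meminfo calls). A sketch under that assumption, with effectiveMemLimit and hostTotal as illustrative names:

package main

import (
	"fmt"
	"math"
)

// effectiveMemLimit sketches getMemLimit: start from "unlimited" (MaxUint64),
// take the configured limit when one is set, and (assumed from the
// ReadMemInfo/Sysinfo calls above) never report more than the host has.
func effectiveMemLimit(configured *int64, hostTotal uint64) uint64 {
	memLimit := uint64(math.MaxUint64)
	if configured != nil {
		memLimit = uint64(*configured)
	}
	if memLimit > hostTotal {
		return hostTotal
	}
	return memLimit
}

func main() {
	fmt.Println(effectiveMemLimit(nil, 16<<30)) // no limit set: host total (16 GiB)
	limit := int64(2 << 30)
	fmt.Println(effectiveMemLimit(&limit, 16<<30)) // configured: 2 GiB
}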


@@ -134,10 +134,10 @@ streamLabel: // A label to flatten the scope
 			InstanceID: "",
 		}
 
-		cfg := ctnr.Config()
+		resources := ctnr.LinuxResources()
 		memoryLimit := cgroupStat.MemoryStats.Usage.Limit
-		if cfg.Spec.Linux != nil && cfg.Spec.Linux.Resources != nil && cfg.Spec.Linux.Resources.Memory != nil && *cfg.Spec.Linux.Resources.Memory.Limit > 0 {
-			memoryLimit = uint64(*cfg.Spec.Linux.Resources.Memory.Limit)
+		if resources != nil && resources.Memory != nil && *resources.Memory.Limit > 0 {
+			memoryLimit = uint64(*resources.Memory.Limit)
 		}
 		memInfo, err := system.ReadMemInfo()


@@ -84,19 +84,19 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
 		// - ancestor=(<image-name>[:tag]|<image-id>|<image@digest>) - containers created from an image or a descendant.
 		return func(c *libpod.Container) bool {
 			for _, filterValue := range filterValues {
-				containerConfig := c.Config()
+				rootfsImageID, rootfsImageName := c.Image()
 				var imageTag string
 				var imageNameWithoutTag string
 				// Compare with ImageID, ImageName
 				// Will match ImageName if running image has tag latest for other tags exact complete filter must be given
-				imageNameSlice := strings.SplitN(containerConfig.RootfsImageName, ":", 2)
+				imageNameSlice := strings.SplitN(rootfsImageName, ":", 2)
 				if len(imageNameSlice) == 2 {
 					imageNameWithoutTag = imageNameSlice[0]
 					imageTag = imageNameSlice[1]
 				}
-				if (containerConfig.RootfsImageID == filterValue) ||
-					(containerConfig.RootfsImageName == filterValue) ||
+				if (rootfsImageID == filterValue) ||
+					(rootfsImageName == filterValue) ||
 					(imageNameWithoutTag == filterValue && imageTag == "latest") {
 					return true
 				}
@@ -110,14 +110,12 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
 			if err != nil {
 				return nil, err
 			}
-			containerConfig := ctr.Config()
-			if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
-				createTime = containerConfig.CreatedTime
+			if createTime.IsZero() || createTime.After(ctr.CreatedTime()) {
+				createTime = ctr.CreatedTime()
 			}
 		}
 		return func(c *libpod.Container) bool {
-			cc := c.Config()
-			return createTime.After(cc.CreatedTime)
+			return createTime.After(c.CreatedTime())
 		}, nil
 	case "since":
 		var createTime time.Time
@@ -126,19 +124,17 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
 			if err != nil {
 				return nil, err
 			}
-			containerConfig := ctr.Config()
-			if createTime.IsZero() || createTime.After(containerConfig.CreatedTime) {
-				createTime = containerConfig.CreatedTime
+			if createTime.IsZero() || createTime.After(ctr.CreatedTime()) {
+				createTime = ctr.CreatedTime()
 			}
 		}
 		return func(c *libpod.Container) bool {
-			cc := c.Config()
-			return createTime.Before(cc.CreatedTime)
+			return createTime.Before(c.CreatedTime())
 		}, nil
 	case "volume":
 		//- volume=(<volume-name>|<mount-point-destination>)
 		return func(c *libpod.Container) bool {
-			containerConfig := c.Config()
+			containerConfig := c.ConfigNoCopy()
 			var dest string
 			for _, filterValue := range filterValues {
 				arr := strings.SplitN(filterValue, ":", 2)
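
These filter hunks show both flavors of the optimization: where a single field is needed, dedicated accessors (c.Image(), c.CreatedTime()) replace a full Config() call, and where the whole config is read, ConfigNoCopy() hands back the internal pointer instead of a copy. The branch name less-json-dup suggests Config() duplicates the config via a JSON round trip; the sketch below imitates that behavior but is a simplified model, not libpod's implementation:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// containerConfig models a couple of the fields the filters read.
type containerConfig struct {
	ConmonPidFile string    `json:"conmonPidFile"`
	CreatedTime   time.Time `json:"createdTime"`
}

type container struct {
	config *containerConfig
}

// Config models the expensive accessor: a deep copy via a JSON round trip,
// safe to mutate but costly to call once per container in a filter loop.
func (c *container) Config() *containerConfig {
	data, err := json.Marshal(c.config)
	if err != nil {
		return nil
	}
	out := new(containerConfig)
	if err := json.Unmarshal(data, out); err != nil {
		return nil
	}
	return out
}

// ConfigNoCopy models the cheap accessor: the internal pointer, which the
// caller must treat as read-only.
func (c *container) ConfigNoCopy() *containerConfig {
	return c.config
}

func main() {
	c := &container{config: &containerConfig{
		ConmonPidFile: "/run/ctr/conmon.pid",
		CreatedTime:   time.Now(),
	}}
	fmt.Println(c.Config().ConmonPidFile)       // mutable copy
	fmt.Println(c.ConfigNoCopy().ConmonPidFile) // read-only view, no copy
}

The trade-off is the usual one: ConfigNoCopy() is only safe for callers that never mutate the result, which these read-only filters are.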


@@ -136,7 +136,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
 	paths := []string{}
 	for _, ctr := range ctrs {
-		paths = append(paths, ctr.Config().ConmonPidFile)
+		paths = append(paths, ctr.ConfigNoCopy().ConmonPidFile)
 	}
 	if len(paths) > 0 {


@@ -474,7 +474,7 @@ func ConfigToSpec(rt *libpod.Runtime, specg *specgen.SpecGenerator, contaierID s
 		}
 	}
 	specg.OverlayVolumes = overlay
-	_, mounts := c.SortUserVolumes(c.Spec())
+	_, mounts := c.SortUserVolumes(c.ConfigNoCopy().Spec)
 	specg.Mounts = mounts
 	specg.HostDeviceList = conf.DeviceHostSrc
 	specg.Networks = conf.Networks


@@ -288,7 +288,7 @@ func getVolumesFrom(volumesFrom []string, runtime *libpod.Runtime) (map[string]s
 	// Now we get the container's spec and loop through its volumes
 	// and append them in if we can find them.
-	spec := ctr.Spec()
+	spec := ctr.ConfigNoCopy().Spec
 	if spec == nil {
 		return nil, nil, fmt.Errorf("retrieving container %s spec for volumes-from", ctr.ID())
 	}