Merge pull request #14159 from vrothberg/service-container

play kube: service container
Committed by Daniel J Walsh on 2022-05-12 13:35:56 -04:00 (via GitHub)
53 changed files with 691 additions and 183 deletions


@ -139,6 +139,15 @@ func init() {
flags.StringVar(&kubeOptions.ContextDir, contextDirFlagName, "", "Path to top level of context directory")
_ = kubeCmd.RegisterFlagCompletionFunc(contextDirFlagName, completion.AutocompleteDefault)
// NOTE: The service-container flag is marked as hidden as it
// is purely designed for running play-kube in systemd units.
// It is not something users should need to know or care about.
//
// Having a flag rather than an env variable is cleaner.
serviceFlagName := "service-container"
flags.BoolVar(&kubeOptions.ServiceContainer, serviceFlagName, false, "Starts a service container before all pods")
_ = flags.MarkHidden("service-container")
flags.StringVar(&kubeOptions.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
_ = flags.MarkHidden("signature-policy")
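The hidden-flag pattern above is plain pflag usage; a self-contained sketch (illustrative only, not part of the diff) of registering a hidden boolean and reading it back:

package main

import (
    "fmt"

    "github.com/spf13/pflag"
)

func main() {
    var serviceContainer bool
    flags := pflag.NewFlagSet("kube", pflag.ExitOnError)
    flags.BoolVar(&serviceContainer, "service-container", false, "Starts a service container before all pods")
    // Hidden flags parse normally; they are merely omitted from --help,
    // which keeps them out of sight of regular users.
    _ = flags.MarkHidden("service-container")
    _ = flags.Parse([]string{"--service-container=true"})
    fmt.Println(serviceContainer) // true
}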

go.mod (2 lines changed)

@ -12,7 +12,7 @@ require (
github.com/containernetworking/cni v1.1.0
github.com/containernetworking/plugins v1.1.1
github.com/containers/buildah v1.26.1
github.com/containers/common v0.48.0
github.com/containers/common v0.48.1-0.20220510094751-400832f41771
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.21.2-0.20220511203756-fe4fd4ed8be4
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f

go.sum (3 lines changed)

@ -339,8 +339,9 @@ github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNG
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
github.com/containers/buildah v1.26.1 h1:D65Vuo+orsI14WWtJhSX6KrpgBBa7+hveVWevzG8p8E=
github.com/containers/buildah v1.26.1/go.mod h1:CsWSG8OpJd8v3mlLREJzVAOBgC93DjRNALUVHoi8QsY=
github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
github.com/containers/common v0.48.1-0.20220510094751-400832f41771 h1:rHd882jzJK1fIXCJWvc1zTX5CIv2aOyzzkqj6mezLLw=
github.com/containers/common v0.48.1-0.20220510094751-400832f41771/go.mod h1:xapcAN0NbthUpjBv2UWZ5uiCGBlYbuj0o1Qg4hCaiL8=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=


@ -211,6 +211,14 @@ type ContainerState struct {
// network and an interface names
NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`
// Service indicates that the container is the service container of a
// service. A service consists of one or more pods. The service
// container is started before all pods and is stopped when the last
// pod stops. The service container allows for tracking and managing
// the entire life cycle of a service which may be started via
// `podman-play-kube`.
Service Service
// containerPlatformState holds platform-specific container state.
containerPlatformState
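The Service type referenced here is added in libpod/service.go further down in this diff. A minimal, self-contained sketch of how the new state field serializes, assuming only the json tag shown there:

package main

import (
    "encoding/json"
    "fmt"
)

// Service mirrors the struct from libpod/service.go: the service
// container's state carries the IDs of all pods belonging to the service.
type Service struct {
    Pods []string `json:"servicePods"`
}

func main() {
    b, _ := json.Marshal(Service{Pods: []string{"0123456789ab"}})
    fmt.Println(string(b)) // {"servicePods":["0123456789ab"]}
}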


@ -382,6 +382,9 @@ type ContainerMiscConfig struct {
// IsInfra is a bool indicating whether this container is an infra container used for
// sharing kernel namespaces in a pod
IsInfra bool `json:"pause"`
// IsService is a bool indicating whether this container is a service container used for
// tracking the life cycle of a K8s service.
IsService bool `json:"isService"`
// SdNotifyMode tells libpod what to do with a NOTIFY_SOCKET if passed
SdNotifyMode string `json:"sdnotifyMode,omitempty"`
// Systemd tells libpod to setup the container in systemd mode, a value of nil denotes false


@ -171,6 +171,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
Mounts: inspectMounts,
Dependencies: c.Dependencies(),
IsInfra: c.IsInfra(),
IsService: c.isService(),
}
if c.state.ConfigPath != "" {


@ -1,6 +1,8 @@
package libpod
import (
"fmt"
"github.com/containers/podman/v4/libpod/define"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@ -27,6 +29,12 @@ func (c *Container) validate() error {
return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
}
// A container cannot be marked as an infra and service container at
// the same time.
if c.IsInfra() && c.isService() {
return fmt.Errorf("cannot be infra and service container at the same time: %w", define.ErrInvalidArg)
}
// Cannot make a network namespace if we are joining another container's
// network namespace
if c.config.CreateNetNS && c.config.NetNsCtr != "" {


@ -683,6 +683,7 @@ type InspectContainerData struct {
NetworkSettings *InspectNetworkSettings `json:"NetworkSettings"`
Namespace string `json:"Namespace"`
IsInfra bool `json:"IsInfra"`
IsService bool `json:"IsService"`
Config *InspectContainerConfig `json:"Config"`
HostConfig *InspectContainerHostConfig `json:"HostConfig"`
}
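Since IsService is part of the inspect data, it surfaces in the CLI as `podman container inspect --format '{{.IsService}}'` (the system test below relies on exactly that). A sketch of decoding the field from the JSON output, using a hypothetical minimal struct:

package main

import (
    "encoding/json"
    "fmt"
)

// minimalInspect models only the field we need from the JSON array
// that `podman container inspect` prints.
type minimalInspect struct {
    IsService bool `json:"IsService"`
}

func main() {
    raw := []byte(`[{"IsService": true}]`)
    var data []minimalInspect
    if err := json.Unmarshal(raw, &data); err != nil {
        panic(err)
    }
    fmt.Println(data[0].IsService) // true
}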


@ -1,6 +1,7 @@
package libpod
import (
"fmt"
"net"
"os"
"path/filepath"
@ -1477,7 +1478,7 @@ func WithCreateCommand(cmd []string) CtrCreateOption {
}
}
// withIsInfra allows us to differentiate between infra containers and regular containers
// withIsInfra allows us to differentiate between infra containers and other containers
// within the container config
func withIsInfra() CtrCreateOption {
return func(ctr *Container) error {
@ -1491,6 +1492,20 @@ func withIsInfra() CtrCreateOption {
}
}
// WithIsService allows us to differentiate between service containers and other containers
// within the container config
func WithIsService() CtrCreateOption {
return func(ctr *Container) error {
if ctr.valid {
return define.ErrCtrFinalized
}
ctr.config.IsService = true
return nil
}
}
// WithCreateWorkingDir tells Podman to create the container's working directory
// if it does not exist.
func WithCreateWorkingDir() CtrCreateOption {
@ -2081,6 +2096,27 @@ func WithInfraContainer() PodCreateOption {
}
}
// WithServiceContainer associates the specified service container ID with the pod.
func WithServiceContainer(id string) PodCreateOption {
return func(pod *Pod) error {
if pod.valid {
return define.ErrPodFinalized
}
ctr, err := pod.runtime.LookupContainer(id)
if err != nil {
return fmt.Errorf("looking up service container: %w", err)
}
if err := ctr.addServicePodLocked(pod.ID()); err != nil {
return fmt.Errorf("associating service container %s with pod %s: %w", id, pod.ID(), err)
}
pod.config.ServiceContainerID = id
return nil
}
}
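For context, a hedged usage sketch of the new option (hypothetical caller; the PR's real call site is the createPodOptions hunk in pkg/specgen/generate below): WithServiceContainer is appended like any other PodCreateOption when building a pod:

package main

import (
    "context"

    "github.com/containers/podman/v4/libpod"
    "github.com/containers/podman/v4/pkg/specgen"
)

// newServicePod is a hypothetical helper that creates a pod associated
// with an existing service container (one created with WithIsService()).
func newServicePod(ctx context.Context, rt *libpod.Runtime, serviceCtrID string) (*libpod.Pod, error) {
    opts := []libpod.PodCreateOption{
        libpod.WithPodName("test_pod"),
        libpod.WithServiceContainer(serviceCtrID),
    }
    return rt.NewPod(ctx, specgen.PodSpecGenerator{}, opts...)
}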
// WithVolatile sets the volatile flag for the container storage.
// The option can potentially cause data loss when used on a container that must survive a machine reboot.
func WithVolatile() CtrCreateOption {


@ -64,6 +64,13 @@ type PodConfig struct {
HasInfra bool `json:"hasInfra,omitempty"`
// ServiceContainerID is the ID of the main container of a service. A service
// consists of one or more pods. The service container is started
// before all pods and is stopped when the last pod stops.
// The service container allows for tracking and managing the entire
// life cycle of a service which may be started via `podman-play-kube`.
ServiceContainerID string `json:"serviceContainerID,omitempty"`
// Time pod was created
CreatedTime time.Time `json:"created"`


@ -75,6 +75,10 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
return nil, define.ErrPodRemoved
}
if err := p.maybeStartServiceContainer(ctx); err != nil {
return nil, err
}
// Before "regular" containers start in the pod, all init containers
// must have run and exited successfully.
if err := p.startInitContainers(ctx); err != nil {
@ -197,6 +201,11 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
}
if err := p.maybeStopServiceContainer(); err != nil {
return nil, err
}
return nil, nil
}
@ -297,6 +306,10 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
}
if err := p.maybeStopServiceContainer(); err != nil {
return nil, err
}
return nil, nil
}
@ -443,6 +456,10 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
return nil, define.ErrPodRemoved
}
if err := p.maybeStartServiceContainer(ctx); err != nil {
return nil, err
}
allCtrs, err := p.runtime.state.PodContainers(p)
if err != nil {
return nil, err
@ -530,6 +547,11 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
if len(ctrErrors) > 0 {
return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
}
if err := p.maybeStopServiceContainer(); err != nil {
return nil, err
}
return nil, nil
}
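The hook placement follows one pattern across the pod API: start-like entry points (Start, Restart) bring the service container up before any pod container, and stop-like entry points (Stop, Cleanup, Kill) consider stopping it only after every pod container has been handled. Schematically, with hypothetical method names inside package libpod:

// Schematic sketch only; startLike/stopLike stand in for Start/Restart
// and Stop/Cleanup/Kill respectively.
func (p *Pod) startLike(ctx context.Context) error {
    if err := p.maybeStartServiceContainer(ctx); err != nil {
        return err // the service container must run before any pod container
    }
    // ... run init containers, then regular containers ...
    return nil
}

func (p *Pod) stopLike(ctx context.Context) error {
    // ... stop/kill/clean up all pod containers first ...
    // Only then may the service container stop, and only if this was
    // the last running pod of the service (see libpod/service.go below).
    return p.maybeStopServiceContainer()
}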


@ -40,7 +40,7 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
if ctr.config.IsInfra {
pod, err := r.state.Pod(ctr.config.Pod)
if err != nil {
return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), pod.ID())
return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", ctr.ID(), ctr.config.Pod)
}
if err := r.removePod(ctx, pod, true, true, timeout); err != nil {
return errors.Wrapf(err, "removing image %s: container %s using image could not be removed", imageID, ctr.ID())


@ -380,6 +380,10 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool,
}
}
if err := p.maybeRemoveServiceContainer(); err != nil {
return err
}
// Remove pod from state
if err := r.state.RemovePod(p); err != nil {
if removalErr != nil {

libpod/service.go (new file, 213 lines)

@ -0,0 +1,213 @@
package libpod
import (
"context"
"fmt"
"github.com/containers/podman/v4/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// A service consists of one or more pods. The service container is started
// before all pods and is stopped when the last pod stops. The service
// container allows for tracking and managing the entire life cycle of a service
// which may be started via `podman-play-kube`.
type Service struct {
// Pods running as part of the service.
Pods []string `json:"servicePods"`
}
// hasServiceContainer returns true if the pod is associated with a service container.
// The pod is expected to be updated and locked.
func (p *Pod) hasServiceContainer() bool {
return p.config.ServiceContainerID != ""
}
// serviceContainer returns the pod's service container.
// The pod is expected to be updated and locked.
func (p *Pod) serviceContainer() (*Container, error) {
id := p.config.ServiceContainerID
if id == "" {
return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
}
return p.runtime.state.Container(id)
}
// ServiceContainer returns the service container.
func (p *Pod) ServiceContainer() (*Container, error) {
p.lock.Lock()
defer p.lock.Unlock()
if err := p.updatePod(); err != nil {
return nil, err
}
return p.serviceContainer()
}
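A short usage sketch of the exported accessor (hypothetical caller, not part of the diff):

package main

import (
    "fmt"

    "github.com/containers/podman/v4/libpod"
)

// printServiceContainer reports a pod's service container, if any.
func printServiceContainer(pod *libpod.Pod) error {
    ctr, err := pod.ServiceContainer()
    if err != nil {
        return err // wraps define.ErrNoSuchCtr when the pod has no service container
    }
    fmt.Println("service container:", ctr.ID())
    return nil
}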
func (c *Container) addServicePodLocked(id string) error {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return err
}
c.state.Service.Pods = append(c.state.Service.Pods, id)
return c.save()
}
func (c *Container) isService() bool {
return c.config.IsService
}
// canStopServiceContainerLocked returns true if all pods of the service are stopped.
// Note that the method acquires and releases the container lock.
func (c *Container) canStopServiceContainerLocked() (bool, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return false, err
}
if !c.isService() {
return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
}
for _, id := range c.state.Service.Pods {
pod, err := c.runtime.LookupPod(id)
if err != nil {
if errors.Is(err, define.ErrNoSuchPod) {
continue
}
return false, err
}
status, err := pod.GetPodStatus()
if err != nil {
return false, err
}
// We can only stop the service if all pods are done.
switch status {
case define.PodStateStopped, define.PodStateExited, define.PodStateErrored:
continue
default:
return false, nil
}
}
return true, nil
}
// maybeStopServiceContainer checks whether the service container can be stopped and, if so, stops it.
func (p *Pod) maybeStopServiceContainer() error {
if !p.hasServiceContainer() {
return nil
}
serviceCtr, err := p.serviceContainer()
if err != nil {
return fmt.Errorf("getting pod's service container: %w", err)
}
// Checking whether the service can be stopped must be done in
// the runtime's work queue to avoid ABBA deadlocks in the
// pod->container->servicePods hierarchy: the caller holds the pod
// lock while the check needs the container lock and, transitively,
// other pod locks.
p.runtime.queueWork(func() {
logrus.Debugf("Pod %s has a service %s: checking if it can be stopped", p.ID(), serviceCtr.ID())
canStop, err := serviceCtr.canStopServiceContainerLocked()
if err != nil {
logrus.Errorf("Checking whether service of container %s can be stopped: %v", serviceCtr.ID(), err)
return
}
if !canStop {
return
}
logrus.Debugf("Stopping service container %s", serviceCtr.ID())
if err := serviceCtr.Stop(); err != nil {
logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
}
})
return nil
}
// maybeStartServiceContainer starts the pod's service container if it is not already running.
func (p *Pod) maybeStartServiceContainer(ctx context.Context) error {
if !p.hasServiceContainer() {
return nil
}
serviceCtr, err := p.serviceContainer()
if err != nil {
return fmt.Errorf("getting pod's service container: %w", err)
}
serviceCtr.lock.Lock()
defer serviceCtr.lock.Unlock()
if err := serviceCtr.syncContainer(); err != nil {
return err
}
if serviceCtr.state.State == define.ContainerStateRunning {
return nil
}
// Restart will reinit among other things.
return serviceCtr.restartWithTimeout(ctx, 0)
}
// canRemoveServiceContainerLocked returns true if all pods of the service have been removed.
// Note that the method acquires and releases the container lock.
func (c *Container) canRemoveServiceContainerLocked() (bool, error) {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return false, err
}
if !c.isService() {
return false, fmt.Errorf("internal error: checking service: container %s is not a service container", c.ID())
}
for _, id := range c.state.Service.Pods {
if _, err := c.runtime.LookupPod(id); err != nil {
if errors.Is(err, define.ErrNoSuchPod) {
continue
}
return false, err
}
return false, nil
}
return true, nil
}
// maybeRemoveServiceContainer checks whether the service container can be removed and, if so, removes it.
func (p *Pod) maybeRemoveServiceContainer() error {
if !p.hasServiceContainer() {
return nil
}
serviceCtr, err := p.serviceContainer()
if err != nil {
return fmt.Errorf("getting pod's service container: %w", err)
}
// Checking whether the service can be removed must be done in
// the runtime's work queue to avoid ABBA deadlocks in the
// pod->container->servicePods hierarchy: the caller holds the pod
// lock while the check needs the container lock and, transitively,
// other pod locks.
p.runtime.queueWork(func() {
logrus.Debugf("Pod %s has a service %s: checking if it can be removed", p.ID(), serviceCtr.ID())
canRemove, err := serviceCtr.canRemoveServiceContainerLocked()
if err != nil {
logrus.Errorf("Checking whether service of container %s can be removed: %v", serviceCtr.ID(), err)
return
}
if !canRemove {
return
}
timeout := uint(0)
logrus.Debugf("Removing service container %s", serviceCtr.ID())
if err := p.runtime.RemoveContainer(context.Background(), serviceCtr, true, false, &timeout); err != nil {
logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
}
})
return nil
}


@ -54,6 +54,8 @@ type PlayKubeOptions struct {
LogOptions []string
// Start - don't start the pod if false
Start types.OptionalBool
// ServiceContainer - creates a service container that is started before all pods and stopped after the last pod.
ServiceContainer bool
// Userns - define the user namespace to use.
Userns string
}


@ -28,12 +28,54 @@ import (
"github.com/containers/podman/v4/pkg/specgenutil"
"github.com/containers/podman/v4/pkg/util"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
yamlv2 "gopkg.in/yaml.v2"
)
func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
// createServiceContainer creates a container that can later be
// associated with the pods of a K8s yaml. It will be started along with
// the first pod.
func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name string) (*libpod.Container, error) {
// Similar to infra containers, a service container uses the pause image.
image, err := generate.PullOrBuildInfraImage(ic.Libpod, "")
if err != nil {
return nil, fmt.Errorf("image for service container: %w", err)
}
ctrOpts := entities.ContainerCreateOptions{
// Inherited from infra containers
ImageVolume: "bind",
IsInfra: false,
MemorySwappiness: -1,
// No need to spin up slirp etc.
Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}},
}
// Create and fill out the runtime spec.
s := specgen.NewSpecGenerator(image, false)
if err := specgenutil.FillOutSpecGen(s, &ctrOpts, []string{}); err != nil {
return nil, fmt.Errorf("completing spec for service container: %w", err)
}
s.Name = name
runtimeSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, s, false, nil)
if err != nil {
return nil, fmt.Errorf("creating runtime spec for service container: %w", err)
}
opts = append(opts, libpod.WithIsService())
// Create a new libpod container based on the spec.
ctr, err := ic.Libpod.NewContainer(ctx, runtimeSpec, spec, false, opts...)
if err != nil {
return nil, fmt.Errorf("creating service container: %w", err)
}
return ctr, nil
}
func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options entities.PlayKubeOptions) (_ *entities.PlayKubeReport, finalErr error) {
report := &entities.PlayKubeReport{}
validKinds := 0
@ -67,6 +109,30 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return nil, errors.Wrap(err, "unable to read kube YAML")
}
// TODO: create constants for the various "kinds" of yaml files.
var serviceContainer *libpod.Container
if options.ServiceContainer && (kind == "Pod" || kind == "Deployment") {
// The name of the service container is the first 12
// characters of the yaml file's hash followed by the
// '-service' suffix to guarantee a predictable and
// discoverable name.
hash := digest.FromBytes(content).Encoded()
ctr, err := ic.createServiceContainer(ctx, hash[0:12]+"-service")
if err != nil {
return nil, err
}
serviceContainer = ctr
// Make sure to remove the container in case something goes wrong below.
defer func() {
if finalErr == nil {
return
}
if err := ic.Libpod.RemoveContainer(ctx, ctr, true, false, nil); err != nil {
logrus.Errorf("Cleaning up service container after failure: %v", err)
}
}()
}
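The deterministic name is what lets the system test below find "352a88685060-service" without any extra bookkeeping. A self-contained sketch of the derivation, matching the code above:

package main

import (
    "fmt"

    "github.com/opencontainers/go-digest"
)

func main() {
    content := []byte("apiVersion: v1\nkind: Pod\n") // stand-in for the yaml body
    hash := digest.FromBytes(content).Encoded()
    // First 12 hex characters of the sha256 digest plus the "-service" suffix.
    fmt.Println(hash[0:12] + "-service")
}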
switch kind {
case "Pod":
var podYAML v1.Pod
@ -90,7 +156,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
podYAML.Annotations[name] = val
}
r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps)
r, err := ic.playKubePod(ctx, podTemplateSpec.ObjectMeta.Name, &podTemplateSpec, options, &ipIndex, podYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, err
}
@ -104,7 +170,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
}
r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps)
r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps, serviceContainer)
if err != nil {
return nil, err
}
@ -148,7 +214,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
return report, nil
}
func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
var (
deploymentName string
podSpec v1.PodTemplateSpec
@ -170,7 +236,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
// create "replicas" number of pods
for i = 0; i < numReplicas; i++ {
podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps)
podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, errors.Wrapf(err, "error encountered while bringing up pod %s", podName)
}
@ -179,7 +245,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
return &report, nil
}
func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap) (*entities.PlayKubeReport, error) {
func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, error) {
var (
writer io.Writer
playKubePod entities.PlayKubePod
@ -374,6 +440,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
}
if serviceContainer != nil {
podSpec.PodSpecGen.ServiceContainerID = serviceContainer.ID()
}
// Create the Pod
pod, err := generate.MakePod(&podSpec, ic.Libpod)
if err != nil {


@ -0,0 +1,89 @@
package generate
import (
"context"
"fmt"
"io/ioutil"
"os"
buildahDefine "github.com/containers/buildah/define"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
)
// PullOrBuildInfraImage pulls down the specified image or the one set in
// containers.conf. If none is set, it builds a local pause image.
func PullOrBuildInfraImage(rt *libpod.Runtime, imageName string) (string, error) {
rtConfig, err := rt.GetConfigNoCopy()
if err != nil {
return "", err
}
if imageName == "" {
imageName = rtConfig.Engine.InfraImage
}
if imageName != "" {
_, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
if err != nil {
return "", err
}
return imageName, nil
}
name, err := buildPauseImage(rt, rtConfig)
if err != nil {
return "", fmt.Errorf("building local pause image: %w", err)
}
return name, nil
}
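createServiceContainer in play.go calls this helper with an empty image name, which falls through to containers.conf's infra_image and finally to a locally built pause image. A hedged usage sketch (hypothetical wrapper):

package main

import (
    "fmt"

    "github.com/containers/podman/v4/libpod"
    "github.com/containers/podman/v4/pkg/specgen/generate"
)

// resolvePauseImage is a hypothetical wrapper: an empty name means
// "use containers.conf's infra_image, else build a local pause image".
func resolvePauseImage(rt *libpod.Runtime) (string, error) {
    image, err := generate.PullOrBuildInfraImage(rt, "")
    if err != nil {
        return "", fmt.Errorf("image for service container: %w", err)
    }
    return image, nil // e.g. "localhost/podman-pause:<version>-<built>"
}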
func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
version, err := define.GetVersion()
if err != nil {
return "", err
}
imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
// First check if the image has already been built.
if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
return imageName, nil
}
// Also search $PATH, as some distributions install catatonit in
// /usr/bin.
catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
if err != nil {
return "", fmt.Errorf("finding pause binary: %w", err)
}
buildContent := fmt.Sprintf(`FROM scratch
COPY %s /catatonit
ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
tmpF, err := ioutil.TempFile("", "pause.containerfile")
if err != nil {
return "", err
}
if _, err := tmpF.WriteString(buildContent); err != nil {
return "", err
}
if err := tmpF.Close(); err != nil {
return "", err
}
defer os.Remove(tmpF.Name())
buildOptions := buildahDefine.BuildOptions{
CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
Output: imageName,
Quiet: true,
IgnoreFile: "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
IIDFile: "/dev/null", // prevents Buildah from writing the ID on stdout
}
if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
return "", err
}
return imageName, nil
}


@ -2,13 +2,8 @@ package generate
import (
"context"
"fmt"
"io/ioutil"
"net"
"os"
buildahDefine "github.com/containers/buildah/define"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
@ -17,99 +12,19 @@ import (
"github.com/sirupsen/logrus"
)
func buildPauseImage(rt *libpod.Runtime, rtConfig *config.Config) (string, error) {
version, err := define.GetVersion()
if err != nil {
return "", err
}
imageName := fmt.Sprintf("localhost/podman-pause:%s-%d", version.Version, version.Built)
// First check if the image has already been built.
if _, _, err := rt.LibimageRuntime().LookupImage(imageName, nil); err == nil {
return imageName, nil
}
// Also look into the path as some distributions install catatonit in
// /usr/bin.
catatonitPath, err := rtConfig.FindHelperBinary("catatonit", true)
if err != nil {
return "", fmt.Errorf("finding pause binary: %w", err)
}
buildContent := fmt.Sprintf(`FROM scratch
COPY %s /catatonit
ENTRYPOINT ["/catatonit", "-P"]`, catatonitPath)
tmpF, err := ioutil.TempFile("", "pause.containerfile")
if err != nil {
return "", err
}
if _, err := tmpF.WriteString(buildContent); err != nil {
return "", err
}
if err := tmpF.Close(); err != nil {
return "", err
}
defer os.Remove(tmpF.Name())
buildOptions := buildahDefine.BuildOptions{
CommonBuildOpts: &buildahDefine.CommonBuildOptions{},
Output: imageName,
Quiet: true,
IgnoreFile: "/dev/null", // makes sure to not read a local .ignorefile (see #13529)
IIDFile: "/dev/null", // prevents Buildah from writing the ID on stdout
}
if _, _, err := rt.Build(context.Background(), buildOptions, tmpF.Name()); err != nil {
return "", err
}
return imageName, nil
}
func pullOrBuildInfraImage(p *entities.PodSpec, rt *libpod.Runtime) error {
if p.PodSpecGen.NoInfra {
return nil
}
rtConfig, err := rt.GetConfigNoCopy()
if err != nil {
return err
}
// NOTE: we need to pull down the infra image if it was explicitly set by
// the user (or containers.conf) to the non-default one.
imageName := p.PodSpecGen.InfraImage
if imageName == "" {
imageName = rtConfig.Engine.InfraImage
}
if imageName != "" {
_, err := rt.LibimageRuntime().Pull(context.Background(), imageName, config.PullPolicyMissing, nil)
if err != nil {
return err
}
} else {
name, err := buildPauseImage(rt, rtConfig)
if err != nil {
return fmt.Errorf("building local pause image: %w", err)
}
imageName = name
}
p.PodSpecGen.InfraImage = imageName
p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
return nil
}
func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (*libpod.Pod, error) {
if err := p.PodSpecGen.Validate(); err != nil {
return nil, err
}
if err := pullOrBuildInfraImage(p, rt); err != nil {
if !p.PodSpecGen.NoInfra {
imageName, err := PullOrBuildInfraImage(rt, p.PodSpecGen.InfraImage)
if err != nil {
return nil, err
}
p.PodSpecGen.InfraImage = imageName
p.PodSpecGen.InfraContainerSpec.RawImageName = imageName
}
if !p.PodSpecGen.NoInfra && p.PodSpecGen.InfraContainerSpec != nil {
var err error
@ -180,6 +95,11 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er
options = append(options, libpod.WithPodUser())
}
}
if len(p.ServiceContainerID) > 0 {
options = append(options, libpod.WithServiceContainer(p.ServiceContainerID))
}
if len(p.CgroupParent) > 0 {
options = append(options, libpod.WithPodCgroupParent(p.CgroupParent))
}


@ -204,6 +204,9 @@ type PodSpecGenerator struct {
PodStorageConfig
PodSecurityConfig
InfraContainerSpec *SpecGenerator `json:"-"`
// The ID of the pod's service container.
ServiceContainerID string `json:"serviceContainerID,omitempty"`
}
type PodResourceConfig struct {


@ -456,13 +456,12 @@ var _ = Describe("Verify podman containers.conf usage", func() {
containersConf = []byte("[engine]\nimage_copy_tmp_dir=\"storage1\"")
err = ioutil.WriteFile(configPath, containersConf, os.ModePerm)
Expect(err).ToNot(HaveOccurred())
if IsRemote() {
podmanTest.RestartRemoteService()
}
SkipIfRemote("Restarting the system service will fail loading the broken containers.conf")
session = podmanTest.Podman([]string{"info", "--format", "{{.Store.ImageCopyTmpDir}}"})
session.WaitWithDefaultTimeout()
Expect(session).Should(Exit(0))
Expect(session).Should(Exit(125))
Expect(session.Err.Contents()).To(ContainSubstring("invalid image_copy_tmp_dir"))
})


@ -408,19 +408,6 @@ EOF
run_podman pod rm test
}
# Wait for the pod (1st arg) to transition into the state (2nd arg)
function _ensure_pod_state() {
for i in {0..5}; do
run_podman pod inspect $1 --format "{{.State}}"
if [[ $output == "$2" ]]; then
break
fi
sleep 0.5
done
is "$output" "$2" "unexpected pod state"
}
@test "pod exit policies" {
# Test setting exit policies
run_podman pod create


@ -100,6 +100,61 @@ RELABEL="system_u:object_r:container_file_t:s0"
run_podman pod rm -t 0 -f test_pod
}
@test "podman play --service-container" {
skip_if_remote "service containers only work locally"
TESTDIR=$PODMAN_TMPDIR/testdir
mkdir -p $TESTDIR
yaml="
apiVersion: v1
kind: Pod
metadata:
labels:
app: test
name: test_pod
spec:
containers:
- command:
- top
image: $IMAGE
name: test
resources: {}
"
echo "$yaml" > $PODMAN_TMPDIR/test.yaml
run_podman play kube --service-container=true $PODMAN_TMPDIR/test.yaml
# Make sure that the service container exists and runs.
run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
is "$output" "true"
# Stop the *main* container and make sure that
# 1) The pod transitions to Exited
# 2) The service container is stopped
# 3) The service container is marked as a service container
run_podman stop test_pod-test
_ensure_pod_state test_pod Exited
run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
is "$output" "false"
run_podman container inspect "352a88685060-service" --format "{{.IsService}}"
is "$output" "true"
# Restart the pod, make sure the service is running again
run_podman pod restart test_pod
run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
is "$output" "true"
# Kill the pod and make sure the service is not running
run_podman pod kill test_pod
run_podman container inspect "352a88685060-service" --format "{{.State.Running}}"
is "$output" "false"
# Remove the pod and make sure the service is removed along with it
run_podman pod rm test_pod
run_podman 1 container exists "352a88685060-service"
}
@test "podman play --network" {
TESTDIR=$PODMAN_TMPDIR/testdir
mkdir -p $TESTDIR


@ -392,6 +392,19 @@ function pause_image() {
echo "localhost/podman-pause:$output"
}
# Wait for the pod (1st arg) to transition into the state (2nd arg)
function _ensure_pod_state() {
for i in {0..5}; do
run_podman pod inspect $1 --format "{{.State}}"
if [[ $output == "$2" ]]; then
break
fi
sleep 0.5
done
is "$output" "$2" "unexpected pod state"
}
###########################
# _add_label_if_missing # make sure skip messages include rootless/remote
###########################


@ -608,7 +608,7 @@ func (i *Image) RepoTags() ([]string, error) {
// NamedTaggedRepoTags returns the repotags associated with the image as a
// slice of reference.NamedTagged.
func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) {
var repoTags []reference.NamedTagged
repoTags := make([]reference.NamedTagged, 0, len(i.Names()))
for _, name := range i.Names() {
parsed, err := reference.Parse(name)
if err != nil {


@ -32,8 +32,8 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) (
options = &LoadOptions{}
}
var loadErrors []error
// we have 4 functions, so a maximum of 4 errors
loadErrors := make([]error, 0, 4)
for _, f := range []func() ([]string, string, error){
// OCI
func() ([]string, string, error) {
@ -88,6 +88,8 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) (
}
// Give a decent error message if nothing above worked.
// we want the colon here for the multiline error
//nolint:revive
loadError := fmt.Errorf("payload does not match any of the supported image formats:")
for _, err := range loadErrors {
loadError = fmt.Errorf("%v\n * %v", loadError, err)


@ -115,7 +115,7 @@ type NameTagPair struct {
func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
none := "<none>"
var pairs []NameTagPair
pairs := make([]NameTagPair, 0, len(repoTags))
for i, named := range repoTags {
pair := NameTagPair{
Name: named.Name(),


@ -413,11 +413,11 @@ func (r *Runtime) imagesIDsForManifest(manifestBytes []byte, sys *types.SystemCo
}
imageDigest = d
}
var results []string
images, err := r.store.ImagesByDigest(imageDigest)
if err != nil {
return nil, errors.Wrapf(err, "listing images by manifest digest")
}
results := make([]string, 0, len(images))
for _, image := range images {
results = append(results, image.ID)
}


@ -6,6 +6,7 @@ import (
"os"
"strings"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames"
storageTransport "github.com/containers/image/v5/storage"
@ -22,13 +23,16 @@ import (
var json = jsoniter.ConfigCompatibleWithStandardLibrary
// tmpdir returns a path to a temporary directory.
func tmpdir() string {
    tmpdir := os.Getenv("TMPDIR")
    if tmpdir == "" {
        tmpdir = "/var/tmp"
    }
    return tmpdir
}
func tmpdir() (string, error) {
    var tmpdir string
    defaultContainerConfig, err := config.Default()
    if err == nil {
        tmpdir, err = defaultContainerConfig.ImageCopyTmpDir()
        if err == nil {
            return tmpdir, nil
        }
    }
    return tmpdir, err
}
// RuntimeOptions allow for creating a customized Runtime.
@ -103,7 +107,11 @@ func RuntimeFromStore(store storage.Store, options *RuntimeOptions) (*Runtime, e
systemContext = types.SystemContext{}
}
if systemContext.BigFilesTemporaryDir == "" {
systemContext.BigFilesTemporaryDir = tmpdir()
tmpdir, err := tmpdir()
if err != nil {
return nil, err
}
systemContext.BigFilesTemporaryDir = tmpdir
}
setRegistriesConfPath(&systemContext)
@ -224,7 +232,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
}
logrus.Debugf("Found image %q in local containers storage (%s)", name, storageRef.StringWithinTransport())
return r.storageToImage(img, storageRef), "", nil
} else {
}
// Docker compat: strip off the tag iff name is tagged and digested
// (e.g., fedora:latest@sha256...). In that case, the tag is stripped
// off and entirely ignored. The digest is the sole source of truth.
@ -233,7 +241,6 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
return nil, "", err
}
name = normalizedName
}
byDigest := false
originalName := name


@ -96,7 +96,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
newNetwork.ID = getNetworkIDFromName(newNetwork.Name)
// when we do not have ipam we must disable dns
internalutil.IpamNoneDisableDns(newNetwork)
internalutil.IpamNoneDisableDNS(newNetwork)
// FIXME: Should this be a hard error?
if newNetwork.DNSEnabled && newNetwork.Internal && hasDNSNamePlugin(n.cniPluginDirs) {


@ -106,7 +106,7 @@ func (n *cniNetwork) Setup(namespacePath string, options types.SetupOptions) (ma
}
// CNIResultToStatus converts the CNI result to a status block
// nolint:golint
// nolint:golint,revive
func CNIResultToStatus(res cnitypes.Result) (types.StatusBlock, error) {
result := types.StatusBlock{}
cniResult, err := types040.GetResult(res)


@ -41,7 +41,7 @@ func CommonNetworkCreate(n NetUtil, network *types.Network) error {
return nil
}
func IpamNoneDisableDns(network *types.Network) {
func IpamNoneDisableDNS(network *types.Network) {
if network.IPAMOptions[types.Driver] == types.NoneIPAMDriver {
logrus.Debugf("dns disabled for network %q because ipam driver is set to none", network.Name)
network.DNSEnabled = false


@ -121,7 +121,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
}
// when we do not have ipam we must disable dns
internalutil.IpamNoneDisableDns(newNetwork)
internalutil.IpamNoneDisableDNS(newNetwork)
// add gateway when not internal or dns enabled
addGateway := !newNetwork.Internal || newNetwork.DNSEnabled


@ -46,6 +46,9 @@ const (
// 1. read ${graphroot}/defaultNetworkBackend
// 2. find netavark binary (if not installed use CNI)
// 3. check containers, images and CNI networks and if there are some we have an existing install and should continue to use CNI
//
// revive does not like the name because the package is already called network
//nolint:revive
func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (types.NetworkBackend, types.ContainerNetwork, error) {
backend := types.NetworkBackend(conf.Network.NetworkBackend)
if backend == "" {


@ -251,19 +251,17 @@ func CheckProfileAndLoadDefault(name string) (string, error) {
if unshare.IsRootless() {
if name != "" {
return "", errors.Wrapf(ErrApparmorRootless, "cannot load AppArmor profile %q", name)
} else {
}
logrus.Debug("Skipping loading default AppArmor profile (rootless mode)")
return "", nil
}
}
// Check if AppArmor is disabled and error out if a profile is to be set.
if !runcaa.IsEnabled() {
if name == "" {
return "", nil
} else {
return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
}
return "", errors.Errorf("profile %q specified but AppArmor is disabled on the host", name)
}
if name == "" {


@ -26,8 +26,8 @@ func GetDefaultAuthFile() string {
if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" {
return authfile
}
if auth_env := os.Getenv("DOCKER_CONFIG"); auth_env != "" {
return filepath.Join(auth_env, "config.json")
if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" {
return filepath.Join(authEnv, "config.json")
}
return ""
}
@ -313,7 +313,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
return nil
}
return errors.Errorf("Not logged into %s\n", key)
return errors.Errorf("not logged into %s", key)
default:
return errors.Wrapf(err, "logging out of %q", key)
}


@ -104,8 +104,8 @@ func AllCapabilities() []string {
// NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet
// present).
func NormalizeCapabilities(caps []string) ([]string, error) {
normalized := make([]string, len(caps))
for i, c := range caps {
normalized := make([]string, 0, len(caps))
for _, c := range caps {
c = strings.ToUpper(c)
if c == All {
normalized = append(normalized, c)
@ -117,7 +117,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) {
if !stringInSlice(c, capabilityList) {
return nil, errors.Wrapf(ErrUnknownCapability, "%q", c)
}
normalized[i] = c
normalized = append(normalized, c)
}
sort.Strings(normalized)
return normalized, nil
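For context on why this hunk switches from indexed writes to append: the old code allocated normalized with make([]string, len(caps)) and then mixed normalized[i] = c with append in the ALL branch, leaving a stray empty string at the ALL position. A minimal, self-contained reproduction of the old failure mode:

package main

import "fmt"

func main() {
    caps := []string{"ALL", "CAP_CHOWN"}
    normalized := make([]string, len(caps)) // old code: length 2, both ""
    for i, c := range caps {
        if c == "ALL" {
            normalized = append(normalized, c) // grows past the preallocated slots
            continue
        }
        normalized[i] = c // indexed write; slot 0 stays ""
    }
    fmt.Printf("%q\n", normalized) // ["" "CAP_CHOWN" "ALL"]
}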
@ -140,8 +140,6 @@ func ValidateCapabilities(caps []string) error {
// "ALL" in capAdd adds returns known capabilities
// "All" in capDrop returns only the capabilities specified in capAdd
func MergeCapabilities(base, adds, drops []string) ([]string, error) {
var caps []string
// Normalize the base capabilities
base, err := NormalizeCapabilities(base)
if err != nil {
@ -189,6 +187,7 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
}
}
caps := make([]string, 0, len(base)+len(capAdd))
// Drop any capabilities in capDrop that are in base
for _, cap := range base {
if stringInSlice(cap, capDrop) {


@ -9,6 +9,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
@ -96,6 +97,22 @@ func UserOwnsCurrentSystemdCgroup() (bool, error) {
// It differs from os.RemoveAll as it doesn't attempt to unlink files.
// On cgroupfs we are allowed only to rmdir empty directories.
func rmDirRecursively(path string) error {
killProcesses := func(signal syscall.Signal) {
if signal == unix.SIGKILL {
if err := ioutil.WriteFile(filepath.Join(path, "cgroup.kill"), []byte("1"), 0600); err == nil {
return
}
}
// kill all the processes that are still part of the cgroup
if procs, err := ioutil.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil {
for _, pidS := range strings.Split(string(procs), "\n") {
if pid, err := strconv.Atoi(pidS); err == nil {
_ = unix.Kill(pid, signal)
}
}
}
}
if err := os.Remove(path); err == nil || os.IsNotExist(err) {
return nil
}
@ -118,8 +135,16 @@ func rmDirRecursively(path string) error {
return nil
}
if errors.Is(err, unix.EBUSY) {
// attempt up to 5 seconds if the cgroup is busy
if attempts < 500 {
// send SIGTERM after 3 seconds
if attempts == 300 {
killProcesses(unix.SIGTERM)
}
// send SIGKILL after 8 seconds
if attempts == 800 {
killProcesses(unix.SIGKILL)
}
// give up after 10 seconds
if attempts < 1000 {
time.Sleep(time.Millisecond * 10)
attempts++
continue


@ -51,7 +51,7 @@ func AutocompleteCapabilities(cmd *cobra.Command, args []string, toComplete stri
offset = 4
}
var completions []string
completions := make([]string, 0, len(caps))
for _, cap := range caps {
completions = append(completions, convertCase(cap)[offset:])
}


@ -553,6 +553,9 @@ type SecretConfig struct {
}
// ConfigMapConfig represents the "configmap" TOML config table
//
// revive does not like the name because the package is already called config
//nolint:revive
type ConfigMapConfig struct {
// Driver specifies the configmap driver to use.
// Current valid value:
@ -1215,14 +1218,14 @@ func (c *Config) ActiveDestination() (uri, identity string, err error) {
// FindHelperBinary will search the given binary name in the configured directories.
// If searchPATH is set to true it will also search in $PATH.
func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error) {
dir_list := c.Engine.HelperBinariesDir
dirList := c.Engine.HelperBinariesDir
// If set, search this directory first. This is used in testing.
if dir, found := os.LookupEnv("CONTAINERS_HELPER_BINARY_DIR"); found {
dir_list = append([]string{dir}, dir_list...)
dirList = append([]string{dir}, dirList...)
}
for _, path := range dir_list {
for _, path := range dirList {
fullpath := filepath.Join(path, name)
if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
return fullpath, nil


@ -36,11 +36,13 @@ func ComputeUntilTimestamp(filterValues []string) (time.Time, error) {
//
// Please refer to https://github.com/containers/podman/issues/6899 for some
// background.
//
// revive does not like the name because the package is already called filters
//nolint:revive
func FiltersFromRequest(r *http.Request) ([]string, error) {
var (
compatFilters map[string]map[string]bool
filters map[string][]string
libpodFilters []string
raw []byte
)
@ -54,6 +56,7 @@ func FiltersFromRequest(r *http.Request) ([]string, error) {
// Backwards compat with older versions of Docker.
if err := json.Unmarshal(raw, &compatFilters); err == nil {
libpodFilters := make([]string, 0, len(compatFilters))
for filterKey, filterMap := range compatFilters {
for filterValue, toAdd := range filterMap {
if toAdd {
@ -68,6 +71,7 @@ func FiltersFromRequest(r *http.Request) ([]string, error) {
return nil, err
}
libpodFilters := make([]string, 0, len(filters))
for filterKey, filterSlice := range filters {
f := filterKey
for _, filterValue := range filterSlice {


@ -9,6 +9,8 @@ import (
"github.com/sirupsen/logrus"
)
// TODO: change name to MachineMarker since package is already called machine
//nolint:revive
type MachineMarker struct {
Enabled bool
Type string
@ -54,6 +56,8 @@ func IsPodmanMachine() bool {
return GetMachineMarker().Enabled
}
// TODO: change name to HostType since package is already called machine
//nolint:revive
func MachineHostType() string {
return GetMachineMarker().Type
}


@ -13,7 +13,6 @@ import (
)
func DeviceFromPath(device string) ([]devices.Device, error) {
var devs []devices.Device
src, dst, permissions, err := Device(device)
if err != nil {
return nil, err
@ -27,7 +26,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
}
if !srcInfo.IsDir() {
devs := make([]devices.Device, 0, 1)
dev, err := devices.DeviceFromPath(src, permissions)
if err != nil {
return nil, errors.Wrapf(err, "%s is not a valid device", src)
@ -42,6 +41,7 @@ func DeviceFromPath(device string) ([]devices.Device, error) {
if err != nil {
return nil, errors.Wrapf(err, "error getting source devices from directory %s", src)
}
devs := make([]devices.Device, 0, len(srcDevices))
for _, d := range srcDevices {
d.Path = filepath.Join(dst, filepath.Base(d.Path))
d.Permissions = devices.Permissions(permissions)


@ -17,12 +17,17 @@ import (
)
// RetryOptions defines the option to retry
// revive does not like the name because the package is already called retry
//nolint:revive
type RetryOptions struct {
MaxRetry int // The number of times to possibly retry
Delay time.Duration // The delay to use between retries, if set
}
// RetryIfNecessary retries the operation in exponential backoff with the retryOptions
//
// revive does not like the name because the package is already called retry
//nolint:revive
func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
err := operation()
for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {


@ -76,6 +76,7 @@ var (
specs.ActAllow: ActAllow,
specs.ActTrace: ActTrace,
specs.ActLog: ActLog,
specs.ActNotify: ActNotify,
}
specOperatorToSeccompOperatorMap = map[specs.LinuxSeccompOperator]Operator{
specs.OpNotEqual: OpNotEqual,


@ -130,7 +130,7 @@ func matchSyscall(filter *libseccomp.ScmpFilter, call *Syscall) error {
return errors.Wrapf(err, "create seccomp syscall condition for syscall %s", call.Name)
}
argCounts[cond.Index] += 1
argCounts[cond.Index]++
conditions = append(conditions, newCond)
}


@ -75,6 +75,7 @@ const (
ActTrace Action = "SCMP_ACT_TRACE"
ActAllow Action = "SCMP_ACT_ALLOW"
ActLog Action = "SCMP_ACT_LOG"
ActNotify Action = "SCMP_ACT_NOTIFY"
)
// Operator used to match syscall arguments in Seccomp


@ -55,7 +55,7 @@ func (d *Driver) List() ([]string, error) {
if err != nil {
return nil, err
}
var allID []string
allID := make([]string, 0, len(secretData))
for k := range secretData {
allID = append(allID, k)
}
@ -134,9 +134,8 @@ func (d *Driver) getAllData() (map[string][]byte, error) {
if os.IsNotExist(err) {
// the file will be created later on a store()
return make(map[string][]byte), nil
} else {
return nil, err
}
return nil, err
}
file, err := os.Open(d.secretsDataFilePath)


@ -53,6 +53,9 @@ var secretsFile = "secrets.json"
var secretNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)
// SecretsManager holds information on handling secrets
//
// revive does not like the name because the package is already called secrets
//nolint:revive
type SecretsManager struct {
// secretsPath is the path to the db file where secrets are stored
secretsDBPath string
@ -82,6 +85,9 @@ type Secret struct {
// The driver stores the actual bytes of secret data, as opposed to
// the secret metadata.
// Currently only the unencrypted filedriver is implemented.
//
// revive does not like the name because the package is already called secrets
//nolint:revive
type SecretsDriver interface {
// List lists all secret ids in the secrets data store
List() ([]string, error)
@ -234,7 +240,7 @@ func (s *SecretsManager) List() ([]Secret, error) {
if err != nil {
return nil, err
}
var ls []Secret
ls := make([]Secret, 0, len(secrets))
for _, v := range secrets {
ls = append(ls, v)
}
@ -276,9 +282,8 @@ func getDriver(name string, opts map[string]string) (SecretsDriver, error) {
case "file":
if path, ok := opts["path"]; ok {
return filedriver.NewDriver(path)
} else {
return nil, errors.Wrap(errInvalidDriverOpt, "need path for filedriver")
}
return nil, errors.Wrap(errInvalidDriverOpt, "need path for filedriver")
case "pass":
return passdriver.NewDriver(opts)
case "shell":


@ -31,9 +31,8 @@ func (s *SecretsManager) loadDB() error {
// the db cache will show no entries anyway.
// The file will be created later on a store()
return nil
} else {
return err
}
return err
}
// We check if the file has been modified after the last time it was loaded into the cache.


@ -212,8 +212,8 @@ func rchown(chowndir string, uid, gid int) error {
// addSubscriptionsFromMountsFile copies the contents of host directory to container directory
// and returns a list of mounts
func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) {
var mounts []rspec.Mount
defaultMountsPaths := getMounts(filePath)
mounts := make([]rspec.Mount, 0, len(defaultMountsPaths))
for _, path := range defaultMountsPaths {
hostDirOrFile, ctrDirOrFile, err := getMountsMap(path)
if err != nil {


@ -12,6 +12,8 @@ import (
// NUMANodeCount queries the system for the count of Memory Nodes available
// for use to this process.
func NUMANodeCount() int {
// this is the correct flag name (not defined in the unix package)
//nolint:revive
MPOL_F_MEMS_ALLOWED := (1 << 2)
var mask [1024 / 64]uintptr
_, _, err := unix.RawSyscall6(unix.SYS_GET_MEMPOLICY, 0, uintptr(unsafe.Pointer(&mask[0])), uintptr(len(mask)*8), 0, uintptr(MPOL_F_MEMS_ALLOWED), 0)


@ -1,4 +1,4 @@
package version
// Version is the version of the build.
const Version = "0.48.0"
const Version = "0.49.0-dev"

vendor/modules.txt (vendored, 2 lines changed)

@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
# github.com/containers/common v0.48.0
# github.com/containers/common v0.48.1-0.20220510094751-400832f41771
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests