Merge pull request #8252 from baude/playkubetospecgen

migrate play kube to spec gen
This commit is contained in:
OpenShift Merge Robot
2020-11-10 19:43:47 +01:00
committed by GitHub
6 changed files with 596 additions and 634 deletions

View File

@ -6,29 +6,22 @@ import (
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v2/libpod"
"github.com/containers/podman/v2/libpod/image"
ann "github.com/containers/podman/v2/pkg/annotations"
"github.com/containers/podman/v2/pkg/domain/entities"
envLib "github.com/containers/podman/v2/pkg/env"
ns "github.com/containers/podman/v2/pkg/namespaces"
createconfig "github.com/containers/podman/v2/pkg/spec"
"github.com/containers/podman/v2/pkg/specgen/generate"
"github.com/containers/podman/v2/pkg/specgen/generate/kube"
"github.com/containers/podman/v2/pkg/util"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/distribution/reference"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
const (
@ -110,7 +103,6 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions) (*entities.PlayKubeReport, error) {
var (
pod *libpod.Pod
registryCreds *types.DockerAuthConfig
writer io.Writer
playKubePod entities.PlayKubePod
@ -129,49 +121,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
}
podOptions := []libpod.PodCreateOption{
libpod.WithInfraContainer(),
libpod.WithPodName(podName),
}
if podYAML.ObjectMeta.Labels != nil {
podOptions = append(podOptions, libpod.WithPodLabels(podYAML.ObjectMeta.Labels))
}
// TODO we only configure Process namespace. We also need to account for Host{IPC,Network,PID}
// which is not currently possible with pod create
if podYAML.Spec.ShareProcessNamespace != nil && *podYAML.Spec.ShareProcessNamespace {
podOptions = append(podOptions, libpod.WithPodPID())
}
hostname := podYAML.Spec.Hostname
if hostname == "" {
hostname = podName
}
podOptions = append(podOptions, libpod.WithPodHostname(hostname))
if podYAML.Spec.HostNetwork {
podOptions = append(podOptions, libpod.WithPodHostNetwork())
}
if podYAML.Spec.HostAliases != nil {
hosts := make([]string, 0, len(podYAML.Spec.HostAliases))
for _, hostAlias := range podYAML.Spec.HostAliases {
for _, host := range hostAlias.Hostnames {
hosts = append(hosts, host+":"+hostAlias.IP)
}
}
podOptions = append(podOptions, libpod.WithPodHosts(hosts))
}
nsOptions, err := generate.GetNamespaceOptions(strings.Split(createconfig.DefaultKernelNamespaces, ","))
p, err := kube.ToPodGen(ctx, podName, podYAML)
if err != nil {
return nil, err
}
podOptions = append(podOptions, nsOptions...)
podPorts := getPodPorts(podYAML.Spec.Containers)
podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
if options.Network != "" {
switch strings.ToLower(options.Network) {
case "bridge", "host":
@ -183,12 +136,12 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
// networks.
networks := strings.Split(options.Network, ",")
logrus.Debugf("Pod joining CNI networks: %v", networks)
podOptions = append(podOptions, libpod.WithPodNetworks(networks))
p.CNINetworks = append(p.CNINetworks, networks...)
}
}
// Create the Pod
pod, err = ic.Libpod.NewPod(ctx, podOptions...)
pod, err := generate.MakePod(p, ic.Libpod)
if err != nil {
return nil, err
}
@ -197,29 +150,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if err != nil {
return nil, err
}
hasUserns := false
if podInfraID != "" {
podCtr, err := ic.Libpod.GetContainer(podInfraID)
if err != nil {
return nil, err
}
mappings, err := podCtr.IDMappings()
if err != nil {
return nil, err
}
hasUserns = len(mappings.UIDMap) > 0
}
namespaces := map[string]string{
// Disabled during code review per mheon
//"pid": fmt.Sprintf("container:%s", podInfraID),
"net": fmt.Sprintf("container:%s", podInfraID),
"ipc": fmt.Sprintf("container:%s", podInfraID),
"uts": fmt.Sprintf("container:%s", podInfraID),
}
if hasUserns {
namespaces["user"] = fmt.Sprintf("container:%s", podInfraID)
}
if !options.Quiet {
writer = os.Stderr
}
@ -295,7 +226,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
volumes[volume.Name] = hostPath.Path
}
seccompPaths, err := initializeSeccompPaths(podYAML.ObjectMeta.Annotations, options.SeccompProfileRoot)
seccompPaths, err := kube.InitializeSeccompPaths(podYAML.ObjectMeta.Annotations, options.SeccompProfileRoot)
if err != nil {
return nil, err
}
@ -347,17 +278,19 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
pullPolicy = util.PullImageAlways
}
}
// This ensures the image is in the image store
newImage, err := ic.Libpod.ImageRuntime().New(ctx, container.Image, options.SignaturePolicy, options.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, nil, pullPolicy)
if err != nil {
return nil, err
}
conf, err := kubeContainerToCreateConfig(ctx, container, newImage, namespaces, volumes, pod.ID(), podName, podInfraID, configMaps, seccompPaths, options.LogDriver)
specGen, err := kube.ToSpecGen(ctx, container, container.Image, newImage, volumes, pod.ID(), podName, podInfraID, configMaps, seccompPaths, ctrRestartPolicy)
if err != nil {
return nil, err
}
conf.RestartPolicy = ctrRestartPolicy
ctr, err := createconfig.CreateContainerFromCreateConfig(ctx, ic.Libpod, conf, pod)
ctr, err := generate.MakeContainer(ctx, ic.Libpod, specGen)
if err != nil {
return nil, err
}
@ -365,12 +298,20 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
//start the containers
for _, ctr := range containers {
if err := ctr.Start(ctx, true); err != nil {
// Making this a hard failure here to avoid a mess
// the other containers are in created status
podStartErrors, err := pod.Start(ctx)
if err != nil {
return nil, err
}
// Previous versions of playkube started containers individually and then
// looked for errors. Because we now use the uber-Pod start call, we should
// iterate the map of possible errors and return one if there is a problem. This
// keeps the behavior the same
for _, e := range podStartErrors {
if e != nil {
return nil, e
}
}
playKubePod.ID = pod.ID()
@ -383,268 +324,6 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
return &report, nil
}
// getPodPorts converts a slice of kube container descriptions to an
// array of ocicni portmapping descriptions usable in libpod
func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
var infraPorts []ocicni.PortMapping
for _, container := range containers {
for _, p := range container.Ports {
// A hostPort with no containerPort implies the same port inside the container.
if p.HostPort != 0 && p.ContainerPort == 0 {
p.ContainerPort = p.HostPort
}
// Default the protocol to TCP when the YAML leaves it unset.
if p.Protocol == "" {
p.Protocol = "tcp"
}
portBinding := ocicni.PortMapping{
HostPort: p.HostPort,
ContainerPort: p.ContainerPort,
Protocol: strings.ToLower(string(p.Protocol)),
HostIP: p.HostIP,
}
// only hostPort is utilized in podman context, all container ports
// are accessible inside the shared network namespace
if p.HostPort != 0 {
infraPorts = append(infraPorts, portBinding)
}
}
}
return infraPorts
}
// setupSecurityContext copies a kube container's SecurityContext onto the
// createconfig security and user configs. It is a no-op when the container
// YAML has no SecurityContext.
func setupSecurityContext(securityConfig *createconfig.SecurityConfig, userConfig *createconfig.UserConfig, containerYAML v1.Container) {
if containerYAML.SecurityContext == nil {
return
}
if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
securityConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
}
if containerYAML.SecurityContext.Privileged != nil {
securityConfig.Privileged = *containerYAML.SecurityContext.Privileged
}
// Kube's AllowPrivilegeEscalation is the inverse of podman's no-new-privs.
if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
securityConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
}
// Each SELinux option is recorded twice: as a generic "label=..." security
// opt and as a bare label opt, each under its own key (user/role/type/level).
if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil {
if seopt.User != "" {
securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=user:%s", seopt.User))
securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("user:%s", seopt.User))
}
if seopt.Role != "" {
securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=role:%s", seopt.Role))
securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("role:%s", seopt.Role))
}
if seopt.Type != "" {
securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=type:%s", seopt.Type))
securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("type:%s", seopt.Type))
}
if seopt.Level != "" {
securityConfig.SecurityOpts = append(securityConfig.SecurityOpts, fmt.Sprintf("label=level:%s", seopt.Level))
securityConfig.LabelOpts = append(securityConfig.LabelOpts, fmt.Sprintf("level:%s", seopt.Level))
}
}
if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
for _, capability := range caps.Add {
securityConfig.CapAdd = append(securityConfig.CapAdd, string(capability))
}
for _, capability := range caps.Drop {
securityConfig.CapDrop = append(securityConfig.CapDrop, string(capability))
}
}
if containerYAML.SecurityContext.RunAsUser != nil {
userConfig.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser)
}
// RunAsGroup is appended as "user:group"; default the user to root when unset.
if containerYAML.SecurityContext.RunAsGroup != nil {
if userConfig.User == "" {
userConfig.User = "0"
}
userConfig.User = fmt.Sprintf("%s:%d", userConfig.User, *containerYAML.SecurityContext.RunAsGroup)
}
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID, podName, infraID string, configMaps []v1.ConfigMap, seccompPaths *kubeSeccompPaths, logDriver string) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
pidConfig createconfig.PidConfig
networkConfig createconfig.NetworkConfig
cgroupConfig createconfig.CgroupConfig
utsConfig createconfig.UtsConfig
ipcConfig createconfig.IpcConfig
userConfig createconfig.UserConfig
securityConfig createconfig.SecurityConfig
)
// The default for MemorySwappiness is -1, not 0
containerConfig.Resources.MemorySwappiness = -1
containerConfig.Image = containerYAML.Image
containerConfig.ImageID = newImage.ID()
// podName should be non-empty for Deployment objects to be able to create
// multiple pods having containers with unique names
if podName == "" {
return nil, errors.Errorf("kubeContainerToCreateConfig got empty podName")
}
containerConfig.Name = fmt.Sprintf("%s-%s", podName, containerYAML.Name)
containerConfig.Tty = containerYAML.TTY
containerConfig.Pod = podID
// Inspect error is deliberately ignored; imageData may be nil below.
imageData, _ := newImage.Inspect(ctx)
userConfig.User = "0"
if imageData != nil {
userConfig.User = imageData.Config.User
}
setupSecurityContext(&securityConfig, &userConfig, containerYAML)
// Since we prefix the container name with pod name to work-around the uniqueness requirement,
// the seccom profile should reference the actual container name from the YAML
// but apply to the containers with the prefixed name
securityConfig.SeccompProfilePath = seccompPaths.findForContainer(containerYAML.Name)
var err error
// CPU limit is given in milliCPU; convert to CFS period/quota.
milliCPU, err := quantityToInt64(containerYAML.Resources.Limits.Cpu())
if err != nil {
return nil, errors.Wrap(err, "Failed to set CPU quota")
}
if milliCPU > 0 {
period, quota := util.CoresToPeriodAndQuota(float64(milliCPU) / 1000)
containerConfig.Resources.CPUPeriod = period
containerConfig.Resources.CPUQuota = quota
}
containerConfig.Resources.Memory, err = quantityToInt64(containerYAML.Resources.Limits.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory limit")
}
containerConfig.Resources.MemoryReservation, err = quantityToInt64(containerYAML.Resources.Requests.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory reservation")
}
// Command resolution order: image ENTRYPOINT, then YAML command, then args/CMD.
containerConfig.Command = []string{}
if imageData != nil && imageData.Config != nil {
containerConfig.Command = imageData.Config.Entrypoint
}
if len(containerYAML.Command) != 0 {
containerConfig.Command = containerYAML.Command
}
// doc https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
if len(containerYAML.Args) != 0 {
containerConfig.Command = append(containerConfig.Command, containerYAML.Args...)
} else if len(containerYAML.Command) == 0 {
// Add the Cmd from the image config only if containerYAML.Command and containerYAML.Args are empty
// NOTE(review): imageData may be nil here (Inspect error ignored above),
// in which case this line would panic — confirm.
containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...)
}
if imageData != nil && len(containerConfig.Command) == 0 {
return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name)
}
containerConfig.UserCommand = containerConfig.Command
// Default stop signal is SIGTERM (15) unless the image overrides it.
containerConfig.StopSignal = 15
containerConfig.WorkDir = "/"
if imageData != nil {
// FIXME,
// we are currently ignoring imageData.Config.ExposedPorts
containerConfig.BuiltinImgVolumes = imageData.Config.Volumes
if imageData.Config.WorkingDir != "" {
containerConfig.WorkDir = imageData.Config.WorkingDir
}
containerConfig.Labels = imageData.Config.Labels
if imageData.Config.StopSignal != "" {
stopSignal, err := util.ParseSignal(imageData.Config.StopSignal)
if err != nil {
return nil, err
}
containerConfig.StopSignal = stopSignal
}
}
// YAML WorkingDir takes precedence over the image's working directory.
if containerYAML.WorkingDir != "" {
containerConfig.WorkDir = containerYAML.WorkingDir
}
// If the user does not pass in ID mappings, just set to basics
if userConfig.IDMappings == nil {
userConfig.IDMappings = &storage.IDMappingOptions{}
}
// Join the infra container's namespaces as resolved by the caller.
networkConfig.NetMode = ns.NetworkMode(namespaces["net"])
ipcConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
utsConfig.UtsMode = ns.UTSMode(namespaces["uts"])
// disabled in code review per mheon
//containerConfig.PidMode = ns.PidMode(namespaces["pid"])
userConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
if len(containerConfig.WorkDir) == 0 {
containerConfig.WorkDir = "/"
}
containerConfig.Pid = pidConfig
containerConfig.Network = networkConfig
containerConfig.Uts = utsConfig
containerConfig.Ipc = ipcConfig
containerConfig.Cgroup = cgroupConfig
containerConfig.User = userConfig
containerConfig.Security = securityConfig
if logDriver != "" {
containerConfig.LogDriver = logDriver
}
annotations := make(map[string]string)
if infraID != "" {
annotations[ann.SandboxID] = infraID
annotations[ann.ContainerType] = ann.ContainerTypeContainer
}
containerConfig.Annotations = annotations
// Environment Variables
// Start from the image env, then layer YAML Env and EnvFrom on top.
envs := map[string]string{}
if imageData != nil {
imageEnv, err := envLib.ParseSlice(imageData.Config.Env)
if err != nil {
return nil, errors.Wrap(err, "error parsing image environment variables")
}
envs = imageEnv
}
for _, env := range containerYAML.Env {
value := envVarValue(env, configMaps)
envs[env.Name] = value
}
for _, envFrom := range containerYAML.EnvFrom {
cmEnvs := envVarsFromConfigMap(envFrom, configMaps)
for k, v := range cmEnvs {
envs[k] = v
}
}
containerConfig.Env = envs
// Volume mounts must reference a volume declared at the pod level.
for _, volume := range containerYAML.VolumeMounts {
var readonly string
hostPath, exists := volumes[volume.Name]
if !exists {
return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
}
if err := parse.ValidateVolumeCtrDir(volume.MountPath); err != nil {
return nil, errors.Wrapf(err, "error in parsing MountPath")
}
if volume.ReadOnly {
readonly = ":ro"
}
containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s%s", hostPath, volume.MountPath, readonly))
}
return &containerConfig, nil
}
// readConfigMapFromFile returns a kubernetes configMap obtained from --configmap flag
func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
var cm v1.ConfigMap
@ -664,125 +343,3 @@ func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {
return cm, nil
}
// envVarsFromConfigMap returns all key-value pairs as env vars from a configMap that matches the envFrom setting of a container
// Returns an empty map when no ConfigMap matches. Note that on a match the
// ConfigMap's Data map itself is returned, not a copy.
func envVarsFromConfigMap(envFrom v1.EnvFromSource, configMaps []v1.ConfigMap) map[string]string {
envs := map[string]string{}
if envFrom.ConfigMapRef != nil {
cmName := envFrom.ConfigMapRef.Name
for _, c := range configMaps {
if cmName == c.Name {
envs = c.Data
break
}
}
}
return envs
}
// envVarValue returns the environment variable value configured within the container's env setting.
// It gets the value from a configMap if specified, otherwise returns env.Value
// A ConfigMap reference that cannot be resolved falls through to env.Value
// (the zero string when unset).
func envVarValue(env v1.EnvVar, configMaps []v1.ConfigMap) string {
for _, c := range configMaps {
if env.ValueFrom != nil {
if env.ValueFrom.ConfigMapKeyRef != nil {
if env.ValueFrom.ConfigMapKeyRef.Name == c.Name {
if value, ok := c.Data[env.ValueFrom.ConfigMapKeyRef.Key]; ok {
return value
}
}
}
}
}
return env.Value
}
// kubeSeccompPaths holds information about a pod YAML's seccomp configuration
// it holds both container and pod seccomp paths
type kubeSeccompPaths struct {
// containerPaths maps a container name to its seccomp profile path.
containerPaths map[string]string
// podPath is the pod-level fallback profile path.
podPath string
}
// findForContainer checks whether a container has a seccomp path configured for it
// if not, it returns the podPath, which should always have a value
func (k *kubeSeccompPaths) findForContainer(ctrName string) string {
if path, ok := k.containerPaths[ctrName]; ok {
return path
}
return k.podPath
}
// initializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp
// it parses both pod and container level
// if the annotation is of the form "localhost/%s", the seccomp profile will be set to profileRoot/%s
func initializeSeccompPaths(annotations map[string]string, profileRoot string) (*kubeSeccompPaths, error) {
seccompPaths := &kubeSeccompPaths{containerPaths: make(map[string]string)}
var err error
if annotations != nil {
for annKeyValue, seccomp := range annotations {
// check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/
prefixAndCtr := strings.Split(annKeyValue, "/")
if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix {
continue
} else if len(prefixAndCtr) != 2 {
// this could be caused by a user inputting either of
// container.seccomp.security.alpha.kubernetes.io{,/}
// both of which are invalid
return nil, errors.Errorf("Invalid seccomp path: %s", prefixAndCtr[0])
}
path, err := verifySeccompPath(seccomp, profileRoot)
if err != nil {
return nil, err
}
seccompPaths.containerPaths[prefixAndCtr[1]] = path
}
// Pod-level annotation wins as the fallback; otherwise use libpod's default.
podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey]
if ok {
seccompPaths.podPath, err = verifySeccompPath(podSeccomp, profileRoot)
} else {
seccompPaths.podPath, err = libpod.DefaultSeccompPath()
}
if err != nil {
return nil, err
}
}
return seccompPaths, nil
}
// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path
// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
func verifySeccompPath(path string, profileRoot string) (string, error) {
switch path {
case v1.DeprecatedSeccompProfileDockerDefault:
fallthrough
case v1.SeccompProfileRuntimeDefault:
return libpod.DefaultSeccompPath()
case "unconfined":
return path, nil
default:
// "localhost/<name>" resolves to a profile file under profileRoot.
parts := strings.Split(path, "/")
if parts[0] == "localhost" {
return filepath.Join(profileRoot, parts[1]), nil
}
return "", errors.Errorf("invalid seccomp path: %s", path)
}
}
// quantityToInt64 converts a Kubernetes resource.Quantity to int64, falling
// back to the unscaled decimal value when a direct conversion is not possible.
func quantityToInt64(quantity *resource.Quantity) (int64, error) {
if i, ok := quantity.AsInt64(); ok {
return i, nil
}
if i, ok := quantity.AsDec().Unscaled(); ok {
return i, nil
}
return 0, errors.Errorf("Quantity cannot be represented as int64: %v", quantity)
}

View File

@ -6,34 +6,9 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// configMapList is the shared fixture for the configMap-related tests below:
// two ConfigMaps ("bar" and "foo") each carrying a single "myvar" key.
var configMapList = []v1.ConfigMap{
{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: "bar",
},
Data: map[string]string{
"myvar": "bar",
},
},
{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Data: map[string]string{
"myvar": "foo",
},
},
}
func TestReadConfigMapFromFile(t *testing.T) {
tests := []struct {
name string
@ -55,11 +30,11 @@ data:
false,
"",
v1.ConfigMap{
TypeMeta: metav1.TypeMeta{
TypeMeta: v12.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
ObjectMeta: v12.ObjectMeta{
Name: "foo",
},
Data: map[string]string{
@ -114,141 +89,3 @@ data:
})
}
}
// TestEnvVarsFromConfigMap covers envVarsFromConfigMap for a matching
// ConfigMap, a missing ConfigMap, and an empty ConfigMap list.
func TestEnvVarsFromConfigMap(t *testing.T) {
tests := []struct {
name string
envFrom v1.EnvFromSource
configMapList []v1.ConfigMap
expected map[string]string
}{
{
"ConfigMapExists",
v1.EnvFromSource{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
},
},
configMapList,
map[string]string{
"myvar": "foo",
},
},
{
"ConfigMapDoesNotExist",
v1.EnvFromSource{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: "doesnotexist",
},
},
},
configMapList,
map[string]string{},
},
{
"EmptyConfigMapList",
v1.EnvFromSource{
ConfigMapRef: &v1.ConfigMapEnvSource{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
},
},
[]v1.ConfigMap{},
map[string]string{},
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
result := envVarsFromConfigMap(test.envFrom, test.configMapList)
assert.Equal(t, test.expected, result)
})
}
}
// TestEnvVarValue covers envVarValue's ConfigMapKeyRef resolution: a matching
// key, a missing key, a missing ConfigMap, and an empty ConfigMap list.
func TestEnvVarValue(t *testing.T) {
tests := []struct {
name string
envVar v1.EnvVar
configMapList []v1.ConfigMap
expected string
}{
{
"ConfigMapExists",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "myvar",
},
},
},
configMapList,
"foo",
},
{
"ContainerKeyDoesNotExistInConfigMap",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "doesnotexist",
},
},
},
configMapList,
"",
},
{
"ConfigMapDoesNotExist",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "doesnotexist",
},
Key: "myvar",
},
},
},
configMapList,
"",
},
{
"EmptyConfigMapList",
v1.EnvVar{
Name: "FOO",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: "foo",
},
Key: "myvar",
},
},
},
[]v1.ConfigMap{},
"",
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
result := envVarValue(test.envVar, test.configMapList)
assert.Equal(t, test.expected, result)
})
}
}

View File

@ -0,0 +1,312 @@
package kube
import (
"context"
"fmt"
"strings"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/podman/v2/libpod/image"
ann "github.com/containers/podman/v2/pkg/annotations"
"github.com/containers/podman/v2/pkg/specgen"
"github.com/containers/podman/v2/pkg/util"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
// ToPodGen converts a kube PodTemplateSpec into a specgen.PodSpecGenerator,
// carrying over the name, labels, hostname, host networking, host aliases and
// host-port mappings. It never returns a non-nil error in its current form.
func ToPodGen(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec) (*specgen.PodSpecGenerator, error) {
p := specgen.NewPodSpecGenerator()
p.Name = podName
p.Labels = podYAML.ObjectMeta.Labels
// TODO we only configure Process namespace. We also need to account for Host{IPC,Network,PID}
// which is not currently possible with pod create
if podYAML.Spec.ShareProcessNamespace != nil && *podYAML.Spec.ShareProcessNamespace {
p.SharedNamespaces = append(p.SharedNamespaces, "pid")
}
// Fall back to the pod name when the YAML sets no hostname.
p.Hostname = podYAML.Spec.Hostname
if p.Hostname == "" {
p.Hostname = podName
}
if podYAML.Spec.HostNetwork {
p.NetNS.Value = "host"
}
// HostAliases become "hostname:ip" entries added to the pod's hosts file.
if podYAML.Spec.HostAliases != nil {
hosts := make([]string, 0, len(podYAML.Spec.HostAliases))
for _, hostAlias := range podYAML.Spec.HostAliases {
for _, host := range hostAlias.Hostnames {
hosts = append(hosts, host+":"+hostAlias.IP)
}
}
p.HostAdd = hosts
}
podPorts := getPodPorts(podYAML.Spec.Containers)
p.PortMappings = podPorts
return p, nil
}
// ToSpecGen converts a kube v1.Container (plus its resolved image, pod
// volumes, ConfigMaps and seccomp paths) into a specgen.SpecGenerator ready
// to be passed to generate.MakeContainer.
func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newImage *image.Image, volumes map[string]string, podID, podName, infraID string, configMaps []v1.ConfigMap, seccompPaths *KubeSeccompPaths, restartPolicy string) (*specgen.SpecGenerator, error) {
s := specgen.NewSpecGenerator(iid, false)
// podName should be non-empty for Deployment objects to be able to create
// multiple pods having containers with unique names
if len(podName) < 1 {
return nil, errors.Errorf("kubeContainerToCreateConfig got empty podName")
}
s.Name = fmt.Sprintf("%s-%s", podName, containerYAML.Name)
s.Terminal = containerYAML.TTY
s.Pod = podID
setupSecurityContext(s, containerYAML)
// Since we prefix the container name with pod name to work-around the uniqueness requirement,
// the seccomp profile should reference the actual container name from the YAML
// but apply to the containers with the prefixed name
s.SeccompProfilePath = seccompPaths.FindForContainer(containerYAML.Name)
s.ResourceLimits = &spec.LinuxResources{}
// CPU limit is given in milliCPU; convert to CFS period/quota.
milliCPU, err := quantityToInt64(containerYAML.Resources.Limits.Cpu())
if err != nil {
return nil, errors.Wrap(err, "Failed to set CPU quota")
}
if milliCPU > 0 {
period, quota := util.CoresToPeriodAndQuota(float64(milliCPU) / 1000)
s.ResourceLimits.CPU = &spec.LinuxCPU{
Quota: &quota,
Period: &period,
}
}
limit, err := quantityToInt64(containerYAML.Resources.Limits.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory limit")
}
memoryRes, err := quantityToInt64(containerYAML.Resources.Requests.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory reservation")
}
if limit > 0 || memoryRes > 0 {
s.ResourceLimits.Memory = &spec.LinuxMemory{}
}
if limit > 0 {
s.ResourceLimits.Memory.Limit = &limit
}
if memoryRes > 0 {
s.ResourceLimits.Memory.Reservation = &memoryRes
}
// TODO: We don't understand why specgen does not take care of this, but
// integration tests clearly pointed out that it was required.
s.Command = []string{}
imageData, err := newImage.Inspect(ctx)
if err != nil {
return nil, err
}
s.WorkDir = "/"
if imageData != nil && imageData.Config != nil {
if imageData.Config.WorkingDir != "" {
s.WorkDir = imageData.Config.WorkingDir
}
s.Command = imageData.Config.Entrypoint
s.Labels = imageData.Config.Labels
if len(imageData.Config.StopSignal) > 0 {
stopSignal, err := util.ParseSignal(imageData.Config.StopSignal)
if err != nil {
return nil, err
}
s.StopSignal = &stopSignal
}
}
if len(containerYAML.Command) != 0 {
s.Command = containerYAML.Command
}
// doc https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes
if len(containerYAML.Args) != 0 {
s.Command = append(s.Command, containerYAML.Args...)
}
// NOTE(review): unlike the old createconfig path, the image CMD is never
// appended here when both Command and Args are empty — confirm specgen
// supplies it downstream.
// FIXME,
// we are currently ignoring imageData.Config.ExposedPorts
if containerYAML.WorkingDir != "" {
s.WorkDir = containerYAML.WorkingDir
}
annotations := make(map[string]string)
if infraID != "" {
annotations[ann.SandboxID] = infraID
annotations[ann.ContainerType] = ann.ContainerTypeContainer
}
s.Annotations = annotations
// Environment Variables
envs := map[string]string{}
for _, env := range containerYAML.Env {
value := envVarValue(env, configMaps)
envs[env.Name] = value
}
for _, envFrom := range containerYAML.EnvFrom {
cmEnvs := envVarsFromConfigMap(envFrom, configMaps)
for k, v := range cmEnvs {
envs[k] = v
}
}
s.Env = envs
// Volume mounts must reference a volume declared at the pod level; they
// become plain bind mounts on the generated spec.
for _, volume := range containerYAML.VolumeMounts {
hostPath, exists := volumes[volume.Name]
if !exists {
return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
}
if err := parse.ValidateVolumeCtrDir(volume.MountPath); err != nil {
return nil, errors.Wrapf(err, "error in parsing MountPath")
}
mount := spec.Mount{
Destination: volume.MountPath,
Source: hostPath,
Type: "bind",
}
if volume.ReadOnly {
mount.Options = []string{"ro"}
}
s.Mounts = append(s.Mounts, mount)
}
s.RestartPolicy = restartPolicy
return s, nil
}
// setupSecurityContext translates a kube container's SecurityContext onto the
// specgen.SpecGenerator. It is a no-op when the container YAML carries no
// SecurityContext.
func setupSecurityContext(s *specgen.SpecGenerator, containerYAML v1.Container) {
	if containerYAML.SecurityContext == nil {
		return
	}
	if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
		s.ReadOnlyFilesystem = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
	}
	if containerYAML.SecurityContext.Privileged != nil {
		s.Privileged = *containerYAML.SecurityContext.Privileged
	}
	// Kube's AllowPrivilegeEscalation is the inverse of no-new-privileges.
	if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
		s.NoNewPrivileges = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
	}
	if seopt := containerYAML.SecurityContext.SELinuxOptions; seopt != nil {
		// BUG FIX: each SELinux option must be emitted under its own label
		// key (user/role/type/level); previously every entry was emitted as
		// "role:..." which produced incorrect SELinux label options for
		// User, Type and Level (the pre-migration code used distinct keys).
		if seopt.User != "" {
			s.SelinuxOpts = append(s.SelinuxOpts, fmt.Sprintf("user:%s", seopt.User))
		}
		if seopt.Role != "" {
			s.SelinuxOpts = append(s.SelinuxOpts, fmt.Sprintf("role:%s", seopt.Role))
		}
		if seopt.Type != "" {
			s.SelinuxOpts = append(s.SelinuxOpts, fmt.Sprintf("type:%s", seopt.Type))
		}
		if seopt.Level != "" {
			s.SelinuxOpts = append(s.SelinuxOpts, fmt.Sprintf("level:%s", seopt.Level))
		}
	}
	if caps := containerYAML.SecurityContext.Capabilities; caps != nil {
		for _, capability := range caps.Add {
			s.CapAdd = append(s.CapAdd, string(capability))
		}
		for _, capability := range caps.Drop {
			s.CapDrop = append(s.CapDrop, string(capability))
		}
	}
	if containerYAML.SecurityContext.RunAsUser != nil {
		s.User = fmt.Sprintf("%d", *containerYAML.SecurityContext.RunAsUser)
	}
	// RunAsGroup is appended as "user:group"; default the user to root when unset.
	if containerYAML.SecurityContext.RunAsGroup != nil {
		if s.User == "" {
			s.User = "0"
		}
		s.User = fmt.Sprintf("%s:%d", s.User, *containerYAML.SecurityContext.RunAsGroup)
	}
}
// quantityToInt64 converts a Kubernetes resource.Quantity to int64, falling
// back to the unscaled decimal value when a direct conversion is not possible.
func quantityToInt64(quantity *resource.Quantity) (int64, error) {
if i, ok := quantity.AsInt64(); ok {
return i, nil
}
if i, ok := quantity.AsDec().Unscaled(); ok {
return i, nil
}
return 0, errors.Errorf("Quantity cannot be represented as int64: %v", quantity)
}
// envVarsFromConfigMap returns all key-value pairs as env vars from a configMap that matches the envFrom setting of a container
// Returns an empty map when no ConfigMap matches. Note that on a match the
// ConfigMap's Data map itself is returned, not a copy.
func envVarsFromConfigMap(envFrom v1.EnvFromSource, configMaps []v1.ConfigMap) map[string]string {
envs := map[string]string{}
if envFrom.ConfigMapRef != nil {
cmName := envFrom.ConfigMapRef.Name
for _, c := range configMaps {
if cmName == c.Name {
envs = c.Data
break
}
}
}
return envs
}
// envVarValue returns the environment variable value configured within the container's env setting.
// It gets the value from a configMap if specified, otherwise returns env.Value
// A ConfigMap reference that cannot be resolved falls through to env.Value
// (the zero string when unset).
func envVarValue(env v1.EnvVar, configMaps []v1.ConfigMap) string {
for _, c := range configMaps {
if env.ValueFrom != nil {
if env.ValueFrom.ConfigMapKeyRef != nil {
if env.ValueFrom.ConfigMapKeyRef.Name == c.Name {
if value, ok := c.Data[env.ValueFrom.ConfigMapKeyRef.Key]; ok {
return value
}
}
}
}
}
return env.Value
}
// getPodPorts converts a slice of kube container descriptions to an
// array of portmapping
func getPodPorts(containers []v1.Container) []specgen.PortMapping {
var infraPorts []specgen.PortMapping
for _, container := range containers {
for _, p := range container.Ports {
// A hostPort with no containerPort implies the same port inside the container.
if p.HostPort != 0 && p.ContainerPort == 0 {
p.ContainerPort = p.HostPort
}
// Default the protocol to TCP when the YAML leaves it unset.
if p.Protocol == "" {
p.Protocol = "tcp"
}
// Kube ports are int32; valid port numbers always fit in uint16.
portBinding := specgen.PortMapping{
HostPort: uint16(p.HostPort),
ContainerPort: uint16(p.ContainerPort),
Protocol: strings.ToLower(string(p.Protocol)),
HostIP: p.HostIP,
}
// only hostPort is utilized in podman context, all container ports
// are accessible inside the shared network namespace
if p.HostPort != 0 {
infraPorts = append(infraPorts, portBinding)
}
}
}
return infraPorts
}

View File

@ -0,0 +1,172 @@
package kube
import (
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestEnvVarsFromConfigMap verifies that envVarsFromConfigMap returns the data
// of the referenced config map when it exists, and an empty map otherwise.
func TestEnvVarsFromConfigMap(t *testing.T) {
	testCases := []struct {
		name          string
		envFrom       v1.EnvFromSource
		configMapList []v1.ConfigMap
		expected      map[string]string
	}{
		{
			name: "ConfigMapExists",
			envFrom: v1.EnvFromSource{
				ConfigMapRef: &v1.ConfigMapEnvSource{
					LocalObjectReference: v1.LocalObjectReference{Name: "foo"},
				},
			},
			configMapList: configMapList,
			expected:      map[string]string{"myvar": "foo"},
		},
		{
			name: "ConfigMapDoesNotExist",
			envFrom: v1.EnvFromSource{
				ConfigMapRef: &v1.ConfigMapEnvSource{
					LocalObjectReference: v1.LocalObjectReference{Name: "doesnotexist"},
				},
			},
			configMapList: configMapList,
			expected:      map[string]string{},
		},
		{
			name: "EmptyConfigMapList",
			envFrom: v1.EnvFromSource{
				ConfigMapRef: &v1.ConfigMapEnvSource{
					LocalObjectReference: v1.LocalObjectReference{Name: "foo"},
				},
			},
			configMapList: []v1.ConfigMap{},
			expected:      map[string]string{},
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, envVarsFromConfigMap(tc.envFrom, tc.configMapList))
		})
	}
}
// TestEnvVarValue verifies that envVarValue resolves a ConfigMapKeyRef to the
// config map's value when both the map and key exist, and otherwise falls back
// to the env var's literal value (empty in these cases).
func TestEnvVarValue(t *testing.T) {
	testCases := []struct {
		name          string
		envVar        v1.EnvVar
		configMapList []v1.ConfigMap
		expected      string
	}{
		{
			name: "ConfigMapExists",
			envVar: v1.EnvVar{
				Name: "FOO",
				ValueFrom: &v1.EnvVarSource{
					ConfigMapKeyRef: &v1.ConfigMapKeySelector{
						LocalObjectReference: v1.LocalObjectReference{Name: "foo"},
						Key:                  "myvar",
					},
				},
			},
			configMapList: configMapList,
			expected:      "foo",
		},
		{
			name: "ContainerKeyDoesNotExistInConfigMap",
			envVar: v1.EnvVar{
				Name: "FOO",
				ValueFrom: &v1.EnvVarSource{
					ConfigMapKeyRef: &v1.ConfigMapKeySelector{
						LocalObjectReference: v1.LocalObjectReference{Name: "foo"},
						Key:                  "doesnotexist",
					},
				},
			},
			configMapList: configMapList,
			expected:      "",
		},
		{
			name: "ConfigMapDoesNotExist",
			envVar: v1.EnvVar{
				Name: "FOO",
				ValueFrom: &v1.EnvVarSource{
					ConfigMapKeyRef: &v1.ConfigMapKeySelector{
						LocalObjectReference: v1.LocalObjectReference{Name: "doesnotexist"},
						Key:                  "myvar",
					},
				},
			},
			configMapList: configMapList,
			expected:      "",
		},
		{
			name: "EmptyConfigMapList",
			envVar: v1.EnvVar{
				Name: "FOO",
				ValueFrom: &v1.EnvVarSource{
					ConfigMapKeyRef: &v1.ConfigMapKeySelector{
						LocalObjectReference: v1.LocalObjectReference{Name: "foo"},
						Key:                  "myvar",
					},
				},
			},
			configMapList: []v1.ConfigMap{},
			expected:      "",
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, envVarValue(tc.envVar, tc.configMapList))
		})
	}
}
// configMapList is the shared fixture for the tests above: two config maps,
// "bar" and "foo", each defining a single key "myvar" whose value mirrors
// the map's own name so lookups can be distinguished in assertions.
var configMapList = []v1.ConfigMap{
	{
		TypeMeta: v12.TypeMeta{
			Kind: "ConfigMap",
		},
		ObjectMeta: v12.ObjectMeta{
			Name: "bar",
		},
		Data: map[string]string{
			"myvar": "bar",
		},
	},
	{
		TypeMeta: v12.TypeMeta{
			Kind: "ConfigMap",
		},
		ObjectMeta: v12.ObjectMeta{
			Name: "foo",
		},
		Data: map[string]string{
			"myvar": "foo",
		},
	},
}

View File

@ -0,0 +1,84 @@
package kube
import (
"path/filepath"
"strings"
"github.com/containers/podman/v2/libpod"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
)
// KubeSeccompPaths holds information about a pod YAML's seccomp configuration
// it holds both container and pod seccomp paths
type KubeSeccompPaths struct {
	// containerPaths maps a container name to its container-level profile path
	containerPaths map[string]string
	// podPath is the pod-level profile path, used as the fallback
	podPath string
}

// FindForContainer checks whether a container has a seccomp path configured for it
// if not, it returns the podPath, which should always have a value
func (k *KubeSeccompPaths) FindForContainer(ctrName string) string {
	path, found := k.containerPaths[ctrName]
	if !found {
		return k.podPath
	}
	return path
}
// InitializeSeccompPaths takes annotations from the pod object metadata and finds annotations pertaining to seccomp
// it parses both pod and container level
// if the annotation is of the form "localhost/%s", the seccomp profile will be set to profileRoot/%s
func InitializeSeccompPaths(annotations map[string]string, profileRoot string) (*KubeSeccompPaths, error) {
	seccompPaths := &KubeSeccompPaths{containerPaths: make(map[string]string)}
	// Ranging over and indexing a nil map are both safe, so no nil guard is
	// needed; this also ensures podPath is always initialized (previously a
	// nil annotations map left it empty, breaking FindForContainer's
	// documented fallback invariant).
	for annKeyValue, seccomp := range annotations {
		// check if it is prefaced with container.seccomp.security.alpha.kubernetes.io/
		prefixAndCtr := strings.Split(annKeyValue, "/")
		if prefixAndCtr[0]+"/" != v1.SeccompContainerAnnotationKeyPrefix {
			continue
		} else if len(prefixAndCtr) != 2 {
			// this could be caused by a user inputting either of
			// container.seccomp.security.alpha.kubernetes.io{,/}
			// both of which are invalid
			return nil, errors.Errorf("invalid seccomp path: %s", prefixAndCtr[0])
		}
		path, err := verifySeccompPath(seccomp, profileRoot)
		if err != nil {
			return nil, err
		}
		seccompPaths.containerPaths[prefixAndCtr[1]] = path
	}
	// Resolve the pod-level profile: the pod annotation wins, otherwise fall
	// back to libpod's default seccomp profile path.
	var err error
	if podSeccomp, ok := annotations[v1.SeccompPodAnnotationKey]; ok {
		seccompPaths.podPath, err = verifySeccompPath(podSeccomp, profileRoot)
	} else {
		seccompPaths.podPath, err = libpod.DefaultSeccompPath()
	}
	if err != nil {
		return nil, err
	}
	return seccompPaths, nil
}
// verifySeccompPath takes a path and checks whether it is a default, unconfined, or a path
// the available options are parsed as defined in https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
func verifySeccompPath(path string, profileRoot string) (string, error) {
	switch path {
	case v1.DeprecatedSeccompProfileDockerDefault, v1.SeccompProfileRuntimeDefault:
		return libpod.DefaultSeccompPath()
	case "unconfined":
		return path, nil
	default:
		// "localhost/<profile>" resolves to <profileRoot>/<profile>.
		// A bare "localhost" previously caused an index-out-of-range panic
		// (parts[1] on a one-element split), and "localhost/a/b" silently
		// dropped everything after the first separator; require a non-empty
		// profile name and keep the full sub-path.
		profile := strings.TrimPrefix(path, "localhost/")
		if profile != path && profile != "" {
			return filepath.Join(profileRoot, profile), nil
		}
		return "", errors.Errorf("invalid seccomp path: %s", path)
	}
}

View File

@ -959,7 +959,7 @@ var _ = Describe("Podman play kube", func() {
kube.WaitWithDefaultTimeout()
Expect(kube.ExitCode()).To(Equal(0))
inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(pod), "--format", "{{ .HostConfig.ExtraHosts }}"})
inspect := podmanTest.Podman([]string{"inspect", pod.Name, "--format", "{{ .InfraConfig.HostAdd}}"})
inspect.WaitWithDefaultTimeout()
Expect(inspect.ExitCode()).To(Equal(0))
Expect(inspect.OutputToString()).