Fix handling of readonly containers when defined in kube.yaml

The containers should still be able to write to tmpfs-mounted directories.

Also clean up the output of podman kube generate so that it does not show default values.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Daniel J Walsh
2022-11-17 13:24:00 -05:00
parent 480c7fbf53
commit af86b4f62a
6 changed files with 102 additions and 64 deletions
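
For context, a minimal sketch of the mapping this change introduces: when podman kube generate sees a tmpfs mount, the generated pod gets an in-memory emptyDir volume instead of a hostPath volume, so a read-only container still has a writable mount at that path. The snippet below is illustrative only, not code from this commit; it uses the upstream k8s.io/api/core/v1 and opencontainers runtime-spec packages rather than Podman's vendored copies, and tmpfsToKubeVolume is a made-up helper name.

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	v1 "k8s.io/api/core/v1"
)

// tmpfsToKubeVolume sketches the new behaviour: a tmpfs mount becomes an
// emptyDir volume backed by memory rather than a hostPath volume.
func tmpfsToKubeVolume(m specs.Mount) (v1.VolumeMount, v1.Volume) {
	name := "tmp"
	vol := v1.Volume{Name: name}
	vol.EmptyDir = &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}
	vm := v1.VolumeMount{Name: name, MountPath: m.Destination}
	return vm, vol
}

func main() {
	vm, vol := tmpfsToKubeVolume(specs.Mount{Destination: "/tmp", Type: "tmpfs"})
	fmt.Println(vm.MountPath, vol.EmptyDir.Medium) // prints: /tmp Memory
}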

@@ -435,7 +435,7 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po
 			}
 		}
 	}
-	podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes))
+	podVolumes := []v1.Volume{}
 	for _, vol := range deDupPodVolumes {
 		podVolumes = append(podVolumes, *vol)
 	}
@@ -471,18 +471,12 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta
 		CreationTimestamp: v12.Now(),
 		Annotations:       annotations,
 	}
-	// Set enableServiceLinks to false as podman doesn't use the service port environment variables
-	enableServiceLinks := false
-	// Set automountServiceAccountToken to false as podman doesn't use service account tokens
-	automountServiceAccountToken := false
 	ps := v1.PodSpec{
-		Containers:                   containers,
-		Hostname:                     hostname,
-		HostNetwork:                  hostNetwork,
-		InitContainers:               initCtrs,
-		Volumes:                      volumes,
-		EnableServiceLinks:           &enableServiceLinks,
-		AutomountServiceAccountToken: &automountServiceAccountToken,
+		Containers:     containers,
+		Hostname:       hostname,
+		HostNetwork:    hostNetwork,
+		InitContainers: initCtrs,
+		Volumes:        volumes,
 	}
 	if !hostUsers {
 		ps.HostUsers = &hostUsers
@@ -894,35 +888,46 @@ func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
 	vm := v1.VolumeMount{}
 	vo := v1.Volume{}
-	name, err := convertVolumePathToName(m.Source)
-	if err != nil {
-		return vm, vo, err
+	var (
+		name string
+		err  error
+	)
+	if m.Type == define.TypeTmpfs {
+		name = "tmp"
+		vo.EmptyDir = &v1.EmptyDirVolumeSource{
+			Medium: v1.StorageMediumMemory,
+		}
+		vo.Name = name
+	} else {
+		name, err = convertVolumePathToName(m.Source)
+		if err != nil {
+			return vm, vo, err
+		}
+		// To avoid naming conflicts with any persistent volume mounts, add a unique suffix to the volume's name.
+		name += "-host"
+		vo.Name = name
+		vo.HostPath = &v1.HostPathVolumeSource{}
+		vo.HostPath.Path = m.Source
+		isDir, err := isHostPathDirectory(m.Source)
+		// neither a directory or a file lives here, default to creating a directory
+		// TODO should this be an error instead?
+		var hostPathType v1.HostPathType
+		switch {
+		case err != nil:
+			hostPathType = v1.HostPathDirectoryOrCreate
+		case isDir:
+			hostPathType = v1.HostPathDirectory
+		default:
+			hostPathType = v1.HostPathFile
+		}
+		vo.HostPath.Type = &hostPathType
 	}
-	// To avoid naming conflicts with any persistent volume mounts, add a unique suffix to the volume's name.
-	name += "-host"
 	vm.Name = name
 	vm.MountPath = m.Destination
 	if cutil.StringInSlice("ro", m.Options) {
 		vm.ReadOnly = true
 	}
-	vo.Name = name
-	vo.HostPath = &v1.HostPathVolumeSource{}
-	vo.HostPath.Path = m.Source
-	isDir, err := isHostPathDirectory(m.Source)
-	// neither a directory or a file lives here, default to creating a directory
-	// TODO should this be an error instead?
-	var hostPathType v1.HostPathType
-	switch {
-	case err != nil:
-		hostPathType = v1.HostPathDirectoryOrCreate
-	case isDir:
-		hostPathType = v1.HostPathDirectory
-	default:
-		hostPathType = v1.HostPathFile
-	}
-	vo.HostPath.Type = &hostPathType
 	return vm, vo, nil
 }

@@ -20,6 +20,7 @@ import (
 	"github.com/containers/common/pkg/secrets"
 	cutil "github.com/containers/common/pkg/util"
 	"github.com/containers/image/v5/manifest"
+	itypes "github.com/containers/image/v5/types"
 	"github.com/containers/podman/v4/libpod/define"
 	ann "github.com/containers/podman/v4/pkg/annotations"
 	"github.com/containers/podman/v4/pkg/domain/entities"
@@ -119,6 +120,8 @@ type CtrSpecGenOptions struct {
 	ConfigMaps []v1.ConfigMap
 	// SeccompPaths for finding the seccomp profile path
 	SeccompPaths *KubeSeccompPaths
+	// ReadOnly make all containers root file system readonly
+	ReadOnly itypes.OptionalBool
 	// RestartPolicy defines the restart policy of the container
 	RestartPolicy string
 	// NetNSIsHost tells the container to use the host netns
@@ -444,6 +447,10 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
 		}
 	}
+	if ro := opts.ReadOnly; ro != itypes.OptionalBoolUndefined {
+		s.ReadOnlyFilesystem = (ro == itypes.OptionalBoolTrue)
+	}
 	// Make sure the container runs in a systemd unit which is
 	// stored as a label at container creation.
 	if unit := os.Getenv(systemdDefine.EnvVariable); unit != "" {

@@ -676,7 +676,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
 	// Only add read-only tmpfs mounts in case that we are read-only and the
 	// read-only tmpfs flag has been set.
-	mounts, volumes, overlayVolumes, imageVolumes, err := parseVolumes(c.Volume, c.Mount, c.TmpFS, c.ReadOnlyTmpFS && c.ReadOnly)
+	mounts, volumes, overlayVolumes, imageVolumes, err := parseVolumes(c.Volume, c.Mount, c.TmpFS)
 	if err != nil {
 		return err
 	}
@@ -854,6 +854,10 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions
 		s.PasswdEntry = c.PasswdEntry
 	}
+	if c.ReadOnly && c.ReadOnlyTmpFS {
+		s.Mounts = addReadOnlyMounts(s.Mounts)
+	}
 	return nil
 }

@@ -26,7 +26,7 @@ var (
 // Does not handle image volumes, init, and --volumes-from flags.
 // Can also add tmpfs mounts from read-only tmpfs.
 // TODO: handle options parsing/processing via containers/storage/pkg/mount
-func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bool) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, []*specgen.ImageVolume, error) {
+func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, []*specgen.ImageVolume, error) {
 	// Get mounts from the --mounts flag.
 	unifiedMounts, unifiedVolumes, unifiedImageVolumes, err := Mounts(mountFlag)
 	if err != nil {
@@ -78,26 +78,6 @@ func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string, addReadOnlyTmpfs bo
 		unifiedMounts[dest] = tmpfs
 	}
-	// If requested, add tmpfs filesystems for read-only containers.
-	if addReadOnlyTmpfs {
-		readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"}
-		options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
-		for _, dest := range readonlyTmpfs {
-			if _, ok := unifiedMounts[dest]; ok {
-				continue
-			}
-			if _, ok := unifiedVolumes[dest]; ok {
-				continue
-			}
-			unifiedMounts[dest] = spec.Mount{
-				Destination: dest,
-				Type:        define.TypeTmpfs,
-				Source:      "tmpfs",
-				Options:     options,
-			}
-		}
-	}
 	// Check for conflicts between named volumes, overlay & image volumes,
 	// and mounts
 	allMounts := make(map[string]bool)
@@ -723,3 +703,26 @@ func validChownFlag(flag string) (bool, error) {
 func unixPathClean(p string) string {
 	return path.Clean(p)
 }
+func addReadOnlyMounts(mounts []spec.Mount) []spec.Mount {
+	readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"}
+	options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"}
+	for _, dest := range readonlyTmpfs {
+		for _, m := range mounts {
+			if m.Destination == dest {
+				continue
+			}
+		}
+		mnt := spec.Mount{
+			Destination: dest,
+			Type:        define.TypeTmpfs,
+			Source:      define.TypeTmpfs,
+			Options:     options,
+		}
+		if dest != "/run" {
+			mnt.Options = append(mnt.Options, "noexec")
+		}
+		mounts = append(mounts, mnt)
+	}
+	return mounts
+}
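
As a side note, here is a self-contained sketch of the same read-only tmpfs injection with the duplicate check pulled out into a helper. In Go, a continue inside the inner range loop above only advances that inner loop, so a helper or a flag is the usual way to skip a destination that is already mounted. hasMount, addReadOnlyTmpfs, and the standalone main below are illustrative assumptions, not part of this commit.

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// hasMount reports whether dest is already a mount destination.
// It is a hypothetical helper used only for this sketch.
func hasMount(mounts []spec.Mount, dest string) bool {
	for _, m := range mounts {
		if m.Destination == dest {
			return true
		}
	}
	return false
}

// addReadOnlyTmpfs mirrors the intent of addReadOnlyMounts: give a read-only
// container writable tmpfs at /tmp, /var/tmp and /run, with noexec everywhere
// except /run, skipping destinations the user already mounted.
func addReadOnlyTmpfs(mounts []spec.Mount) []spec.Mount {
	for _, dest := range []string{"/tmp", "/var/tmp", "/run"} {
		if hasMount(mounts, dest) {
			continue // something is already mounted here
		}
		mnt := spec.Mount{
			Destination: dest,
			Type:        "tmpfs",
			Source:      "tmpfs",
			Options:     []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"},
		}
		if dest != "/run" {
			mnt.Options = append(mnt.Options, "noexec")
		}
		mounts = append(mounts, mnt)
	}
	return mounts
}

func main() {
	for _, m := range addReadOnlyTmpfs(nil) {
		fmt.Println(m.Destination, m.Options)
	}
}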

@@ -71,10 +71,6 @@ var _ = Describe("Podman generate kube", func() {
 		Expect(pod.Spec.Containers[0]).To(HaveField("WorkingDir", ""))
 		Expect(pod.Spec.Containers[0].Env).To(BeNil())
 		Expect(pod).To(HaveField("Name", "top-pod"))
-		enableServiceLinks := false
-		Expect(pod.Spec).To(HaveField("EnableServiceLinks", &enableServiceLinks))
-		automountServiceAccountToken := false
-		Expect(pod.Spec).To(HaveField("AutomountServiceAccountToken", &automountServiceAccountToken))
 		numContainers := 0
 		for range pod.Spec.Containers {
@@ -169,10 +165,6 @@ var _ = Describe("Podman generate kube", func() {
 		err := yaml.Unmarshal(kube.Out.Contents(), pod)
 		Expect(err).ToNot(HaveOccurred())
 		Expect(pod.Spec).To(HaveField("HostNetwork", false))
-		enableServiceLinks := false
-		Expect(pod.Spec).To(HaveField("EnableServiceLinks", &enableServiceLinks))
-		automountServiceAccountToken := false
-		Expect(pod.Spec).To(HaveField("AutomountServiceAccountToken", &automountServiceAccountToken))
 		numContainers := 0
 		for range pod.Spec.Containers {

@@ -196,6 +196,30 @@ EOF
     run_podman rm -a
 }
+@test "podman kube play read-only" {
+    YAML=$PODMAN_TMPDIR/test.yml
+    run_podman create --pod new:pod1 --name test1 $IMAGE touch /testrw
+    run_podman create --pod pod1 --read-only --name test2 $IMAGE touch /testro
+    run_podman create --pod pod1 --read-only --name test3 $IMAGE touch /tmp/testtmp
+    run_podman kube generate pod1 -f $YAML
+    run_podman kube play --replace $YAML
+    run_podman container inspect --format '{{.HostConfig.ReadonlyRootfs}}' pod1-test1 pod1-test2 pod1-test3
+    is "$output" "false.*true.*true" "Rootfs should be read/only"
+    run_podman inspect --format "{{.State.ExitCode}}" pod1-test1
+    is "$output" "0" "Container / should be read/write"
+    run_podman inspect --format "{{.State.ExitCode}}" pod1-test2
+    is "$output" "1" "Container / should be read/only"
+    run_podman inspect --format "{{.State.ExitCode}}" pod1-test3
+    is "$output" "0" "/tmp in a read-only container should be read/write"
+    run_podman kube down - < $YAML
+    run_podman 1 container exists pod1-test1
+    run_podman 1 container exists pod1-test2
+    run_podman 1 container exists pod1-test3
+}
 @test "podman play with user from image" {
     TESTDIR=$PODMAN_TMPDIR/testdir
     mkdir -p $TESTDIR
@@ -416,4 +440,7 @@ spec:
     run_podman pod inspect test_pod --format "{{.InfraConfig.PortBindings}}"
     assert "$output" = "map[$HOST_PORT/tcp:[{ $HOST_PORT}]]"
     run_podman kube down $PODMAN_TMPDIR/testpod.yaml
+    run_podman pod rm -a -f
+    run_podman rm -a -f
 }