Add kube play support for image volume source
Signed-off-by: Mario Loriedo <mario.loriedo@gmail.com>
@@ -28,11 +28,12 @@ Currently, the supported Kubernetes kinds are:

`Kubernetes Pods or Deployments`

Only three volume types are supported by kube play, the *hostPath*, *emptyDir*, and *persistentVolumeClaim* volume types.
Only four volume types are supported by kube play, the *hostPath*, *emptyDir*, *persistentVolumeClaim*, and *image* volume types.

- When using the *hostPath* volume type, only the *default (empty)*, *DirectoryOrCreate*, *Directory*, *FileOrCreate*, *File*, *Socket*, *CharDevice* and *BlockDevice* subtypes are supported. Podman interprets the value of *hostPath* *path* as a file path when it contains at least one forward slash, otherwise Podman treats the value as the name of a named volume.
- When using a *persistentVolumeClaim*, the value for *claimName* is the name for the Podman named volume.
- When using an *emptyDir* volume, Podman creates an anonymous volume that is attached to the containers running inside the pod and is deleted once the pod is removed.
- When using an *image* volume, Podman creates a read-only image volume with an empty subpath (the whole image is mounted). The image must already exist locally. It is supported in rootful mode only.

Note: The default restart policy for containers is `always`. You can change the default by setting the `restartPolicy` field in the spec.
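For illustration, a minimal pod manifest using the new *image* volume type might look like the sketch below. The volume image reference `quay.io/example/data:latest` and the names are placeholders; per the text above, the referenced image must already exist locally and the pod must run in rootful mode.

```yaml
# Sketch only: mounts the (placeholder) image quay.io/example/data:latest
# read-only at /data inside the container.
apiVersion: v1
kind: Pod
metadata:
  name: image-volume-demo
spec:
  containers:
  - name: demo
    image: quay.io/libpod/alpine:latest
    command: ["top"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    image:
      reference: quay.io/example/data:latest
```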
@@ -159,7 +160,9 @@ spec:

and as a result environment variable `FOO` is set to `bar` for container `container-1`.

`Automounting Volumes`
`Automounting Volumes (deprecated)`

Note: The automounting annotation is deprecated. Kubernetes has [native support for image volumes](https://kubernetes.io/docs/tasks/configure-pod-container/image-volumes/), which should be used instead of this Podman-specific annotation.

An image can be automatically mounted into a container if the annotation `io.podman.annotations.kube.image.automount/$ctrname` is given. The following rules apply:
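For comparison, the deprecated annotation form is sketched below. The annotation key comes from the text above; treating its value as the image reference to mount is an assumption made for illustration, and all names are placeholders.

```yaml
# Sketch only: the annotation value (an image reference) is an assumed format.
apiVersion: v1
kind: Pod
metadata:
  name: automount-demo
  annotations:
    io.podman.annotations.kube.image.automount/demo: quay.io/example/data:latest
spec:
  containers:
  - name: demo
    image: quay.io/libpod/alpine:latest
    command: ["top"]
```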
@@ -795,6 +795,21 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
                    return nil, nil, err
                }
            }
        } else if v.Type == kube.KubeVolumeTypeImage {
            var cwd string
            if options.ContextDir != "" {
                cwd = options.ContextDir
            } else {
                cwd, err = os.Getwd()
                if err != nil {
                    return nil, nil, err
                }
            }

            _, err := ic.buildOrPullImage(ctx, cwd, writer, v.Source, v.ImagePullPolicy, options)
            if err != nil {
                return nil, nil, err
            }
        }
    }

@@ -1168,19 +1183,18 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
    return &report, sdNotifyProxies, nil
}

// getImageAndLabelInfo returns the image information and how the image should be pulled, as well as the labels to be used for the container in the pod.
// Moved this to a separate function so that it can be used for both init and regular containers when playing a kube yaml.
func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, annotations map[string]string, writer io.Writer, container v1.Container, options entities.PlayKubeOptions) (*libimage.Image, map[string]string, error) {
    // Contains all labels obtained from kube
    labels := make(map[string]string)
    var pulledImage *libimage.Image
    buildFile, err := getBuildFile(container.Image, cwd)
// buildImageFromContainerfile builds the container image and returns its details if these conditions are met:
// - A folder with the name of the image exists in the current directory
// - A Dockerfile or Containerfile exists in that folder
// - The image doesn't exist locally OR the user explicitly provided the option `--build`
func (ic *ContainerEngine) buildImageFromContainerfile(ctx context.Context, cwd string, writer io.Writer, image string, options entities.PlayKubeOptions) (*libimage.Image, error) {
    buildFile, err := getBuildFile(image, cwd)
    if err != nil {
        return nil, nil, err
        return nil, err
    }
    existsLocally, err := ic.Libpod.LibimageRuntime().Exists(container.Image)
    existsLocally, err := ic.Libpod.LibimageRuntime().Exists(image)
    if err != nil {
        return nil, nil, err
        return nil, err
    }
    if (len(buildFile) > 0) && ((!existsLocally && options.Build != types.OptionalBoolFalse) || (options.Build == types.OptionalBoolTrue)) {
        buildOpts := new(buildahDefine.BuildOptions)
@@ -1188,56 +1202,91 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string,
        buildOpts.ConfigureNetwork = buildahDefine.NetworkDefault
        isolation, err := bparse.IsolationOption("")
        if err != nil {
            return nil, nil, err
            return nil, err
        }
        buildOpts.Isolation = isolation
        buildOpts.CommonBuildOpts = commonOpts
        buildOpts.SystemContext = options.SystemContext
        buildOpts.Output = container.Image
        buildOpts.Output = image
        buildOpts.ContextDirectory = filepath.Dir(buildFile)
        buildOpts.ReportWriter = writer
        if _, _, err := ic.Libpod.Build(ctx, *buildOpts, []string{buildFile}...); err != nil {
            return nil, nil, err
            return nil, err
        }
        i, _, err := ic.Libpod.LibimageRuntime().LookupImage(container.Image, new(libimage.LookupImageOptions))
        builtImage, _, err := ic.Libpod.LibimageRuntime().LookupImage(image, new(libimage.LookupImageOptions))
        if err != nil {
            return nil, nil, err
            return nil, err
        }
        pulledImage = i
    } else {
        pullPolicy := config.PullPolicyMissing
        if len(container.ImagePullPolicy) > 0 {
            // Make sure to lower the strings since K8s pull policy
            // may be capitalized (see bugzilla.redhat.com/show_bug.cgi?id=1985905).
            rawPolicy := string(container.ImagePullPolicy)
            pullPolicy, err = config.ParsePullPolicy(strings.ToLower(rawPolicy))
            if err != nil {
                return nil, nil, err
            }
        } else {
            if named, err := reference.ParseNamed(container.Image); err == nil {
                tagged, isTagged := named.(reference.NamedTagged)
                if !isTagged || tagged.Tag() == "latest" {
                    // Make sure to always pull the latest image in case it got updated.
                    pullPolicy = config.PullPolicyNewer
                }
            }
        }
        // This ensures the image is in the image store
        pullOptions := &libimage.PullOptions{}
        pullOptions.AuthFilePath = options.Authfile
        pullOptions.CertDirPath = options.CertDir
        pullOptions.SignaturePolicyPath = options.SignaturePolicy
        pullOptions.Writer = writer
        pullOptions.Username = options.Username
        pullOptions.Password = options.Password
        pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify
        return builtImage, nil
    }
    return nil, nil
}

        pulledImages, err := ic.Libpod.LibimageRuntime().Pull(ctx, container.Image, pullPolicy, pullOptions)
// pullImageWithPolicy invokes libimage.Pull() to pull an image with the given PullPolicy.
// If the PullPolicy is not set:
// - use PullPolicyNewer if the image tag is set to "latest" or is not set
// - use PullPolicyMissing otherwise.
func (ic *ContainerEngine) pullImageWithPolicy(ctx context.Context, writer io.Writer, image string, policy v1.PullPolicy, options entities.PlayKubeOptions) (*libimage.Image, error) {
    pullPolicy := config.PullPolicyMissing
    if len(policy) > 0 {
        // Make sure to lower the strings since K8s pull policy
        // may be capitalized (see bugzilla.redhat.com/show_bug.cgi?id=1985905).
        rawPolicy := string(policy)
        parsedPolicy, err := config.ParsePullPolicy(strings.ToLower(rawPolicy))
        if err != nil {
            return nil, nil, err
            return nil, err
        }
        pulledImage = pulledImages[0]
        pullPolicy = parsedPolicy
    } else {
        if named, err := reference.ParseNamed(image); err == nil {
            tagged, isTagged := named.(reference.NamedTagged)
            if !isTagged || tagged.Tag() == "latest" {
                // Make sure to always pull the latest image in case it got updated.
                pullPolicy = config.PullPolicyNewer
            }
        }
    }
    // This ensures the image is in the image store
    pullOptions := &libimage.PullOptions{}
    pullOptions.AuthFilePath = options.Authfile
    pullOptions.CertDirPath = options.CertDir
    pullOptions.SignaturePolicyPath = options.SignaturePolicy
    pullOptions.Writer = writer
    pullOptions.Username = options.Username
    pullOptions.Password = options.Password
    pullOptions.InsecureSkipTLSVerify = options.SkipTLSVerify

    pulledImages, err := ic.Libpod.LibimageRuntime().Pull(ctx, image, pullPolicy, pullOptions)
    if err != nil {
        return nil, err
    }
    return pulledImages[0], err
}

// buildOrPullImage builds the image if a Containerfile is present in a directory
// with the name of the image. It pulls the image otherwise. It returns the image
// details.
func (ic *ContainerEngine) buildOrPullImage(ctx context.Context, cwd string, writer io.Writer, image string, policy v1.PullPolicy, options entities.PlayKubeOptions) (*libimage.Image, error) {
    buildImage, err := ic.buildImageFromContainerfile(ctx, cwd, writer, image, options)
    if err != nil {
        return nil, err
    }
    if buildImage != nil {
        return buildImage, nil
    } else {
        return ic.pullImageWithPolicy(ctx, writer, image, policy, options)
    }
}

// getImageAndLabelInfo returns the image information and how the image should be pulled, as well as the labels to be used for the container in the pod.
// Moved this to a separate function so that it can be used for both init and regular containers when playing a kube yaml.
func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, annotations map[string]string, writer io.Writer, container v1.Container, options entities.PlayKubeOptions) (*libimage.Image, map[string]string, error) {
    // Contains all labels obtained from kube
    labels := make(map[string]string)

    pulledImage, err := ic.buildOrPullImage(ctx, cwd, writer, container.Image, container.ImagePullPolicy, options)
    if err != nil {
        return nil, labels, err
    }

    // Handle kube annotations
@@ -62,6 +62,21 @@ type VolumeSource struct {
    // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
    // +optional
    EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty"`
    // image represents a container image pulled and mounted on the host machine.
    // The volume is resolved at pod startup depending on which PullPolicy value is provided:
    //
    // - Always: podman always attempts to pull the reference. Container creation will fail if the pull fails.
    // - Never: podman never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
    // - IfNotPresent: podman pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
    //
    // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
    // A failure to resolve or pull the image during pod startup will block containers from starting and the pod won't be created.
    // The container image gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
    // The volume will be mounted read-only (ro) with non-executable files (noexec).
    // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
    // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
    // +optional
    Image *ImageVolumeSource `json:"image,omitempty"`
}

// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@@ -465,6 +480,24 @@ type EmptyDirVolumeSource struct {
    SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
}

// ImageVolumeSource represents an image volume resource.
type ImageVolumeSource struct {
    // Required: Container image reference to be used.
    // Behaves in the same way as pod.spec.containers[*].image.
    // This field is optional to allow higher level config management to default or override
    // container images in workload controllers like Deployments and StatefulSets.
    // +optional
    Reference string `json:"reference,omitempty"`

    // Policy for pulling OCI objects. Possible values are:
    // Always: podman always attempts to pull the reference. Container creation will fail if the pull fails.
    // Never: podman never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
    // IfNotPresent: podman pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
    // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
    // +optional
    PullPolicy PullPolicy `json:"pullPolicy,omitempty"`
}

// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace
// +structType=atomic
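For illustration, the `PullPolicy` field documented above is set per image volume in a pod's `volumes` list, as in the sketch below. The image reference is a placeholder; the `Never` behavior follows the comment above, so only a local image is used and creation fails if it is absent.

```yaml
# Sketch only: an image volume entry with an explicit pull policy.
volumes:
- name: volume
  image:
    reference: quay.io/example/data:latest
    pullPolicy: Never
```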
@@ -567,6 +567,14 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
                Source: define.TypeTmpfs,
            }
            s.Mounts = append(s.Mounts, memVolume)
        case KubeVolumeTypeImage:
            imageVolume := specgen.ImageVolume{
                Destination: volume.MountPath,
                ReadWrite: false,
                Source: volumeSource.Source,
                SubPath: "",
            }
            s.ImageVolumes = append(s.ImageVolumes, &imageVolume)
        default:
            return nil, errors.New("unsupported volume source type")
        }
@@ -37,13 +37,14 @@ const (
    KubeVolumeTypeSecret
    KubeVolumeTypeEmptyDir
    KubeVolumeTypeEmptyDirTmpfs
    KubeVolumeTypeImage
)

//nolint:revive
type KubeVolume struct {
    // Type of volume to create
    Type KubeVolumeType
    // Path for bind mount or volume name for named volume
    // Path for bind mount, volume name for named volume or image name for image volume
    Source string
    // Items to add to a named volume created where the key is the file name and the value is the data
    // This is only used when there are volumes in the yaml that refer to a configmap
@@ -56,6 +57,8 @@ type KubeVolume struct {
    // DefaultMode sets the permissions on files created for the volume
    // This is optional and defaults to 0644
    DefaultMode int32
    // Used for volumes of type Image. Ignored for other volume types.
    ImagePullPolicy v1.PullPolicy
}

// Create a KubeVolume from a HostPathVolumeSource
@@ -279,6 +282,14 @@ func VolumeFromEmptyDir(emptyDirVolumeSource *v1.EmptyDirVolumeSource, name stri
    }
}

func VolumeFromImage(imageVolumeSource *v1.ImageVolumeSource, name string) (*KubeVolume, error) {
    return &KubeVolume{
        Type: KubeVolumeTypeImage,
        Source: imageVolumeSource.Reference,
        ImagePullPolicy: imageVolumeSource.PullPolicy,
    }, nil
}

// Create a KubeVolume from one of the supported VolumeSource
func VolumeFromSource(volumeSource v1.VolumeSource, configMaps []v1.ConfigMap, secretsManager *secrets.SecretsManager, volName, mountLabel string) (*KubeVolume, error) {
    switch {
@@ -292,6 +303,8 @@ func VolumeFromSource(volumeSource v1.VolumeSource, configMaps []v1.ConfigMap, s
        return VolumeFromSecret(volumeSource.Secret, secretsManager)
    case volumeSource.EmptyDir != nil:
        return VolumeFromEmptyDir(volumeSource.EmptyDir, volName)
    case volumeSource.Image != nil:
        return VolumeFromImage(volumeSource.Image, volName)
    default:
        return nil, errors.New("HostPath, ConfigMap, EmptyDir, Secret, and PersistentVolumeClaim are currently the only supported VolumeSource")
    }
@@ -1001,7 +1001,7 @@ _EOF
    run_podman rmi -f $userimage $from_image
}

@test "podman play with automount volume" {
@test "podman play with image volume (automount annotation and OCI VolumeSource)" {
    imgname1="automount-img1-$(safename)"
    imgname2="automount-img2-$(safename)"
    podname="p-$(safename)"
@@ -1046,6 +1046,7 @@ EOF

    run_podman kube down $TESTYAML

    # Testing the first technique to mount an OCI image: through a Pod annotation
    fname="/$PODMAN_TMPDIR/play_kube_wait_$(random_string 6).yaml"
    cat >$fname <<EOF
apiVersion: v1
@@ -1086,10 +1087,153 @@ EOF
    run_podman exec "$podname-$ctrname_not_mounted" ls /
    assert "$output" !~ "test" "No volume should be mounted in no-mount container"

    run_podman kube down $fname

    # Testing the second technique to mount an OCI image: using image volume
    fname="/$PODMAN_TMPDIR/play_kube_wait_$(random_string 6).yaml"
    cat >$fname <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: $podname
spec:
  restartPolicy: Never
  containers:
  - name: $ctrname
    image: $IMAGE
    command:
    - top
    volumeMounts:
    - name: volume1
      mountPath: /image1
    - name: volume2
      mountPath: /image2
  - name: $ctrname_not_mounted
    image: $IMAGE
    command:
    - top
  volumes:
  - name: volume1
    image:
      reference: $imgname1
  - name: volume2
    image:
      reference: $imgname2
EOF

    run_podman kube play $fname

    run_podman exec "$podname-$ctrname" ls -x /image1/test1
    assert "a b c" "ls /test1 inside container"

    run_podman exec "$podname-$ctrname" ls -x /image2/test2
    assert "asdf ejgre lteghe" "ls /test2 inside container"

    run_podman 1 exec "$podname-$ctrname" touch /image1/test1/readonly
    assert "$output" =~ "Read-only file system" "image mounted as readonly"

    run_podman exec "$podname-$ctrname_not_mounted" ls /
    assert "$output" !~ "image" "No volume should be mounted in no-mount container"

    run_podman kube down $fname
    run_podman rmi $imgname1 $imgname2
}

@test "podman play with image volume pull policies" {
    podname="p-$(safename)"
    ctrname="c-$(safename)"
    volimg_local="localhost/i-$(safename):latest" # only exists locally
    volimg_remote=${PODMAN_NONLOCAL_IMAGE_FQN} # only exists remotely
    volimg_both="quay.io/libpod/alpine:latest" # exists both remotely and locally

    localfile="localfile"

    # Pull $volimg_both and commit a local modification. As a result
    # the image exists both locally and remotely but the two versions
    # are slightly different.
    run_podman pull $volimg_both
    run_podman run --name $ctrname $volimg_both sh -c "touch /$localfile"
    run_podman commit $ctrname $volimg_both
    run_podman rm $ctrname

    # Tag $volimg_both as $volimg_local
    run_podman tag $volimg_both $volimg_local

    # Check that $volimg_both and $volimg_local exist locally
    run_podman image exists $volimg_both
    run_podman image exists $volimg_local

    # Check that $volimg_remote doesn't exist locally
    run_podman 1 image exists $volimg_remote

    # The test scenarios:
    # We verify the return code of kube play for different
    # combinations of image locations (remote/local/both)
    # and pull policies (IfNotPresent/Always/Never).
    # When the image exists both locally and remotely the
    # tests verify that the correct version of image is
    # mounted (0 for the local version, 1 for the remote
    # version).
    # When running the tests with $volimg_both, the test
    # with policy "Always" should be run as the last one,
    # as it pulls the remote image and overwrites the local
    # one.
    tests="
$volimg_local | IfNotPresent | 0 |
$volimg_local | Never | 0 |
$volimg_local | Always | 125 |
$volimg_remote | IfNotPresent | 0 |
$volimg_remote | Never | 125 |
$volimg_remote | Always | 0 |
$volimg_both | IfNotPresent | 0 | 0
$volimg_both | Never | 0 | 0
$volimg_both | Always | 0 | 1
"

    while read volimg policy playrc islocal; do

        fname="/$PODMAN_TMPDIR/play_kube_volimg_${policy}.yaml"
        cat >$fname <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: test
  name: $podname
spec:
  restartPolicy: Never
  containers:
  - name: $ctrname
    image: $IMAGE
    command:
    - top
    volumeMounts:
    - name: volume
      mountPath: /image
  volumes:
  - name: volume
    image:
      reference: $volimg
      pullPolicy: $policy
EOF
        run_podman $playrc kube play $fname

        if [[ "$islocal" != "''" ]]; then
            run_podman $islocal exec "$podname-$ctrname" ls /image/$localfile
        fi

        run_podman kube down $fname

        # If the remote-only image was pulled, remove it
        run_podman rmi -f $volimg_remote

    done < <(parse_table "$tests")

    run_podman rmi $volimg_local $volimg_both
}

@test "podman kube restore user namespace" {
    if ! is_rootless; then
        grep -E -q "^containers:" /etc/subuid || skip "no IDs allocated for user 'containers'"
@@ -14,7 +14,7 @@ one and only one (standard) image, and no running containers.
* `parse_table` - you can define tables of inputs and expected results,
  then read those in a `while` loop. This makes it easy to add new tests.
  Because bash is not a programming language, the caller of `parse_table`
  sometimes needs to massage the returned values; `015-run.bats` offers
  sometimes needs to massage the returned values; `030-run.bats` offers
  examples of how to deal with the more typical such issues.

* `run_podman` - runs command defined in `$PODMAN` (default: 'podman'