Add support for Job to kube generate & play

The kube generate command can now generate a yaml for
the Job kind, and the kube play command can create a pod
and containers with podman when passed a Job yaml.
Add relevant tests and docs for this.

Signed-off-by: Urvashi Mohnani <umohnani@redhat.com>
Author: Urvashi Mohnani
Date: 2024-07-25 13:37:23 -04:00
Committed by: Urvashi
Parent: 8578fddf0f
Commit: bdf96e7df2
13 changed files with 1101 additions and 17 deletions

View File

@ -48,7 +48,7 @@ var (
playOptions = playKubeOptionsWrapper{}
playDescription = `Reads in a structured file of Kubernetes YAML.
Creates pods or volumes based on the Kubernetes kind described in the YAML. Supported kinds are Pods, Deployments, DaemonSets, Jobs, and PersistentVolumeClaims.`
playCmd = &cobra.Command{
Use: "play [options] KUBEFILE|-",

View File

@ -176,12 +176,28 @@ Note: **N/A** means that the option cannot be supported in a single-node Podman
## DaemonSet Fields
| Field | Support |
|-----------------------------------------|---------|
| selector | ✅ |
| template | ✅ |
| minReadySeconds | no |
| strategy\.type | no |
| strategy\.rollingUpdate\.maxSurge | no |
| strategy\.rollingUpdate\.maxUnavailable | no |
| revisionHistoryLimit | no |

## Job Fields
| Field | Support |
|-------------------------|----------------------------------|
| activeDeadlineSeconds | no |
| selector | no (automatically set by k8s) |
| template | ✅ |
| backoffLimit | no |
| completionMode | no |
| completions | no (set to 1 with kube generate) |
| manualSelector | no |
| parallelism | no (set to 1 with kube generate) |
| podFailurePolicy | no |
| suspend | no |
| ttlSecondsAfterFinished | no |

View File

@ -31,7 +31,9 @@ Note that the generated Kubernetes YAML file can be used to re-run the deploymen
Note that if the pod being generated was created with the **--infra-name** flag set, then the generated kube yaml will have the **io.podman.annotations.infra.name** set where the value is the name of the infra container set by the user.
Note that both Deployment and DaemonSet can only have `restartPolicy` set to `Always`.
Note that Job can only have `restartPolicy` set to `OnFailure` or `Never`. By default, podman sets it to `Never` when generating a kube yaml using `kube generate`.
## OPTIONS
@ -52,9 +54,9 @@ Note: this can only be set with the option `--type=deployment`.
Generate a Kubernetes service object in addition to the Pods. Used to generate a Service specification for the corresponding Pod output. In particular, if the object has portmap bindings, the service specification includes a NodePort declaration to expose the service. A random port is assigned by Podman in the specification.
#### **--type**, **-t**=*pod* | *deployment* | *daemonset* | *job*

The Kubernetes kind to generate in the YAML file. Currently, the only supported Kubernetes specifications are `Pod`, `Deployment`, `Job`, and `DaemonSet`. By default, the `Pod` specification is generated.
## EXAMPLES

View File

@ -24,6 +24,7 @@ Currently, the supported Kubernetes kinds are:
- ConfigMap
- Secret
- DaemonSet
- Job
`Kubernetes Pods or Deployments`

View File

@ -61,4 +61,6 @@ const (
K8sKindDeployment = "deployment"
// A DaemonSet kube yaml spec
K8sKindDaemonSet = "daemonset"
// a Job kube yaml spec
K8sKindJob = "job"
)

View File

@ -233,6 +233,61 @@ func GenerateForKubeDeployment(ctx context.Context, pod *YAMLPod, options entiti
return &dep, nil
}
// GenerateForKubeJob returns a YAMLJob from a YAMLPod that is then used to create a Kubernetes Job
// kind YAML.
func GenerateForKubeJob(ctx context.Context, pod *YAMLPod, options entities.GenerateKubeOptions) (*YAMLJob, error) {
// Restart policy for Job cannot be set to Always
if options.Type == define.K8sKindJob && pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return nil, fmt.Errorf("k8s Jobs can not have restartPolicy set to Always; only Never and OnFailure policies allowed")
}
// Create label map that will be added to podSpec and Job metadata
// The matching label lets the job know which pods to manage
appKey := "app"
matchLabels := map[string]string{appKey: pod.Name}
// Add the key:value (app:pod-name) to the podSpec labels
if pod.Labels == nil {
pod.Labels = matchLabels
} else {
pod.Labels[appKey] = pod.Name
}
jobSpec := YAMLJobSpec{
Template: &YAMLPodTemplateSpec{
PodTemplateSpec: v1.PodTemplateSpec{
ObjectMeta: pod.ObjectMeta,
},
Spec: pod.Spec,
},
}
// Set the completions and parallelism to 1 by default for the Job
completions, parallelism := int32(1), int32(1)
jobSpec.Completions = &completions
jobSpec.Parallelism = &parallelism
// Set the restart policy to Never, as k8s requires a Job to have a restart policy
// of OnFailure or Never set in the kube yaml
jobSpec.Template.Spec.RestartPolicy = v1.RestartPolicyNever
// Create the Job object
job := YAMLJob{
Job: v1.Job{
ObjectMeta: v12.ObjectMeta{
Name: pod.Name + "-job",
CreationTimestamp: pod.CreationTimestamp,
Labels: pod.Labels,
},
TypeMeta: v12.TypeMeta{
Kind: "Job",
APIVersion: "batch/v1",
},
},
Spec: &jobSpec,
}
return &job, nil
}
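Taken together, the generator emits a batch/v1 Job named `<pod-name>-job` with completions and parallelism pinned to 1 and the restart policy forced to Never. A rough sketch of that output shape, using the upstream k8s.io/api types and sigs.k8s.io/yaml instead of podman's vendored wrappers (an assumption made only to keep the example self-contained; the wrapper types below exist precisely to omit empty fields that the upstream structs keep):

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Mirror what GenerateForKubeJob sets: one completion, one parallel
	// pod, restartPolicy Never, and the pod name suffixed with "-job".
	one := int32(1)
	job := batchv1.Job{
		TypeMeta:   metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "mypod-job", Labels: map[string]string{"app": "mypod"}},
		Spec: batchv1.JobSpec{
			Completions: &one,
			Parallelism: &one,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "mypod"}},
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers:    []corev1.Container{{Name: "c1", Image: "alpine"}},
				},
			},
		},
	}
	out, err := yaml.Marshal(&job)
	if err != nil {
		panic(err)
	}
	// Note: the upstream types still emit empty stanzas such as
	// "status: {}", which is what podman's YAML* wrappers suppress.
	fmt.Print(string(out))
}
```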
// GenerateForKube generates a v1.PersistentVolumeClaim from a libpod volume.
func (v *Volume) GenerateForKube() *v1.PersistentVolumeClaim {
annotations := make(map[string]string)
@ -328,6 +383,15 @@ type YAMLDaemonSetSpec struct {
Strategy *v1.DaemonSetUpdateStrategy `json:"strategy,omitempty"`
}
// YAMLJobSpec represents the same k8s API batch JobSpec with a small
// change and that is having Template as a pointer to YAMLPodTemplateSpec,
// because Go doesn't omit an empty struct and we want to omit the template
// and any empty fields in the Pod YAML.
type YAMLJobSpec struct {
v1.JobSpec
Template *YAMLPodTemplateSpec `json:"template,omitempty"`
}
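The pointer-versus-value distinction this comment relies on is easy to demonstrate: `omitempty` never fires for a struct value, only for a nil pointer. A minimal illustrative sketch (not podman code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type template struct {
	Name string `json:"name,omitempty"`
}

// Embedding the template by value: omitempty has no effect on struct
// values, so an empty template still serializes as "template":{}.
type specByValue struct {
	Template template `json:"template,omitempty"`
}

// Shadowing it with a pointer, as YAMLJobSpec does: a nil pointer is
// genuinely empty, so the field disappears from the output.
type specByPointer struct {
	Template *template `json:"template,omitempty"`
}

func main() {
	v, _ := json.Marshal(specByValue{})
	p, _ := json.Marshal(specByPointer{})
	fmt.Println(string(v)) // {"template":{}}
	fmt.Println(string(p)) // {}
}
```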
// YAMLDaemonSet represents the same k8s API core DaemonSet with a small change
// and that is having Spec as a pointer to YAMLDaemonSetSpec and Status as a pointer to
// k8s API core DaemonSetStatus.
@ -350,6 +414,12 @@ type YAMLDeployment struct {
Status *v1.DeploymentStatus `json:"status,omitempty"`
}
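// YAMLJob represents the same k8s API batch Job with a small change
// and that is having Spec as a pointer to YAMLJobSpec and Status as a pointer to
// k8s API batch JobStatus. Because Go doesn't omit empty struct and we want to
// omit Status and an empty Spec in the YAML.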
type YAMLJob struct {
v1.Job
Spec *YAMLJobSpec `json:"spec,omitempty"`
Status *v1.JobStatus `json:"status,omitempty"`
}
// YAMLService represents the same k8s API core Service struct with a small
// change and that is having Status as a pointer to k8s API core ServiceStatus.
// Because Go doesn't omit empty struct and we want to omit Status in YAML

View File

@ -244,6 +244,16 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string,
return nil, err
}
typeContent = append(typeContent, b)
case define.K8sKindJob:
job, err := libpod.GenerateForKubeJob(ctx, libpod.ConvertV1PodToYAMLPod(po), options)
if err != nil {
return nil, err
}
b, err := generateKubeYAML(job)
if err != nil {
return nil, err
}
typeContent = append(typeContent, b)
case define.K8sKindPod:
b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po))
if err != nil {
@ -251,7 +261,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string,
}
typeContent = append(typeContent, b)
default:
return nil, fmt.Errorf("invalid generation type - only pods, deployments, jobs, and daemonsets are currently supported: %+v", options.Type)
}
if options.Service {
@ -311,6 +321,16 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener
return nil, nil, err
}
out = append(out, b)
case define.K8sKindJob:
job, err := libpod.GenerateForKubeJob(ctx, libpod.ConvertV1PodToYAMLPod(po), options)
if err != nil {
return nil, nil, err
}
b, err := generateKubeYAML(job)
if err != nil {
return nil, nil, err
}
out = append(out, b)
case define.K8sKindPod:
b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po))
if err != nil {
@ -318,7 +338,7 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener
}
out = append(out, b)
default:
return nil, nil, fmt.Errorf("invalid generation type - only pods, deployments, jobs, and daemonsets are currently supported")
}
if options.Service {

View File

@ -391,6 +391,22 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
}
notifyProxies = append(notifyProxies, proxies...)
report.Pods = append(report.Pods, r.Pods...)
validKinds++
ranContainers = true
case "Job":
var jobYAML v1.Job
if err := yaml.Unmarshal(document, &jobYAML); err != nil {
return nil, fmt.Errorf("unable to read YAML as Kube Job: %w", err)
}
r, proxies, err := ic.playKubeJob(ctx, &jobYAML, options, &ipIndex, configMaps, serviceContainer)
if err != nil {
return nil, err
}
notifyProxies = append(notifyProxies, proxies...)
report.Pods = append(report.Pods, r.Pods...)
validKinds++
ranContainers = true
@ -549,6 +565,29 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
return &report, proxies, nil
}
func (ic *ContainerEngine) playKubeJob(ctx context.Context, jobYAML *v1.Job, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) {
var (
jobName string
podSpec v1.PodTemplateSpec
report entities.PlayKubeReport
)
jobName = jobYAML.ObjectMeta.Name
if jobName == "" {
return nil, nil, errors.New("job does not have a name")
}
podSpec = jobYAML.Spec.Template
podName := fmt.Sprintf("%s-pod", jobName)
podReport, proxies, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, jobYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, nil, fmt.Errorf("encountered while bringing up pod %s: %w", podName, err)
}
report.Pods = podReport.Pods
return &report, proxies, nil
}
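So `kube play` does not model the Job controller at all: it validates the name, lifts `.spec.template` out of the Job, and reuses the existing pod code path under the name `<job-name>-pod`. A standalone sketch of that unwrap step, using the upstream k8s.io/api/batch/v1 types and sigs.k8s.io/yaml rather than podman's vendored copies (an assumption to keep the example runnable on its own):

```go
package main

import (
	"errors"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"sigs.k8s.io/yaml"
)

const jobDoc = `
apiVersion: batch/v1
kind: Job
metadata:
  name: demo
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: c1
        image: alpine
        command: ["true"]
`

func main() {
	var job batchv1.Job
	if err := yaml.Unmarshal([]byte(jobDoc), &job); err != nil {
		panic(fmt.Errorf("unable to read YAML as Kube Job: %w", err))
	}
	// Same preconditions as playKubeJob: a Job must be named, and the
	// pod that backs it is named "<job-name>-pod".
	if job.ObjectMeta.Name == "" {
		panic(errors.New("job does not have a name"))
	}
	podSpec := job.Spec.Template
	podName := fmt.Sprintf("%s-pod", job.ObjectMeta.Name)
	fmt.Println(podName, podSpec.Spec.RestartPolicy) // demo-pod Never
}
```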
func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec, options entities.PlayKubeOptions, ipIndex *int, annotations map[string]string, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) {
var (
writer io.Writer
@ -1502,7 +1541,7 @@ func sortKubeKinds(documentList [][]byte) ([][]byte, error) {
}
switch kind {
case "Pod", "Deployment", "DaemonSet", "Job":
sortedDocumentList = append(sortedDocumentList, document)
default:
sortedDocumentList = append([][]byte{document}, sortedDocumentList...)
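Only the tail of `sortKubeKinds` is visible here, but the effect of the shown cases is that workload kinds are appended while everything else is prepended, so PVCs, ConfigMaps, and Secrets are created before the pods that consume them. A toy sketch of that ordering on bare kind strings, assuming the visible cases are the whole switch:

```go
package main

import "fmt"

// sortKinds mimics the ordering shown above: recognized workload kinds
// are appended, anything else is prepended, so non-workload objects end
// up ahead of the pods that use them.
func sortKinds(kinds []string) []string {
	var sorted []string
	for _, kind := range kinds {
		switch kind {
		case "Pod", "Deployment", "DaemonSet", "Job":
			sorted = append(sorted, kind)
		default:
			sorted = append([]string{kind}, sorted...)
		}
	}
	return sorted
}

func main() {
	fmt.Println(sortKinds([]string{"Job", "PersistentVolumeClaim", "ConfigMap", "Pod"}))
	// Output: [ConfigMap PersistentVolumeClaim Job Pod]
}
```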
@ -1633,6 +1672,15 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, opt
}
podName := fmt.Sprintf("%s-pod", deploymentName)
podNames = append(podNames, podName)
case "Job":
var jobYAML v1.Job
if err := yaml.Unmarshal(document, &jobYAML); err != nil {
return nil, fmt.Errorf("unable to read YAML as Kube Job: %w", err)
}
jobName := jobYAML.ObjectMeta.Name
podName := fmt.Sprintf("%s-pod", jobName)
podNames = append(podNames, podName)
case "PersistentVolumeClaim":
var pvcYAML v1.PersistentVolumeClaim
if err := yaml.Unmarshal(document, &pvcYAML); err != nil {

View File

@ -5176,3 +5176,629 @@ type DaemonSetList struct {
// A list of daemon sets.
Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Job represents the configuration of a single job.
type Job struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of a job.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Current status of a job.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// JobList is a collection of jobs.
type JobList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of Jobs.
Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// CompletionMode specifies how Pod completions of a Job are tracked.
// +enum
type CompletionMode string
const (
// NonIndexedCompletion is a Job completion mode. In this mode, the Job is
// considered complete when there have been .spec.completions
// successfully completed Pods. Pod completions are homologous to each other.
NonIndexedCompletion CompletionMode = "NonIndexed"
// IndexedCompletion is a Job completion mode. In this mode, the Pods of a
// Job get an associated completion index from 0 to (.spec.completions - 1).
// The Job is considered complete when a Pod completes for each completion
// index.
IndexedCompletion CompletionMode = "Indexed"
)
// PodFailurePolicyAction specifies how a Pod failure is handled.
// +enum
type PodFailurePolicyAction string
const (
// This is an action which might be taken on a pod failure - mark the
// pod's job as Failed and terminate all running pods.
PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob"
// This is an action which might be taken on a pod failure - mark the
// Job's index as failed to avoid restarts within this index. This action
// can only be used when backoffLimitPerIndex is set.
// This value is beta-level.
PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
// This is an action which might be taken on a pod failure - the counter towards
// .backoffLimit, represented by the job's .status.failed field, is not
// incremented and a replacement pod is created.
PodFailurePolicyActionIgnore PodFailurePolicyAction = "Ignore"
// This is an action which might be taken on a pod failure - the pod failure
// is handled in the default way - the counter towards .backoffLimit,
// represented by the job's .status.failed field, is incremented.
PodFailurePolicyActionCount PodFailurePolicyAction = "Count"
)
// +enum
type PodFailurePolicyOnExitCodesOperator string
const (
PodFailurePolicyOnExitCodesOpIn PodFailurePolicyOnExitCodesOperator = "In"
PodFailurePolicyOnExitCodesOpNotIn PodFailurePolicyOnExitCodesOperator = "NotIn"
)
// PodReplacementPolicy specifies the policy for creating pod replacements.
// +enum
type PodReplacementPolicy string
const (
// TerminatingOrFailed means that we recreate pods
// when they are terminating (has a metadata.deletionTimestamp) or failed.
TerminatingOrFailed PodReplacementPolicy = "TerminatingOrFailed"
// Failed means to wait until a previously created Pod is fully terminated (has phase
// Failed or Succeeded) before creating a replacement Pod.
Failed PodReplacementPolicy = "Failed"
)
// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling
// a failed pod based on its container exit codes. In particular, it looks up the
// .state.terminated.exitCode for each app container and init container status,
// represented by the .status.containerStatuses and .status.initContainerStatuses
// fields in the Pod status, respectively. Containers completed with success
// (exit code 0) are excluded from the requirement check.
type PodFailurePolicyOnExitCodesRequirement struct {
// Restricts the check for exit codes to the container with the
// specified name. When null, the rule applies to all containers.
// When specified, it should match one of the container or initContainer
// names in the pod template.
// +optional
ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
// Represents the relationship between the container exit code(s) and the
// specified values. Containers completed with success (exit code 0) are
// excluded from the requirement check. Possible values are:
//
// - In: the requirement is satisfied if at least one container exit code
// (might be multiple if there are multiple containers not restricted
// by the 'containerName' field) is in the set of specified values.
// - NotIn: the requirement is satisfied if at least one container exit code
// (might be multiple if there are multiple containers not restricted
// by the 'containerName' field) is not in the set of specified values.
// Additional values may be added in the future. Clients should
// react to an unknown operator by assuming the requirement is not satisfied.
Operator PodFailurePolicyOnExitCodesOperator `json:"operator" protobuf:"bytes,2,req,name=operator"`
// Specifies the set of values. Each returned container exit code (might be
// multiple in case of multiple containers) is checked against this set of
// values with respect to the operator. The list of values must be ordered
// and must not contain duplicates. Value '0' cannot be used for the In operator.
// At least one element is required. At most 255 elements are allowed.
// +listType=set
Values []int32 `json:"values" protobuf:"varint,3,rep,name=values"`
}
// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching
// an actual pod condition type.
type PodFailurePolicyOnPodConditionsPattern struct {
// Specifies the required Pod condition type. To match a pod condition
// it is required that specified type equals the pod condition type.
Type PodConditionType `json:"type" protobuf:"bytes,1,req,name=type"`
// Specifies the required Pod condition status. To match a pod condition
// it is required that the specified status equals the pod condition status.
// Defaults to True.
Status ConditionStatus `json:"status" protobuf:"bytes,2,req,name=status"`
}
// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.
// One of onExitCodes and onPodConditions, but not both, can be used in each rule.
type PodFailurePolicyRule struct {
// Specifies the action taken on a pod failure when the requirements are satisfied.
// Possible values are:
//
// - FailJob: indicates that the pod's job is marked as Failed and all
// running pods are terminated.
// - FailIndex: indicates that the pod's index is marked as Failed and will
// not be restarted.
// This value is beta-level. It can be used when the
// `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the
// counter towards the .backoffLimit is incremented.
// Additional values may be added in the future. Clients should
// react to an unknown action by skipping the rule.
Action PodFailurePolicyAction `json:"action" protobuf:"bytes,1,req,name=action"`
// Represents the requirement on the container exit codes.
// +optional
OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"`
// Represents the requirement on the pod conditions. The requirement is represented
// as a list of pod condition patterns. The requirement is satisfied if at
// least one pattern matches an actual pod condition. At most 20 elements are allowed.
// +listType=atomic
// +optional
OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"`
}
// PodFailurePolicy describes how failed pods influence the backoffLimit.
type PodFailurePolicy struct {
// A list of pod failure policy rules. The rules are evaluated in order.
// Once a rule matches a Pod failure, the remaining rules are ignored.
// When no rule matches the Pod failure, the default handling applies - the
// counter of pod failures is incremented and it is checked against
// the backoffLimit. At most 20 elements are allowed.
// +listType=atomic
Rules []PodFailurePolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"`
}
// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.
type SuccessPolicy struct {
// rules represents the list of alternative rules for declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
// the "SucceededCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; Once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
// +listType=atomic
Rules []SuccessPolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"`
}
// SuccessPolicyRule describes rule for declaring a Job as succeeded.
// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified.
type SuccessPolicyRule struct {
// succeededIndexes specifies the set of indexes
// which need to be contained in the actual set of the succeeded indexes for the Job.
// The list of indexes must be within 0 to ".spec.completions-1" and
// must not contain duplicates. At least one element is required.
// The indexes are represented as intervals separated by commas.
// The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen.
// A pair represents an inclusive range, given by the first and last
// element of the series separated by a hyphen.
// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// When this field is null, this field doesn't default to any value
// and is never evaluated at any time.
//
// +optional
SucceededIndexes *string `json:"succeededIndexes,omitempty" protobuf:"bytes,1,opt,name=succeededIndexes"`
// succeededCount specifies the minimal required size of the actual set of the succeeded indexes
// for the Job. When succeededCount is used along with succeededIndexes, the check is
// constrained only to the set of indexes specified by succeededIndexes.
// For example, given that succeededIndexes is "1-4", succeededCount is "3",
// and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded
// because only the "1" and "3" indexes are considered by those rules.
// When this field is null, this doesn't default to any value and
// is never evaluated at any time.
// When specified it needs to be a positive integer.
//
// +optional
SucceededCount *int32 `json:"succeededCount,omitempty" protobuf:"varint,2,opt,name=succeededCount"`
}
// JobSpec describes how the job execution will look.
type JobSpec struct {
// Specifies the maximum desired number of pods the job should
// run at any given time. The actual number of pods running in steady state will
// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
// i.e. when the work left to do is less than max parallelism.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
// +optional
Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
// Specifies the desired number of successfully finished pods the
// job should be run with. Setting to null means that the success of any
// pod signals the success of all pods, and allows parallelism to have any positive
// value. Setting to 1 means that parallelism is limited to 1 and the success of that
// pod signals the success of the job.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
// +optional
Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
// Specifies the duration in seconds relative to the startTime that the job
// may be continuously active before the system tries to terminate it; value
// must be positive integer. If a Job is suspended (at creation or through an
// update), this timer will effectively be stopped and reset when the Job is
// resumed again.
// +optional
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
// Specifies the policy of handling failed pods. In particular, it allows
// specifying the set of actions and conditions which need to be
// satisfied to take the associated action.
// If empty, the default behaviour applies - the counter of failed pods,
// represented by the job's .status.failed field, is incremented and it is
// checked against the backoffLimit. This field cannot be used in combination
// with restartPolicy=OnFailure.
//
// +optional
PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"`
// successPolicy specifies the policy when the Job can be declared as succeeded.
// If empty, the default behavior applies - the Job is declared as succeeded
// only when the number of succeeded pods equals the completions.
// When the field is specified, it must be immutable and works only for the Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
// This field is beta-level. To use this field, you must enable the
// `JobSuccessPolicy` feature gate (enabled by default).
// +optional
SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
// Specifies the number of retries before marking this job failed.
// Defaults to 6
// +optional
BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"`
// Specifies the limit for the number of retries within an
// index before marking this index as failed. When enabled the number of
// failures per index is kept in the pod's
// batch.kubernetes.io/job-index-failure-count annotation. It can only
// be set when Job's completionMode=Indexed, and the Pod's restart
// policy is Never. The field is immutable.
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"`
// Specifies the maximal number of failed indexes before marking the Job as
// failed, when backoffLimitPerIndex is set. Once the number of failed
// indexes exceeds this number the entire Job is marked as Failed and its
// execution is terminated. When left as null the job continues execution of
// all of its indexes and is marked with the `Complete` Job condition.
// It can only be specified when backoffLimitPerIndex is set.
// It can be null or up to completions. It is required and must be
// less than or equal to 10^4 when completions is greater than 10^5.
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"`
// TODO: enable it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed
// Optional number of failed pods to retain.
// +optional
// FailedPodsLimit *int32 `json:"failedPodsLimit,omitempty" protobuf:"varint,9,opt,name=failedPodsLimit"`
// A label query over pods that should match the pod count.
// Normally, the system sets this field for you.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
// manualSelector controls generation of pod labels and pod selectors.
// Leave `manualSelector` unset unless you are certain what you are doing.
// When false or unset, the system picks labels unique to this job
// and appends those labels to the pod template. When true,
// the user is responsible for picking unique labels and specifying
// the selector. Failure to pick a unique label may cause this
// and other jobs to not function correctly. However, you may see
// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
// API.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
// +optional
ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"`
// Describes the pod that will be created when executing a job.
// The only allowed template.spec.restartPolicy values are "Never" or "OnFailure".
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
Template PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
// ttlSecondsAfterFinished limits the lifetime of a Job that has finished
// execution (either Complete or Failed). If this field is set,
// ttlSecondsAfterFinished after the Job finishes, it is eligible to be
// automatically deleted. When the Job is being deleted, its lifecycle
// guarantees (e.g. finalizers) will be honored. If this field is unset,
// the Job won't be automatically deleted. If this field is set to zero,
// the Job becomes eligible to be deleted immediately after it finishes.
// +optional
TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"`
// completionMode specifies how Pod completions are tracked. It can be
// `NonIndexed` (default) or `Indexed`.
//
// `NonIndexed` means that the Job is considered complete when there have
// been .spec.completions successfully completed Pods. Each Pod completion is
// homologous to each other.
//
// `Indexed` means that the Pods of a
// Job get an associated completion index from 0 to (.spec.completions - 1),
// available in the annotation batch.kubernetes.io/job-completion-index.
// The Job is considered complete when there is one successfully completed Pod
// for each index.
// When value is `Indexed`, .spec.completions must be specified and
// `.spec.parallelism` must be less than or equal to 10^5.
// In addition, the Pod name takes the form
// `$(job-name)-$(index)-$(random-string)`,
// the Pod hostname takes the form `$(job-name)-$(index)`.
//
// More completion modes can be added in the future.
// If the Job controller observes a mode that it doesn't recognize, which
// is possible during upgrades due to version skew, the controller
// skips updates for the Job.
// +optional
CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"`
// suspend specifies whether the Job controller should create Pods or not. If
// a Job is created with suspend set to true, no Pods are created by the Job
// controller. If a Job is suspended after creation (i.e. the flag goes from
// false to true), the Job controller will delete all active Pods associated
// with this Job. Users must design their workload to gracefully handle this.
// Suspending a Job will reset the StartTime field of the Job, effectively
// resetting the ActiveDeadlineSeconds timer too. Defaults to false.
//
// +optional
Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"`
// podReplacementPolicy specifies when to create replacement Pods.
// Possible values are:
// - TerminatingOrFailed means that we recreate pods
// when they are terminating (has a metadata.deletionTimestamp) or failed.
// - Failed means to wait until a previously created Pod is fully terminated (has phase
// Failed or Succeeded) before creating a replacement Pod.
//
// When using podFailurePolicy, Failed is the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
// This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
// This is on by default.
// +optional
PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"`
// ManagedBy field indicates the controller that manages a Job. The k8s Job
// controller reconciles jobs which don't have this field at all or the field
// value is the reserved string `kubernetes.io/job-controller`, but skips
// reconciling Jobs with a custom value for this field.
// The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
// all characters before the first "/" must be a valid subdomain as defined
// by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
// characters as defined by RFC 3986. The value cannot exceed 63 characters.
// This field is immutable.
//
// This field is alpha-level. The job controller accepts setting the field
// when the feature gate JobManagedBy is enabled (disabled by default).
// +optional
ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"`
}
// JobStatus represents the current state of a Job.
type JobStatus struct {
// The latest available observations of an object's current state. When a Job
// fails, one of the conditions will have type "Failed" and status true. When
// a Job is suspended, one of the conditions will have type "Suspended" and
// status true; when the Job is resumed, the status of this condition will
// become false. When a Job is completed, one of the conditions will have
// type "Complete" and status true.
//
// A job is considered finished when it is in a terminal condition, either
// "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions.
// Additionally, it cannot be in the "Complete" and "FailureTarget" conditions.
// The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled.
//
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=atomic
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
// Represents time when the job controller started processing a job. When a
// Job is created in the suspended state, this field is not set until the
// first time it is resumed. This field is reset every time a Job is resumed
// from suspension. It is represented in RFC3339 form and is in UTC.
//
// Once set, the field can only be removed when the job is suspended.
// The field cannot be modified while the job is unsuspended or finished.
//
// +optional
StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
// Represents time when the job was completed. It is not guaranteed to
// be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
// The completion time is set when the job finishes successfully, and only then.
// The value cannot be updated or removed. The value indicates the same or
// later point in time as the startTime field.
// +optional
CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
// The number of pending and running pods which are not terminating (without
// a deletionTimestamp).
// The value is zero for finished jobs.
// +optional
Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
// The number of pods which reached phase Succeeded.
// The value increases monotonically for a given spec. However, it may
// decrease in reaction to scale down of elastic indexed jobs.
// +optional
Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
// The number of pods which reached phase Failed.
// The value increases monotonically.
// +optional
Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
// The number of pods which are terminating (in phase Pending or Running
// and have a deletionTimestamp).
//
// This field is beta-level. The job controller populates the field when
// the feature gate JobPodReplacementPolicy is enabled (enabled by default).
// +optional
Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"`
// completedIndexes holds the completed indexes when .spec.completionMode =
// "Indexed" in a text format. The indexes are represented as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
// more consecutive numbers are compressed and represented by the first and
// last element of the series, separated by a hyphen.
// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// +optional
CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"`
// FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set.
// The indexes are represented in the text format analogous as for the
// `completedIndexes` field, i.e. they are kept as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
// more consecutive numbers are compressed and represented by the first and
// last element of the series, separated by a hyphen.
// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// The set of failed indexes cannot overlap with the set of completed indexes.
//
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"`
// uncountedTerminatedPods holds the UIDs of Pods that have terminated but
// the job controller hasn't yet accounted for in the status counters.
//
// The job controller creates pods with a finalizer. When a pod terminates
// (succeeded or failed), the controller does three steps to account for it
// in the job status:
//
// 1. Add the pod UID to the arrays in this field.
// 2. Remove the pod finalizer.
// 3. Remove the pod UID from the arrays while increasing the corresponding
// counter.
//
// Old jobs might not be tracked using this field, in which case the field
// remains null.
// The structure is empty for finished jobs.
// +optional
UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"`
// The number of active pods which have a Ready condition and are not
// terminating (without a deletionTimestamp).
Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"`
}
// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
// been accounted in Job status counters.
type UncountedTerminatedPods struct {
// succeeded holds UIDs of succeeded Pods.
// +listType=set
// +optional
Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"`
// failed holds UIDs of failed Pods.
// +listType=set
// +optional
Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
type JobConditionType string
// These are built-in conditions of a job.
const (
// JobSuspended means the job has been suspended.
JobSuspended JobConditionType = "Suspended"
// JobComplete means the job has completed its execution.
JobComplete JobConditionType = "Complete"
// JobFailed means the job has failed its execution.
JobFailed JobConditionType = "Failed"
// FailureTarget means the job is about to fail its execution.
JobFailureTarget JobConditionType = "FailureTarget"
// JobSuccessCriteriaMet means the Job has succeeded.
JobSuccessCriteriaMet JobConditionType = "SuccessCriteriaMet"
)
const (
// JobReasonPodFailurePolicy reason indicates a job failure condition is added due to
// a failed pod matching a pod failure policy rule
// https://kep.k8s.io/3329
JobReasonPodFailurePolicy string = "PodFailurePolicy"
// JobReasonBackoffLimitExceeded reason indicates that pods within a job have
// failed more than backoffLimit times.
JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded"
// JobReasonDeadlineExceeded means job duration is past ActiveDeadline
JobReasonDeadlineExceeded string = "DeadlineExceeded"
// JobReasonMaxFailedIndexesExceeded indicates that an index of a job failed.
// This const is used in beta-level feature: https://kep.k8s.io/3850.
JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded"
// JobReasonFailedIndexes means Job has failed indexes.
// This const is used in beta-level feature: https://kep.k8s.io/3850.
JobReasonFailedIndexes string = "FailedIndexes"
// JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to
// a Job met successPolicy.
// https://kep.k8s.io/3998
// This is currently a beta field.
JobReasonSuccessPolicy string = "SuccessPolicy"
// JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to
// a number of succeeded Job pods met completions.
// - https://kep.k8s.io/3998
// This is currently a beta field.
JobReasonCompletionsReached string = "CompletionsReached"
)
// JobCondition describes current state of a job.
type JobCondition struct {
// Type of job condition, Complete or Failed.
Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
// Last time the condition was checked.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// JobTemplateSpec describes the data a Job should have when created from a template
type JobTemplateSpec struct {
// Standard object's metadata of the jobs created from this template.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the job.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

View File

@ -35,6 +35,12 @@ like "$output" ".*kind:\\sDeployment.*" "Check generated kube yaml - kind: Deplo
like "$output" ".*metadata:.*" "Check generated kube yaml - metadata"
like "$output" ".*spec:.*" "Check generated kube yaml - spec"
t GET "libpod/generate/kube?type=job&names=$cid" 200
like "$output" ".*apiVersion:.*" "Check generated kube yaml - apiVersion"
like "$output" ".*kind:\\sJob.*" "Check generated kube yaml - kind: Job"
like "$output" ".*metadata:.*" "Check generated kube yaml - metadata"
like "$output" ".*spec:.*" "Check generated kube yaml - spec"
TMPD=$(mktemp -d podman-apiv2-test-kube.XXXXXX)
YAML="${TMPD}/kube.yaml"
echo "$output" > $YAML

View File

@ -1541,6 +1541,54 @@ USER test1`
Expect(kube).Should(ExitWithError(125, "k8s Deployments can only have restartPolicy set to Always"))
})
It("on pod with --type=job", func() {
podName := "test-pod"
session := podmanTest.Podman([]string{"pod", "create", podName})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
session = podmanTest.Podman([]string{"create", "--pod", podName, CITEST_IMAGE, "top"})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
session = podmanTest.Podman([]string{"create", "--pod", podName, CITEST_IMAGE, "sleep", "100"})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
kube := podmanTest.Podman([]string{"kube", "generate", "--type", "job", podName})
kube.WaitWithDefaultTimeout()
Expect(kube).Should(ExitCleanly())
job := new(v1.Job)
err := yaml.Unmarshal(kube.Out.Contents(), job)
Expect(err).ToNot(HaveOccurred())
Expect(job.Name).To(Equal(podName + "-job"))
Expect(job.Spec.Template.Name).To(Equal(podName))
var intone int32 = 1
Expect(job.Spec.Parallelism).To(Equal(&intone))
Expect(job.Spec.Completions).To(Equal(&intone))
Expect(job.Spec.Template.Spec.Containers).To(HaveLen(2))
})
It("on pod with --type=job and --restart=always should fail", func() {
podName := "test-pod"
session := podmanTest.Podman([]string{"pod", "create", "--restart", "always", podName})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
session = podmanTest.Podman([]string{"create", "--pod", podName, CITEST_IMAGE, "top"})
session.WaitWithDefaultTimeout()
Expect(session).Should(ExitCleanly())
kube := podmanTest.Podman([]string{"kube", "generate", "--type", "job", podName})
kube.WaitWithDefaultTimeout()
Expect(kube).Should(ExitWithError(125, "k8s Jobs can not have restartPolicy set to Always; only Never and OnFailure policies allowed"))
})
It("on pod with invalid name", func() {
podName := "test_pod"
session := podmanTest.Podman([]string{"pod", "create", podName})

View File

@ -1103,6 +1103,157 @@ spec:
{{ end }}
`
var jobYamlTemplate = `
apiVersion: batch/v1
kind: Job
metadata:
creationTimestamp: "2019-07-17T14:44:08Z"
name: {{ .Name }}
labels:
app: {{ .Name }}
{{ with .Labels }}
{{ range $key, $value := . }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
{{ with .Annotations }}
annotations:
{{ range $key, $value := . }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
spec:
template:
{{ with .PodTemplate }}
metadata:
labels:
app: {{ .Name }}
{{- with .Labels }}{{ range $key, $value := . }}
{{ $key }}: {{ $value }}
{{- end }}{{ end }}
{{- with .Annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
spec:
restartPolicy: {{ .RestartPolicy }}
hostname: {{ .Hostname }}
hostNetwork: {{ .HostNetwork }}
containers:
{{ with .Ctrs }}
{{ range . }}
- command:
{{ range .Cmd }}
- {{.}}
{{ end }}
args:
{{ range .Arg }}
- {{.}}
{{ end }}
env:
- name: HOSTNAME
{{ range .Env }}
- name: {{ .Name }}
{{ if (eq .ValueFrom "configmap") }}
valueFrom:
configMapKeyRef:
name: {{ .RefName }}
key: {{ .RefKey }}
optional: {{ .Optional }}
{{ end }}
{{ if (eq .ValueFrom "secret") }}
valueFrom:
secretKeyRef:
name: {{ .RefName }}
key: {{ .RefKey }}
optional: {{ .Optional }}
{{ end }}
{{ if (eq .ValueFrom "") }}
value: {{ .Value }}
{{ end }}
{{ end }}
{{ with .EnvFrom}}
envFrom:
{{ range . }}
{{ if (eq .From "configmap") }}
- configMapRef:
name: {{ .Name }}
optional: {{ .Optional }}
{{ end }}
{{ if (eq .From "secret") }}
- secretRef:
name: {{ .Name }}
optional: {{ .Optional }}
{{ end }}
{{ end }}
{{ end }}
image: {{ .Image }}
name: {{ .Name }}
imagePullPolicy: {{ .PullPolicy }}
{{- if or .CPURequest .CPULimit .MemoryRequest .MemoryLimit }}
resources:
{{- if or .CPURequest .MemoryRequest }}
requests:
{{if .CPURequest }}cpu: {{ .CPURequest }}{{ end }}
{{if .MemoryRequest }}memory: {{ .MemoryRequest }}{{ end }}
{{- end }}
{{- if or .CPULimit .MemoryLimit }}
limits:
{{if .CPULimit }}cpu: {{ .CPULimit }}{{ end }}
{{if .MemoryLimit }}memory: {{ .MemoryLimit }}{{ end }}
{{- end }}
{{- end }}
{{ if .SecurityContext }}
securityContext:
allowPrivilegeEscalation: true
{{ if .Caps }}
capabilities:
{{ with .CapAdd }}
add:
{{ range . }}
- {{.}}
{{ end }}
{{ end }}
{{ with .CapDrop }}
drop:
{{ range . }}
- {{.}}
{{ end }}
{{ end }}
{{ end }}
privileged: false
readOnlyRootFilesystem: false
workingDir: /
volumeMounts:
{{ if .VolumeMount }}
- name: {{.VolumeName}}
mountPath: {{ .VolumeMountPath }}
readonly: {{.VolumeReadOnly}}
{{ end }}
{{ end }}
{{ end }}
{{ end }}
{{ with .Volumes }}
volumes:
{{ range . }}
- name: {{ .Name }}
{{- if (eq .VolumeType "HostPath") }}
hostPath:
path: {{ .HostPath.Path }}
type: {{ .HostPath.Type }}
{{- end }}
{{- if (eq .VolumeType "PersistentVolumeClaim") }}
persistentVolumeClaim:
claimName: {{ .PersistentVolumeClaim.ClaimName }}
{{- end }}
{{ end }}
{{ end }}
{{ end }}
`
var publishPortsPodWithoutPorts = `
apiVersion: v1
kind: Pod
@ -1302,6 +1453,7 @@ var (
defaultVolName = "testVol"
defaultDaemonSetName = "testDaemonSet"
defaultDeploymentName = "testDeployment"
defaultJobName = "testJob"
defaultConfigMapName = "testConfigMap"
defaultSecretName = "testSecret"
defaultPVCName = "testPVC"
@ -1326,6 +1478,8 @@ func getKubeYaml(kind string, object interface{}) (string, error) {
yamlTemplate = daemonSetYamlTemplate
case "deployment":
yamlTemplate = deploymentYamlTemplate
case "job":
yamlTemplate = jobYamlTemplate
case "persistentVolumeClaim":
yamlTemplate = persistentVolumeClaimYamlTemplate
case "secret":
@ -1614,7 +1768,7 @@ func withHostUsers(val bool) podOption {
}
}
// DaemonSet describes the options a kube yaml can be configured at daemonset level
type DaemonSet struct {
Name string
Labels map[string]string
@ -1701,6 +1855,39 @@ func getPodNameInDeployment(d *Deployment) Pod {
return p
}
type Job struct {
Name string
Labels map[string]string
Annotations map[string]string
PodTemplate *Pod
}
func getJob(options ...jobOption) *Job {
j := Job{
Name: defaultJobName,
Labels: make(map[string]string),
Annotations: make(map[string]string),
PodTemplate: getPod(),
}
for _, option := range options {
option(&j)
}
return &j
}
type jobOption func(*Job)
// getPodNameInJob returns the Pod object
// with just its name set, so that it can be passed around
// and into getCtrNameInPod for ease of testing
func getPodNameInJob(j *Job) Pod {
p := Pod{}
p.Name = fmt.Sprintf("%s-pod", j.Name)
return p
}
// Ctr describes the options a kube yaml can be configured at container level
type Ctr struct {
Name string
@ -3360,6 +3547,23 @@ spec:
})
It("job sanity", func() {
job := getJob()
err := generateKubeYaml("job", job, kubeYaml)
Expect(err).ToNot(HaveOccurred())
kube := podmanTest.Podman([]string{"kube", "play", kubeYaml})
kube.WaitWithDefaultTimeout()
Expect(kube).Should(ExitCleanly())
podName := getPodNameInJob(job)
inspect := podmanTest.Podman([]string{"inspect", getCtrNameInPod(&podName), "--format", "'{{ .Config.Entrypoint }}'"})
inspect.WaitWithDefaultTimeout()
Expect(inspect).Should(ExitCleanly())
// yaml's command should override the image's Entrypoint
Expect(inspect.OutputToString()).To(ContainSubstring(strings.Join(defaultCtrCmd, " ")))
})
It("--ip and --mac-address", func() {
var i, numReplicas int32
numReplicas = 3

View File

@ -192,4 +192,45 @@ metadata.name | = | ${pname}-deployment
run_podman rmi $(pause_image)
}
@test "podman kube generate - job" {
skip_if_remote "containersconf needs to be set on server side"
local pname=p$(random_string 15)
local cname1=c1$(random_string 15)
local cname2=c2$(random_string 15)
run_podman pod create --name $pname
run_podman container create --name $cname1 --pod $pname $IMAGE top
run_podman container create --name $cname2 --pod $pname $IMAGE bottom
containersconf=$PODMAN_TMPDIR/containers.conf
cat >$containersconf <<EOF
[engine]
kube_generate_type="job"
EOF
CONTAINERS_CONF_OVERRIDE=$containersconf run_podman kube generate $pname
json=$(yaml2json <<<"$output")
# For debugging in case we regress: dump the generated output so we can see what went wrong
jq . <<<"$json"
# See container test above for description of this table
expect="
apiVersion | = | batch/v1
kind | = | Job
metadata.creationTimestamp | =~ | [0-9T:-]\\+Z
metadata.labels.app | = | ${pname}
metadata.name | = | ${pname}-job
"
while read key op expect; do
actual=$(jq -r -c ".$key" <<<"$json")
assert "$actual" $op "$expect" ".$key"
done < <(parse_table "$expect")
run_podman rm $cname1 $cname2
run_podman pod rm $pname
run_podman rmi $(pause_image)
}
# vim: filetype=sh