kube: add DaemonSet support for generate
Signed-off-by: danishprakash <danish.prakash@suse.com>
@@ -46,7 +46,7 @@ var (
 	playOptions     = playKubeOptionsWrapper{}
 	playDescription = `Reads in a structured file of Kubernetes YAML.

-	Creates pods or volumes based on the Kubernetes kind described in the YAML. Supported kinds are Pods, Deployments and PersistentVolumeClaims.`
+	Creates pods or volumes based on the Kubernetes kind described in the YAML. Supported kinds are Pods, Deployments, DaemonSets and PersistentVolumeClaims.`

 	playCmd = &cobra.Command{
 		Use: "play [options] KUBEFILE|-",
@@ -173,3 +173,15 @@ Note: **N/A** means that the option cannot be supported in a single-node Podman
 | revisionHistoryLimit | no |
 | progressDeadlineSeconds | no |
 | paused | no |
+
+## DaemonSet Fields
+
+| Field                                    | Support                                                 |
+|------------------------------------------|---------------------------------------------------------|
+| selector                                 | ✅                                                      |
+| template                                 | ✅                                                      |
+| minReadySeconds                          | no                                                      |
+| strategy\.type                           | no                                                      |
+| strategy\.rollingUpdate\.maxSurge        | no                                                      |
+| strategy\.rollingUpdate\.maxUnavailable  | no                                                      |
+| revisionHistoryLimit                     | no                                                      |
@@ -30,6 +30,8 @@ Note that the generated Kubernetes YAML file can be used to re-run the deployment
 
 Note that if the pod being generated was created with the **--infra-name** flag set, then the generated kube yaml will have the **io.podman.annotations.infra.name** set where the value is the name of the infra container set by the user.
 
+Also note that both Deployment and DaemonSet can only have `restartPolicy` set to `Always`.
+
 ## OPTIONS
 
 #### **--filename**, **-f**=*filename*
@@ -54,9 +56,9 @@ Note: this can only be set with the option `--type=deployment`.
 
 Generate a Kubernetes service object in addition to the Pods. Used to generate a Service specification for the corresponding Pod output. In particular, if the object has portmap bindings, the service specification includes a NodePort declaration to expose the service. A random port is assigned by Podman in the specification.
 
-#### **--type**, **-t**=*pod | deployment*
+#### **--type**, **-t**=*pod* | *deployment* | *daemonset*
 
-The Kubernetes kind to generate in the YAML file. Currently, the only supported Kubernetes specifications are `Pod` and `Deployment`. By default, the `Pod` specification is generated.
+The Kubernetes kind to generate in the YAML file. Currently, the only supported Kubernetes specifications are `Pod`, `Deployment` and `DaemonSet`. By default, the `Pod` specification is generated.
 
 ## EXAMPLES
 
@@ -23,6 +23,7 @@ Currently, the supported Kubernetes kinds are:
 - PersistentVolumeClaim
 - ConfigMap
 - Secret
+- DaemonSet
 
 `Kubernetes Pods or Deployments`
 
@@ -45,4 +45,6 @@ const (
 	K8sKindPod = "pod"
 	// A Deployment kube yaml spec
 	K8sKindDeployment = "deployment"
+	// A DaemonSet kube yaml spec
+	K8sKindDaemonSet = "daemonset"
 )
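These constants are what the user-supplied `--type` value ends up matching against in the generation switch further down. A minimal, hypothetical sketch of that dispatch (the constant names and error wording are taken from this commit; the `normalizeKind` helper and the lower-casing step are assumptions for illustration, not Podman's actual option parsing):

```go
package main

import (
	"fmt"
	"strings"
)

// Kind constants mirroring the define package in the hunk above.
const (
	K8sKindPod        = "pod"
	K8sKindDeployment = "deployment"
	K8sKindDaemonSet  = "daemonset"
)

// normalizeKind is a hypothetical helper: it lower-cases the --type value
// and rejects anything that is not one of the supported kinds.
func normalizeKind(t string) (string, error) {
	switch strings.ToLower(t) {
	case K8sKindPod, K8sKindDeployment, K8sKindDaemonSet:
		return strings.ToLower(t), nil
	default:
		return "", fmt.Errorf("invalid generation type - only pods, deployments and daemonsets are currently supported: %+v", t)
	}
}

func main() {
	k, err := normalizeKind("DaemonSet")
	fmt.Println(k, err) // daemonset <nil>
}
```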
@@ -119,6 +119,61 @@ func (p *Pod) getInfraContainer() (*Container, error) {
 	return p.runtime.GetContainer(infraID)
 }
 
+func GenerateForKubeDaemonSet(ctx context.Context, pod *YAMLPod, options entities.GenerateKubeOptions) (*YAMLDaemonSet, error) {
+	// Restart policy for DaemonSets can only be set to Always
+	if !(pod.Spec.RestartPolicy == "" || pod.Spec.RestartPolicy == v1.RestartPolicyAlways) {
+		return nil, fmt.Errorf("k8s DaemonSets can only have restartPolicy set to Always")
+	}
+
+	// Error out if the user tries to set replica count
+	if options.Replicas > 1 {
+		return nil, fmt.Errorf("k8s DaemonSets don't allow setting replicas")
+	}
+
+	// Create label map that will be added to podSpec and DaemonSet metadata
+	// The matching label lets the daemonset know which pod to manage
+	appKey := "app"
+	matchLabels := map[string]string{appKey: pod.Name}
+	// Add the key:value (app:pod-name) to the podSpec labels
+	if pod.Labels == nil {
+		pod.Labels = matchLabels
+	} else {
+		pod.Labels[appKey] = pod.Name
+	}
+
+	depSpec := YAMLDaemonSetSpec{
+		DaemonSetSpec: v1.DaemonSetSpec{
+			Selector: &v12.LabelSelector{
+				MatchLabels: matchLabels,
+			},
+		},
+		Template: &YAMLPodTemplateSpec{
+			PodTemplateSpec: v1.PodTemplateSpec{
+				ObjectMeta: pod.ObjectMeta,
+			},
+			Spec: pod.Spec,
+		},
+	}
+
+	// Create the DaemonSet object
+	dep := YAMLDaemonSet{
+		DaemonSet: v1.DaemonSet{
+			ObjectMeta: v12.ObjectMeta{
+				Name:              pod.Name + "-daemonset",
+				CreationTimestamp: pod.CreationTimestamp,
+				Labels:            pod.Labels,
+			},
+			TypeMeta: v12.TypeMeta{
+				Kind:       "DaemonSet",
+				APIVersion: "apps/v1",
+			},
+		},
+		Spec: &depSpec,
+	}
+
+	return &dep, nil
+}
+
 // GenerateForKubeDeployment returns a YAMLDeployment from a YAMLPod that is then used to create a kubernetes Deployment
 // kind YAML.
 func GenerateForKubeDeployment(ctx context.Context, pod *YAMLPod, options entities.GenerateKubeOptions) (*YAMLDeployment, error) {
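For orientation, the object this helper builds marshals to a DaemonSet whose `metadata.name` is `<pod name>-daemonset` and whose selector and pod-template labels share the injected `app: <pod name>` key. The sketch below reproduces that shape with the upstream `k8s.io/api` and `sigs.k8s.io/yaml` modules rather than Podman's vendored copies and its internal YAML wrappers; the image and command are placeholder assumptions, not values taken from the commit.

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	podName := "test-pod"
	labels := map[string]string{"app": podName} // same key GenerateForKubeDaemonSet injects

	ds := appsv1.DaemonSet{
		TypeMeta:   metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"},
		ObjectMeta: metav1.ObjectMeta{Name: podName + "-daemonset", Labels: labels},
		Spec: appsv1.DaemonSetSpec{
			// Selector must match the labels on the pod template below.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Name: podName, Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{Name: "top", Image: "docker.io/library/alpine", Command: []string{"top"}},
					},
				},
			},
		},
	}

	out, err := yaml.Marshal(&ds)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```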
@@ -262,6 +317,28 @@ type YAMLDeploymentSpec struct {
 	Strategy *v1.DeploymentStrategy `json:"strategy,omitempty"`
 }
 
+// YAMLDaemonSetSpec represents the same k8s API core DeploymentSpec with a small
+// change and that is having Template as a pointer to YAMLPodTemplateSpec and Strategy
+// as a pointer to k8s API core DaemonSetStrategy.
+// Because Go doesn't omit empty struct and we want to omit Strategy and any fields in the Pod YAML
+// if it's empty.
+type YAMLDaemonSetSpec struct {
+	v1.DaemonSetSpec
+	Template *YAMLPodTemplateSpec        `json:"template,omitempty"`
+	Strategy *v1.DaemonSetUpdateStrategy `json:"strategy,omitempty"`
+}
+
+// YAMLDaemonSet represents the same k8s API core DaemonSet with a small change
+// and that is having Spec as a pointer to YAMLDaemonSetSpec and Status as a pointer to
+// k8s API core DaemonSetStatus.
+// Because Go doesn't omit empty struct and we want to omit Status and any fields in the DaemonSetSpec
+// if it's empty.
+type YAMLDaemonSet struct {
+	v1.DaemonSet
+	Spec   *YAMLDaemonSetSpec  `json:"spec,omitempty"`
+	Status *v1.DaemonSetStatus `json:"status,omitempty"`
+}
+
 // YAMLDeployment represents the same k8s API core Deployment with a small change
 // and that is having Spec as a pointer to YAMLDeploymentSpec and Status as a pointer to
 // k8s API core DeploymentStatus.
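The doc comments above explain why these wrappers re-declare Spec, Status, Template and Strategy as pointers: Go's `omitempty` never drops a zero-valued struct, only nil pointers (and empty maps, slices, strings, and zero numbers), so without the pointer shadowing an empty `status:` or `strategy:` stanza would leak into the generated YAML. A minimal sketch of the difference, using `encoding/json` for brevity (the json tags shown are the same ones the kube YAML output relies on):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type status struct {
	Ready int32 `json:"ready,omitempty"`
}

// Field held by value: omitempty cannot drop a zero struct.
type byValue struct {
	Name   string `json:"name"`
	Status status `json:"status,omitempty"`
}

// Field shadowed by a pointer: a nil pointer is dropped entirely.
type byPointer struct {
	Name   string  `json:"name"`
	Status *status `json:"status,omitempty"`
}

func main() {
	v, _ := json.Marshal(byValue{Name: "demo"})
	p, _ := json.Marshal(byPointer{Name: "demo"})
	fmt.Println(string(v)) // {"name":"demo","status":{}}  -- empty stanza survives
	fmt.Println(string(p)) // {"name":"demo"}              -- stanza omitted
}
```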
@@ -232,6 +232,16 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string,
 			return nil, err
 		}
 		typeContent = append(typeContent, b)
+	case define.K8sKindDaemonSet:
+		dep, err := libpod.GenerateForKubeDaemonSet(ctx, libpod.ConvertV1PodToYAMLPod(po), options)
+		if err != nil {
+			return nil, err
+		}
+		b, err := generateKubeYAML(dep)
+		if err != nil {
+			return nil, err
+		}
+		typeContent = append(typeContent, b)
 	case define.K8sKindPod:
 		b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po))
 		if err != nil {
@@ -239,7 +249,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string,
 		}
 		typeContent = append(typeContent, b)
 	default:
-		return nil, fmt.Errorf("invalid generation type - only pods and deployments are currently supported")
+		return nil, fmt.Errorf("invalid generation type - only pods, deployments and daemonsets are currently supported: %+v", options.Type)
 	}
 
 	if options.Service {
@@ -289,6 +299,16 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener
 			return nil, nil, err
 		}
 		out = append(out, b)
+	case define.K8sKindDaemonSet:
+		dep, err := libpod.GenerateForKubeDaemonSet(ctx, libpod.ConvertV1PodToYAMLPod(po), options)
+		if err != nil {
+			return nil, nil, err
+		}
+		b, err := generateKubeYAML(dep)
+		if err != nil {
+			return nil, nil, err
+		}
+		out = append(out, b)
 	case define.K8sKindPod:
 		b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po))
 		if err != nil {
@@ -296,7 +316,7 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener
 		}
 		out = append(out, b)
 	default:
-		return nil, nil, fmt.Errorf("invalid generation type - only pods and deployments are currently supported")
+		return nil, nil, fmt.Errorf("invalid generation type - only pods, deployments and daemonsets are currently supported")
 	}
 
 	if options.Service {
@@ -4955,3 +4955,224 @@ type DeploymentList struct {
 	// Items is the list of Deployments.
 	Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
+
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
+type DaemonSetUpdateStrategy struct {
+	// Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+	// +optional
+	Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"`
+
+	// Rolling update config params. Present only if type = "RollingUpdate".
+	//---
+	// TODO: Update this to follow our convention for oneOf, whatever we decide it
+	// to be. Same as Deployment `strategy.rollingUpdate`.
+	// See https://github.com/kubernetes/kubernetes/issues/35345
+	// +optional
+	RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+	// Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other.
+	RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+	// Replace the old daemons only when it's killed
+	OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+	// The maximum number of DaemonSet pods that can be unavailable during the
+	// update. Value can be an absolute number (ex: 5) or a percentage of total
+	// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+	// number is calculated from percentage by rounding up.
+	// This cannot be 0 if MaxSurge is 0
+	// Default value is 1.
+	// Example: when this is set to 30%, at most 30% of the total number of nodes
+	// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+	// can have their pods stopped for an update at any given time. The update
+	// starts by stopping at most 30% of those DaemonSet pods and then brings
+	// up new DaemonSet pods in their place. Once the new pods are available,
+	// it then proceeds onto other DaemonSet pods, thus ensuring that at least
+	// 70% of original number of DaemonSet pods are available at all times during
+	// the update.
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+	// The maximum number of nodes with an existing available DaemonSet pod that
+	// can have an updated DaemonSet pod during during an update.
+	// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+	// This can not be 0 if MaxUnavailable is 0.
+	// Absolute number is calculated from percentage by rounding up to a minimum of 1.
+	// Default value is 0.
+	// Example: when this is set to 30%, at most 30% of the total number of nodes
+	// that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+	// can have their a new pod created before the old pod is marked as deleted.
+	// The update starts by launching new pods on 30% of nodes. Once an updated
+	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+	// on that node is marked deleted. If the old pod becomes unavailable for any
+	// reason (Ready transitions to false, is evicted, or is drained) an updated
+	// pod is immediatedly created on that node without considering surge limits.
+	// Allowing surge implies the possibility that the resources consumed by the
+	// daemonset on any given node can double if the readiness check fails, and
+	// so resource intensive daemonsets should take into account that they may
+	// cause evictions during disruption.
+	// This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.
+	// +optional
+	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+	// A label query over pods that are managed by the daemon set.
+	// Must match in order to be controlled.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+
+	// An object that describes the pod that will be created.
+	// The DaemonSet will create exactly one copy of this pod on every node
+	// that matches the template's node selector (or on every node if no node
+	// selector is specified).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	Template PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+	// An update strategy to replace existing DaemonSet pods with new pods.
+	// +optional
+	UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
+
+	// The minimum number of seconds for which a newly created DaemonSet pod should
+	// be ready without any of its container crashing, for it to be considered
+	// available. Defaults to 0 (pod will be considered available as soon as it
+	// is ready).
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// The number of old history to retain to allow rollback.
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// Defaults to 10.
+	// +optional
+	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+	// The number of nodes that are running at least 1
+	// daemon pod and are supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+	// The number of nodes that are running the daemon pod, but are
+	// not supposed to run the daemon pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+	// The total number of nodes that should be running the daemon
+	// pod (including nodes correctly running the daemon pod).
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+	DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+	// The number of nodes that should be running the daemon pod and have one
+	// or more of the daemon pod running and ready.
+	NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
+
+	// The most recent generation observed by the daemon set controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
+
+	// The total number of nodes that are running updated daemon pod
+	// +optional
+	UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have one or more of the daemon pod running and
+	// available (ready for at least spec.minReadySeconds)
+	// +optional
+	NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
+
+	// The number of nodes that should be running the
+	// daemon pod and have none of the daemon pod running and available
+	// (ready for at least spec.minReadySeconds)
+	// +optional
+	NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
+
+	// Count of hash collisions for the DaemonSet. The DaemonSet controller
+	// uses this field as a collision avoidance mechanism when it needs to
+	// create the name for the newest ControllerRevision.
+	// +optional
+	CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+	// Represents the latest available observations of a DaemonSet's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+	// Type of DaemonSet condition.
+	Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The desired behavior of this daemon set.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// The current status of this daemon set. This data may be
+	// out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+	// DefaultDaemonSetUniqueLabelKey is the default label key that is added
+	// to existing DaemonSet pods to distinguish between old and new
+	// DaemonSet pods during DaemonSet template updates.
+	DefaultDaemonSetUniqueLabelKey = "pod-template-has"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// A list of daemon sets.
+	Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
@@ -1902,4 +1902,58 @@ EXPOSE 2004-2005/tcp`, ALPINE)
 		Expect(err).ToNot(HaveOccurred())
 		Expect(int(*pod.Spec.TerminationGracePeriodSeconds)).To(Equal(20))
 	})
+
+	It("podman generate kube on pod with --type=daemonset", func() {
+		podName := "test-pod"
+		session := podmanTest.Podman([]string{"pod", "create", podName})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+		session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "sleep", "100"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		kube := podmanTest.Podman([]string{"generate", "kube", "--type", "daemonset", podName})
+		kube.WaitWithDefaultTimeout()
+		Expect(kube).Should(Exit(0))
+
+		dep := new(v1.DaemonSet)
+		err := yaml.Unmarshal(kube.Out.Contents(), dep)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(dep.Name).To(Equal(podName + "-daemonset"))
+		Expect(dep.Spec.Selector.MatchLabels).To(HaveKeyWithValue("app", podName))
+		Expect(dep.Spec.Template.Name).To(Equal(podName))
+		Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(2))
+	})
+
+	It("podman generate kube on ctr with --type=daemonset and --replicas=3 should fail", func() {
+		ctrName := "test-ctr"
+		session := podmanTest.Podman([]string{"create", "--name", ctrName, ALPINE, "top"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		kube := podmanTest.Podman([]string{"generate", "kube", "--type", "daemonset", "--replicas", "3", ctrName})
+		kube.WaitWithDefaultTimeout()
+		Expect(kube).Should(Exit(125))
+		Expect(kube.ErrorToString()).To(ContainSubstring("--replicas can only be set when --type is set to deployment"))
+	})
+
+	It("podman generate kube on pod with --type=daemonset and --restart=no should fail", func() {
+		podName := "test-pod"
+		session := podmanTest.Podman([]string{"pod", "create", "--restart", "no", podName})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		session = podmanTest.Podman([]string{"create", "--pod", podName, ALPINE, "top"})
+		session.WaitWithDefaultTimeout()
+		Expect(session).Should(Exit(0))
+
+		kube := podmanTest.Podman([]string{"generate", "kube", "--type", "daemonset", podName})
+		kube.WaitWithDefaultTimeout()
+		Expect(kube).Should(Exit(125))
+		Expect(kube.ErrorToString()).To(ContainSubstring("k8s DaemonSets can only have restartPolicy set to Always"))
+	})
 })
@@ -203,3 +203,44 @@ load helpers.bash
 	run minikube kubectl delete namespace $project
 	assert $status -eq 0 "delete namespace $project"
 }
+
+@test "minikube - deploy generated container yaml to minikube --type=daemonset" {
+	cname="test-ctr"
+	fname="/tmp/minikube_deploy_$(random_string 6).yaml"
+	run_podman container create --name $cname $IMAGE top
+	run_podman kube generate --type daemonset -f $fname $cname
+
+	# deploy to the minikube cluster
+	project="dep-ctr-ns"
+	run minikube kubectl create namespace $project
+	assert "$status" -eq 0 "create new namespace $project"
+	run minikube kubectl -- apply -f $fname
+	assert "$status" -eq 0 "deploy $fname to the cluster"
+	assert "$output" == "daemonset.apps/$cname-pod-daemonset created"
+	wait_for_pods_to_start
+	run minikube kubectl delete namespace $project
+	assert $status -eq 0 "delete namespace $project"
+}
+
+@test "minikube - deploy generated pod yaml to minikube --type=daemonset" {
+	pname="test-pod"
+	cname1="test-ctr1"
+	cname2="test-ctr2"
+	fname="/tmp/minikube_deploy_$(random_string 6).yaml"
+
+	run_podman pod create --name $pname --publish 9999:8888
+	run_podman container create --name $cname1 --pod $pname $IMAGE sleep 1000
+	run_podman container create --name $cname2 --pod $pname $IMAGE sleep 2000
+	run_podman kube generate --type daemonset -f $fname $pname
+
+	# deploy to the minikube cluster
+	project="dep-pod-ns"
+	run minikube kubectl create namespace $project
+	assert "$status" -eq 0 "create new namespace $project"
+	run minikube kubectl -- apply -f $fname
+	assert "$status" -eq 0 "deploy $fname to the cluster"
+	assert "$output" == "daemonset.apps/$pname-daemonset created"
+	wait_for_pods_to_start
+	run minikube kubectl delete namespace $project
+	assert $status -eq 0 "delete namespace $project"
+}