mirror of
https://github.com/containers/podman.git
synced 2025-06-25 20:26:51 +08:00
Merge pull request #16794 from karta0807913/main
podman play kube support container startup probe
This commit is contained in:
@ -220,6 +220,10 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure livenessProbe: %w", err)
|
||||
}
|
||||
err = setupStartupProbe(s, opts.Container, opts.RestartPolicy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure startupProbe: %w", err)
|
||||
}
|
||||
|
||||
// Since we prefix the container name with pod name to work-around the uniqueness requirement,
|
||||
// the seccomp profile should reference the actual container name from the YAML
|
||||
@ -513,6 +517,41 @@ func parseMountPath(mountPath string, readOnly bool, propagationMode *v1.MountPr
|
||||
return dest, opts, nil
|
||||
}
|
||||
|
||||
func probeToHealthConfig(probe *v1.Probe) (*manifest.Schema2HealthConfig, error) {
|
||||
var commandString string
|
||||
failureCmd := "exit 1"
|
||||
probeHandler := probe.Handler
|
||||
|
||||
// configure healthcheck on the basis of Handler Actions.
|
||||
switch {
|
||||
case probeHandler.Exec != nil:
|
||||
// `makeHealthCheck` function can accept a json array as the command.
|
||||
cmd, err := json.Marshal(probeHandler.Exec.Command)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
commandString = string(cmd)
|
||||
case probeHandler.HTTPGet != nil:
|
||||
// set defaults as in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes
|
||||
uriScheme := v1.URISchemeHTTP
|
||||
if probeHandler.HTTPGet.Scheme != "" {
|
||||
uriScheme = probeHandler.HTTPGet.Scheme
|
||||
}
|
||||
host := "localhost" // Kubernetes default is host IP, but with Podman there is only one node
|
||||
if probeHandler.HTTPGet.Host != "" {
|
||||
host = probeHandler.HTTPGet.Host
|
||||
}
|
||||
path := "/"
|
||||
if probeHandler.HTTPGet.Path != "" {
|
||||
path = probeHandler.HTTPGet.Path
|
||||
}
|
||||
commandString = fmt.Sprintf("curl -f %s://%s:%d%s || %s", uriScheme, host, probeHandler.HTTPGet.Port.IntValue(), path, failureCmd)
|
||||
case probeHandler.TCPSocket != nil:
|
||||
commandString = fmt.Sprintf("nc -z -v %s %d || %s", probeHandler.TCPSocket.Host, probeHandler.TCPSocket.Port.IntValue(), failureCmd)
|
||||
}
|
||||
return makeHealthCheck(commandString, probe.PeriodSeconds, probe.FailureThreshold, probe.TimeoutSeconds, probe.InitialDelaySeconds)
|
||||
}
|
||||
|
||||
func setupLivenessProbe(s *specgen.SpecGenerator, containerYAML v1.Container, restartPolicy string) error {
|
||||
var err error
|
||||
if containerYAML.LivenessProbe == nil {
|
||||
@ -520,35 +559,7 @@ func setupLivenessProbe(s *specgen.SpecGenerator, containerYAML v1.Container, re
|
||||
}
|
||||
emptyHandler := v1.Handler{}
|
||||
if containerYAML.LivenessProbe.Handler != emptyHandler {
|
||||
var commandString string
|
||||
failureCmd := "exit 1"
|
||||
probe := containerYAML.LivenessProbe
|
||||
probeHandler := probe.Handler
|
||||
|
||||
// configure healthcheck on the basis of Handler Actions.
|
||||
switch {
|
||||
case probeHandler.Exec != nil:
|
||||
execString := strings.Join(probeHandler.Exec.Command, " ")
|
||||
commandString = fmt.Sprintf("%s || %s", execString, failureCmd)
|
||||
case probeHandler.HTTPGet != nil:
|
||||
// set defaults as in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes
|
||||
uriScheme := v1.URISchemeHTTP
|
||||
if probeHandler.HTTPGet.Scheme != "" {
|
||||
uriScheme = probeHandler.HTTPGet.Scheme
|
||||
}
|
||||
host := "localhost" // Kubernetes default is host IP, but with Podman there is only one node
|
||||
if probeHandler.HTTPGet.Host != "" {
|
||||
host = probeHandler.HTTPGet.Host
|
||||
}
|
||||
path := "/"
|
||||
if probeHandler.HTTPGet.Path != "" {
|
||||
path = probeHandler.HTTPGet.Path
|
||||
}
|
||||
commandString = fmt.Sprintf("curl -f %s://%s:%d%s || %s", uriScheme, host, probeHandler.HTTPGet.Port.IntValue(), path, failureCmd)
|
||||
case probeHandler.TCPSocket != nil:
|
||||
commandString = fmt.Sprintf("nc -z -v %s %d || %s", probeHandler.TCPSocket.Host, probeHandler.TCPSocket.Port.IntValue(), failureCmd)
|
||||
}
|
||||
s.HealthConfig, err = makeHealthCheck(commandString, probe.PeriodSeconds, probe.FailureThreshold, probe.TimeoutSeconds, probe.InitialDelaySeconds)
|
||||
s.HealthConfig, err = probeToHealthConfig(containerYAML.LivenessProbe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -561,6 +572,38 @@ func setupLivenessProbe(s *specgen.SpecGenerator, containerYAML v1.Container, re
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupStartupProbe(s *specgen.SpecGenerator, containerYAML v1.Container, restartPolicy string) error {
|
||||
if containerYAML.StartupProbe == nil {
|
||||
return nil
|
||||
}
|
||||
emptyHandler := v1.Handler{}
|
||||
if containerYAML.StartupProbe.Handler != emptyHandler {
|
||||
healthConfig, err := probeToHealthConfig(containerYAML.StartupProbe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// currently, StartupProbe still an optional feature, and it requires HealthConfig.
|
||||
if s.HealthConfig == nil {
|
||||
probe := containerYAML.StartupProbe
|
||||
s.HealthConfig, err = makeHealthCheck("exit 0", probe.PeriodSeconds, probe.FailureThreshold, probe.TimeoutSeconds, probe.InitialDelaySeconds)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.StartupHealthConfig = &define.StartupHealthCheck{
|
||||
Schema2HealthConfig: *healthConfig,
|
||||
Successes: int(containerYAML.StartupProbe.SuccessThreshold),
|
||||
}
|
||||
// if restart policy is in place, ensure the health check enforces it
|
||||
if restartPolicy == "always" || restartPolicy == "onfailure" {
|
||||
s.HealthCheckOnFailureAction = define.HealthCheckOnFailureActionRestart
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeHealthCheck(inCmd string, interval int32, retries int32, timeout int32, startPeriod int32) (*manifest.Schema2HealthConfig, error) {
|
||||
// Every healthcheck requires a command
|
||||
if len(inCmd) == 0 {
|
||||
@ -578,6 +621,8 @@ func makeHealthCheck(inCmd string, interval int32, retries int32, timeout int32,
|
||||
// ...otherwise pass it to "/bin/sh -c" inside the container
|
||||
cmd = []string{define.HealthConfigTestCmdShell}
|
||||
cmd = append(cmd, strings.Split(inCmd, " ")...)
|
||||
} else {
|
||||
cmd = append([]string{define.HealthConfigTestCmd}, cmd...)
|
||||
}
|
||||
}
|
||||
hc := manifest.Schema2HealthConfig{
|
||||
|
@ -402,6 +402,48 @@ spec:
|
||||
periodSeconds: 1
|
||||
`
|
||||
|
||||
// startupProbePodYaml is a Deployment manifest whose container defines both a
// startup probe (gated on /testfile existing) and a liveness probe, used to
// exercise `podman play kube` startup-probe support.
// NOTE: the original literal's indentation was mangled in transit; it has been
// reconstructed here with canonical Kubernetes manifest indentation so the
// YAML parses again.
var startupProbePodYaml = `
apiVersion: apps/v1
kind: Deployment
metadata:
  name: startup-healthy-probe
  labels:
    app: alpine
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      restartPolicy: Never
      containers:
      - command:
        - top
        - -d
        - "1.5"
        name: alpine
        image: quay.io/libpod/alpine:latest
        startupProbe:
          exec:
            command:
            - /bin/sh
            - -c
            - cat /testfile
          initialDelaySeconds: 0
          periodSeconds: 1
        livenessProbe:
          exec:
            command:
            - echo
            - liveness probe
          initialDelaySeconds: 0
          periodSeconds: 1
`
|
||||
|
||||
var selinuxLabelPodYaml = `
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
@ -1740,7 +1782,7 @@ var _ = Describe("Podman play kube", func() {
|
||||
inspect.WaitWithDefaultTimeout()
|
||||
healthcheckcmd := inspect.OutputToString()
|
||||
// check if CMD-SHELL based equivalent health check is added to container
|
||||
Expect(healthcheckcmd).To(ContainSubstring("CMD-SHELL"))
|
||||
Expect(healthcheckcmd).To(ContainSubstring("[CMD echo hello]"))
|
||||
})
|
||||
|
||||
It("podman play kube liveness probe should fail", func() {
|
||||
@ -1758,6 +1800,35 @@ var _ = Describe("Podman play kube", func() {
|
||||
Expect(hcoutput).To(ContainSubstring(define.HealthCheckUnhealthy))
|
||||
})
|
||||
|
||||
It("podman play kube support container startup probe", func() {
	ctrName := "startup-healthy-probe-pod-0-alpine"
	Expect(writeYaml(startupProbePodYaml, kubeYaml)).ToNot(HaveOccurred())

	playKube := podmanTest.Podman([]string{"play", "kube", kubeYaml})
	playKube.WaitWithDefaultTimeout()
	Expect(playKube).Should(Exit(0))

	// The startup probe reads /testfile, which does not exist yet, so the
	// container must still be in the "starting" health state.
	time.Sleep(2 * time.Second)
	inspect := podmanTest.InspectContainer(ctrName)
	Expect(inspect[0].State.Health).To(HaveField("Status", "starting"))

	// Running the health check manually should fail while the file is absent.
	healthCheck := podmanTest.Podman([]string{"healthcheck", "run", ctrName})
	healthCheck.WaitWithDefaultTimeout()
	Expect(healthCheck).Should(Exit(1))

	// Create the file the startup probe is waiting for.
	execSession := podmanTest.Podman([]string{"exec", ctrName, "sh", "-c", "echo 'startup probe success' > /testfile"})
	execSession.WaitWithDefaultTimeout()
	Expect(execSession).Should(Exit(0))

	// Now the probe succeeds and the container reports healthy.
	healthCheck = podmanTest.Podman([]string{"healthcheck", "run", ctrName})
	healthCheck.WaitWithDefaultTimeout()
	Expect(healthCheck).Should(Exit(0))

	inspect = podmanTest.InspectContainer(ctrName)
	Expect(inspect[0].State.Health).To(HaveField("Status", define.HealthCheckHealthy))
})
|
||||
|
||||
It("podman play kube fail with nonexistent authfile", func() {
|
||||
err := generateKubeYaml("pod", getPod(), kubeYaml)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
Reference in New Issue
Block a user