Merge pull request #16947 from ygalblum/kube-service-container-logdriver

Kube Play: use passthrough as the default log-driver if service-container is set
Authored by OpenShift Merge Robot on 2023-01-03 09:28:00 -05:00, committed via GitHub

4 changed files with 19 additions and 4 deletions
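In effect: when `--service-container` is set and `--log-driver` is not passed explicitly, the pod's containers now default to the passthrough log driver. A rough usage sketch of the new behavior (the YAML path is a placeholder; the container name follows the `test_pod-a` convention used in the system tests below):

    # Default: --service-container enabled, no explicit --log-driver
    $ podman play kube --service-container=true ./pod.yaml
    $ podman container inspect test_pod-a --format "{{.HostConfig.LogConfig.Type}}"
    passthrough

    # An explicit --log-driver still overrides the new default
    $ podman play kube --service-container=true --log-driver journald ./pod.yaml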

@@ -73,6 +73,7 @@ var (
 	podman play kube --creds user:password --seccomp-profile-root /custom/path apache.yml
 	podman play kube https://example.com/nginx.yml`,
 	}
+	logDriverFlagName = "log-driver"
 )

 func init() {
@@ -116,7 +117,6 @@ func playFlags(cmd *cobra.Command) {
 	flags.IPSliceVar(&playOptions.StaticIPs, staticIPFlagName, nil, "Static IP addresses to assign to the pods")
 	_ = cmd.RegisterFlagCompletionFunc(staticIPFlagName, completion.AutocompleteNone)
-	logDriverFlagName := "log-driver"
 	flags.StringVar(&playOptions.LogDriver, logDriverFlagName, common.LogDriver(), "Logging driver for the container")
 	_ = cmd.RegisterFlagCompletionFunc(logDriverFlagName, common.AutocompleteLogDriver)
@@ -247,6 +247,15 @@ func play(cmd *cobra.Command, args []string) error {
 	return errors.New("--force may be specified only with --down")
 	}

+	// When running under Systemd use passthrough as the default log-driver.
+	// When doing so, the journal socket is passed to the containers as-is which has two advantages:
+	// 1. journald can see who the actual sender of the log event is,
+	//    rather than thinking everything comes from the conmon process
+	// 2. conmon will not have to copy all the log data
+	if !cmd.Flags().Changed(logDriverFlagName) && playOptions.ServiceContainer {
+		playOptions.LogDriver = define.PassthroughLogging
+	}
+
 	reader, err := readerFromArg(args[0])
 	if err != nil {
 		return err

@@ -414,6 +414,12 @@ EOF
 run_podman 125 container rm $service_container
 is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"

+# Verify that the log-driver for the Pod's containers is passthrough
+for name in "a" "b"; do
+    run_podman container inspect test_pod-${name} --format "{{.HostConfig.LogConfig.Type}}"
+    is "$output" "passthrough"
+done
+
 # Add a simple `auto-update --dry-run` test here to avoid too much redundancy
 # with 255-auto-update.bats
 run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"

@@ -219,7 +219,7 @@ EOF
 wait_for_file $_SOCAT_LOG
 # Will run until all containers have stopped.
-run_podman play kube --service-container=true $yaml_source
+run_podman play kube --service-container=true --log-driver journald $yaml_source
 run_podman container wait $service_container test_pod-test

 # Make sure the containers have the correct policy.
@@ -291,7 +291,7 @@ EOF
 # Run `play kube` in the background as it will wait for all containers to
 # send the READY=1 message.
 timeout --foreground -v --kill=10 60 \
-    $PODMAN play kube --service-container=true $yaml_source &>/dev/null &
+    $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &

 # Wait for both containers to be running
 for i in $(seq 1 20); do

@@ -126,7 +126,7 @@ EOF
 # Run `play kube` in the background as it will wait for the service
 # container to exit.
 timeout --foreground -v --kill=10 60 \
-    $PODMAN play kube --service-container=true $yaml_source &>/dev/null &
+    $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &

 # Wait for the container to be running
 container_a=test_pod-test