service container: less verbose error logs

While manually playing with --service-container, I encountered a number
of overly verbose logs. For instance, there is no need to error-log when
the service container has already been stopped.

For testing, add a new kube test with a multi-pod YAML which will
implicitly show that #17024 is now working.

Fixes: #17024
Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
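The change itself is small: during teardown, a failure that merely reports the container is already in the desired state is expected and should not be error-logged. A minimal, self-contained sketch of the pattern, where errStopped and stopContainer are illustrative stand-ins rather than the libpod API (only define.ErrCtrStopped and define.ErrNoSuchCtr appear in the actual diff):

package main

import (
	"errors"
	"fmt"
	"log"
)

// errStopped stands in for define.ErrCtrStopped: a sentinel error meaning
// the container is already in the state we were driving it to.
var errStopped = errors.New("container already stopped")

// stopContainer is a hypothetical stop operation. It wraps the sentinel
// with %w so callers can detect it through errors.Is.
func stopContainer(id string) error {
	return fmt.Errorf("stopping %s: %w", id, errStopped)
}

func main() {
	if err := stopContainer("4a7eb3dabf88-service"); err != nil {
		// Error-log only failures that are NOT the benign "already
		// stopped" case; errors.Is walks the wrapped error chain.
		if !errors.Is(err, errStopped) {
			log.Printf("Stopping service container: %v", err)
		}
	}
}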
@@ -135,7 +135,9 @@ func (p *Pod) maybeStopServiceContainer() error {
 		}
 		logrus.Debugf("Stopping service container %s", serviceCtr.ID())
 		if err := serviceCtr.Stop(); err != nil {
-			logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+			if !errors.Is(err, define.ErrCtrStopped) {
+				logrus.Errorf("Stopping service container %s: %v", serviceCtr.ID(), err)
+			}
 		}
 	})
 	return nil
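The removal path below gets the same treatment, with define.ErrNoSuchCtr as the benign case there: by the time the service container is removed, it may already be gone, and that is not worth an error log either.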
@@ -239,7 +241,9 @@ func (p *Pod) maybeRemoveServiceContainer() error {
 		timeout := uint(0)
 		logrus.Debugf("Removing service container %s", serviceCtr.ID())
 		if err := p.runtime.RemoveContainer(context.Background(), serviceCtr, true, false, &timeout); err != nil {
-			logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+			if !errors.Is(err, define.ErrNoSuchCtr) {
+				logrus.Errorf("Removing service container %s: %v", serviceCtr.ID(), err)
+			}
 		}
 	})
 	return nil
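The new system test below creates two pods from a single multi-document YAML, waits for all containers (including the shared service container) to run, then stops both pods and asserts that no "Stopping ..." error logs show up while the service container exits on its own.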
@@ -531,3 +531,70 @@ spec:
     run_podman pod rm -a -f
     run_podman rm -a -f
 }
+
+@test "podman kube play - multi-pod YAML" {
+    skip_if_remote "service containers only work locally"
+    skip_if_journald_unavailable
+
+    # Create the YAML file
+    yaml_source="$PODMAN_TMPDIR/test.yaml"
+    cat >$yaml_source <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: pod1
+  name: pod1
+spec:
+  containers:
+  - command:
+    - top
+    image: $IMAGE
+    name: ctr1
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    app: pod2
+  name: pod2
+spec:
+  containers:
+  - command:
+    - top
+    image: $IMAGE
+    name: ctr2
+EOF
+    # Run `play kube` in the background as it will wait for the service
+    # container to exit.
+    timeout --foreground -v --kill=10 60 \
+        $PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &
+
+    # The name of the service container is predictable: the first 12 characters
+    # of the hash of the YAML file followed by the "-service" suffix
+    yaml_sha=$(sha256sum $yaml_source)
+    service_container="${yaml_sha:0:12}-service"
+    # Wait for the containers to be running
+    container_1=pod1-ctr1
+    container_2=pod2-ctr2
+    for i in $(seq 1 20); do
+        run_podman "?" container wait $container_1 $container_2 $service_container --condition="running"
+        if [[ $status == 0 ]]; then
+            break
+        fi
+        sleep 0.5
+        # Just for debugging
+        run_podman ps -a
+    done
+    if [[ $status != 0 ]]; then
+        die "containers $container_1, $container_2 and/or $service_container did not start"
+    fi
+
+    # Stop the pods, make sure that no ugly error logs show up and that the
+    # service container will implicitly get stopped as well
+    run_podman pod stop pod1 pod2
+    assert "$output" !~ "Stopping"
+    _ensure_container_running $service_container false
+
+    run_podman kube down $yaml_source
+}