Mirror of https://github.com/containers/podman.git
Run codespell to fix spelling

[NO TESTS NEEDED] Just fixing spelling.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Makefile | 2 +-
@@ -257,7 +257,7 @@ test/goecho/goecho: .gopathok $(wildcard test/goecho/*.go)
 
 .PHONY: codespell
 codespell:
-	codespell -S bin,vendor,.git,go.sum,changelog.txt,.cirrus.yml,"RELEASE_NOTES.md,*.xz,*.gz,*.tar,*.tgz,bin2img,*ico,*.png,*.1,*.5,copyimg,*.orig,apidoc.go" -L uint,iff,od,seeked,splitted,marge,ERRO,hist,ether -w
+	codespell -S bin,vendor,.git,go.sum,changelog.txt,.cirrus.yml,"RELEASE_NOTES.md,*.xz,*.gz,*.ps1,*.tar,*.tgz,bin2img,*ico,*.png,*.1,*.5,copyimg,*.orig,apidoc.go" -L uint,iff,od,seeked,splitted,marge,ERRO,hist,ether -w
 
 .PHONY: validate
 validate: gofmt lint .gitvalidation validate.completions man-page-check swagger-check tests-included tests-expect-exit

@@ -685,7 +685,7 @@ func makeHealthCheckFromCli(inCmd, interval string, retries uint, timeout, start
 	concat := ""
 	if cmdArr[0] == "CMD" || cmdArr[0] == "none" { // this is for compat, we are already split properly for most compat cases
 		cmdArr = strings.Fields(inCmd)
-	} else if cmdArr[0] != "CMD-SHELL" { // this is for podman side of things, wont contain the keywords
+	} else if cmdArr[0] != "CMD-SHELL" { // this is for podman side of things, won't contain the keywords
 		if isArr && len(cmdArr) > 1 { // an array of consecutive commands
 			cmdArr = append([]string{"CMD"}, cmdArr...)
 		} else { // one singular command

@@ -455,8 +455,7 @@ setup operations for the pod's applications.
 
 Valid values for `init-ctr` type are *always* or *oneshot*. The *always* value
 means the container will run with each and every `pod start`, whereas the *oneshot*
-value means is will ony run once when the pod is started and then the container is
-removed.
+value means the container will only run once when the pod is started and then the container is removed.
 
 Init containers are only run on pod `start`. Restarting a pod will not execute any init
 containers should they be present. Furthermore, init containers can only be created in a

@@ -240,7 +240,7 @@ type ContainerImageVolume struct {
 type ContainerSecret struct {
 	// Secret is the secret
 	*secrets.Secret
-	// UID is tbe UID of the secret file
+	// UID is the UID of the secret file
 	UID uint32
 	// GID is the GID of the secret file
 	GID uint32

@@ -1782,7 +1782,7 @@ func (c *Container) generateResolvConf() (string, error) {
 	cniResponse := c.state.NetworkStatus
 	for _, i := range cniResponse {
 		for _, ip := range i.IPs {
-			// Note: only using To16() does not work since it also returns a vaild ip for ipv4
+			// Note: only using To16() does not work since it also returns a valid ip for ipv4
 			if ip.Address.IP.To4() == nil && ip.Address.IP.To16() != nil {
 				ipv6 = true
 			}

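Aside: a minimal, self-contained Go sketch of the check in the hunk above — net.IP.To16() returns a non-nil 16-byte form even for IPv4 addresses, so an address counts as IPv6 only when To4() fails and To16() succeeds.

package main

import (
	"fmt"
	"net"
)

// isIPv6 mirrors the condition above: IPv4 addresses still have a valid
// 16-byte form, so To16() alone cannot distinguish the two families.
func isIPv6(ip net.IP) bool {
	return ip.To4() == nil && ip.To16() != nil
}

func main() {
	fmt.Println(isIPv6(net.ParseIP("10.88.0.2"))) // false: To4() succeeds
	fmt.Println(isIPv6(net.ParseIP("fd00::2")))   // true: no IPv4 form, valid 16-byte form
}
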
@@ -79,7 +79,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
 		break
 	}
 	if cursorError != nil {
-		return errors.Wrap(cursorError, "inital journal cursor")
+		return errors.Wrap(cursorError, "initial journal cursor")
 	}
 
 	// We need the container's events in the same journal to guarantee

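For context, a small sketch of what errors.Wrap does with that message, assuming the errors package in use here is github.com/pkg/errors: it prefixes the message while keeping the original error reachable.

package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

func main() {
	// Wrap prefixes its message and records a stack trace; the original
	// error stays reachable via errors.Cause (or %+v for the full trace).
	wrapped := errors.Wrap(io.EOF, "initial journal cursor")
	fmt.Println(wrapped)               // initial journal cursor: EOF
	fmt.Println(errors.Cause(wrapped)) // EOF
}
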
@@ -1219,7 +1219,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
 		return err
 	}
 
-	// OCICNI will set the loopback adpter down on teardown so we should set it up again
+	// OCICNI will set the loopback adapter down on teardown so we should set it up again
 	err = c.state.NetNS.Do(func(_ ns.NetNS) error {
 		link, err := netlink.LinkByName("lo")
 		if err != nil {

@@ -1229,7 +1229,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
 		return err
 	})
 	if err != nil {
-		logrus.Warnf("failed to set loopback adpter up in the container: %v", err)
+		logrus.Warnf("failed to set loopback adapter up in the container: %v", err)
 	}
 	// Reload ports when there are still connected networks, maybe we removed the network interface with the child ip.
 	// Reloading without connected networks does not make sense, so we can skip this step.

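Aside: a hedged sketch of what "set the loopback adapter up again" amounts to with the vishvananda/netlink package seen in these hunks; the LinkSetUp call is an assumption about the elided body, and the namespace handling (NetNS.Do) is left out here.

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

// setLoopbackUp brings the "lo" device up, the userspace equivalent of
// `ip link set lo up`. Assumes Linux and sufficient privileges.
func setLoopbackUp() error {
	link, err := netlink.LinkByName("lo")
	if err != nil {
		return err
	}
	return netlink.LinkSetUp(link)
}

func main() {
	if err := setLoopbackUp(); err != nil {
		log.Printf("failed to set loopback adapter up: %v", err)
	}
}
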
@@ -27,7 +27,7 @@ func IsRegistryReference(name string) error {
 	if imageRef.Transport().Name() == docker.Transport.Name() {
 		return nil
 	}
-	return errors.Errorf("unsupport transport %s in %q: only docker transport is supported", imageRef.Transport().Name(), name)
+	return errors.Errorf("unsupported transport %s in %q: only docker transport is supported", imageRef.Transport().Name(), name)
 }
 
 // ParseStorageReference parses the specified image name to a

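As an aside, a sketch of how an image name resolves to the docker transport that this check accepts, assuming the containers/image/v5 import paths used elsewhere in the project.

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	// "docker://" selects the registry transport; after parsing, the
	// transport name matches docker.Transport.Name(), i.e. "docker".
	ref, err := alltransports.ParseImageName("docker://quay.io/libpod/testimage:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Transport().Name() == docker.Transport.Name()) // true
}
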
@@ -214,7 +214,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
 		networkMode := c.NetworkMode()
 		// support docker like `--filter network=container:<IDorName>`
 		// check if networkMode is configured as `container:<ctr>`
-		// peform a match against filter `container:<IDorName>`
+		// perform a match against filter `container:<IDorName>`
 		// networks is already going to be empty if `container:<ctr>` is configured as Mode
 		if strings.HasPrefix(networkMode, "container:") {
 			networkModeContainerPart := strings.SplitN(networkMode, ":", 2)

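A tiny sketch of the split these comments describe: strings.SplitN with a limit of 2 keeps everything after the first ":" intact, so the container ID or name comes back verbatim (the mode value below is made up for illustration).

package main

import (
	"fmt"
	"strings"
)

func main() {
	networkMode := "container:my-webserver" // hypothetical mode value
	if strings.HasPrefix(networkMode, "container:") {
		parts := strings.SplitN(networkMode, ":", 2)
		fmt.Println(parts[1]) // "my-webserver", matched against the filter value
	}
}
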
@@ -102,7 +102,7 @@ function _wait_service_ready() {
         let timeout=$timeout-1
     done
 
-    # Print serivce status as debug information before failed the case
+    # Print service status as debug information before failed the case
     systemctl status $sname
     die "Timed out waiting for $sname to start"
 }

@@ -305,7 +305,7 @@ EOF
         fi
     done
 
-    # Only check the last service is started. Previous services should already actived.
+    # Only check that the last service is started. Previous services should already be activated.
     _wait_service_ready container-$cname.service
     run_podman commit --change CMD=/bin/bash $local_cname quay.io/libpod/localtest:latest
     # Exit code is expected, due to invalid 'fakevalue'

@@ -94,9 +94,9 @@ RELABEL="system_u:object_r:container_file_t:s0"
     mkdir -p $TESTDIR
     echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
     run_podman 125 play kube --network bridge $PODMAN_TMPDIR/test.yaml
-    is "$output" ".*invalid value passed to --network: bridge or host networking must be configured in YAML" "podman plan-network should fail wth --network host"
+    is "$output" ".*invalid value passed to --network: bridge or host networking must be configured in YAML" "podman plan-network should fail with --network host"
     run_podman 125 play kube --network host $PODMAN_TMPDIR/test.yaml
-    is "$output" ".*invalid value passed to --network: bridge or host networking must be configured in YAML" "podman plan-network should fail wth --network host"
+    is "$output" ".*invalid value passed to --network: bridge or host networking must be configured in YAML" "podman plan-network should fail with --network host"
     run_podman play kube --network slirp4netns:port_handler=slirp4netns $PODMAN_TMPDIR/test.yaml
     run_podman pod rm -f test_pod
 }