mirror of https://github.com/containers/podman.git
Fix punctuation and wording in some places

Fixes issue #3577.

Signed-off-by: John Hooks <hooksie11@gmail.com>
@@ -32,7 +32,7 @@ const (
     cmdTruncLength = 17
 )
 
-// PsOptions describes the struct being formed for ps
+// PsOptions describes the struct being formed for ps.
 type PsOptions struct {
     All    bool
     Format string
@@ -47,8 +47,8 @@ type PsOptions struct {
     Sync bool
 }
 
-// BatchContainerStruct is the return obkect from BatchContainer and contains
-// container related information
+// BatchContainerStruct is the return object from BatchContainer and contains
+// container related information.
 type BatchContainerStruct struct {
     ConConfig *libpod.ContainerConfig
     ConState  define.ContainerStatus
@@ -61,7 +61,7 @@ type BatchContainerStruct struct {
 }
 
 // PsContainerOutput is the struct being returned from a parallel
-// Batch operation
+// batch operation.
 type PsContainerOutput struct {
     ID    string
     Image string
@@ -90,7 +90,7 @@ type PsContainerOutput struct {
     Mounts string
 }
 
-// Namespace describes output for ps namespace
+// Namespace describes output for ps namespace.
 type Namespace struct {
     PID    string `json:"pid,omitempty"`
     Cgroup string `json:"cgroup,omitempty"`
@@ -103,14 +103,14 @@ type Namespace struct {
 }
 
 // ContainerSize holds the size of the container's root filesystem and top
-// read-write layer
+// read-write layer.
 type ContainerSize struct {
     RootFsSize int64 `json:"rootFsSize"`
     RwSize     int64 `json:"rwSize"`
 }
 
 // NewBatchContainer runs a batch process under one lock to get container information and only
-// be called in PBatch
+// be called in PBatch.
 func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) {
     var (
         conState define.ContainerStatus
@@ -257,15 +257,15 @@ type workerInput struct {
     job int
 }
 
-// worker is a "threaded" worker that takes jobs from the channel "queue"
+// worker is a "threaded" worker that takes jobs from the channel "queue".
 func worker(wg *sync.WaitGroup, jobs <-chan workerInput, results chan<- PsContainerOutput, errors chan<- error) {
     for j := range jobs {
         r, err := j.parallelFunc()
-        // If we find an error, we return just the error
+        // If we find an error, we return just the error.
         if err != nil {
             errors <- err
         } else {
-            // Return the result
+            // Return the result.
             results <- r
         }
         wg.Done()
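The worker/jobs/results trio in this hunk is the classic Go fan-out worker pool. A minimal, self-contained sketch of the same shape, using illustrative names rather than podman's actual types:

package main

import (
    "fmt"
    "sync"
)

// job stands in for workerInput: a closure to run plus an index.
type job struct {
    run func() (string, error)
    id  int
}

// worker drains the jobs channel, forwarding each result or error.
func worker(wg *sync.WaitGroup, jobs <-chan job, results chan<- string, errs chan<- error) {
    for j := range jobs {
        if r, err := j.run(); err != nil {
            errs <- err
        } else {
            results <- r
        }
        wg.Done()
    }
}

func main() {
    const n = 3
    var wg sync.WaitGroup
    // Buffers sized to the job count let workers send without a waiting reader.
    jobs := make(chan job, n)
    results := make(chan string, n)
    errs := make(chan error, n)

    for w := 0; w < 2; w++ { // spawn the workers
        go worker(&wg, jobs, results, errs)
    }
    for i := 0; i < n; i++ { // enqueue the jobs
        i := i
        wg.Add(1)
        jobs <- job{id: i, run: func() (string, error) {
            return fmt.Sprintf("job %d done", i), nil
        }}
    }
    close(jobs)
    wg.Wait()
    close(results)
    for r := range results {
        fmt.Println(r)
    }
}

PBatch, shown further down, follows the same recipe: buffer the channels to len(containers) so no send can block, clamp the worker count to the job count, and wait on the WaitGroup before collecting results.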
@@ -398,7 +398,7 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
     return nil, errors.Errorf("%s is an invalid filter", filter)
 }
 
-// GetPsContainerOutput returns a slice of containers specifically for ps output
+// GetPsContainerOutput returns a slice of containers specifically for ps output.
 func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, maxWorkers int) ([]PsContainerOutput, error) {
     var (
         filterFuncs []libpod.ContainerFilter
@@ -419,21 +419,21 @@ func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, maxWorkers int) ([]PsContainerOutput, error) {
         }
     }
     if !opts.Latest {
-        // Get all containers
+        // Get all containers.
         containers, err := r.GetContainers(filterFuncs...)
         if err != nil {
             return nil, err
         }
 
-        // We only want the last few containers
+        // We only want the last few containers.
         if opts.Last > 0 && opts.Last <= len(containers) {
             return nil, errors.Errorf("--last not yet supported")
         } else {
             outputContainers = containers
         }
     } else {
-        // Get just the latest container
-        // Ignore filters
+        // Get just the latest container.
+        // Ignore filters.
         latestCtr, err := r.GetLatestContainer()
         if err != nil {
             return nil, err
@ -446,8 +446,8 @@ func GetPsContainerOutput(r *libpod.Runtime, opts PsOptions, filters []string, m
|
|||||||
return pss, nil
|
return pss, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// PBatch is performs batch operations on a container in parallel. It spawns the number of workers
|
// PBatch performs batch operations on a container in parallel. It spawns the
|
||||||
// relative to the the number of parallel operations desired.
|
// number of workers relative to the number of parallel operations desired.
|
||||||
func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
|
func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
|
||||||
var (
|
var (
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
@@ -455,7 +455,7 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
     )
 
     // If the number of containers in question is less than the number of
-    // proposed parallel operations, we shouldnt spawn so many workers
+    // proposed parallel operations, we shouldnt spawn so many workers.
     if workers > len(containers) {
         workers = len(containers)
     }
@@ -464,12 +464,12 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
     results := make(chan PsContainerOutput, len(containers))
     batchErrors := make(chan error, len(containers))
 
-    // Create the workers
+    // Create the workers.
     for w := 1; w <= workers; w++ {
         go worker(&wg, jobs, results, batchErrors)
     }
 
-    // Add jobs to the workers
+    // Add jobs to the workers.
     for i, j := range containers {
         j := j
         wg.Add(1)
@@ -504,7 +504,7 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsContainerOutput {
     return psResults
 }
 
-// BatchContainer is used in ps to reduce performance hits by "batching"
+// BatchContainerOp is used in ps to reduce performance hits by "batching"
 // locks.
 func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) {
     var (
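The "batching locks" idea behind BatchContainerOp is worth spelling out: instead of paying a lock/unlock round-trip for each accessor, one critical section gathers every field ps needs. A generic sketch of the trade-off, with illustrative types rather than libpod's:

package main

import (
    "fmt"
    "sync"
)

type container struct {
    mu    sync.Mutex
    state string
    pid   int
}

// Unbatched accessors: each read pays its own lock round-trip.
func (c *container) State() string { c.mu.Lock(); defer c.mu.Unlock(); return c.state }
func (c *container) PID() int      { c.mu.Lock(); defer c.mu.Unlock(); return c.pid }

// Batched: one critical section covers every field the caller needs.
func (c *container) batch() (state string, pid int) {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.state, c.pid
}

func main() {
    c := &container{state: "running", pid: 42}
    s, p := c.batch() // one lock acquisition instead of two
    fmt.Println(s, p)
}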
@@ -582,7 +582,7 @@ func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) {
     }, nil
 }
 
-// GetNamespaces returns a populated namespace struct
+// GetNamespaces returns a populated namespace struct.
 func GetNamespaces(pid int) *Namespace {
     ctrPID := strconv.Itoa(pid)
     cgroup, _ := getNamespaceInfo(filepath.Join("/proc", ctrPID, "ns", "cgroup"))
@@ -613,7 +613,7 @@ func getNamespaceInfo(path string) (string, error) {
     return getStrFromSquareBrackets(val), nil
 }
 
-// getStrFromSquareBrackets gets the string inside [] from a string
+// getStrFromSquareBrackets gets the string inside [] from a string.
 func getStrFromSquareBrackets(cmd string) string {
     reg, err := regexp.Compile(`.*\[|\].*`)
    if err != nil {
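Some context on why the bracket-stripping helper exists: each /proc/<pid>/ns/* entry is a symlink whose target looks like pid:[4026531836], so deleting everything outside the square brackets leaves the namespace inode ID. A small sketch of the same extraction (Linux-only in practice; the helper name here is illustrative):

package main

import (
    "fmt"
    "regexp"
)

// stripBrackets deletes everything outside the square brackets, the same
// trick the regexp above uses: `.*\[` eats the prefix, `\].*` the suffix.
func stripBrackets(s string) string {
    re := regexp.MustCompile(`.*\[|\].*`)
    return re.ReplaceAllString(s, "")
}

func main() {
    // On Linux this target would come from os.Readlink("/proc/self/ns/pid").
    target := "pid:[4026531836]"
    fmt.Println(stripBrackets(target)) // 4026531836
}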
@@ -639,8 +639,8 @@ func comparePorts(i, j ocicni.PortMapping) bool {
     return i.Protocol < j.Protocol
 }
 
-// returns the group as <IP:startPort:lastPort->startPort:lastPort/Proto>
-// e.g 0.0.0.0:1000-1006->1000-1006/tcp
+// formatGroup returns the group as <IP:startPort:lastPort->startPort:lastPort/Proto>
+// e.g 0.0.0.0:1000-1006->1000-1006/tcp.
 func formatGroup(key string, start, last int32) string {
     parts := strings.Split(key, "/")
     groupType := parts[0]
@@ -660,7 +660,7 @@ func formatGroup(key string, start, last int32) string {
 }
 
 // portsToString converts the ports used to a string of the from "port1, port2"
-// also groups continuous list of ports in readable format.
+// and also groups continuous list of ports in readable format.
 func portsToString(ports []ocicni.PortMapping) string {
     type portGroup struct {
         first int32
@@ -675,7 +675,7 @@ func portsToString(ports []ocicni.PortMapping) string {
         return comparePorts(ports[i], ports[j])
     })
 
-    // portGroupMap is used for grouping continuous ports
+    // portGroupMap is used for grouping continuous ports.
     portGroupMap := make(map[string]*portGroup)
     var groupKeyList []string
 
@@ -685,7 +685,7 @@ func portsToString(ports []ocicni.PortMapping) string {
         if hostIP == "" {
             hostIP = "0.0.0.0"
         }
-        // if hostPort and containerPort are not same, consider as individual port.
+        // If hostPort and containerPort are not same, consider as individual port.
         if v.ContainerPort != v.HostPort {
             portDisplay = append(portDisplay, fmt.Sprintf("%s:%d->%d/%s", hostIP, v.HostPort, v.ContainerPort, v.Protocol))
             continue
@@ -696,7 +696,7 @@ func portsToString(ports []ocicni.PortMapping) string {
         portgroup, ok := portGroupMap[portMapKey]
         if !ok {
             portGroupMap[portMapKey] = &portGroup{first: v.ContainerPort, last: v.ContainerPort}
-            // this list is required to travese portGroupMap
+            // This list is required to travese portGroupMap.
             groupKeyList = append(groupKeyList, portMapKey)
             continue
         }
@@ -706,7 +706,7 @@ func portsToString(ports []ocicni.PortMapping) string {
             continue
         }
     }
-    // for each portMapKey, format group list and appned to output string
+    // For each portMapKey, format group list and appned to output string.
     for _, portKey := range groupKeyList {
         group := portGroupMap[portKey]
         portDisplay = append(portDisplay, formatGroup(portKey, group.first, group.last))
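The grouping logic in these hunks collapses a sorted run of contiguous ports into a single range, so seven mappings print as 0.0.0.0:1000-1006->1000-1006/tcp rather than seven separate entries. A minimal sketch of just the range-collapsing step (it assumes the input is already sorted ascending, which portsToString guarantees via its sort call above):

package main

import (
    "fmt"
    "strings"
)

// collapseRanges turns a sorted port list into "first-last" groups,
// mirroring the first/last bookkeeping kept in portGroupMap.
func collapseRanges(ports []int32) string {
    if len(ports) == 0 {
        return ""
    }
    var groups []string
    first, last := ports[0], ports[0]
    flush := func() {
        if first == last {
            groups = append(groups, fmt.Sprintf("%d", first))
        } else {
            groups = append(groups, fmt.Sprintf("%d-%d", first, last))
        }
    }
    for _, p := range ports[1:] {
        if p == last+1 { // still continuous; extend the current group
            last = p
            continue
        }
        flush() // gap found; emit the finished group
        first, last = p, p
    }
    flush()
    return strings.Join(groups, ", ")
}

func main() {
    fmt.Println(collapseRanges([]int32{1000, 1001, 1002, 1005})) // 1000-1002, 1005
}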
@@ -715,7 +715,7 @@ func portsToString(ports []ocicni.PortMapping) string {
 }
 
 // GetRunlabel is a helper function for runlabel; it gets the image if needed and begins the
-// construction of the runlabel output and environment variables
+// construction of the runlabel output and environment variables.
 func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) {
     var (
         newImage *image.Image
@@ -750,9 +750,9 @@ func GetRunlabel(label string, runlabelImage string, ctx context.Context, runtime *libpod.Runtime, pull bool, inputCreds string, dockerRegistryOptions image.DockerRegistryOptions, authfile string, signaturePolicyPath string, output io.Writer) (string, string, error) {
     return runLabel, imageName, err
 }
 
-// GenerateRunlabelCommand generates the command that will eventually be execucted by podman
+// GenerateRunlabelCommand generates the command that will eventually be execucted by podman.
 func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) {
-    // If no name is provided, we use the image's basename instead
+    // If no name is provided, we use the image's basename instead.
     if name == "" {
         baseName, err := image.GetImageBaseName(imageName)
         if err != nil {
@@ -760,7 +760,7 @@ func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) {
         }
         name = baseName
     }
-    // The user provided extra arguments that need to be tacked onto the label's command
+    // The user provided extra arguments that need to be tacked onto the label's command.
     if len(extraArgs) > 0 {
         runLabel = fmt.Sprintf("%s %s", runLabel, strings.Join(extraArgs, " "))
     }
@@ -782,7 +782,7 @@ func GenerateRunlabelCommand(runLabel, imageName, name string, opts map[string]string, extraArgs []string, globalOpts string) ([]string, []string, error) {
         case "OPT3":
             return envmap["OPT3"]
         case "PWD":
-            // I would prefer to use os.getenv but it appears PWD is not in the os env list
+            // I would prefer to use os.getenv but it appears PWD is not in the os env list.
             d, err := os.Getwd()
             if err != nil {
                 logrus.Error("unable to determine current working directory")
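The switch in this hunk reads like a mapping function for placeholder expansion: tokens such as $OPT3 or $PWD in a runlabel command are replaced with looked-up values, with PWD resolved via os.Getwd because it is not reliably present in the process environment. A hedged sketch of that mechanism using the standard os.Expand (the label text and placeholder values here are made up, and os.Expand is an assumption about how the surrounding code consumes this switch):

package main

import (
    "fmt"
    "os"
)

func main() {
    // Hypothetical label command with placeholders to substitute.
    runLabel := "podman run -d $OPT3 $IMAGE in $PWD"
    envmap := map[string]string{"OPT3": "--rm", "IMAGE": "quay.io/example/app"}

    expanded := os.Expand(runLabel, func(key string) string {
        if key == "PWD" {
            // PWD is not guaranteed to be in the environment; ask the OS.
            d, err := os.Getwd()
            if err != nil {
                return ""
            }
            return d
        }
        return envmap[key]
    })
    fmt.Println(expanded)
}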
@@ -819,7 +819,7 @@ func GenerateKube(name string, service bool, r *libpod.Runtime) (*v1.Pod, *v1.Se
         servicePorts []v1.ServicePort
         serviceYAML  v1.Service
     )
-    // Get the container in question
+    // Get the container in question.
     container, err = r.LookupContainer(name)
     if err != nil {
         pod, err = r.LookupPod(name)