Mirror of https://github.com/containers/podman.git
golangci-lint pass number 2
Clean up and prepare to migrate to the golangci-lint linter.

Signed-off-by: baude <bbaude@redhat.com>
@@ -552,9 +552,6 @@ func generatePodPsOutput(pods []*adapter.Pod, opts podPsOptions) error {

 	switch opts.Format {
 	case formats.JSONString:
-		if err != nil {
-			return errors.Wrapf(err, "unable to create JSON for output")
-		}
 		out = formats.JSONStructArray{Output: podPsToGeneric([]podPsTemplateParams{}, psOutput)}
 	default:
 		psOutput, err := getPodTemplateOutput(psOutput, opts)

@@ -319,6 +319,9 @@ func psDisplay(c *cliconfig.PsValues, runtime *adapter.LocalRuntime) error {
 	}

 	pss, err := runtime.Ps(c, opts)
+	if err != nil {
+		return err
+	}
 	// Here and down
 	if opts.Sort != "" {
 		pss, err = sortPsOutput(opts.Sort, pss)

@@ -376,8 +379,8 @@ func psDisplay(c *cliconfig.PsValues, runtime *adapter.LocalRuntime) error {
 				size = units.HumanSizeWithPrecision(0, 0)
 			} else {
 				size = units.HumanSizeWithPrecision(float64(container.Size.RwSize), 3) + " (virtual " + units.HumanSizeWithPrecision(float64(container.Size.RootFsSize), 3) + ")"
-				fmt.Fprintf(w, "\t%s", size)
 			}
+			fmt.Fprintf(w, "\t%s", size)
 		}

 	} else {
@@ -305,7 +305,7 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
 		}
 		return func(c *libpod.Container) bool {
 			ec, exited, err := c.ExitCode()
-			if ec == int32(exitCode) && err == nil && exited == true {
+			if ec == int32(exitCode) && err == nil && exited {
 				return true
 			}
 			return false
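The == true comparison removed here is a typical gosimple finding: a bool already is the condition. A minimal, self-contained sketch of the pattern (the report function and its values are illustrative, not taken from Podman):

package main

import "fmt"

func report(exited bool) string {
    // Flagged form: if exited == true { ... }
    // Preferred form: use the value itself as the condition.
    if exited {
        return "exited"
    }
    return "running"
}

func main() {
    fmt.Println(report(true))
}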
@@ -611,7 +611,7 @@ func getNamespaceInfo(path string) (string, error) {

 // getStrFromSquareBrackets gets the string inside [] from a string
 func getStrFromSquareBrackets(cmd string) string {
-	reg, err := regexp.Compile(".*\\[|\\].*")
+	reg, err := regexp.Compile(`.*\[|\].*`)
 	if err != nil {
 		return ""
 	}
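Switching to a backquoted raw string literal removes the doubled backslashes that linters flag in interpreted string literals; the compiled regexp is identical. A small standalone sketch using the same pattern:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Interpreted string: every backslash must be doubled.
    escaped := regexp.MustCompile(".*\\[|\\].*")
    // Raw string: the pattern reads exactly as the regexp engine sees it.
    raw := regexp.MustCompile(`.*\[|\].*`)

    fmt.Println(escaped.ReplaceAllString("[echo hello]", ""))
    fmt.Println(raw.ReplaceAllString("[echo hello]", ""))
}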
@@ -93,9 +93,8 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
 		imageName = newImage.ID()
 	}

-	var healthCheckCommandInput string
 	// if the user disabled the healthcheck with "none", we skip adding it
-	healthCheckCommandInput = c.String("healthcheck-command")
+	healthCheckCommandInput := c.String("healthcheck-command")

 	// the user didnt disable the healthcheck but did pass in a healthcheck command
 	// now we need to make a healthcheck from the commandline input
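Hunks like this one merge a var declaration and the following assignment into a single short variable declaration, since the intermediate zero value was never used. An illustrative sketch (the value assigned here is made up):

package main

import "fmt"

func main() {
    // Before: var healthCheckCommandInput string
    //         healthCheckCommandInput = "CMD-SHELL curl -f http://localhost || exit 1"
    // After: declare and assign in one step.
    healthCheckCommandInput := "CMD-SHELL curl -f http://localhost || exit 1"
    fmt.Println(healthCheckCommandInput)
}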
@@ -133,7 +133,7 @@ func verifyContainerResources(config *cc.CreateConfig, update bool) ([]string, e
 	if config.Resources.KernelMemory > 0 && config.Resources.KernelMemory < linuxMinMemory {
 		return warnings, fmt.Errorf("minimum kernel memory limit allowed is 4MB")
 	}
-	if config.Resources.DisableOomKiller == true && !sysInfo.OomKillDisable {
+	if config.Resources.DisableOomKiller && !sysInfo.OomKillDisable {
 		// only produce warnings if the setting wasn't to *disable* the OOM Kill; no point
 		// warning the caller if they already wanted the feature to be off
 		warnings = addWarning(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.")

@@ -101,9 +101,8 @@ func statsCmd(c *cliconfig.StatsValues) error {
 	}

 	var ctrs []*libpod.Container
-	var containerFunc func() ([]*libpod.Container, error)

-	containerFunc = runtime.GetRunningContainers
+	containerFunc := runtime.GetRunningContainers
 	if len(c.InputArgs) > 0 {
 		containerFunc = func() ([]*libpod.Container, error) { return runtime.GetContainersByList(c.InputArgs) }
 	} else if latest {

@@ -107,7 +107,7 @@ func printImageChildren(layerMap map[string]*image.LayerInfo, layerID string, pr
 	if !ok {
 		return fmt.Errorf("lookup error: layerid %s, not found", layerID)
 	}
-	fmt.Printf(prefix)
+	fmt.Print(prefix)

 	//initialize intend with middleItem to reduce middleItem checks.
 	intend := middleItem
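fmt.Printf(prefix) passes a variable as the format string, so any % inside it would be parsed as a formatting verb; linters flag this, and fmt.Print writes the value verbatim. A sketch with an illustrative prefix value:

package main

import "fmt"

func main() {
    prefix := "├── 100%"
    // fmt.Printf(prefix) would treat "%" as the start of a verb and print "100%!(NOVERB)".
    fmt.Print(prefix) // prints the prefix exactly as-is
    fmt.Println()
}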
@@ -639,10 +639,7 @@ func (c *Container) HostsAdd() []string {
 // trigger some OCI hooks.
 func (c *Container) UserVolumes() []string {
 	volumes := make([]string, 0, len(c.config.UserVolumes))
-	for _, vol := range c.config.UserVolumes {
-		volumes = append(volumes, vol)
-	}
-
+	volumes = append(volumes, c.config.UserVolumes...)
 	return volumes
 }

@@ -650,10 +647,7 @@ func (c *Container) UserVolumes() []string {
 // This is not added to the spec, but is instead used during image commit.
 func (c *Container) Entrypoint() []string {
 	entrypoint := make([]string, 0, len(c.config.Entrypoint))
-	for _, str := range c.config.Entrypoint {
-		entrypoint = append(entrypoint, str)
-	}
-
+	entrypoint = append(entrypoint, c.config.Entrypoint...)
 	return entrypoint
 }

@@ -661,10 +655,7 @@ func (c *Container) Entrypoint() []string {
 // This is not added to the spec, but is instead used during image commit
 func (c *Container) Command() []string {
 	command := make([]string, 0, len(c.config.Command))
-	for _, str := range c.config.Command {
-		command = append(command, str)
-	}
-
+	command = append(command, c.config.Command...)
 	return command
 }

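These three accessors replace an element-by-element copy loop with one variadic append, the form gosimple suggests; the matching With* option setters later in this commit get the same treatment. A minimal sketch of the pattern:

package main

import "fmt"

func copySlice(src []string) []string {
    dst := make([]string, 0, len(src))
    // Before:
    //   for _, s := range src {
    //       dst = append(dst, s)
    //   }
    // After: append the whole source slice in one call.
    dst = append(dst, src...)
    return dst
}

func main() {
    fmt.Println(copySlice([]string{"/data", "/cache"}))
}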
@@ -277,7 +277,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
 				break
 			}
 		}
-		if found == true {
+		if found {
 			sessionID = stringid.GenerateNonCryptoID()
 		}
 	}

@@ -264,6 +264,4 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
 	for _, successor := range node.dependedOn {
 		startNode(ctx, successor, ctrErrored, ctrErrors, ctrsVisited, restart)
 	}
-
-	return
 }
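The dropped return was the last statement of a function with no result values, so it changed nothing; gosimple reports such redundant control flow. Sketch:

package main

import "fmt"

func greetEach(names []string) {
    for _, n := range names {
        fmt.Println("hello,", n)
    }
    // A trailing bare "return" here would be redundant and can simply be omitted.
}

func main() {
    greetEach([]string{"a", "b"})
}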
@@ -454,9 +454,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*InspectCon
 	if spec.Process != nil {
 		ctrConfig.Tty = spec.Process.Terminal
 		ctrConfig.Env = []string{}
-		for _, val := range spec.Process.Env {
-			ctrConfig.Env = append(ctrConfig.Env, val)
-		}
+		ctrConfig.Env = append(ctrConfig.Env, spec.Process.Env...)
 		ctrConfig.WorkingDir = spec.Process.Cwd
 	}

@@ -466,9 +464,7 @@ func (c *Container) generateInspectContainerConfig(spec *spec.Spec) (*InspectCon
 	// Leave empty is not explicitly overwritten by user
 	if len(c.config.Command) != 0 {
 		ctrConfig.Cmd = []string{}
-		for _, val := range c.config.Command {
-			ctrConfig.Cmd = append(ctrConfig.Cmd, val)
-		}
+		ctrConfig.Cmd = append(ctrConfig.Cmd, c.config.Command...)
 	}

 	// Leave empty if not explicitly overwritten by user

@@ -815,34 +815,6 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
 	return notRunning, nil
 }

-// Check if a container's dependencies are running
-// Returns a []string containing the IDs of dependencies that are not running
-// Assumes depencies are already locked, and will be passed in
-// Accepts a map[string]*Container containing, at a minimum, the locked
-// dependency containers
-// (This must be a map from container ID to container)
-func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container) ([]string, error) {
-	deps := c.Dependencies()
-	notRunning := []string{}
-
-	for _, dep := range deps {
-		depCtr, ok := depCtrs[dep]
-		if !ok {
-			return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s but it is not on containers passed to checkDependenciesRunning", c.ID(), dep)
-		}
-
-		if err := c.syncContainer(); err != nil {
-			return nil, err
-		}
-
-		if depCtr.state.State != define.ContainerStateRunning {
-			notRunning = append(notRunning, dep)
-		}
-	}
-
-	return notRunning, nil
-}
-
 func (c *Container) completeNetworkSetup() error {
 	netDisabled, err := c.NetworkDisabled()
 	if err != nil {
@@ -1,7 +1,6 @@
 package events

 import (
-	"fmt"
 	"strings"
 	"time"

@@ -23,7 +22,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error
 		}, nil
 	case "EVENT", "STATUS":
 		return func(e *Event) bool {
-			return fmt.Sprintf("%s", e.Status) == filterValue
+			return string(e.Status) == filterValue
 		}, nil
 	case "IMAGE":
 		return func(e *Event) bool {

@@ -54,7 +53,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error
 		}, nil
 	case "TYPE":
 		return func(e *Event) bool {
-			return fmt.Sprintf("%s", e.Type) == filterValue
+			return string(e.Type) == filterValue
 		}, nil
 	}
 	return nil, errors.Errorf("%s is an invalid filter", filter)
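Status and Type here are string-backed types, so fmt.Sprintf("%s", ...) only re-creates a string that a plain conversion already yields; staticcheck flags the Sprintf form, and dropping it also lets the "fmt" import go away above. A sketch with a stand-in string-backed type:

package main

import "fmt"

// Status stands in for a string-backed type such as the events status.
type Status string

func matches(s Status, filterValue string) bool {
    // Before: fmt.Sprintf("%s", s) == filterValue
    // After: a direct conversion does the same thing without formatting overhead.
    return string(s) == filterValue
}

func main() {
    fmt.Println(matches(Status("start"), "start"))
}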
@@ -17,7 +17,6 @@ func (e EventToNull) Read(options ReadOptions) error {
 // NewNullEventer returns a new null eventer. You should only do this for
 // the purposes on internal libpod testing.
 func NewNullEventer() Eventer {
-	var e Eventer
-	e = EventToNull{}
+	e := EventToNull{}
 	return e
 }

@@ -62,7 +62,7 @@ func (c *Container) createTimer() error {
 	if rootless.IsRootless() {
 		cmd = append(cmd, "--user")
 	}
-	cmd = append(cmd, "--unit", fmt.Sprintf("%s", c.ID()), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID())
+	cmd = append(cmd, "--unit", c.ID(), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID())

 	conn, err := getConnection()
 	if err != nil {

@@ -461,7 +461,11 @@ func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.Sys
 	if err != nil {
 		return "", err
 	}
-	defer newImg.Close()
+	defer func() {
+		if err := newImg.Close(); err != nil {
+			logrus.Errorf("failed to close image: %q", err)
+		}
+	}()
 	imageDigest := newImg.ConfigInfo().Digest
 	if err = imageDigest.Validate(); err != nil {
 		return "", errors.Wrapf(err, "error getting config info")
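Most of the remaining hunks follow this shape: defer x.Close() discards the error returned by Close, which errcheck-style checks report, so the call is wrapped in a deferred closure that at least logs the failure. A self-contained sketch of the pattern (the path and log wording are illustrative, and the sketch uses the standard library log package instead of logrus):

package main

import (
    "log"
    "os"
)

func readConfig(path string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    // Before: defer f.Close() silently drops the error from Close.
    // After: log the error so it is at least visible.
    defer func() {
        if err := f.Close(); err != nil {
            log.Printf("failed to close %s: %q", path, err)
        }
    }()

    // ... read from f ...
    return nil
}

func main() {
    if err := readConfig("/etc/hostname"); err != nil {
        log.Println(err)
    }
}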
@@ -513,7 +517,7 @@ func (i *Image) TagImage(tag string) error {
 	if err := i.reloadImage(); err != nil {
 		return err
 	}
-	defer i.newImageEvent(events.Tag)
+	i.newImageEvent(events.Tag)
 	return nil
 }

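This hunk, and the matching ones for Untag, Push, Import, Save, and Pull below, drop the defer in front of newImageEvent: the call was already the final statement before return nil, so deferring it only delayed the event without guarding any cleanup. A hedged sketch of the shape (emitEvent is a stand-in, not the Podman API):

package main

import "fmt"

func emitEvent(name string) { fmt.Println("event:", name) }

func tag() error {
    // Before: defer emitEvent("tag"); return nil
    // After: just call it; there is nothing to clean up and no early return left to guard.
    emitEvent("tag")
    return nil
}

func main() {
    _ = tag()
}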
@@ -538,7 +542,7 @@ func (i *Image) UntagImage(tag string) error {
 	if err := i.reloadImage(); err != nil {
 		return err
 	}
-	defer i.newImageEvent(events.Untag)
+	i.newImageEvent(events.Untag)
 	return nil
 }

@@ -574,7 +578,11 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere
 	if err != nil {
 		return err
 	}
-	defer policyContext.Destroy()
+	defer func() {
+		if err := policyContext.Destroy(); err != nil {
+			logrus.Errorf("failed to destroy policy context: %q", err)
+		}
+	}()

 	// Look up the source image, expecting it to be in local storage
 	src, err := is.Transport.ParseStoreReference(i.imageruntime.store, i.ID())

@@ -588,7 +596,7 @@ func (i *Image) PushImageToReference(ctx context.Context, dest types.ImageRefere
 	if err != nil {
 		return errors.Wrapf(err, "Error copying image to the remote destination")
 	}
-	defer i.newImageEvent(events.Push)
+	i.newImageEvent(events.Push)
 	return nil
 }

@@ -984,11 +992,15 @@ func (ir *Runtime) Import(ctx context.Context, path, reference string, writer io
 	if err != nil {
 		return nil, err
 	}
-	defer policyContext.Destroy()
+	defer func() {
+		if err := policyContext.Destroy(); err != nil {
+			logrus.Errorf("failed to destroy policy context: %q", err)
+		}
+	}()
 	copyOptions := getCopyOptions(sc, writer, nil, nil, signingOptions, "", nil)
 	dest, err := is.Transport.ParseStoreReference(ir.store, reference)
 	if err != nil {
-		errors.Wrapf(err, "error getting image reference for %q", reference)
+		return nil, errors.Wrapf(err, "error getting image reference for %q", reference)
 	}
 	_, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
 	if err != nil {
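The second change in the hunk above is an errcheck catch: errors.Wrapf built a wrapped error whose result was discarded, silently swallowing the failure; returning it restores the error path. A sketch with illustrative names:

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

func parseReference(ref string) (string, error) {
    if ref == "" {
        err := fmt.Errorf("empty reference")
        // Before: errors.Wrapf(err, "error getting image reference for %q", ref)
        //         built the wrapped error and threw it away.
        // After: return it so the caller sees the failure.
        return "", errors.Wrapf(err, "error getting image reference for %q", ref)
    }
    return ref, nil
}

func main() {
    if _, err := parseReference(""); err != nil {
        fmt.Println(err)
    }
}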
@@ -996,7 +1008,7 @@ func (ir *Runtime) Import(ctx context.Context, path, reference string, writer io
 	}
 	newImage, err := ir.NewFromLocal(reference)
 	if err == nil {
-		defer newImage.newImageEvent(events.Import)
+		newImage.newImageEvent(events.Import)
 	}
 	return newImage, err
 }

@@ -1339,7 +1351,7 @@ func (i *Image) Save(ctx context.Context, source, format, output string, moreTag
 	if err := i.PushImageToReference(ctx, destRef, manifestType, "", "", writer, compress, SigningOptions{}, &DockerRegistryOptions{}, additionaltags); err != nil {
 		return errors.Wrapf(err, "unable to save %q", source)
 	}
-	defer i.newImageEvent(events.Save)
+	i.newImageEvent(events.Save)
 	return nil
 }

@@ -249,7 +249,11 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
 	if err != nil {
 		return nil, err
 	}
-	defer policyContext.Destroy()
+	defer func() {
+		if err := policyContext.Destroy(); err != nil {
+			logrus.Errorf("failed to destroy policy context: %q", err)
+		}
+	}()

 	systemRegistriesConfPath := registries.SystemRegistriesConfPath()

@@ -304,7 +308,7 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
 		return nil, pullErrors
 	}
 	if len(images) > 0 {
-		defer ir.newImageEvent(events.Pull, images[0])
+		ir.newImageEvent(events.Pull, images[0])
 	}
 	return images, nil
 }

@@ -1,7 +1,6 @@
 package libpod

 import (
-	"fmt"
 	"math/rand"
 	"os"
 	"strconv"

@@ -179,7 +178,7 @@ func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.
 	labels["app"] = removeUnderscores(podName)
 	om := v12.ObjectMeta{
 		// The name of the pod is container_name-libpod
-		Name:   fmt.Sprintf("%s", removeUnderscores(podName)),
+		Name:   removeUnderscores(podName),
 		Labels: labels,
 		// CreationTimestamp seems to be required, so adding it; in doing so, the timestamp
 		// will reflect time this is run (not container create time) because the conversion
@@ -156,8 +156,5 @@ func NewLogLine(line string) (*LogLine, error) {

 // Partial returns a bool if the log line is a partial log type
 func (l *LogLine) Partial() bool {
-	if l.ParseLogType == PartialLogType {
-		return true
-	}
-	return false
+	return l.ParseLogType == PartialLogType
 }
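An if that returns true followed by a trailing return false collapses into returning the comparison itself, another standard gosimple simplification. A standalone sketch that mirrors (but does not copy) the LogLine types:

package main

import "fmt"

const PartialLogType = "P"

type LogLine struct{ ParseLogType string }

// Partial reports whether the line is a partial log entry.
func (l *LogLine) Partial() bool {
    // Before:
    //   if l.ParseLogType == PartialLogType {
    //       return true
    //   }
    //   return false
    return l.ParseLogType == PartialLogType
}

func main() {
    fmt.Println((&LogLine{ParseLogType: "P"}).Partial())
}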
@@ -5,7 +5,6 @@ package libpod
 import (
 	"crypto/rand"
 	"fmt"
-	"github.com/containers/libpod/pkg/errorhandling"
 	"net"
 	"os"
 	"os/exec"

@@ -17,6 +16,7 @@ import (

 	cnitypes "github.com/containernetworking/cni/pkg/types/current"
 	"github.com/containernetworking/plugins/pkg/ns"
+	"github.com/containers/libpod/pkg/errorhandling"
 	"github.com/containers/libpod/pkg/firewall"
 	"github.com/containers/libpod/pkg/netns"
 	"github.com/containers/libpod/pkg/rootless"

@@ -151,8 +151,8 @@ func checkSlirpFlags(path string) (bool, bool, error) {

 // Configure the network namespace for a rootless container
 func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
-	defer ctr.rootlessSlirpSyncR.Close()
-	defer ctr.rootlessSlirpSyncW.Close()
+	defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncR)
+	defer errorhandling.CloseQuiet(ctr.rootlessSlirpSyncW)

 	path := r.config.NetworkCmdPath

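Where a close error has nowhere useful to go, the commit uses the errorhandling.CloseQuiet helper instead of repeating the deferred closure. A hedged sketch of what such a helper can look like; this is an assumption about its shape, not the Podman implementation:

package main

import (
    "os"

    "github.com/sirupsen/logrus"
)

// closeQuiet mirrors the idea of errorhandling.CloseQuiet: close a file and
// log (rather than return) any error, so it can be used directly in defer.
func closeQuiet(f *os.File) {
    if err := f.Close(); err != nil {
        logrus.Errorf("unable to close %s: %q", f.Name(), err)
    }
}

func main() {
    f, err := os.Open("/etc/hostname")
    if err != nil {
        logrus.Fatal(err)
    }
    defer closeQuiet(f)
    // ... use f ...
}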
@@ -201,7 +201,11 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 	if err := cmd.Start(); err != nil {
 		return errors.Wrapf(err, "failed to start slirp4netns process")
 	}
-	defer cmd.Process.Release()
+	defer func() {
+		if err := cmd.Process.Release(); err != nil {
+			logrus.Errorf("unable to release comman process: %q", err)
+		}
+	}()

 	b := make([]byte, 16)
 	for {

@@ -268,7 +272,11 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
 		if err != nil {
 			return errors.Wrapf(err, "cannot open connection to %s", apiSocket)
 		}
-		defer conn.Close()
+		defer func() {
+			if err := conn.Close(); err != nil {
+				logrus.Errorf("unable to close connection: %q", err)
+			}
+		}()
 		hostIP := i.HostIP
 		if hostIP == "" {
 			hostIP = "0.0.0.0"

@@ -273,7 +273,9 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
 		}
 		return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
 	}
-	defer cmd.Wait()
+	defer func() {
+		_ = cmd.Wait()
+	}()

 	if err := errPipe.Close(); err != nil {
 		return err

@@ -124,7 +124,11 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
 		if err = unix.Unshare(unix.CLONE_NEWNS); err != nil {
 			return err
 		}
-		defer unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS)
+		defer func() {
+			if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
+				logrus.Errorf("unable to clone new namespace: %q", err)
+			}
+		}()

 		// don't spread our mounts around. We are setting only /sys to be slave
 		// so that the cleanup process is still able to umount the storage and the

@@ -376,7 +380,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
 		errorhandling.CloseQuiet(childPipe)
 		return err
 	}
-	defer cmd.Wait()
+	defer func() {
+		_ = cmd.Wait()
+	}()

 	// We don't need childPipe on the parent side
 	if err := childPipe.Close(); err != nil {

@@ -1152,10 +1152,7 @@ func WithUserVolumes(volumes []string) CtrCreateOption {
 		}

 		ctr.config.UserVolumes = make([]string, 0, len(volumes))
-		for _, vol := range volumes {
-			ctr.config.UserVolumes = append(ctr.config.UserVolumes, vol)
-		}
-
+		ctr.config.UserVolumes = append(ctr.config.UserVolumes, volumes...)
 		return nil
 	}
 }

@@ -1172,10 +1169,7 @@ func WithEntrypoint(entrypoint []string) CtrCreateOption {
 		}

 		ctr.config.Entrypoint = make([]string, 0, len(entrypoint))
-		for _, str := range entrypoint {
-			ctr.config.Entrypoint = append(ctr.config.Entrypoint, str)
-		}
-
+		ctr.config.Entrypoint = append(ctr.config.Entrypoint, entrypoint...)
 		return nil
 	}
 }

@@ -1192,10 +1186,7 @@ func WithCommand(command []string) CtrCreateOption {
 		}

 		ctr.config.Command = make([]string, 0, len(command))
-		for _, str := range command {
-			ctr.config.Command = append(ctr.config.Command, str)
-		}
-
+		ctr.config.Command = append(ctr.config.Command, command...)
 		return nil
 	}
 }

@@ -4,16 +4,19 @@ package adapter

 import (
 	"context"
-	"github.com/containers/libpod/libpod"
-	"github.com/containers/libpod/libpod/image"
-	"github.com/containers/storage/pkg/archive"
-	jsoniter "github.com/json-iterator/go"
-	spec "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/pkg/errors"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"

+	"github.com/containers/libpod/libpod"
+	"github.com/containers/libpod/libpod/image"
+	"github.com/containers/libpod/pkg/errorhandling"
+	"github.com/containers/storage/pkg/archive"
+	jsoniter "github.com/json-iterator/go"
+	spec "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )

 // Prefixing the checkpoint/restore related functions with 'cr'

@@ -25,7 +28,7 @@ func crImportFromJSON(filePath string, v interface{}) error {
 	if err != nil {
 		return errors.Wrapf(err, "Failed to open container definition %s for restore", filePath)
 	}
-	defer jsonFile.Close()
+	defer errorhandling.CloseQuiet(jsonFile)

 	content, err := ioutil.ReadAll(jsonFile)
 	if err != nil {

@@ -48,7 +51,7 @@ func crImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input stri
 	if err != nil {
 		return nil, errors.Wrapf(err, "Failed to open checkpoint archive %s for import", input)
 	}
-	defer archiveFile.Close()
+	defer errorhandling.CloseQuiet(archiveFile)
 	options := &archive.TarOptions{
 		// Here we only need the files config.dump and spec.dump
 		ExcludePatterns: []string{
@@ -62,15 +65,19 @@ func crImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input stri
 	if err != nil {
 		return nil, err
 	}
-	defer os.RemoveAll(dir)
+	defer func() {
+		if err := os.RemoveAll(dir); err != nil {
+			logrus.Errorf("could not recursively remove %s: %q", dir, err)
+		}
+	}()
 	err = archive.Untar(archiveFile, dir, options)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Unpacking of checkpoint archive %s failed", input)
 	}

 	// Load spec.dump from temporary directory
-	spec := new(spec.Spec)
-	if err := crImportFromJSON(filepath.Join(dir, "spec.dump"), spec); err != nil {
+	dumpSpec := new(spec.Spec)
+	if err := crImportFromJSON(filepath.Join(dir, "spec.dump"), dumpSpec); err != nil {
 		return nil, err
 	}

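Renaming the local spec variable to dumpSpec stops it from shadowing the imported spec package alias, so later references such as spec.Spec stay unambiguous. A sketch with illustrative field use:

package main

import (
    "fmt"

    spec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    // Before: spec := new(spec.Spec) shadowed the package alias, so "spec.X"
    // afterwards referred to the variable, not the package.
    // After: pick a distinct name for the value.
    dumpSpec := new(spec.Spec)
    dumpSpec.Version = spec.Version // spec still refers to the package here
    fmt.Println(dumpSpec.Version)
}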
@@ -112,7 +119,7 @@ func crImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input stri
 	}

 	// Now create a new container from the just loaded information
-	container, err := runtime.RestoreContainer(ctx, spec, config)
+	container, err := runtime.RestoreContainer(ctx, dumpSpec, config)
 	if err != nil {
 		return nil, err
 	}

@@ -127,7 +134,7 @@ func crImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input stri
 		return nil, errors.Errorf("Name of restored container (%s) does not match requested name (%s)", containerConfig.Name, ctrName)
 	}

-	if newName == false {
+	if !newName {
 		// Only check ID for a restore with the same name.
 		// Using -n to request a new name for the restored container, will also create a new ID
 		if containerConfig.ID != ctrID {

@@ -213,8 +213,8 @@ func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmVa
 		c := c

 		pool.Add(shared.Job{
-			c.ID(),
-			func() error {
+			ID: c.ID(),
+			Fn: func() error {
 				err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes)
 				if err != nil {
 					logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
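Adding the ID: and Fn: keys turns a positional composite literal into a keyed one; go vet flags unkeyed literals of structs from other packages because a field reordering would silently change their meaning. A sketch with a stand-in Job type (the real shared.Job may have more fields):

package main

import "fmt"

// Job stands in for the shared.Job type used by the worker pool.
type Job struct {
    ID string
    Fn func() error
}

func main() {
    // Before: Job{"abc123", func() error { return nil }} -- positional, brittle.
    // After: keyed fields survive reordering and read better.
    j := Job{
        ID: "abc123",
        Fn: func() error { return nil },
    }
    fmt.Println(j.ID, j.Fn())
}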
@@ -70,8 +70,9 @@ func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneVal
 	for _, p := range pods {
 		p := p

-		pool.Add(shared.Job{p.ID(),
-			func() error {
+		pool.Add(shared.Job{
+			ID: p.ID(),
+			Fn: func() error {
 				err := r.Runtime.RemovePod(ctx, p, cli.Force, cli.Force)
 				if err != nil {
 					logrus.Debugf("Failed to remove pod %s: %s", p.ID(), err.Error())

@@ -359,9 +359,6 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
 	if eventsError != nil {
 		return eventsError
 	}
-	if err != nil {
-		return errors.Wrapf(err, "unable to tail the events log")
-	}
 	w := bufio.NewWriter(os.Stdout)
 	for event := range eventChannel {
 		if len(c.Format) > 0 {

@@ -39,7 +39,11 @@ func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr,
 			return err
 		}

-		defer restoreTerminal(oldTermState)
+		defer func() {
+			if err := restoreTerminal(oldTermState); err != nil {
+				logrus.Errorf("unable to restore terminal: %q", err)
+			}
+		}()
 	}

 	streams := new(libpod.AttachStreams)

@@ -28,6 +28,7 @@ import (
 	"sync"

 	"github.com/containernetworking/plugins/pkg/ns"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )

@@ -90,7 +91,9 @@ func NewNS() (ns.NetNS, error) {
 	// Ensure the mount point is cleaned up on errors; if the namespace
 	// was successfully mounted this will have no effect because the file
 	// is in-use
-	defer os.RemoveAll(nsPath)
+	defer func() {
+		_ = os.RemoveAll(nsPath)
+	}()

 	var wg sync.WaitGroup
 	wg.Add(1)

@@ -109,7 +112,11 @@ func NewNS() (ns.NetNS, error) {
 		if err != nil {
 			return
 		}
-		defer origNS.Close()
+		defer func() {
+			if err := origNS.Close(); err != nil {
+				logrus.Errorf("unable to close namespace: %q", err)
+			}
+		}()

 		// create a new netns on the current thread
 		err = unix.Unshare(unix.CLONE_NEWNET)

@@ -118,7 +125,11 @@ func NewNS() (ns.NetNS, error) {
 		}

 		// Put this thread back to the orig ns, since it might get reused (pre go1.10)
-		defer origNS.Set()
+		defer func() {
+			if err := origNS.Set(); err != nil {
+				logrus.Errorf("unable to set namespace: %q", err)
+			}
+		}()

 		// bind mount the netns from the current thread (from /proc) onto the
 		// mount point. This causes the namespace to persist, even when there

@@ -220,7 +220,11 @@ func EnableLinger() (string, error) {

 	conn, err := dbus.SystemBus()
 	if err == nil {
-		defer conn.Close()
+		defer func() {
+			if err := conn.Close(); err != nil {
+				logrus.Errorf("unable to close dbus connection: %q", err)
+			}
+		}()
 	}

 	lingerEnabled := false

@@ -310,13 +314,21 @@ func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) {
 	if err != nil {
 		return false, -1, err
 	}
-	defer userNS.Close()
+	defer func() {
+		if err := userNS.Close(); err != nil {
+			logrus.Errorf("unable to close namespace: %q", err)
+		}
+	}()

 	mountNS, err := os.Open(fmt.Sprintf("/proc/%d/ns/mnt", pid))
 	if err != nil {
 		return false, -1, err
 	}
-	defer userNS.Close()
+	defer func() {
+		if err := mountNS.Close(); err != nil {
+			logrus.Errorf("unable to close namespace: %q", err)
+		}
+	}()

 	fd, err := getUserNSFirstChild(userNS.Fd())
 	if err != nil {

@@ -364,7 +376,11 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool,

 	defer errorhandling.CloseQuiet(r)
 	defer errorhandling.CloseQuiet(w)
-	defer w.Write([]byte("0"))
+	defer func() {
+		if _, err := w.Write([]byte("0")); err != nil {
+			logrus.Errorf("failed to write byte 0: %q", err)
+		}
+	}()

 	pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid, cFileToRead, fileOutputFD)
 	pid := int(pidC)