mirror of https://github.com/containers/podman.git
golangci-lint round #3
This is the third round of preparing to use golangci-lint on our code base.

Signed-off-by: baude <bbaude@redhat.com>
.golangci.yml (new file, 55 lines added)
@@ -0,0 +1,55 @@
+---
+run:
+  build-tags:
+    - apparmor
+    - ostree
+    - seccomp
+    - selinux
+  concurrency: 6
+  deadline: 5m
+  skip-dirs:
+    - dependencies/*
+    - contrib
+    - test/e2e
+    - docs
+    - test/
+    - tracing
+  skip-files:
+    - iopodman.go
+linters:
+  disable-all: true
+  enable:
+    - bodyclose
+    - deadcode
+    - depguard
+    # dupl really overdid it; disabling
+    # - dupl
+    - errcheck
+    - gofmt
+    - gosimple
+    - govet
+    - ineffassign
+    - nakedret
+    - staticcheck
+    - structcheck
+    - typecheck
+    - unused
+    - varcheck
+    # - gochecknoglobals
+    # - gochecknoinits
+    # - goconst
+    # - gocritic
+    # - gocyclo
+    # - goimports
+    # - golint
+    # - gosec
+    - interfacer
+    # - lll
+    # - maligned
+    # - misspell
+    # - prealloc
+    - scopelint
+    - stylecheck
+    - unconvert
+    # I think we should uncomment this one and used it
+    # - unparam
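The run.build-tags entries matter because golangci-lint only analyzes files whose build constraints are satisfied. A minimal sketch (not from the repository) of a tag-guarded file that the tags above pull into the lint run:

// +build seccomp

package example

// With "seccomp" listed under run.build-tags, golangci-lint type-checks
// and lints this file; without the tag it would simply be skipped.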
@@ -313,7 +313,7 @@ func buildCmd(c *cliconfig.BuildValues) error {
 // the urfavecli Tail method for args
 func Tail(a []string) []string {
     if len(a) >= 2 {
-        return []string(a)[1:]
+        return a[1:]
     }
     return []string{}
 }
@@ -122,7 +122,7 @@ func copyBetweenHostAndContainer(runtime *libpod.Runtime, src string, dest strin
         if errors.Cause(err) != define.ErrCtrStateInvalid {
             return err
         }
-    } else if err == nil {
+    } else {
         // Only add the defer if we actually paused
         defer func() {
             if err := ctr.Unpause(); err != nil {
@@ -486,10 +486,7 @@ func matchVolumePath(path, target string) bool {
     for len(pathStr) > len(target) && strings.Contains(pathStr, string(os.PathSeparator)) {
         pathStr = pathStr[:strings.LastIndex(pathStr, string(os.PathSeparator))]
     }
-    if pathStr == target {
-        return true
-    }
-    return false
+    return pathStr == target
 }
 
 func pathWithBindMountSource(m specs.Mount, path string) (string, error) {
@@ -136,5 +136,5 @@ func diffCmd(c *cliconfig.DiffValues) error {
     } else {
         out = stdoutStruct{output: diffOutput}
     }
-    return formats.Writer(out).Out()
+    return out.Out()
 }
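The formats.Writer(out).Out() to out.Out() change repeats throughout this commit. A hedged sketch of why the wrapper can be dropped, assuming the helper in cmd/podman/formats essentially returns the Writer it is given (an assumption about that package, not something shown in this diff):

package example

// Writer is a stand-in for the interface assumed to live in the
// cmd/podman/formats package.
type Writer interface {
	Out() error
}

// wrap mirrors the assumed behaviour of formats.Writer: identity.
func wrap(f Writer) Writer { return f }

func emit(out Writer) error {
	// wrap(out).Out() and out.Out() are then equivalent, which is why the
	// patch calls Out directly on values that already satisfy Writer.
	return out.Out()
}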
@@ -141,11 +141,12 @@ func (h *historyTemplateParams) headerMap() map[string]string {
 }
 
 // getHistorytemplateOutput gets the modified history information to be printed in human readable format
-func getHistoryTemplateOutput(history []*image.History, opts historyOptions) (historyOutput []historyTemplateParams) {
+func getHistoryTemplateOutput(history []*image.History, opts historyOptions) []historyTemplateParams {
     var (
-        outputSize  string
-        createdTime string
-        createdBy   string
+        outputSize    string
+        createdTime   string
+        createdBy     string
+        historyOutput []historyTemplateParams
     )
     for _, hist := range history {
         imageID := hist.ID
@@ -175,7 +176,7 @@ func getHistoryTemplateOutput(history []*image.History, opts historyOptions) (hi
         }
         historyOutput = append(historyOutput, params)
     }
-    return
+    return historyOutput
 }
 
 // generateHistoryOutput generates the history based on the format given
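The history hunks above replace a named result plus a naked return with a locally declared slice and an explicit return, which is what the enabled nakedret linter asks for in longer functions. A small illustration (not from the patch):

package example

// Returning the slice explicitly keeps the returned value visible at the
// return site instead of relying on a named result and a bare `return`.
func squares(n int) []int {
	var out []int
	for i := 0; i < n; i++ {
		out = append(out, i*i)
	}
	return out
}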
@@ -194,5 +195,5 @@ func generateHistoryOutput(history []*image.History, opts historyOptions) error
         out = formats.StdoutTemplateArray{Output: historyToGeneric(historyOutput, []*image.History{}), Template: opts.format, Fields: historyOutput[0].headerMap()}
     }
 
-    return formats.Writer(out).Out()
+    return out.Out()
 }
@@ -248,7 +248,8 @@ func sortImagesOutput(sortBy string, imagesOutput imagesSorted) imagesSorted {
 }
 
 // getImagesTemplateOutput returns the images information to be printed in human readable format
-func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerImage, opts imagesOptions) (imagesOutput imagesSorted) {
+func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerImage, opts imagesOptions) imagesSorted {
+    var imagesOutput imagesSorted
     for _, img := range images {
         // If all is false and the image doesn't have a name, check to see if the top layer of the image is a parent
         // to another image's top layer. If it is, then it is an intermediate image so don't print out if the --all flag
@@ -305,7 +306,7 @@ func getImagesTemplateOutput(ctx context.Context, images []*adapter.ContainerIma
 
     // Sort images by created time
     sortImagesOutput(opts.sort, imagesOutput)
-    return
+    return imagesOutput
 }
 
 // getImagesJSONOutput returns the images information in its raw form
@@ -346,7 +347,7 @@ func generateImagesOutput(ctx context.Context, images []*adapter.ContainerImage,
         imagesOutput := getImagesTemplateOutput(ctx, images, opts)
         out = formats.StdoutTemplateArray{Output: imagesToGeneric(imagesOutput, []imagesJSONParams{}), Template: opts.outputformat, Fields: templateMap}
     }
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 // GenImageOutputMap generates the map used for outputting the images header
@@ -97,7 +97,7 @@ func infoCmd(c *cliconfig.InfoValues) error {
         out = formats.StdoutTemplate{Output: info, Template: infoOutputFormat}
     }
 
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 // top-level "debug" info
@@ -127,7 +127,7 @@ func inspectCmd(c *cliconfig.InspectValues) error {
         out = formats.JSONStructArray{Output: inspectedObjects}
     }
 
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 // func iterateInput iterates the images|containers the user has requested and returns the inspect data and error
@@ -56,7 +56,7 @@ func podPauseCmd(c *cliconfig.PodPauseValues) error {
     for _, p := range pauseIDs {
         fmt.Println(p)
     }
-    if conErrors != nil && len(conErrors) > 0 {
+    if len(conErrors) > 0 {
         for ctr, err := range conErrors {
             if lastError != nil {
                 logrus.Errorf("%q", lastError)
@@ -561,5 +561,5 @@ func generatePodPsOutput(pods []*adapter.Pod, opts podPsOptions) error {
         out = formats.StdoutTemplateArray{Output: podPsToGeneric(psOutput, []podPsJSONParams{}), Template: opts.Format, Fields: psOutput[0].podHeaderMap()}
     }
 
-    return formats.Writer(out).Out()
+    return out.Out()
 }
@@ -58,7 +58,7 @@ func podRestartCmd(c *cliconfig.PodRestartValues) error {
     for _, p := range restartIDs {
         fmt.Println(p)
     }
-    if conErrors != nil && len(conErrors) > 0 {
+    if len(conErrors) > 0 {
         for ctr, err := range conErrors {
             if lastError != nil {
                 logrus.Errorf("%q", lastError)
@@ -91,24 +91,6 @@ func podStatsCmd(c *cliconfig.PodStatsValues) error {
     if err != nil {
         return errors.Wrapf(err, "unable to get a list of pods")
     }
-    // First we need to get an initial pass of pod/ctr stats (these are not printed)
-    var podStats []*adapter.PodContainerStats
-    for _, p := range pods {
-        cons, err := p.AllContainersByID()
-        if err != nil {
-            return err
-        }
-        emptyStats := make(map[string]*libpod.ContainerStats)
-        // Iterate the pods container ids and make blank stats for them
-        for _, c := range cons {
-            emptyStats[c] = &libpod.ContainerStats{}
-        }
-        ps := adapter.PodContainerStats{
-            Pod:            p,
-            ContainerStats: emptyStats,
-        }
-        podStats = append(podStats, &ps)
-    }
 
     // Create empty container stat results for our first pass
     var previousPodStats []*adapter.PodContainerStats
@@ -57,7 +57,7 @@ func podUnpauseCmd(c *cliconfig.PodUnpauseValues) error {
     for _, p := range unpauseIDs {
         fmt.Println(p)
     }
-    if conErrors != nil && len(conErrors) > 0 {
+    if len(conErrors) > 0 {
         for ctr, err := range conErrors {
             if lastError != nil {
                 logrus.Errorf("%q", lastError)
@@ -35,6 +35,7 @@ func (r *RemoteConfig) GetDefault() (*RemoteConnection, error) {
         return nil, ErrNoDefinedConnections
     }
     for _, v := range r.Connections {
+        v := v
         if len(r.Connections) == 1 {
             // if there is only one defined connection, we assume it is
             // the default whether tagged as such or not
@@ -54,6 +55,7 @@ func (r *RemoteConfig) GetRemoteConnection(name string) (*RemoteConnection, erro
         return nil, ErrNoDefinedConnections
     }
     for k, v := range r.Connections {
+        v := v
         if k == name {
             return &v, nil
         }
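The added `v := v` lines are the usual answer to scopelint's warning about capturing a loop variable: in the Go versions this code targets, the range variable is a single variable reused on every iteration, so taking its address (as GetRemoteConnection does with &v) without a copy would hand back a pointer whose contents change as the loop advances. A standalone illustration (not from the patch):

package example

// Without the per-iteration copy, every appended pointer would alias the
// same loop variable.
func pointers(values []int) []*int {
	var out []*int
	for _, v := range values {
		v := v // the copy scopelint asks for
		out = append(out, &v)
	}
	return out
}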
@@ -84,7 +84,7 @@ func searchCmd(c *cliconfig.SearchValues) error {
         return nil
     }
     out := formats.StdoutTemplateArray{Output: searchToGeneric(results), Template: format, Fields: searchHeaderMap()}
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 // searchHeaderMap returns the headers of a SearchResult.
@@ -217,7 +217,7 @@ func parseSecurityOpt(config *cc.CreateConfig, securityOpts []string, runtime *l
     } else {
         con := strings.SplitN(opt, "=", 2)
         if len(con) != 2 {
-            return fmt.Errorf("Invalid --security-opt 1: %q", opt)
+            return fmt.Errorf("invalid --security-opt 1: %q", opt)
         }
 
         switch con[0] {
@@ -228,7 +228,7 @@ func parseSecurityOpt(config *cc.CreateConfig, securityOpts []string, runtime *l
         case "seccomp":
             config.SeccompProfilePath = con[1]
         default:
-            return fmt.Errorf("Invalid --security-opt 2: %q", opt)
+            return fmt.Errorf("invalid --security-opt 2: %q", opt)
         }
     }
 }
@@ -841,7 +841,7 @@ func makeHealthCheckFromCli(c *GenericCLIResults) (*manifest.Schema2HealthConfig
     if err != nil {
         return nil, errors.Wrapf(err, "invalid healthcheck-timeout %s", inTimeout)
     }
-    if timeoutDuration < time.Duration(time.Second*1) {
+    if timeoutDuration < time.Duration(1) {
         return nil, errors.New("healthcheck-timeout must be at least 1 second")
     }
     hc.Timeout = timeoutDuration
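For reference on the units in the hunk above: time.Duration counts nanoseconds, so the old and new lower bounds differ by a factor of one billion. A clarifying sketch (not part of the patch):

package example

import "time"

const (
	oneNanosecond = time.Duration(1) // the bound in the new line
	oneSecond     = time.Second      // the bound in the old line, 1e9 ns
)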
@@ -200,7 +200,7 @@ func outputStats(stats []*libpod.ContainerStats, format string) error {
         }
         out = formats.StdoutTemplateArray{Output: statsToGeneric(outputStats, []statsOutputParams{}), Template: format, Fields: mapOfHeaders}
     }
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 func genStatsFormat(format string) string {
@@ -143,7 +143,7 @@ func generateSysDfOutput(systemDfDiskUsages []systemDfDiskUsage, format string)
         "Reclaimable": "RECLAIMABLE",
     }
     out := formats.StdoutTemplateArray{Output: systemDfDiskUsageToGeneric(systemDfDiskUsages), Template: format, Fields: systemDfHeader}
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 func getDiskUsage(ctx context.Context, runtime *libpod.Runtime, metaData dfMetaData) ([]systemDfDiskUsage, error) {
@@ -557,7 +557,7 @@ func imagesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
         return err
     }
     out := formats.StdoutTemplateArray{Output: systemDfImageVerboseDiskUsageToGeneric(imagesVerboseDiskUsage), Template: imageVerboseFormat, Fields: imageVerboseHeader}
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 func containersVerboseOutput(ctx context.Context, metaData dfMetaData) error {
@@ -579,7 +579,7 @@ func containersVerboseOutput(ctx context.Context, metaData dfMetaData) error {
         return err
     }
     out := formats.StdoutTemplateArray{Output: systemDfContainerVerboseDiskUsageToGeneric(containersVerboseDiskUsage), Template: containerVerboseFormat, Fields: containerVerboseHeader}
-    return formats.Writer(out).Out()
+    return out.Out()
 
 }
@@ -597,7 +597,7 @@ func volumesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
         return err
     }
     out := formats.StdoutTemplateArray{Output: systemDfVolumeVerboseDiskUsageToGeneric(volumesVerboseDiskUsage), Template: volumeVerboseFormat, Fields: volumeVerboseHeader}
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 func verboseOutput(ctx context.Context, metaData dfMetaData) error {
@@ -118,7 +118,7 @@ func showTrustCmd(c *cliconfig.ShowTrustValues) error {
         }
         outjson = policyJSON
         out := formats.JSONStruct{Output: outjson}
-        return formats.Writer(out).Out()
+        return out.Out()
     }
 
     showOutputMap, err := getPolicyShowOutput(policyContentStruct, systemRegistriesDirPath)
@@ -126,7 +126,7 @@ func showTrustCmd(c *cliconfig.ShowTrustValues) error {
         return errors.Wrapf(err, "could not show trust policies")
     }
     out := formats.StdoutTemplateArray{Output: showOutputMap, Template: "{{.Repo}}\t{{.Trusttype}}\t{{.GPGid}}\t{{.Sigstore}}"}
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 func setTrustCmd(c *cliconfig.SetTrustValues) error {
@@ -254,15 +254,12 @@ func getPolicyJSON(policyContentStruct trust.PolicyContent, systemRegistriesDirP
         policyJSON[repo]["type"] = repoval[0].Type
         policyJSON[repo]["transport"] = transname
         keyarr := []string{}
-        uids := []string{}
         for _, repoele := range repoval {
             if len(repoele.KeyPath) > 0 {
                 keyarr = append(keyarr, repoele.KeyPath)
-                uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...)
             }
             if len(repoele.KeyData) > 0 {
-                keyarr = append(keyarr, string(repoele.KeyData))
-                uids = append(uids, trust.GetGPGIdFromKeyData(string(repoele.KeyData))...)
+                keyarr = append(keyarr, repoele.KeyData)
             }
         }
         policyJSON[repo]["keys"] = keyarr
@@ -308,16 +305,17 @@ func getPolicyShowOutput(policyContentStruct trust.PolicyContent, systemRegistri
             Repo:      repo,
             Trusttype: repoval[0].Type,
         }
-        keyarr := []string{}
+        // TODO - keyarr is not used and I don't know its intent; commenting out for now for someone to fix later
+        //keyarr := []string{}
         uids := []string{}
         for _, repoele := range repoval {
             if len(repoele.KeyPath) > 0 {
-                keyarr = append(keyarr, repoele.KeyPath)
+                //keyarr = append(keyarr, repoele.KeyPath)
                 uids = append(uids, trust.GetGPGIdFromKeyPath(repoele.KeyPath)...)
             }
             if len(repoele.KeyData) > 0 {
-                keyarr = append(keyarr, string(repoele.KeyData))
-                uids = append(uids, trust.GetGPGIdFromKeyData(string(repoele.KeyData))...)
+                //keyarr = append(keyarr, string(repoele.KeyData))
+                uids = append(uids, trust.GetGPGIdFromKeyData(repoele.KeyData)...)
             }
         }
         tempTrustShowOutput.GPGid = strings.Join(uids, ", ")
@@ -57,7 +57,7 @@ func versionCmd(c *cliconfig.VersionValues) error {
         default:
             out = formats.StdoutTemplate{Output: clientVersion, Template: versionOutputFormat}
         }
-        return formats.Writer(out).Out()
+        return out.Out()
     }
     w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
     defer w.Flush()
@@ -238,7 +238,7 @@ func generateVolLsOutput(volumes []*adapter.Volume, opts volumeLsOptions) error
         }
         out = formats.StdoutTemplateArray{Output: volLsToGeneric(lsOutput, []volumeLsJSONParams{}), Template: opts.Format, Fields: lsOutput[0].volHeaderMap()}
     }
-    return formats.Writer(out).Out()
+    return out.Out()
 }
 
 // generateVolumeFilterFuncs returns the true if the volume matches the filter set, otherwise it returns false.
@@ -445,7 +445,7 @@ func (c *Container) specFromState() (*spec.Spec, error) {
         if err != nil {
             return nil, errors.Wrapf(err, "error reading container config")
         }
-        if err := json.Unmarshal([]byte(content), &returnSpec); err != nil {
+        if err := json.Unmarshal(content, &returnSpec); err != nil {
             return nil, errors.Wrapf(err, "error unmarshalling container config")
         }
     } else {
@@ -1030,7 +1030,7 @@ func (c *Container) StoppedByUser() (bool, error) {
 
 // NamespacePath returns the path of one of the container's namespaces
 // If the container is not running, an error will be returned
-func (c *Container) NamespacePath(ns LinuxNS) (string, error) {
+func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
     if !c.batched {
         c.lock.Lock()
         defer c.lock.Unlock()
@@ -1043,11 +1043,11 @@ func (c *Container) NamespacePath(ns LinuxNS) (string, error) {
         return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
     }
 
-    if ns == InvalidNS {
+    if linuxNS == InvalidNS {
         return "", errors.Wrapf(define.ErrInvalidArg, "invalid namespace requested from container %s", c.ID())
     }
 
-    return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, ns.String()), nil
+    return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
 }
 
 // CGroupPath returns a cgroups "path" for a given container.
@@ -115,7 +115,6 @@ func (c *Container) StartAndAttach(ctx context.Context, streams *AttachStreams,
     if err := c.prepareToStart(ctx, recursive); err != nil {
         return nil, err
     }
-
     attachChan := make(chan error)
 
     // We need to ensure that we don't return until start() fired in attach.
@@ -88,7 +88,11 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
     if err != nil {
         return errors.Wrapf(err, "failed to connect to container's attach socket: %v", socketPath)
     }
-    defer conn.Close()
+    defer func() {
+        if err := conn.Close(); err != nil {
+            logrus.Errorf("unable to close socket: %q", err)
+        }
+    }()
 
     // If starting was requested, start the container and notify when that's
     // done.
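Replacing `defer conn.Close()` with a deferred closure that logs the Close error is the pattern errcheck pushes toward, and the same shape reappears in the apparmor hunk further down. A self-contained sketch of the pattern (not from the patch):

package example

import (
	"os"

	"github.com/sirupsen/logrus"
)

func useFile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// Log the Close error instead of silently discarding it.
	defer func() {
		if err := f.Close(); err != nil {
			logrus.Errorf("unable to close %s: %q", path, err)
		}
	}()
	// ... read from f ...
	return nil
}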
@@ -1264,6 +1264,7 @@ func (c *Container) postDeleteHooks(ctx context.Context) (err error) {
         return err
     }
     for i, hook := range extensionHooks {
+        hook := hook
         logrus.Debugf("container %s: invoke poststop hook %d, path %s", c.ID(), i, hook.Path)
         var stderr, stdout bytes.Buffer
         hookErr, err := exec.Run(ctx, &hook, state, &stdout, &stderr, exec.DefaultPostKillTimeout)
@@ -1513,7 +1514,7 @@ func (c *Container) prepareCheckpointExport() (err error) {
         logrus.Debugf("generating spec for container %q failed with %v", c.ID(), err)
         return err
     }
-    if err := c.writeJSONFile(g.Spec(), "spec.dump"); err != nil {
+    if err := c.writeJSONFile(g.Config, "spec.dump"); err != nil {
         return err
     }
 
@@ -686,8 +686,8 @@ func (c *Container) importCheckpoint(input string) (err error) {
     }
 
     // Make sure the newly created config.json exists on disk
-    g := generate.NewFromSpec(c.config.Spec)
-    if err = c.saveSpec(g.Spec()); err != nil {
+    g := generate.Generator{Config: c.config.Spec}
+    if err = c.saveSpec(g.Config); err != nil {
         return errors.Wrap(err, "Saving imported container specification for restore failed")
     }
 
@@ -814,7 +814,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
     }
 
     // Save the OCI spec to disk
-    if err := c.saveSpec(g.Spec()); err != nil {
+    if err := c.saveSpec(g.Config); err != nil {
         return err
     }
 
@@ -95,7 +95,7 @@ var (
 
     // ErrOSNotSupported indicates the function is not available on the particular
     // OS.
-    ErrOSNotSupported = errors.New("No support for this OS yet")
+    ErrOSNotSupported = errors.New("no support for this OS yet")
 
     // ErrOCIRuntime indicates an error from the OCI runtime
     ErrOCIRuntime = errors.New("OCI runtime error")
@@ -230,7 +230,7 @@ func (c *Container) updateHealthCheckLog(hcl HealthCheckLog, inStartPeriod bool)
         // increment failing streak
         healthCheck.FailingStreak = healthCheck.FailingStreak + 1
         // if failing streak > retries, then status to unhealthy
-        if int(healthCheck.FailingStreak) >= c.HealthCheckConfig().Retries {
+        if healthCheck.FailingStreak >= c.HealthCheckConfig().Retries {
             healthCheck.Status = HealthCheckUnhealthy
         }
     }
@@ -217,21 +217,18 @@ func ParseSearchFilter(filter []string) (*SearchFilter, error) {
                 return nil, errors.Wrapf(err, "incorrect value type for stars filter")
             }
             sFilter.Stars = stars
-            break
         case "is-automated":
             if len(arr) == 2 && arr[1] == "false" {
                 sFilter.IsAutomated = types.OptionalBoolFalse
             } else {
                 sFilter.IsAutomated = types.OptionalBoolTrue
             }
-            break
         case "is-official":
             if len(arr) == 2 && arr[1] == "false" {
                 sFilter.IsOfficial = types.OptionalBoolFalse
             } else {
                 sFilter.IsOfficial = types.OptionalBoolTrue
             }
-            break
         default:
             return nil, errors.Errorf("invalid filter type %q", f)
         }
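All of the deleted `break` statements here and in the later hunks are redundant for the same reason: Go switch cases do not fall through, so a case body ends on its own and `fallthrough` must be spelled out when it is wanted. A minimal illustration (not from the patch):

package example

func kind(proto string) string {
	switch proto {
	case "tcp":
		return "stream" // no break needed; the case ends here
	case "udp":
		return "datagram"
	default:
		return "unknown"
	}
}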
@@ -155,6 +155,7 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
         // Deduplicate volumes, so if containers in the pod share a volume, it's only
         // listed in the volumes section once
         for _, vol := range volumes {
+            vol := vol
             deDupPodVolumes[vol.Name] = &vol
         }
     }
@@ -169,7 +169,6 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
                 return nil, errors.Wrapf(err, "cannot get file for UDP socket")
             }
             files = append(files, f)
-            break
 
         case "tcp":
             addr, err := net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", i.HostIP, i.HostPort))
@@ -186,13 +185,11 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
                 return nil, errors.Wrapf(err, "cannot get file for TCP socket")
             }
             files = append(files, f)
-            break
 
         case "sctp":
             if !notifySCTP {
                 notifySCTP = true
                 logrus.Warnf("port reservation for SCTP is not supported")
             }
-            break
         default:
             return nil, fmt.Errorf("unknown protocol %s", i.Protocol)

@@ -89,7 +89,7 @@ func makeAccessible(path string, uid, gid int) error {
             continue
         }
         if st.Mode()&0111 != 0111 {
-            if err := os.Chmod(path, os.FileMode(st.Mode()|0111)); err != nil {
+            if err := os.Chmod(path, st.Mode()|0111); err != nil {
                 return err
             }
         }
@@ -432,20 +432,12 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
         // from the state elsewhere
         if !removePod {
             if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
-                if cleanupErr == nil {
-                    cleanupErr = err
-                } else {
-                    logrus.Errorf("removing container from pod: %v", err)
-                }
+                cleanupErr = err
             }
         }
     } else {
         if err := r.state.RemoveContainer(c); err != nil {
-            if cleanupErr == nil {
-                cleanupErr = err
-            } else {
-                logrus.Errorf("removing container: %v", err)
-            }
+            cleanupErr = err
         }
     }
 
@@ -201,11 +201,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
     conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
     conmonCgroup, err := cgroups.Load(conmonCgroupPath)
     if err != nil && err != cgroups.ErrCgroupDeleted {
-        if removalErr == nil {
-            removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup %s", p.ID(), conmonCgroupPath)
-        } else {
-            logrus.Errorf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
-        }
+        removalErr = errors.Wrapf(err, "error retrieving pod %s conmon cgroup %s", p.ID(), conmonCgroupPath)
     }
 
     // New resource limits
@@ -86,7 +86,7 @@ func getMemLimit(cgroupLimit uint64) uint64 {
         return cgroupLimit
     }
 
-    physicalLimit := uint64(si.Totalram)
+    physicalLimit := si.Totalram
     if cgroupLimit > physicalLimit {
         return physicalLimit
     }
@@ -4,7 +4,6 @@ package adapter
 
 import (
     "context"
-    "io"
     "io/ioutil"
     "os"
     "path/filepath"
@@ -35,7 +34,7 @@ func crImportFromJSON(filePath string, v interface{}) error {
         return errors.Wrapf(err, "Failed to read container definition %s for restore", filePath)
     }
     json := jsoniter.ConfigCompatibleWithStandardLibrary
-    if err = json.Unmarshal([]byte(content), v); err != nil {
+    if err = json.Unmarshal(content, v); err != nil {
         return errors.Wrapf(err, "Failed to unmarshal container definition %s for restore", filePath)
     }
 
@@ -106,9 +105,8 @@ func crImportCheckpoint(ctx context.Context, runtime *libpod.Runtime, input stri
     ctrName := config.Name
 
     // The code to load the images is copied from create.go
-    var writer io.Writer
     // In create.go this only set if '--quiet' does not exist.
-    writer = os.Stderr
+    writer := os.Stderr
     rtc, err := runtime.GetConfig()
     if err != nil {
         return nil, err
@@ -69,7 +69,7 @@ func (r *LocalRuntime) LookupContainer(idOrName string) (*Container, error) {
 func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) {
     var timeout *uint
     if cli.Flags().Changed("timeout") || cli.Flags().Changed("time") {
-        t := uint(cli.Timeout)
+        t := cli.Timeout
         timeout = &t
     }
 
@@ -342,7 +342,7 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
     if err := ctr.Start(ctx, c.IsSet("pod")); err != nil {
         // This means the command did not exist
         exitCode = 127
-        if strings.Index(err.Error(), "permission denied") > -1 {
+        if strings.Contains(err.Error(), "permission denied") {
             exitCode = 126
         }
         return exitCode, err
@@ -405,7 +405,7 @@ func (r *LocalRuntime) Run(ctx context.Context, c *cliconfig.RunValues, exitCode
     }
     // This means the command did not exist
     exitCode = 127
-    if strings.Index(err.Error(), "permission denied") > -1 {
+    if strings.Contains(err.Error(), "permission denied") {
         exitCode = 126
     }
     if c.IsSet("rm") {
|
||||
}
|
||||
timeout := int(ctr.StopTimeout())
|
||||
if c.StopTimeout >= 0 {
|
||||
timeout = int(c.StopTimeout)
|
||||
timeout = c.StopTimeout
|
||||
}
|
||||
name := ctr.ID()
|
||||
if c.Name {
|
||||
@ -1153,9 +1153,7 @@ func (r *LocalRuntime) Exec(c *cliconfig.ExecValues, cmd []string) error {
|
||||
for _, e := range entries {
|
||||
i, err := strconv.Atoi(e.Name())
|
||||
if err != nil {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
|
||||
}
|
||||
return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
|
||||
}
|
||||
m[i] = true
|
||||
}
|
||||
|
@@ -155,7 +155,7 @@ func (r *LocalRuntime) StopPods(ctx context.Context, cli *cliconfig.PodStopValue
 
     for _, p := range pods {
         stopped := true
-        conErrs, stopErr := p.StopWithTimeout(ctx, true, int(timeout))
+        conErrs, stopErr := p.StopWithTimeout(ctx, true, timeout)
         if stopErr != nil {
             errs = append(errs, stopErr)
             stopped = false
@@ -532,7 +532,6 @@ func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayVa
             if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
                 return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
             }
-            break
         case v1.HostPathFileOrCreate:
             if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
                 f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission)
@@ -547,7 +546,6 @@ func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayVa
             if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
                 return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
             }
-            break
         case v1.HostPathDirectory:
         case v1.HostPathFile:
         case v1.HostPathUnset:
|
||||
|
||||
// IsImageNotFound checks if the error indicates that no image was found.
|
||||
func IsImageNotFound(err error) bool {
|
||||
if errors.Cause(err) == image.ErrNoSuchImage {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return errors.Cause(err) == image.ErrNoSuchImage
|
||||
}
|
||||
|
||||
// HealthCheck is a wrapper to same named function in libpod
|
||||
|
@@ -33,6 +33,4 @@ func ProxySignals(ctr *libpod.Container) {
             }
         }
     }()
-
-    return
 }
@@ -14,6 +14,8 @@ import (
 )
 
 // StartAttachCtr starts and (if required) attaches to a container
+// if you change the signature of this function from os.File to io.Writer, it will trigger a downstream
+// error. we may need to just lint disable this one.
 func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error {
     resize := make(chan remotecommand.TerminalSize)
 
@@ -92,16 +92,24 @@ func InstallDefault(name string) error {
         return err
     }
     if err := cmd.Start(); err != nil {
-        pipe.Close()
+        if pipeErr := pipe.Close(); pipeErr != nil {
+            logrus.Errorf("unable to close apparmor pipe: %q", pipeErr)
+        }
         return err
     }
     if err := p.generateDefault(pipe); err != nil {
-        pipe.Close()
-        cmd.Wait()
+        if pipeErr := pipe.Close(); pipeErr != nil {
+            logrus.Errorf("unable to close apparmor pipe: %q", pipeErr)
+        }
+        if cmdErr := cmd.Wait(); cmdErr != nil {
+            logrus.Errorf("unable to wait for apparmor command: %q", cmdErr)
+        }
         return err
     }
 
-    pipe.Close()
+    if pipeErr := pipe.Close(); pipeErr != nil {
+        logrus.Errorf("unable to close apparmor pipe: %q", pipeErr)
+    }
     return cmd.Wait()
 }
@@ -155,7 +155,7 @@ func createCgroupv2Path(path string) (Err error) {
     if err != nil {
         return errors.Wrapf(err, "read /sys/fs/cgroup/cgroup.controllers")
     }
-    if !filepath.HasPrefix(path, "/sys/fs/cgroup") {
+    if !strings.HasPrefix(path, "/sys/fs/cgroup/") {
         return fmt.Errorf("invalid cgroup path %s", path)
     }
 
@@ -274,12 +274,6 @@ func readFileAsUint64(path string) (uint64, error) {
     return ret, nil
 }
 
-func (c *CgroupControl) writePidToTasks(pid int, name string) error {
-    path := filepath.Join(c.getCgroupv1Path(name), "tasks")
-    payload := []byte(fmt.Sprintf("%d", pid))
-    return ioutil.WriteFile(path, payload, 0644)
-}
-
 // New creates a new cgroup control
 func New(path string, resources *spec.LinuxResources) (*CgroupControl, error) {
     cgroup2, err := IsCgroup2UnifiedMode()
@@ -384,7 +378,7 @@ func rmDirRecursively(path string) error {
             }
         }
     }
-    if os.Remove(path); err != nil {
+    if err := os.Remove(path); err != nil {
         if !os.IsNotExist(err) {
             return errors.Wrapf(err, "remove %s", path)
         }
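The rmDirRecursively fix above is worth a second look: in `if os.Remove(path); err != nil` the statement before the semicolon discards os.Remove's result, and the condition tests whatever err already held. Binding the error inside the if statement, as the new line does, is the idiomatic form; a standalone sketch (not from the patch):

package example

import "os"

func removeIfPresent(path string) error {
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}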
@@ -10,5 +10,5 @@ import (
 
 func created(fi os.FileInfo) time.Time {
     st := fi.Sys().(*syscall.Stat_t)
-    return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
+    return time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
 }
@@ -151,7 +151,6 @@ type iptablesBackend struct {
     protos         map[iptables.Protocol]*iptables.IPTables
     privChainName  string
     adminChainName string
-    ifName         string
 }
 
 // iptablesBackend implements the FirewallBackend interface
@@ -27,7 +27,11 @@ var spewConfig = spew.ConfigState{
 // reads back a possibly-altered form from their standard output).
 func RuntimeConfigFilter(ctx context.Context, hooks []spec.Hook, config *spec.Spec, postKillTimeout time.Duration) (hookErr, err error) {
     data, err := json.Marshal(config)
+    if err != nil {
+        return nil, err
+    }
     for i, hook := range hooks {
+        hook := hook
         var stdout bytes.Buffer
         hookErr, err = Run(ctx, &hook, data, &stdout, nil, postKillTimeout)
         if err != nil {
@@ -43,11 +47,11 @@ func RuntimeConfigFilter(ctx context.Context, hooks []spec.Hook, config *spec.Sp
         }
 
         if !reflect.DeepEqual(config, &newConfig) {
-            old := spewConfig.Sdump(config)
-            new := spewConfig.Sdump(&newConfig)
+            oldConfig := spewConfig.Sdump(config)
+            newConfig := spewConfig.Sdump(&newConfig)
             diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
-                A:        difflib.SplitLines(old),
-                B:        difflib.SplitLines(new),
+                A:        difflib.SplitLines(oldConfig),
+                B:        difflib.SplitLines(newConfig),
                 FromFile: "Old",
                 FromDate: "",
                 ToFile:   "New",
@@ -111,10 +111,8 @@ func tryMappingTool(tool string, pid int, hostID int, mappings []idtools.IDMap)
 
     args := []string{path, fmt.Sprintf("%d", pid)}
     args = appendTriplet(args, 0, hostID, 1)
-    if mappings != nil {
-        for _, i := range mappings {
-            args = appendTriplet(args, i.ContainerID+1, i.HostID, i.Size)
-        }
+    for _, i := range mappings {
+        args = appendTriplet(args, i.ContainerID+1, i.HostID, i.Size)
     }
     cmd := exec.Cmd{
         Path: path,
@@ -442,7 +440,7 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool,
         return false, -1, errors.Wrapf(err, "write to sync pipe")
     }
 
-    b := make([]byte, 1, 1)
+    b := make([]byte, 1)
     _, err = w.Read(b)
     if err != nil {
         return false, -1, errors.Wrapf(err, "read from sync pipe")
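Dropping the `if mappings != nil` guard in tryMappingTool is safe because ranging over a nil slice simply performs zero iterations. A small illustration (not from the patch):

package example

func sum(xs []int) int {
	total := 0
	for _, x := range xs { // fine even when xs == nil
		total += x
	}
	return total
}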
@@ -160,7 +160,7 @@ func (c *CreateConfig) addPrivilegedDevices(g *generate.Generator) error {
     }
 
     // Add resources device - need to clear the existing one first.
-    g.Spec().Linux.Resources.Devices = nil
+    g.Config.Linux.Resources.Devices = nil
     g.AddLinuxResourcesDevice(true, "", nil, nil, "rwm")
     return nil
 }
@@ -126,13 +126,9 @@ func validateIOpsDevice(val string) (*throttleDevice, error) { //nolint
     if err != nil {
         return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
     }
-    if rate < 0 {
-        return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
-    }
 
     return &throttleDevice{
         path: split[0],
-        rate: uint64(rate),
+        rate: rate,
     }, nil
 }
@@ -15,7 +15,7 @@ import (
 func findCgroupMountpoints() (map[string]string, error) {
     cgMounts, err := cgroups.GetCgroupMounts(false)
     if err != nil {
-        return nil, fmt.Errorf("Failed to parse cgroup information: %v", err)
+        return nil, fmt.Errorf("failed to parse cgroup information: %v", err)
     }
     mps := make(map[string]string)
     for _, m := range cgMounts {
@@ -6,8 +6,6 @@ import (
     "encoding/json"
     "errors"
     "io"
-    "os"
-
     "k8s.io/client-go/tools/remotecommand"
 )
 
@@ -90,7 +88,7 @@ func (v VirtWriteCloser) Write(input []byte) (int, error) {
 }
 
 // Reader decodes the content that comes over the wire and directs it to the proper destination.
-func Reader(r *bufio.Reader, output, errput *os.File, input *io.PipeWriter, resize chan remotecommand.TerminalSize) error {
+func Reader(r *bufio.Reader, output io.Writer, errput io.Writer, input io.Writer, resize chan remotecommand.TerminalSize) error {
     var messageSize int64
     headerBytes := make([]byte, 8)
 
@@ -149,7 +147,7 @@ func Reader(r *bufio.Reader, output, errput *os.File, input *io.PipeWriter, resi
 
         default:
             // Something really went wrong
-            return errors.New("Unknown multiplex destination")
+            return errors.New("unknown multiplex destination")
         }
     }
 }