Merge pull request #2663 from jwhonce/wip/remote_umount

Implement podman-remote umount and rm commands
This commit is contained in:
OpenShift Merge Robot
2019-04-09 17:46:28 -07:00
committed by GitHub
21 changed files with 597 additions and 277 deletions

View File

@@ -24,12 +24,10 @@ func getMainCommands() []*cobra.Command {
_portCommand,
_refreshCommand,
_restartCommand,
_rmCommand,
_searchCommand,
_startCommand,
_statsCommand,
_topCommand,
_umountCommand,
_unpauseCommand,
}

View File

@@ -1,9 +1,6 @@
package main
import (
"fmt"
"reflect"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/pkg/adapter"
"github.com/docker/docker/pkg/signal"
@@ -71,21 +68,5 @@ func killCmd(c *cliconfig.KillValues) error {
if err != nil {
return err
}
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
return printCmdResults(ok, failures)
}

View File

@@ -54,11 +54,13 @@ var mainCommands = []*cobra.Command{
podCommand.Command,
_pullCommand,
_pushCommand,
_rmCommand,
&_rmiCommand,
_runCommand,
_saveCommand,
_stopCommand,
_tagCommand,
_umountCommand,
_versionCommand,
_waitCommand,
imageCommand.Command,

View File

@@ -4,12 +4,9 @@ import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -48,78 +45,29 @@ func init() {
markFlagHiddenForRemoteClient("latest", flags)
}
// saveCmd saves the image to either docker-archive or oci
// rmCmd removes one or more containers
func rmCmd(c *cliconfig.RmValues) error {
var (
deleteFuncs []shared.ParallelWorkerInput
)
ctx := getContext()
runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
failureCnt := 0
delContainers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
ok, failures, err := runtime.RemoveContainers(getContext(), c)
if err != nil {
if c.Force && len(c.InputArgs) > 0 {
if errors.Cause(err) == libpod.ErrNoSuchCtr {
err = nil
if errors.Cause(err) == libpod.ErrNoSuchCtr {
if len(c.InputArgs) > 1 {
exitCode = 125
} else {
failureCnt++
}
runtime.RemoveContainersFromStorage(c.InputArgs)
}
if len(delContainers) == 0 {
if err != nil && failureCnt == 0 {
exitCode = 1
}
return err
}
if err != nil {
if errors.Cause(err) == libpod.ErrNoSuchCtr {
exitCode = 1
}
fmt.Println(err.Error())
}
return err
}
for _, container := range delContainers {
con := container
f := func() error {
return runtime.RemoveContainer(ctx, con, c.Force, c.Volumes)
}
deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
ContainerID: con.ID(),
ParallelFunc: f,
})
}
maxWorkers := shared.Parallelize("rm")
if c.GlobalIsSet("max-workers") {
maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
// Run the parallel funcs
deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
err = printParallelOutput(deleteErrors, errCount)
if err != nil {
for _, result := range deleteErrors {
if result != nil && errors.Cause(result) != image.ErrNoSuchCtr {
failureCnt++
}
}
if failureCnt == 0 {
exitCode = 1
}
}
if failureCnt > 0 {
if len(failures) > 0 {
exitCode = 125
}
return err
return printCmdResults(ok, failures)
}

View File

@@ -0,0 +1,133 @@
package shared
import (
"reflect"
"runtime"
"strings"
"sync"
"github.com/sirupsen/logrus"
)
// JobFunc provides the function signature for the pool'ed functions.
type JobFunc func() error

// Job defines the function to run.
type Job struct {
	ID string  // identifier echoed back in the Pool.Run ok/failures results
	Fn JobFunc // the work to perform; a nil error return means success
}

// JobResult defines the results from the function ran.
type JobResult struct {
	Job Job   // the originating Job
	Err error // nil on success, otherwise the error returned by Job.Fn
}

// Pool defines the worker pool and queues.
type Pool struct {
	id       string          // pool name, used only in debug logging
	wg       *sync.WaitGroup // tracks outstanding jobs (Add increments, workers decrement)
	jobs     chan Job        // buffered job queue (capacity slots)
	results  chan JobResult  // buffered result queue (capacity slots)
	size     int             // number of worker goroutines started by Run
	capacity int             // buffer size of both channels
}
// NewPool creates and initializes a new Pool. The worker count is clamped
// to min(size, capacity): there is never a reason to run more workers than
// there are queue slots for jobs.
func NewPool(id string, size int, capacity int) *Pool {
	var wg sync.WaitGroup

	// min for int...
	workers := size
	if workers > capacity {
		workers = capacity
	}

	// Named fields instead of a positional literal so the struct can grow
	// without silently shifting initializer meanings.
	return &Pool{
		id:       id,
		wg:       &wg,
		jobs:     make(chan Job, capacity),
		results:  make(chan JobResult, capacity),
		size:     workers,
		capacity: capacity,
	}
}
// Add Job to pool for parallel processing.
//
// NOTE(review): p.jobs is buffered with `capacity` slots and workers are
// only started by Run(), so queueing more than `capacity` jobs before
// calling Run() will block here forever. Callers must keep job counts
// within the capacity passed to NewPool.
func (p *Pool) Add(job Job) {
	p.wg.Add(1)
	p.jobs <- job
}
// Run the Jobs in the pool, gathering and returning the results: the IDs
// of the jobs that succeeded, and a map of job ID to error for those that
// failed. Run closes the jobs channel, so the Pool must not be reused and
// Add must not be called afterwards. The error return is currently always
// nil; it is kept for interface stability.
func (p *Pool) Run() ([]string, map[string]error, error) {
	var (
		ok       = []string{}
		failures = map[string]error{}
	)

	for w := 0; w < p.size; w++ {
		// w is passed by value as a goroutine argument, so it is captured
		// per iteration; no `w := w` shadow is needed.
		go p.newWorker(w)
	}
	close(p.jobs)
	p.wg.Wait()

	// Every worker send into p.results has completed before Wait returns,
	// and the channel is buffered with the pool's full capacity, so it can
	// be closed and drained here without a concurrent reader.
	close(p.results)
	for r := range p.results {
		if r.Err == nil {
			ok = append(ok, r.Job.ID)
		} else {
			failures[r.Job.ID] = r.Err
		}
	}

	if logrus.GetLevel() == logrus.DebugLevel {
		for i, f := range failures {
			logrus.Debugf("Pool[%s, %s: %s]", p.id, i, f.Error())
		}
	}
	return ok, failures, nil
}
// newWorker creates new parallel workers to monitor jobs channel from Pool.
// Each worker drains p.jobs until Run closes the channel, publishing one
// JobResult per job and marking the job done on the pool's WaitGroup.
// The result is sent before wg.Done so Run's Wait implies all sends landed.
func (p *Pool) newWorker(slot int) {
	for job := range p.jobs {
		err := job.Fn()
		p.results <- JobResult{job, err}
		if logrus.GetLevel() == logrus.DebugLevel {
			// NOTE(review): n[2:] is a []string, so %s renders it with
			// brackets (e.g. "[funcName]"); strings.Join(n[2:], ".") was
			// probably intended — confirm before relying on this output.
			n := strings.Split(runtime.FuncForPC(reflect.ValueOf(job.Fn).Pointer()).Name(), ".")
			logrus.Debugf("Worker#%d finished job %s/%s (%v)", slot, n[2:], job.ID, err)
		}
		p.wg.Done()
	}
}
// DefaultPoolSize provides the maximum number of parallel workers (int) as calculated by a basic
// heuristic. This can be overridden by the --max-workers primary switch to podman.
func DefaultPoolSize(name string) int {
	numCpus := runtime.NumCPU()
	switch name {
	// BUG FIX: these were separate empty `case` clauses; Go switch cases do
	// not fall through, so "kill"/"pause"/"rm" silently got the default 3
	// and only "unpause" received the CPU-scaled size. A multi-value case
	// applies the heuristic to all four as intended.
	case "kill", "pause", "rm", "unpause":
		if numCpus <= 3 {
			return numCpus * 3
		}
		return numCpus * 4
	case "ps":
		return 8
	case "restart":
		return numCpus * 2
	case "stop":
		// Stopping waits on per-container timeouts, so allow extra workers.
		if numCpus <= 2 {
			return 4
		}
		return numCpus * 3
	}
	// Conservative default for commands without a tuned heuristic.
	return 3
}

View File

@@ -1,9 +1,6 @@
package main
import (
"fmt"
"reflect"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
@@ -68,21 +65,5 @@ func stopCmd(c *cliconfig.StopValues) error {
if err != nil {
return err
}
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
return printCmdResults(ok, failures)
}

View File

@@ -1,20 +1,16 @@
package main
import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/storage"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
umountCommand cliconfig.UmountValues
description = `Container storage increments a mount counter each time a container is mounted.
description = `Container storage increments a mount counter each time a container is mounted.
When a container is unmounted, the mount counter is decremented. The container's root filesystem is physically unmounted only when the mount counter reaches zero indicating no other processes are using the mount.
@@ -51,42 +47,15 @@ func init() {
}
func umountCmd(c *cliconfig.UmountValues) error {
runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
return errors.Wrapf(err, "error creating runtime")
}
defer runtime.Shutdown(false)
force := c.Force
umountAll := c.All
containers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
ok, failures, err := runtime.UmountRootFilesystems(getContext(), c)
if err != nil {
if len(containers) == 0 {
return err
}
fmt.Println(err.Error())
return err
}
umountContainerErrStr := "error unmounting container"
var lastError error
for _, ctr := range containers {
ctrState, err := ctr.State()
if ctrState == libpod.ContainerStateRunning || err != nil {
continue
}
if err = ctr.Unmount(force); err != nil {
if umountAll && errors.Cause(err) == storage.ErrLayerNotMounted {
continue
}
if lastError != nil {
logrus.Error(lastError)
}
lastError = errors.Wrapf(err, "%s %s", umountContainerErrStr, ctr.ID())
continue
}
fmt.Printf("%s\n", ctr.ID())
}
return lastError
return printCmdResults(ok, failures)
}

View File

@@ -2,11 +2,12 @@ package main
import (
"fmt"
"reflect"
"github.com/spf13/pflag"
)
//printParallelOutput takes the map of parallel worker results and outputs them
// printParallelOutput takes the map of parallel worker results and outputs them
// to stdout
func printParallelOutput(m map[string]error, errCount int) error {
var lastError error
@@ -23,6 +24,26 @@ func printParallelOutput(m map[string]error, errCount int) error {
return lastError
}
// print results from CLI command
func printCmdResults(ok []string, failures map[string]error) error {
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
}
// markFlagHiddenForRemoteClient makes the flag not appear as part of the CLI
// on the remote-client
func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
@@ -30,3 +51,29 @@ func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
flags.MarkHidden(flagName)
}
}
// TODO: remove when adapter package takes over this functionality
// func joinContainerOrCreateRootlessUserNS(runtime *libpod.Runtime, ctr *libpod.Container) (bool, int, error) {
// if os.Geteuid() == 0 {
// return false, 0, nil
// }
// s, err := ctr.State()
// if err != nil {
// return false, -1, err
// }
// opts := rootless.Opts{
// Argument: ctr.ID(),
// }
// if s == libpod.ContainerStateRunning || s == libpod.ContainerStatePaused {
// data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
// if err != nil {
// return false, -1, errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
// }
// conmonPid, err := strconv.Atoi(string(data))
// if err != nil {
// return false, -1, errors.Wrapf(err, "cannot parse PID %q", data)
// }
// return rootless.JoinDirectUserAndMountNSWithOpts(uint(conmonPid), &opts)
// }
// return rootless.BecomeRootInUserNSWithOpts(&opts)
// }

View File

@@ -1,8 +1,6 @@
package main
import (
"fmt"
"reflect"
"time"
"github.com/containers/libpod/cmd/podman/cliconfig"
@@ -62,21 +60,5 @@ func waitCmd(c *cliconfig.WaitValues) error {
if err != nil {
return err
}
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
return printCmdResults(ok, failures)
}