Mirror of https://github.com/containers/podman.git (synced 2025-06-23 02:18:13 +08:00)

Merge pull request #2663 from jwhonce/wip/remote_umount
Implement podman-remote umount and rm command
@@ -24,12 +24,10 @@ func getMainCommands() []*cobra.Command {
_portCommand,
_refreshCommand,
_restartCommand,
_rmCommand,
_searchCommand,
_startCommand,
_statsCommand,
_topCommand,
_umountCommand,
_unpauseCommand,
}
@@ -1,9 +1,6 @@
package main
import (
"fmt"
"reflect"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/pkg/adapter"
"github.com/docker/docker/pkg/signal"
@@ -71,21 +68,5 @@ func killCmd(c *cliconfig.KillValues) error {
if err != nil {
return err
}
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
return printCmdResults(ok, failures)
}
@@ -54,11 +54,13 @@ var mainCommands = []*cobra.Command{
podCommand.Command,
_pullCommand,
_pushCommand,
_rmCommand,
&_rmiCommand,
_runCommand,
_saveCommand,
_stopCommand,
_tagCommand,
_umountCommand,
_versionCommand,
_waitCommand,
imageCommand.Command,
@@ -4,12 +4,9 @@ import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -48,78 +45,29 @@ func init() {
markFlagHiddenForRemoteClient("latest", flags)
}
// saveCmd saves the image to either docker-archive or oci
// rmCmd removes one or more containers
func rmCmd(c *cliconfig.RmValues) error {
var (
deleteFuncs []shared.ParallelWorkerInput
)
ctx := getContext()
runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
failureCnt := 0
delContainers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
ok, failures, err := runtime.RemoveContainers(getContext(), c)
if err != nil {
if c.Force && len(c.InputArgs) > 0 {
if errors.Cause(err) == libpod.ErrNoSuchCtr {
err = nil
if errors.Cause(err) == libpod.ErrNoSuchCtr {
if len(c.InputArgs) > 1 {
exitCode = 125
} else {
failureCnt++
}
runtime.RemoveContainersFromStorage(c.InputArgs)
}
if len(delContainers) == 0 {
if err != nil && failureCnt == 0 {
exitCode = 1
}
return err
}
if err != nil {
if errors.Cause(err) == libpod.ErrNoSuchCtr {
exitCode = 1
}
fmt.Println(err.Error())
}
return err
}
for _, container := range delContainers {
con := container
f := func() error {
return runtime.RemoveContainer(ctx, con, c.Force, c.Volumes)
}
deleteFuncs = append(deleteFuncs, shared.ParallelWorkerInput{
ContainerID: con.ID(),
ParallelFunc: f,
})
}
maxWorkers := shared.Parallelize("rm")
if c.GlobalIsSet("max-workers") {
maxWorkers = c.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum workers to %d", maxWorkers)
// Run the parallel funcs
deleteErrors, errCount := shared.ParallelExecuteWorkerPool(maxWorkers, deleteFuncs)
err = printParallelOutput(deleteErrors, errCount)
if err != nil {
for _, result := range deleteErrors {
if result != nil && errors.Cause(result) != image.ErrNoSuchCtr {
failureCnt++
}
}
if failureCnt == 0 {
exitCode = 1
}
}
if failureCnt > 0 {
if len(failures) > 0 {
exitCode = 125
}
return err
return printCmdResults(ok, failures)
}
cmd/podman/shared/workers.go (new file, 133 lines)
@@ -0,0 +1,133 @@
package shared
import (
"reflect"
"runtime"
"strings"
"sync"
"github.com/sirupsen/logrus"
)
// JobFunc provides the function signature for the pool'ed functions
type JobFunc func() error
// Job defines the function to run
type Job struct {
ID string
Fn JobFunc
}
// JobResult defines the results from the function ran
type JobResult struct {
Job Job
Err error
}
// Pool defines the worker pool and queues
type Pool struct {
id string
wg *sync.WaitGroup
jobs chan Job
results chan JobResult
size int
capacity int
}
// NewPool creates and initializes a new Pool
func NewPool(id string, size int, capacity int) *Pool {
var wg sync.WaitGroup
// min for int...
s := size
if s > capacity {
s = capacity
}
return &Pool{
id,
&wg,
make(chan Job, capacity),
make(chan JobResult, capacity),
s,
capacity,
}
}
// Add Job to pool for parallel processing
func (p *Pool) Add(job Job) {
p.wg.Add(1)
p.jobs <- job
}
// Run the Job's in the pool, gather and return results
func (p *Pool) Run() ([]string, map[string]error, error) {
var (
ok = []string{}
failures = map[string]error{}
)
for w := 0; w < p.size; w++ {
w := w
go p.newWorker(w)
}
close(p.jobs)
p.wg.Wait()
close(p.results)
for r := range p.results {
if r.Err == nil {
ok = append(ok, r.Job.ID)
} else {
failures[r.Job.ID] = r.Err
}
}
if logrus.GetLevel() == logrus.DebugLevel {
for i, f := range failures {
logrus.Debugf("Pool[%s, %s: %s]", p.id, i, f.Error())
}
}
return ok, failures, nil
}
// newWorker creates new parallel workers to monitor jobs channel from Pool
func (p *Pool) newWorker(slot int) {
for job := range p.jobs {
err := job.Fn()
p.results <- JobResult{job, err}
if logrus.GetLevel() == logrus.DebugLevel {
n := strings.Split(runtime.FuncForPC(reflect.ValueOf(job.Fn).Pointer()).Name(), ".")
logrus.Debugf("Worker#%d finished job %s/%s (%v)", slot, n[2:], job.ID, err)
}
p.wg.Done()
}
}
// DefaultPoolSize provides the maximum number of parallel workers (int) as calculated by a basic
// heuristic. This can be overriden by the --max-workers primary switch to podman.
func DefaultPoolSize(name string) int {
numCpus := runtime.NumCPU()
switch name {
case "kill":
case "pause":
case "rm":
case "unpause":
if numCpus <= 3 {
return numCpus * 3
}
return numCpus * 4
case "ps":
return 8
case "restart":
return numCpus * 2
case "stop":
if numCpus <= 2 {
return 4
} else {
return numCpus * 3
}
}
return 3
}
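For orientation, here is a minimal sketch (not part of the commit) of how a caller drives this new worker pool, mirroring the wiring that StopContainers, KillContainers and RemoveContainers use later in this change; stopAll and stopOne are hypothetical names introduced only for the example.

    package main

    import (
        "fmt"

        "github.com/containers/libpod/cmd/podman/shared"
    )

    // stopAll fans a per-container operation out over the pool and reports
    // results in the ok/failures shape this change standardizes on.
    func stopAll(ids []string, stopOne func(id string) error) error {
        maxWorkers := shared.DefaultPoolSize("stop")
        // capacity == number of jobs, so every Add below fits in the channel
        pool := shared.NewPool("stop", maxWorkers, len(ids))
        for _, id := range ids {
            id := id // capture the loop variable for the closure
            pool.Add(shared.Job{ID: id, Fn: func() error { return stopOne(id) }})
        }
        ok, failures, err := pool.Run()
        if err != nil {
            return err
        }
        for _, id := range ok {
            fmt.Println(id)
        }
        for id, e := range failures {
            fmt.Printf("%s: %v\n", id, e)
        }
        return nil
    }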
@@ -1,9 +1,6 @@
package main
import (
"fmt"
"reflect"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
@@ -68,21 +65,5 @@ func stopCmd(c *cliconfig.StopValues) error {
if err != nil {
return err
}
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
return printCmdResults(ok, failures)
}
@@ -1,20 +1,16 @@
package main
import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/storage"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
umountCommand cliconfig.UmountValues
description = `Container storage increments a mount counter each time a container is mounted.
description = `Container storage increments a mount counter each time a container is mounted.
When a container is unmounted, the mount counter is decremented. The container's root filesystem is physically unmounted only when the mount counter reaches zero indicating no other processes are using the mount.
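The counter semantics described in that help text can be pictured with a small conceptual sketch; this is an illustration only, not the containers/storage implementation:

    // mountCounter is a toy model of the per-container mount counter.
    type mountCounter struct {
        counts map[string]int // container ID -> outstanding mounts
    }

    func (m *mountCounter) mount(id string) {
        m.counts[id]++ // every mount request bumps the counter
    }

    // unmount reports whether the root filesystem should now be physically
    // unmounted, i.e. the counter has dropped back to zero.
    func (m *mountCounter) unmount(id string) bool {
        if m.counts[id] > 0 {
            m.counts[id]--
        }
        return m.counts[id] == 0
    }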
@@ -51,42 +47,15 @@ func init() {
}
func umountCmd(c *cliconfig.UmountValues) error {
runtime, err := libpodruntime.GetRuntime(&c.PodmanCommand)
runtime, err := adapter.GetRuntime(&c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
return errors.Wrapf(err, "error creating runtime")
}
defer runtime.Shutdown(false)
force := c.Force
umountAll := c.All
containers, err := getAllOrLatestContainers(&c.PodmanCommand, runtime, -1, "all")
ok, failures, err := runtime.UmountRootFilesystems(getContext(), c)
if err != nil {
if len(containers) == 0 {
return err
}
fmt.Println(err.Error())
return err
}
umountContainerErrStr := "error unmounting container"
var lastError error
for _, ctr := range containers {
ctrState, err := ctr.State()
if ctrState == libpod.ContainerStateRunning || err != nil {
continue
}
if err = ctr.Unmount(force); err != nil {
if umountAll && errors.Cause(err) == storage.ErrLayerNotMounted {
continue
}
if lastError != nil {
logrus.Error(lastError)
}
lastError = errors.Wrapf(err, "%s %s", umountContainerErrStr, ctr.ID())
continue
}
fmt.Printf("%s\n", ctr.ID())
}
return lastError
return printCmdResults(ok, failures)
}
@@ -2,11 +2,12 @@ package main
import (
"fmt"
"reflect"
"github.com/spf13/pflag"
)
//printParallelOutput takes the map of parallel worker results and outputs them
// printParallelOutput takes the map of parallel worker results and outputs them
// to stdout
func printParallelOutput(m map[string]error, errCount int) error {
var lastError error
@@ -23,6 +24,26 @@ func printParallelOutput(m map[string]error, errCount int) error {
return lastError
}
// print results from CLI command
func printCmdResults(ok []string, failures map[string]error) error {
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
}
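Every rewritten frontend command in this change (kill, stop, rm, umount, wait) now follows the same shape. A hypothetical command, sketched here purely for illustration (exampleKillCmd is not part of the commit; getContext and printCmdResults are the package-main helpers shown in this diff), would look roughly like this:

    package main

    import (
        "syscall"

        "github.com/containers/libpod/cmd/podman/cliconfig"
        "github.com/containers/libpod/pkg/adapter"
        "github.com/pkg/errors"
    )

    // exampleKillCmd illustrates the pattern: resolve the adapter runtime,
    // collect ok/failures from it, and defer output and error aggregation
    // to printCmdResults.
    func exampleKillCmd(c *cliconfig.KillValues) error {
        runtime, err := adapter.GetRuntime(&c.PodmanCommand)
        if err != nil {
            return errors.Wrapf(err, "could not get runtime")
        }
        defer runtime.Shutdown(false)

        ok, failures, err := runtime.KillContainers(getContext(), c, syscall.SIGTERM)
        if err != nil {
            return err
        }
        return printCmdResults(ok, failures)
    }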
// markFlagHiddenForRemoteClient makes the flag not appear as part of the CLI
// on the remote-client
func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
@@ -30,3 +51,29 @@ func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
flags.MarkHidden(flagName)
}
}
// TODO: remove when adapter package takes over this functionality
// func joinContainerOrCreateRootlessUserNS(runtime *libpod.Runtime, ctr *libpod.Container) (bool, int, error) {
// if os.Geteuid() == 0 {
// return false, 0, nil
// }
// s, err := ctr.State()
// if err != nil {
// return false, -1, err
// }
// opts := rootless.Opts{
// Argument: ctr.ID(),
// }
// if s == libpod.ContainerStateRunning || s == libpod.ContainerStatePaused {
// data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
// if err != nil {
// return false, -1, errors.Wrapf(err, "cannot read conmon PID file %q", ctr.Config().ConmonPidFile)
// }
// conmonPid, err := strconv.Atoi(string(data))
// if err != nil {
// return false, -1, errors.Wrapf(err, "cannot parse PID %q", data)
// }
// return rootless.JoinDirectUserAndMountNSWithOpts(uint(conmonPid), &opts)
// }
// return rootless.BecomeRootInUserNSWithOpts(&opts)
// }
@@ -1,8 +1,6 @@
package main
import (
"fmt"
"reflect"
"time"
"github.com/containers/libpod/cmd/podman/cliconfig"
@@ -62,21 +60,5 @@ func waitCmd(c *cliconfig.WaitValues) error {
if err != nil {
return err
}
for _, id := range ok {
fmt.Println(id)
}
if len(failures) > 0 {
keys := reflect.ValueOf(failures).MapKeys()
lastKey := keys[len(keys)-1].String()
lastErr := failures[lastKey]
delete(failures, lastKey)
for _, err := range failures {
outputError(err)
}
return lastErr
}
return nil
return printCmdResults(ok, failures)
}
@@ -15,7 +15,7 @@ import (
"github.com/containers/libpod/pkg/lookup"
"github.com/containers/storage/pkg/stringid"
"github.com/docker/docker/oci/caps"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/wait"
@@ -174,7 +174,7 @@ func (c *Container) StopWithTimeout(timeout uint) error {
if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateUnknown ||
c.state.State == ContainerStatePaused {
return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers")
return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s in state %s", c.ID(), c.state.State.String())
}
if c.state.State == ContainerStateStopped ||
@@ -948,7 +948,7 @@ func (c *Container) start() error {
// Internal, non-locking function to stop container
func (c *Container) stop(timeout uint) error {
logrus.Debugf("Stopping ctr %s with timeout %d", c.ID(), timeout)
logrus.Debugf("Stopping ctr %s (timeout %d)", c.ID(), timeout)
if err := c.runtime.ociRuntime.stopContainer(c, timeout); err != nil {
return err
@@ -1064,14 +1064,16 @@ func (c *Container) mountStorage() (string, error) {
func (c *Container) cleanupStorage() error {
if !c.state.Mounted {
// Already unmounted, do nothing
logrus.Debugf("Storage is already unmounted, skipping...")
logrus.Debugf("Container %s storage is already unmounted, skipping...", c.ID())
return nil
}
for _, mount := range c.config.Mounts {
if err := c.unmountSHM(mount); err != nil {
return err
}
}
if c.config.Rootfs != "" {
return nil
}
@@ -30,7 +30,7 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -48,6 +48,8 @@ func (c *Container) unmountSHM(mount string) error {
if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil {
if err != syscall.EINVAL {
logrus.Warnf("container %s failed to unmount %s : %v", c.ID(), mount, err)
} else {
logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err)
}
}
return nil
@@ -143,6 +143,7 @@ func waitContainerStop(ctr *Container, timeout time.Duration) error {
return nil
case <-time.After(timeout):
close(chControl)
logrus.Debugf("container %s did not die within timeout %d", ctr.ID(), timeout)
return errors.Errorf("container %s did not die within timeout", ctr.ID())
}
}
@@ -18,6 +18,7 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter/shortcuts"
"github.com/containers/storage"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -62,37 +63,115 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
timeout = &t
}
var (
ok = []string{}
failures = map[string]error{}
)
maxWorkers := shared.DefaultPoolSize("stop")
if cli.GlobalIsSet("max-workers") {
maxWorkers = cli.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum stop workers to %d", maxWorkers)
ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
if err != nil {
return ok, failures, err
return nil, nil, err
}
pool := shared.NewPool("stop", maxWorkers, len(ctrs))
for _, c := range ctrs {
c := c
if timeout == nil {
t := c.StopTimeout()
timeout = &t
logrus.Debugf("Set timeout to container %s default (%d)", c.ID(), *timeout)
}
if err := c.StopWithTimeout(*timeout); err == nil {
ok = append(ok, c.ID())
} else if errors.Cause(err) == libpod.ErrCtrStopped {
ok = append(ok, c.ID())
logrus.Debugf("Container %s is already stopped", c.ID())
} else {
failures[c.ID()] = err
}
pool.Add(shared.Job{
c.ID(),
func() error {
err := c.StopWithTimeout(*timeout)
if err != nil {
if errors.Cause(err) == libpod.ErrCtrStopped {
logrus.Debugf("Container %s is already stopped", c.ID())
return nil
}
logrus.Debugf("Failed to stop container %s: %s", c.ID(), err.Error())
}
return err
},
})
}
return ok, failures, nil
return pool.Run()
}
// KillContainers sends signal to container(s) based on CLI inputs.
// Returns list of successful id(s), map of failed id(s) + error, or error not from container
func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) {
maxWorkers := shared.DefaultPoolSize("kill")
if cli.GlobalIsSet("max-workers") {
maxWorkers = cli.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum kill workers to %d", maxWorkers)
ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
if err != nil {
return nil, nil, err
}
pool := shared.NewPool("kill", maxWorkers, len(ctrs))
for _, c := range ctrs {
c := c
pool.Add(shared.Job{
c.ID(),
func() error {
return c.Kill(uint(signal))
},
})
}
return pool.Run()
}
// RemoveContainers removes container(s) based on CLI inputs.
func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) {
var (
ok = []string{}
failures = map[string]error{}
)
maxWorkers := shared.DefaultPoolSize("rm")
if cli.GlobalIsSet("max-workers") {
maxWorkers = cli.GlobalFlags.MaxWorks
}
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
ctrs, err := shortcuts.GetContainersByContext(cli.All, cli.Latest, cli.InputArgs, r.Runtime)
if err != nil {
// Force may be used to remove containers no longer found in the database
if cli.Force && len(cli.InputArgs) > 0 && errors.Cause(err) == libpod.ErrNoSuchCtr {
r.RemoveContainersFromStorage(cli.InputArgs)
}
return ok, failures, err
}
pool := shared.NewPool("rm", maxWorkers, len(ctrs))
for _, c := range ctrs {
c := c
pool.Add(shared.Job{
c.ID(),
func() error {
err := r.RemoveContainer(ctx, c, cli.Force, cli.Volumes)
if err != nil {
logrus.Debugf("Failed to remove container %s: %s", c.ID(), err.Error())
}
return err
},
})
}
return pool.Run()
}
// UmountRootFilesystems removes container(s) based on CLI inputs.
func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) {
var (
ok = []string{}
failures = map[string]error{}
@@ -103,11 +182,25 @@ func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillVa
return ok, failures, err
}
for _, c := range ctrs {
if err := c.Kill(uint(signal)); err == nil {
ok = append(ok, c.ID())
for _, ctr := range ctrs {
state, err := ctr.State()
if err != nil {
logrus.Debugf("Error umounting container %s state: %s", ctr.ID(), err.Error())
continue
}
if state == libpod.ContainerStateRunning {
logrus.Debugf("Error umounting container %s, is running", ctr.ID())
continue
}
if err := ctr.Unmount(cli.Force); err != nil {
if cli.All && errors.Cause(err) == storage.ErrLayerNotMounted {
logrus.Debugf("Error umounting container %s, storage.ErrLayerNotMounted", ctr.ID())
continue
}
failures[ctr.ID()] = errors.Wrapf(err, "error unmounting continaner %s", ctr.ID())
} else {
failures[c.ID()] = err
ok = append(ok, ctr.ID())
}
}
return ok, failures, nil
@@ -12,11 +12,12 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/inspect"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/inspect"
"github.com/varlink/go/varlink"
)
@@ -128,7 +129,7 @@ func (c *Container) Name() string {
return c.config.Name
}
// StopContainers stops requested containers using CLI inputs.
// StopContainers stops requested containers using varlink.
// Returns the list of stopped container ids, map of failed to stop container ids + errors, or any non-container error
func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopValues) ([]string, map[string]error, error) {
var (
@@ -152,7 +153,7 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
return ok, failures, nil
}
// KillContainers sends signal to container(s) based on CLI inputs.
// KillContainers sends signal to container(s) based on varlink.
// Returns list of successful id(s), map of failed id(s) + error, or error not from container
func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillValues, signal syscall.Signal) ([]string, map[string]error, error) {
var (
@@ -176,6 +177,52 @@ func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillVa
return ok, failures, nil
}
// RemoveContainer removes container(s) based on varlink inputs.
func (r *LocalRuntime) RemoveContainers(ctx context.Context, cli *cliconfig.RmValues) ([]string, map[string]error, error) {
ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
if err != nil {
return nil, nil, err
}
var (
ok = []string{}
failures = map[string]error{}
)
for _, id := range ids {
_, err := iopodman.RemoveContainer().Call(r.Conn, id, cli.Force, cli.Volumes)
if err != nil {
failures[id] = err
} else {
ok = append(ok, id)
}
}
return ok, failures, nil
}
// UmountRootFilesystems umounts container(s) root filesystems based on varlink inputs
func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig.UmountValues) ([]string, map[string]error, error) {
ids, err := iopodman.GetContainersByContext().Call(r.Conn, cli.All, cli.Latest, cli.InputArgs)
if err != nil {
return nil, nil, err
}
var (
ok = []string{}
failures = map[string]error{}
)
for _, id := range ids {
err := iopodman.UnmountContainer().Call(r.Conn, id, cli.Force)
if err != nil {
failures[id] = err
} else {
ok = append(ok, id)
}
}
return ok, failures, nil
}
// WaitOnContainers waits for all given container(s) to stop.
// interval is currently ignored.
func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.WaitValues, interval time.Duration) ([]string, map[string]error, error) {
@@ -227,7 +274,7 @@ func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContai
// Logs one or more containers over a varlink connection
func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
//GetContainersLogs
// GetContainersLogs
reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), int64(c.Tail), c.Timestamps)
if err != nil {
return errors.Wrapf(err, "failed to get container logs")
@@ -310,6 +310,46 @@ func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.Healt
return r.Runtime.HealthCheck(c.InputArgs[0])
}
// JoinOrCreateRootlessPod joins the specified pod if it is running or it creates a new user namespace
// if the pod is stopped
// func (r *LocalRuntime) JoinOrCreateRootlessPod(pod *Pod) (bool, int, error) {
// if os.Geteuid() == 0 {
// return false, 0, nil
// }
// opts := rootless.Opts{
// Argument: pod.ID(),
// }
//
// inspect, err := pod.Inspect()
// if err != nil {
// return false, 0, err
// }
// for _, ctr := range inspect.Containers {
// prevCtr, err := r.LookupContainer(ctr.ID)
// if err != nil {
// return false, -1, err
// }
// s, err := prevCtr.State()
// if err != nil {
// return false, -1, err
// }
// if s != libpod.ContainerStateRunning && s != libpod.ContainerStatePaused {
// continue
// }
// data, err := ioutil.ReadFile(prevCtr.Config().ConmonPidFile)
// if err != nil {
// return false, -1, errors.Wrapf(err, "cannot read conmon PID file %q", prevCtr.Config().ConmonPidFile)
// }
// conmonPid, err := strconv.Atoi(string(data))
// if err != nil {
// return false, -1, errors.Wrapf(err, "cannot parse PID %q", data)
// }
// return rootless.JoinDirectUserAndMountNSWithOpts(uint(conmonPid), &opts)
// }
//
// return rootless.BecomeRootInUserNSWithOpts(&opts)
// }
// Events is a wrapper to libpod to obtain libpod/podman events
func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
var (
@@ -363,3 +403,28 @@ func (r *LocalRuntime) Events(c *cliconfig.EventValues) error {
func (r *LocalRuntime) Diff(c *cliconfig.DiffValues, to string) ([]archive.Change, error) {
return r.Runtime.GetDiff("", to)
}
// func (r *LocalRuntime) joinContainerOrCreateRootlessUserNS(ctr *libpod.Container) (bool, int, error) {
// if os.Geteuid() == 0 {
// return false, 0, nil
// }
// s, err := ctr.State()
// if err != nil {
// return false, -1, err
// }
// opts := rootless.Opts{
// Argument: ctr.ID(),
// }
// if s == libpod.ContainerStateRunning || s == libpod.ContainerStatePaused {
// data, err := ioutil.ReadFile(ctr.Config().ConmonPidFile)
// if err != nil {
// return false, -1, errors.Wrapf(err, "Container %s cannot read conmon PID file %q", ctr.ID(), ctr.Config().ConmonPidFile)
// }
// conmonPid, err := strconv.Atoi(string(data))
// if err != nil {
// return false, -1, errors.Wrapf(err, "Container %s cannot parse PID %q", ctr.ID(), data)
// }
// return rootless.JoinDirectUserAndMountNSWithOpts(uint(conmonPid), &opts)
// }
// return rootless.BecomeRootInUserNSWithOpts(&opts)
// }
@@ -1,6 +1,8 @@
package shortcuts
import "github.com/containers/libpod/libpod"
import (
"github.com/containers/libpod/libpod"
)
// GetPodsByContext gets pods whether all, latest, or a slice of names/ids
func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime) ([]*libpod.Pod, error) {
@@ -27,28 +29,23 @@ func GetPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
}
// GetContainersByContext gets pods whether all, latest, or a slice of names/ids
func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) ([]*libpod.Container, error) {
var ctrs = []*libpod.Container{}
func GetContainersByContext(all, latest bool, names []string, runtime *libpod.Runtime) (ctrs []*libpod.Container, err error) {
var ctr *libpod.Container
ctrs = []*libpod.Container{}
if all {
return runtime.GetAllContainers()
}
if latest {
c, err := runtime.GetLatestContainer()
if err != nil {
return nil, err
}
ctrs = append(ctrs, c)
return ctrs, nil
}
for _, c := range names {
ctr, err := runtime.LookupContainer(c)
if err != nil {
return nil, err
}
ctrs, err = runtime.GetAllContainers()
} else if latest {
ctr, err = runtime.GetLatestContainer()
ctrs = append(ctrs, ctr)
} else {
for _, n := range names {
ctr, e := runtime.LookupContainer(n)
if e != nil && err == nil {
err = e
}
ctrs = append(ctrs, ctr)
}
}
return ctrs, nil
return
}
@@ -3,7 +3,6 @@ package integration
import (
"encoding/json"
"fmt"
"github.com/containers/libpod/pkg/rootless"
"io/ioutil"
"os"
"os/exec"
@@ -12,6 +11,7 @@ import (
"strings"
"testing"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/containers/libpod/pkg/inspect"
@@ -86,7 +86,7 @@ func TestLibpod(t *testing.T) {
}
var _ = SynchronizedBeforeSuite(func() []byte {
//Cache images
// Cache images
cwd, _ := os.Getwd()
INTEGRATION_ROOT = filepath.Join(cwd, "../../")
podman := PodmanTestCreate("/tmp")
@@ -134,18 +134,18 @@ func (p *PodmanTestIntegration) Setup() {
p.ArtifactPath = ARTIFACT_DIR
}
//var _ = BeforeSuite(func() {
// cwd, _ := os.Getwd()
// INTEGRATION_ROOT = filepath.Join(cwd, "../../")
// podman := PodmanTestCreate("/tmp")
// podman.ArtifactPath = ARTIFACT_DIR
// if _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) {
// if err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil {
// fmt.Printf("%q\n", err)
// os.Exit(1)
// }
// }
//})
// var _ = BeforeSuite(func() {
// cwd, _ := os.Getwd()
// INTEGRATION_ROOT = filepath.Join(cwd, "../../")
// podman := PodmanTestCreate("/tmp")
// podman.ArtifactPath = ARTIFACT_DIR
// if _, err := os.Stat(ARTIFACT_DIR); os.IsNotExist(err) {
// if err = os.Mkdir(ARTIFACT_DIR, 0777); err != nil {
// fmt.Printf("%q\n", err)
// os.Exit(1)
// }
// }
// })
// for _, image := range CACHE_IMAGES {
// if err := podman.CreateArtifact(image); err != nil {
// fmt.Printf("%q\n", err)
@@ -172,7 +172,7 @@ func (p *PodmanTestIntegration) Setup() {
// os.Exit(1)
// }
// LockTmpDir = path
//})
// })
var _ = AfterSuite(func() {
sort.Sort(testResultsSortedLength{testResults})
@@ -61,9 +61,12 @@ func (p *PodmanTestIntegration) PodmanPID(args []string) (*PodmanSessionIntegrat
func (p *PodmanTestIntegration) Cleanup() {
// Remove all containers
stopall := p.Podman([]string{"stop", "-a", "--timeout", "0"})
stopall.WaitWithDefaultTimeout()
// stopall.WaitWithDefaultTimeout()
stopall.Wait(90)
session := p.Podman([]string{"rm", "-fa"})
session.Wait(90)
// Nuke tempdir
if err := os.RemoveAll(p.TempDir); err != nil {
fmt.Printf("%q\n", err)
@@ -141,7 +144,7 @@ func (p *PodmanTestIntegration) CreatePod(name string) (*PodmanSessionIntegratio
return session, session.ExitCode(), session.OutputToString()
}
//RunTopContainer runs a simple container in the background that
// RunTopContainer runs a simple container in the background that
// runs top. If the name passed != "", it will have a name
func (p *PodmanTestIntegration) RunTopContainer(name string) *PodmanSessionIntegration {
var podmanArgs = []string{"run"}
@@ -161,7 +164,7 @@ func (p *PodmanTestIntegration) RunTopContainerInPod(name, pod string) *PodmanSe
return p.Podman(podmanArgs)
}
//RunLsContainer runs a simple container in the background that
// RunLsContainer runs a simple container in the background that
// simply runs ls. If the name passed != "", it will have a name
func (p *PodmanTestIntegration) RunLsContainer(name string) (*PodmanSessionIntegration, int, string) {
var podmanArgs = []string{"run"}
@@ -215,13 +218,19 @@ func PodmanTestCreate(tempDir string) *PodmanTestIntegration {
return PodmanTestCreateUtil(tempDir, false)
}
//MakeOptions assembles all the podman main options
// MakeOptions assembles all the podman main options
func (p *PodmanTestIntegration) makeOptions(args []string) []string {
podmanOptions := strings.Split(fmt.Sprintf("--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s --tmpdir %s",
p.CrioRoot, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager, p.TmpDir), " ")
var debug string
if _, ok := os.LookupEnv("DEBUG"); ok {
debug = "--log-level=debug --syslog=true "
}
podmanOptions := strings.Split(fmt.Sprintf("%s--root %s --runroot %s --runtime %s --conmon %s --cni-config-dir %s --cgroup-manager %s --tmpdir %s",
debug, p.CrioRoot, p.RunRoot, p.OCIRuntime, p.ConmonBinary, p.CNIConfigDir, p.CgroupManager, p.TmpDir), " ")
if os.Getenv("HOOK_OPTION") != "" {
podmanOptions = append(podmanOptions, os.Getenv("HOOK_OPTION"))
}
podmanOptions = append(podmanOptions, strings.Split(p.StorageOptions, " ")...)
podmanOptions = append(podmanOptions, args...)
return podmanOptions
@@ -3,47 +3,79 @@
package integration
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"strconv"
"text/template"
. "github.com/containers/libpod/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
type endpoint struct {
Host string
Port string
}
func (e *endpoint) Address() string {
return fmt.Sprintf("%s:%s", e.Host, e.Port)
}
var _ = Describe("Podman search", func() {
var (
tempdir string
err error
podmanTest *PodmanTestIntegration
)
const regFileContents = `
[registries.search]
registries = ['localhost:5000']
[registries.insecure]
registries = ['localhost:5000']`
var registryEndpoints = []endpoint{
{"localhost", "5001"},
{"localhost", "5002"},
{"localhost", "5003"},
{"localhost", "5004"},
{"localhost", "5005"},
{"localhost", "5006"},
{"localhost", "5007"},
{"localhost", "5008"},
{"localhost", "5009"},
}
const regFileContents = `
[registries.search]
registries = ['{{.Host}}:{{.Port}}']
[registries.insecure]
registries = ['{{.Host}}:{{.Port}}']`
registryFileTmpl := template.Must(template.New("registryFile").Parse(regFileContents))
const badRegFileContents = `
[registries.search]
registries = ['localhost:5000']
# empty
[registries.insecure]
registries = []`
[registries.search]
registries = ['{{.Host}}:{{.Port}}']
# empty
[registries.insecure]
registries = []`
registryFileBadTmpl := template.Must(template.New("registryFileBad").Parse(badRegFileContents))
const regFileContents2 = `
[registries.search]
registries = ['localhost:5000', 'localhost:6000']
[registries.search]
registries = ['{{.Host}}:{{.Port}}', '{{.Host}}:6000']
[registries.insecure]
registries = ['{{.Host}}:{{.Port}}']`
registryFileTwoTmpl := template.Must(template.New("registryFileTwo").Parse(regFileContents2))
[registries.insecure]
registries = ['localhost:5000']`
BeforeEach(func() {
tempdir, err = CreateTempDirInTempDir()
if err != nil {
os.Exit(1)
}
podmanTest = PodmanTestCreate(tempdir)
podmanTest.Setup()
podmanTest.RestoreAllArtifacts()
})
@@ -51,7 +83,6 @@ var _ = Describe("Podman search", func() {
podmanTest.Cleanup()
f := CurrentGinkgoTestDescription()
processTestResult(f)
})
It("podman search", func() {
@@ -134,11 +165,13 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
lock := GetPortLock("5000")
lock := GetPortLock(registryEndpoints[0].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
fakereg := podmanTest.Podman([]string{"run", "-d", "--name", "registry", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
fakereg := podmanTest.Podman([]string{"run", "-d", "--name", "registry",
"-p", fmt.Sprintf("%s:5000", registryEndpoints[0].Port),
registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
fakereg.WaitWithDefaultTimeout()
Expect(fakereg.ExitCode()).To(Equal(0))
@@ -146,7 +179,8 @@ var _ = Describe("Podman search", func() {
Skip("Can not start docker registry.")
}
search := podmanTest.Podman([]string{"search", "localhost:5000/fake/image:andtag", "--tls-verify=false"})
search := podmanTest.Podman([]string{"search",
fmt.Sprintf("%s/fake/image:andtag", registryEndpoints[0].Address()), "--tls-verify=false"})
search.WaitWithDefaultTimeout()
// if this test succeeded, there will be no output (there is no entry named fake/image:andtag in an empty registry)
@@ -160,10 +194,12 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
lock := GetPortLock("5000")
lock := GetPortLock(registryEndpoints[3].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
registry := podmanTest.Podman([]string{"run", "-d", "--name", "registry3", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry := podmanTest.Podman([]string{"run", "-d", "--name", "registry3",
"-p", fmt.Sprintf("%s:5000", registryEndpoints[3].Port), registry,
"/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
@@ -171,10 +207,11 @@ var _ = Describe("Podman search", func() {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
image := fmt.Sprintf("%s/my-alpine", registryEndpoints[3].Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine", "--tls-verify=false"})
search := podmanTest.Podman([]string{"search", image, "--tls-verify=false"})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -185,10 +222,12 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
lock := GetPortLock("5000")
lock := GetPortLock(registryEndpoints[4].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
registry := podmanTest.Podman([]string{"run", "-d", "--name", "registry4", "-p", "5000:5000", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[4].Port),
"--name", "registry4", registry, "/entrypoint.sh", "/etc/docker/registry/config.yml"})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
@@ -196,14 +235,18 @@ var _ = Describe("Podman search", func() {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
image := fmt.Sprintf("%s/my-alpine", registryEndpoints[4].Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
// registries.conf set up
podmanTest.setRegistriesConfigEnv([]byte(regFileContents))
var buffer bytes.Buffer
registryFileTmpl.Execute(&buffer, registryEndpoints[4])
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
ioutil.WriteFile(fmt.Sprintf("%s/registry4.conf", tempdir), buffer.Bytes(), 0644)
search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine"})
search := podmanTest.Podman([]string{"search", image})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -219,24 +262,29 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
lock := GetPortLock("5000")
lock := GetPortLock(registryEndpoints[5].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
registry := podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry5", registry})
registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[5].Port),
"--name", "registry5", registry})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
if !WaitContainerReady(podmanTest, "registry5", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
image := fmt.Sprintf("%s/my-alpine", registryEndpoints[5].Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
// registries.conf set up
podmanTest.setRegistriesConfigEnv([]byte(regFileContents))
var buffer bytes.Buffer
registryFileTmpl.Execute(&buffer, registryEndpoints[5])
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
ioutil.WriteFile(fmt.Sprintf("%s/registry5.conf", tempdir), buffer.Bytes(), 0644)
search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine", "--tls-verify=true"})
search := podmanTest.Podman([]string{"search", image, "--tls-verify=true"})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -252,24 +300,29 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
lock := GetPortLock("5000")
lock := GetPortLock(registryEndpoints[6].Port)
defer lock.Unlock()
podmanTest.RestoreArtifact(registry)
registry := podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry6", registry})
registry := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[6].Port),
"--name", "registry6", registry})
registry.WaitWithDefaultTimeout()
Expect(registry.ExitCode()).To(Equal(0))
if !WaitContainerReady(podmanTest, "registry6", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/my-alpine"})
image := fmt.Sprintf("%s/my-alpine", registryEndpoints[6].Address())
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, image})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
// registries.conf set up
podmanTest.setRegistriesConfigEnv([]byte(badRegFileContents))
var buffer bytes.Buffer
registryFileBadTmpl.Execute(&buffer, registryEndpoints[6])
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
ioutil.WriteFile(fmt.Sprintf("%s/registry6.conf", tempdir), buffer.Bytes(), 0644)
search := podmanTest.Podman([]string{"search", "localhost:5000/my-alpine"})
search := podmanTest.Podman([]string{"search", image})
search.WaitWithDefaultTimeout()
Expect(search.ExitCode()).To(Equal(0))
@@ -285,10 +338,14 @@ var _ = Describe("Podman search", func() {
if podmanTest.Host.Arch == "ppc64le" {
Skip("No registry image for ppc64le")
}
lock := GetPortLock("5000")
defer lock.Unlock()
lock7 := GetPortLock(registryEndpoints[7].Port)
defer lock7.Unlock()
lock8 := GetPortLock("6000")
defer lock8.Unlock()
podmanTest.RestoreArtifact(registry)
registryLocal := podmanTest.Podman([]string{"run", "-d", "-p", "5000:5000", "--name", "registry7", registry})
registryLocal := podmanTest.Podman([]string{"run", "-d", "-p", fmt.Sprintf("%s:5000", registryEndpoints[7].Port),
"--name", "registry7", registry})
registryLocal.WaitWithDefaultTimeout()
Expect(registryLocal.ExitCode()).To(Equal(0))
@@ -303,12 +360,16 @@ var _ = Describe("Podman search", func() {
if !WaitContainerReady(podmanTest, "registry8", "listening on", 20, 1) {
Skip("Can not start docker registry.")
}
push := podmanTest.Podman([]string{"push", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:6000/my-alpine"})
push.WaitWithDefaultTimeout()
Expect(push.ExitCode()).To(Equal(0))
// registries.conf set up
podmanTest.setRegistriesConfigEnv([]byte(regFileContents2))
var buffer bytes.Buffer
registryFileTwoTmpl.Execute(&buffer, registryEndpoints[8])
podmanTest.setRegistriesConfigEnv(buffer.Bytes())
ioutil.WriteFile(fmt.Sprintf("%s/registry8.conf", tempdir), buffer.Bytes(), 0644)
search := podmanTest.Podman([]string{"search", "my-alpine"})
search.WaitWithDefaultTimeout()
@@ -311,6 +311,8 @@ func (s *PodmanSession) IsJSONOutputValid() bool {
// WaitWithDefaultTimeout waits for process finished with defaultWaitTimeout
func (s *PodmanSession) WaitWithDefaultTimeout() {
s.Wait(defaultWaitTimeout)
os.Stdout.Sync()
os.Stderr.Sync()
fmt.Println("output:", s.OutputToString())
}