mirror of
https://github.com/containers/podman.git
synced 2025-05-21 17:16:22 +08:00
Merge pull request #3031 from baude/remotewindows
enable podman-remote on windows
This commit is contained in:
@ -135,6 +135,7 @@ gating_task:
|
||||
- '/usr/local/bin/entrypoint.sh clean podman-remote'
|
||||
- '/usr/local/bin/entrypoint.sh clean podman BUILDTAGS="exclude_graphdriver_devicemapper selinux seccomp"'
|
||||
- '/usr/local/bin/entrypoint.sh podman-remote-darwin'
|
||||
- '/usr/local/bin/entrypoint.sh podman-remote-windows'
|
||||
|
||||
on_failure:
|
||||
failed_master_script: '$CIRRUS_WORKING_DIR/$SCRIPT_BASE/notice_master_failure.sh'
|
||||
|
3
Makefile
3
Makefile
@ -120,6 +120,9 @@ podman-remote: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on
|
||||
podman-remote-darwin: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman on remote OSX environment
|
||||
GOOS=darwin $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@ $(PROJECT)/cmd/podman
|
||||
|
||||
podman-remote-windows: .gopathok $(PODMAN_VARLINK_DEPENDENCIES) ## Build with podman for a remote windows environment
|
||||
GOOS=windows $(GO) build -ldflags '$(LDFLAGS_PODMAN)' -tags "remoteclient containers_image_openpgp exclude_graphdriver_devicemapper" -o bin/$@.exe $(PROJECT)/cmd/podman
|
||||
|
||||
local-cross: $(CROSS_BUILD_TARGETS) ## Cross local compilation
|
||||
|
||||
bin/podman.cross.%: .gopathok
|
||||
|
@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containers/libpod/cmd/podman/cliconfig"
|
||||
"github.com/containers/libpod/libpod"
|
||||
@ -13,7 +12,6 @@ import (
|
||||
"github.com/containers/libpod/version"
|
||||
"github.com/containers/storage/pkg/reexec"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@ -118,25 +116,13 @@ func before(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
logrus.SetLevel(level)
|
||||
|
||||
rlimits := new(syscall.Rlimit)
|
||||
rlimits.Cur = 1048576
|
||||
rlimits.Max = 1048576
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
|
||||
return errors.Wrapf(err, "error getting rlimits")
|
||||
if err := setRLimits(); err != nil {
|
||||
return err
|
||||
}
|
||||
rlimits.Cur = rlimits.Max
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
|
||||
return errors.Wrapf(err, "error setting new rlimits")
|
||||
}
|
||||
}
|
||||
|
||||
if rootless.IsRootless() {
|
||||
logrus.Info("running as rootless")
|
||||
}
|
||||
|
||||
// Be sure we can create directories with 0755 mode.
|
||||
syscall.Umask(0022)
|
||||
setUMask()
|
||||
return profileOn(cmd)
|
||||
}
|
||||
|
||||
|
@ -4,16 +4,17 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/containers/libpod/cmd/podman/cliconfig"
|
||||
"github.com/containers/libpod/cmd/podman/libpodruntime"
|
||||
"github.com/containers/libpod/pkg/rootless"
|
||||
"io/ioutil"
|
||||
"log/syslog"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containers/libpod/cmd/podman/cliconfig"
|
||||
"github.com/containers/libpod/cmd/podman/libpodruntime"
|
||||
"github.com/containers/libpod/pkg/rootless"
|
||||
"github.com/containers/libpod/pkg/tracing"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/pkg/errors"
|
||||
@ -154,3 +155,23 @@ func setupRootless(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func setRLimits() error {
|
||||
rlimits := new(syscall.Rlimit)
|
||||
rlimits.Cur = 1048576
|
||||
rlimits.Max = 1048576
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
|
||||
return errors.Wrapf(err, "error getting rlimits")
|
||||
}
|
||||
rlimits.Cur = rlimits.Max
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, rlimits); err != nil {
|
||||
return errors.Wrapf(err, "error setting new rlimits")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setUMask() {
|
||||
// Be sure we can create directories with 0755 mode.
|
||||
syscall.Umask(0022)
|
||||
}
|
||||
|
@ -41,3 +41,9 @@ func setupRootless(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setRLimits() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func setUMask() {}
|
||||
|
@ -3,16 +3,13 @@ package libpod
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containers/libpod/pkg/inspect"
|
||||
"github.com/coreos/go-systemd/dbus"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@ -241,62 +238,6 @@ func (c *Container) GetHealthCheckLog() (inspect.HealthCheckResults, error) {
|
||||
return healthCheck, nil
|
||||
}
|
||||
|
||||
// createTimer systemd timers for healthchecks of a container
|
||||
func (c *Container) createTimer() error {
|
||||
if c.disableHealthCheckSystemd() {
|
||||
return nil
|
||||
}
|
||||
podman, err := os.Executable()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get path for podman for a health check timer")
|
||||
}
|
||||
|
||||
var cmd = []string{"--unit", fmt.Sprintf("%s", c.ID()), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID()}
|
||||
|
||||
conn, err := dbus.NewSystemdConnection()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
|
||||
}
|
||||
conn.Close()
|
||||
logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd)
|
||||
systemdRun := exec.Command("systemd-run", cmd...)
|
||||
_, err = systemdRun.CombinedOutput()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// startTimer starts a systemd timer for the healthchecks
|
||||
func (c *Container) startTimer() error {
|
||||
if c.disableHealthCheckSystemd() {
|
||||
return nil
|
||||
}
|
||||
conn, err := dbus.NewSystemdConnection()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
|
||||
}
|
||||
defer conn.Close()
|
||||
_, err = conn.StartUnit(fmt.Sprintf("%s.service", c.ID()), "fail", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// removeTimer removes the systemd timer and unit files
|
||||
// for the container
|
||||
func (c *Container) removeTimer() error {
|
||||
if c.disableHealthCheckSystemd() {
|
||||
return nil
|
||||
}
|
||||
conn, err := dbus.NewSystemdConnection()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
|
||||
}
|
||||
defer conn.Close()
|
||||
serviceFile := fmt.Sprintf("%s.timer", c.ID())
|
||||
_, err = conn.StopUnit(serviceFile, "fail", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// HealthCheckStatus returns the current state of a container with a healthcheck
|
||||
func (c *Container) HealthCheckStatus() (string, error) {
|
||||
if !c.HasHealthCheck() {
|
||||
|
67
libpod/healthcheck_linux.go
Normal file
67
libpod/healthcheck_linux.go
Normal file
@ -0,0 +1,67 @@
|
||||
package libpod
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/coreos/go-systemd/dbus"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// createTimer systemd timers for healthchecks of a container
|
||||
func (c *Container) createTimer() error {
|
||||
if c.disableHealthCheckSystemd() {
|
||||
return nil
|
||||
}
|
||||
podman, err := os.Executable()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get path for podman for a health check timer")
|
||||
}
|
||||
|
||||
var cmd = []string{"--unit", fmt.Sprintf("%s", c.ID()), fmt.Sprintf("--on-unit-inactive=%s", c.HealthCheckConfig().Interval.String()), "--timer-property=AccuracySec=1s", podman, "healthcheck", "run", c.ID()}
|
||||
|
||||
conn, err := dbus.NewSystemdConnection()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
|
||||
}
|
||||
conn.Close()
|
||||
logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd)
|
||||
systemdRun := exec.Command("systemd-run", cmd...)
|
||||
_, err = systemdRun.CombinedOutput()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// startTimer starts a systemd timer for the healthchecks
|
||||
func (c *Container) startTimer() error {
|
||||
if c.disableHealthCheckSystemd() {
|
||||
return nil
|
||||
}
|
||||
conn, err := dbus.NewSystemdConnection()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
|
||||
}
|
||||
defer conn.Close()
|
||||
_, err = conn.StartUnit(fmt.Sprintf("%s.service", c.ID()), "fail", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// removeTimer removes the systemd timer and unit files
|
||||
// for the container
|
||||
func (c *Container) removeTimer() error {
|
||||
if c.disableHealthCheckSystemd() {
|
||||
return nil
|
||||
}
|
||||
conn, err := dbus.NewSystemdConnection()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
|
||||
}
|
||||
defer conn.Close()
|
||||
serviceFile := fmt.Sprintf("%s.timer", c.ID())
|
||||
_, err = conn.StopUnit(serviceFile, "fail", nil)
|
||||
return err
|
||||
}
|
19
libpod/healthcheck_unsupported.go
Normal file
19
libpod/healthcheck_unsupported.go
Normal file
@ -0,0 +1,19 @@
|
||||
// +build !linux
|
||||
|
||||
package libpod
|
||||
|
||||
// createTimer systemd timers for healthchecks of a container
|
||||
func (c *Container) createTimer() error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
// startTimer starts a systemd timer for the healthchecks
|
||||
func (c *Container) startTimer() error {
|
||||
return ErrNotImplemented
|
||||
}
|
||||
|
||||
// removeTimer removes the systemd timer and unit files
|
||||
// for the container
|
||||
func (c *Container) removeTimer() error {
|
||||
return ErrNotImplemented
|
||||
}
|
447
libpod/oci.go
447
libpod/oci.go
@ -1,7 +1,6 @@
|
||||
package libpod
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@ -9,21 +8,15 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containers/libpod/pkg/rootless"
|
||||
"github.com/containers/libpod/pkg/util"
|
||||
"github.com/coreos/go-systemd/activation"
|
||||
"github.com/cri-o/ocicni/pkg/ocicni"
|
||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
// TODO import these functions into libpod and remove the import
|
||||
@ -118,70 +111,6 @@ func createUnitName(prefix string, name string) string {
|
||||
return fmt.Sprintf("%s-%s.scope", prefix, name)
|
||||
}
|
||||
|
||||
// Wait for a container which has been sent a signal to stop
|
||||
func waitContainerStop(ctr *Container, timeout time.Duration) error {
|
||||
done := make(chan struct{})
|
||||
chControl := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-chControl:
|
||||
return
|
||||
default:
|
||||
// Check if the process is still around
|
||||
err := unix.Kill(ctr.state.PID, 0)
|
||||
if err == unix.ESRCH {
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
close(chControl)
|
||||
logrus.Debugf("container %s did not die within timeout %d", ctr.ID(), timeout)
|
||||
return errors.Errorf("container %s did not die within timeout", ctr.ID())
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for a set of given PIDs to stop
|
||||
func waitPidsStop(pids []int, timeout time.Duration) error {
|
||||
done := make(chan struct{})
|
||||
chControl := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-chControl:
|
||||
return
|
||||
default:
|
||||
allClosed := true
|
||||
for _, pid := range pids {
|
||||
if err := unix.Kill(pid, 0); err != unix.ESRCH {
|
||||
allClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if allClosed {
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
close(chControl)
|
||||
return errors.Errorf("given PIDs did not die within timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
|
||||
var files []*os.File
|
||||
notifySCTP := false
|
||||
@ -234,241 +163,6 @@ func bindPorts(ports []ocicni.PortMapping) ([]*os.File, error) {
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
|
||||
var stderrBuf bytes.Buffer
|
||||
|
||||
runtimeDir, err := util.GetRootlessRuntimeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parentPipe, childPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating socket pair")
|
||||
}
|
||||
|
||||
childStartPipe, parentStartPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating socket pair for start pipe")
|
||||
}
|
||||
|
||||
defer parentPipe.Close()
|
||||
defer parentStartPipe.Close()
|
||||
|
||||
args := []string{}
|
||||
if r.cgroupManager == SystemdCgroupsManager {
|
||||
args = append(args, "-s")
|
||||
}
|
||||
args = append(args, "-c", ctr.ID())
|
||||
args = append(args, "-u", ctr.ID())
|
||||
args = append(args, "-r", r.path)
|
||||
args = append(args, "-b", ctr.bundlePath())
|
||||
args = append(args, "-p", filepath.Join(ctr.state.RunDir, "pidfile"))
|
||||
args = append(args, "-l", ctr.LogPath())
|
||||
args = append(args, "--exit-dir", r.exitsDir)
|
||||
if ctr.config.ConmonPidFile != "" {
|
||||
args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile)
|
||||
}
|
||||
if len(ctr.config.ExitCommand) > 0 {
|
||||
args = append(args, "--exit-command", ctr.config.ExitCommand[0])
|
||||
for _, arg := range ctr.config.ExitCommand[1:] {
|
||||
args = append(args, []string{"--exit-command-arg", arg}...)
|
||||
}
|
||||
}
|
||||
args = append(args, "--socket-dir-path", r.socketsDir)
|
||||
if ctr.config.Spec.Process.Terminal {
|
||||
args = append(args, "-t")
|
||||
} else if ctr.config.Stdin {
|
||||
args = append(args, "-i")
|
||||
}
|
||||
if r.logSizeMax >= 0 {
|
||||
args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
|
||||
}
|
||||
if r.noPivot {
|
||||
args = append(args, "--no-pivot")
|
||||
}
|
||||
|
||||
logLevel := logrus.GetLevel()
|
||||
args = append(args, "--log-level", logLevel.String())
|
||||
|
||||
if logLevel == logrus.DebugLevel {
|
||||
logrus.Debugf("%s messages will be logged to syslog", r.conmonPath)
|
||||
args = append(args, "--syslog")
|
||||
}
|
||||
|
||||
if restoreOptions != nil {
|
||||
args = append(args, "--restore", ctr.CheckpointPath())
|
||||
if restoreOptions.TCPEstablished {
|
||||
args = append(args, "--restore-arg", "--tcp-established")
|
||||
}
|
||||
}
|
||||
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"args": args,
|
||||
}).Debugf("running conmon: %s", r.conmonPath)
|
||||
|
||||
cmd := exec.Command(r.conmonPath, args...)
|
||||
cmd.Dir = ctr.bundlePath()
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
// TODO this is probably a really bad idea for some uses
|
||||
// Make this configurable
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if ctr.config.Spec.Process.Terminal {
|
||||
cmd.Stderr = &stderrBuf
|
||||
}
|
||||
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe, childStartPipe)
|
||||
// 0, 1 and 2 are stdin, stdout and stderr
|
||||
cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME")))
|
||||
|
||||
if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() {
|
||||
ports, err := bindPorts(ctr.config.PortMappings)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Leak the port we bound in the conmon process. These fd's won't be used
|
||||
// by the container and conmon will keep the ports busy so that another
|
||||
// process cannot use them.
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
|
||||
}
|
||||
|
||||
if ctr.config.NetMode.IsSlirp4netns() {
|
||||
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create rootless network sync pipe")
|
||||
}
|
||||
// Leak one end in conmon, the other one will be leaked into slirp4netns
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW)
|
||||
}
|
||||
|
||||
if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify))
|
||||
}
|
||||
if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1")
|
||||
fds := activation.Files(false)
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, fds...)
|
||||
}
|
||||
if selinux.GetEnabled() {
|
||||
// Set the label of the conmon process to be level :s0
|
||||
// This will allow the container processes to talk to fifo-files
|
||||
// passed into the container by conmon
|
||||
var (
|
||||
plabel string
|
||||
con selinux.Context
|
||||
)
|
||||
plabel, err = selinux.CurrentLabel()
|
||||
if err != nil {
|
||||
childPipe.Close()
|
||||
return errors.Wrapf(err, "Failed to get current SELinux label")
|
||||
}
|
||||
|
||||
con, err = selinux.NewContext(plabel)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to get new context from SELinux label")
|
||||
}
|
||||
|
||||
runtime.LockOSThread()
|
||||
if con["level"] != "s0" && con["level"] != "" {
|
||||
con["level"] = "s0"
|
||||
if err = label.SetProcessLabel(con.Get()); err != nil {
|
||||
runtime.UnlockOSThread()
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = cmd.Start()
|
||||
// Ignore error returned from SetProcessLabel("") call,
|
||||
// can't recover.
|
||||
label.SetProcessLabel("")
|
||||
runtime.UnlockOSThread()
|
||||
} else {
|
||||
err = cmd.Start()
|
||||
}
|
||||
if err != nil {
|
||||
childPipe.Close()
|
||||
return err
|
||||
}
|
||||
defer cmd.Wait()
|
||||
|
||||
// We don't need childPipe on the parent side
|
||||
childPipe.Close()
|
||||
childStartPipe.Close()
|
||||
|
||||
// Move conmon to specified cgroup
|
||||
if err := r.moveConmonToCgroup(ctr, cgroupParent, cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* We set the cgroup, now the child can start creating children */
|
||||
someData := []byte{0}
|
||||
_, err = parentStartPipe.Write(someData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* Wait for initial setup and fork, and reap child */
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if err2 := r.deleteContainer(ctr); err2 != nil {
|
||||
logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait to get container pid from conmon
|
||||
type syncStruct struct {
|
||||
si *syncInfo
|
||||
err error
|
||||
}
|
||||
ch := make(chan syncStruct)
|
||||
go func() {
|
||||
var si *syncInfo
|
||||
rdr := bufio.NewReader(parentPipe)
|
||||
b, err := rdr.ReadBytes('\n')
|
||||
if err != nil {
|
||||
ch <- syncStruct{err: err}
|
||||
}
|
||||
if err := json.Unmarshal(b, &si); err != nil {
|
||||
ch <- syncStruct{err: err}
|
||||
return
|
||||
}
|
||||
ch <- syncStruct{si: si}
|
||||
}()
|
||||
|
||||
select {
|
||||
case ss := <-ch:
|
||||
if ss.err != nil {
|
||||
return errors.Wrapf(ss.err, "error reading container (probably exited) json message")
|
||||
}
|
||||
logrus.Debugf("Received container pid: %d", ss.si.Pid)
|
||||
if ss.si.Pid == -1 {
|
||||
if ss.si.Message != "" {
|
||||
return errors.Wrapf(ErrInternal, "container create failed: %s", ss.si.Message)
|
||||
}
|
||||
return errors.Wrapf(ErrInternal, "container create failed")
|
||||
}
|
||||
ctr.state.PID = ss.si.Pid
|
||||
case <-time.After(ContainerCreateTimeout):
|
||||
return errors.Wrapf(ErrInternal, "container creation timeout")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateContainerStatus retrieves the current status of the container from the
|
||||
// runtime. It updates the container's state but does not save it.
|
||||
// If useRunc is false, we will not directly hit runc to see the container's
|
||||
@ -631,82 +325,6 @@ func (r *OCIRuntime) killContainer(ctr *Container, signal uint) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopContainer stops a container, first using its given stop signal (or
|
||||
// SIGTERM if no signal was specified), then using SIGKILL
|
||||
// Timeout is given in seconds. If timeout is 0, the container will be
|
||||
// immediately kill with SIGKILL
|
||||
// Does not set finished time for container, assumes you will run updateStatus
|
||||
// after to pull the exit code
|
||||
func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error {
|
||||
logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID)
|
||||
|
||||
// Ping the container to see if it's alive
|
||||
// If it's not, it's already stopped, return
|
||||
err := unix.Kill(ctr.state.PID, 0)
|
||||
if err == unix.ESRCH {
|
||||
return nil
|
||||
}
|
||||
|
||||
stopSignal := ctr.config.StopSignal
|
||||
if stopSignal == 0 {
|
||||
stopSignal = uint(syscall.SIGTERM)
|
||||
}
|
||||
|
||||
if timeout > 0 {
|
||||
if err := r.killContainer(ctr, stopSignal); err != nil {
|
||||
// Is the container gone?
|
||||
// If so, it probably died between the first check and
|
||||
// our sending the signal
|
||||
// The container is stopped, so exit cleanly
|
||||
err := unix.Kill(ctr.state.PID, 0)
|
||||
if err == unix.ESRCH {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil {
|
||||
logrus.Warnf("Timed out stopping container %s, resorting to SIGKILL", ctr.ID())
|
||||
} else {
|
||||
// No error, the container is dead
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var args []string
|
||||
if rootless.IsRootless() {
|
||||
// we don't use --all for rootless containers as the OCI runtime might use
|
||||
// the cgroups to determine the PIDs, but for rootless containers there is
|
||||
// not any.
|
||||
args = []string{"kill", ctr.ID(), "KILL"}
|
||||
} else {
|
||||
args = []string{"kill", "--all", ctr.ID(), "KILL"}
|
||||
}
|
||||
|
||||
runtimeDir, err := util.GetRootlessRuntimeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
|
||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil {
|
||||
// Again, check if the container is gone. If it is, exit cleanly.
|
||||
err := unix.Kill(ctr.state.PID, 0)
|
||||
if err == unix.ESRCH {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID())
|
||||
}
|
||||
|
||||
// Give runtime a few seconds to make it happen
|
||||
if err := waitContainerStop(ctr, killContainerTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteContainer deletes a container from the OCI runtime
|
||||
func (r *OCIRuntime) deleteContainer(ctr *Container) error {
|
||||
runtimeDir, err := util.GetRootlessRuntimeDir()
|
||||
@ -834,71 +452,6 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
|
||||
return execCmd, nil
|
||||
}
|
||||
|
||||
// execStopContainer stops all active exec sessions in a container
|
||||
// It will also stop all other processes in the container. It is only intended
|
||||
// to be used to assist in cleanup when removing a container.
|
||||
// SIGTERM is used by default to stop processes. If SIGTERM fails, SIGKILL will be used.
|
||||
func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error {
|
||||
// Do we have active exec sessions?
|
||||
if len(ctr.state.ExecSessions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a list of active exec sessions
|
||||
execSessions := []int{}
|
||||
for _, session := range ctr.state.ExecSessions {
|
||||
pid := session.PID
|
||||
// Ping the PID with signal 0 to see if it still exists
|
||||
if err := unix.Kill(pid, 0); err == unix.ESRCH {
|
||||
continue
|
||||
}
|
||||
|
||||
execSessions = append(execSessions, pid)
|
||||
}
|
||||
|
||||
// All the sessions may be dead
|
||||
// If they are, just return
|
||||
if len(execSessions) == 0 {
|
||||
return nil
|
||||
}
|
||||
runtimeDir, err := util.GetRootlessRuntimeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
|
||||
|
||||
// If timeout is 0, just use SIGKILL
|
||||
if timeout > 0 {
|
||||
// Stop using SIGTERM by default
|
||||
// Use SIGSTOP after a timeout
|
||||
logrus.Debugf("Killing all processes in container %s with SIGTERM", ctr.ID())
|
||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "TERM"); err != nil {
|
||||
return errors.Wrapf(err, "error sending SIGTERM to container %s processes", ctr.ID())
|
||||
}
|
||||
|
||||
// Wait for all processes to stop
|
||||
if err := waitPidsStop(execSessions, time.Duration(timeout)*time.Second); err != nil {
|
||||
logrus.Warnf("Timed out stopping container %s exec sessions", ctr.ID())
|
||||
} else {
|
||||
// No error, all exec sessions are dead
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Send SIGKILL
|
||||
logrus.Debugf("Killing all processes in container %s with SIGKILL", ctr.ID())
|
||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "KILL"); err != nil {
|
||||
return errors.Wrapf(err, "error sending SIGKILL to container %s processes", ctr.ID())
|
||||
}
|
||||
|
||||
// Give the processes a few seconds to go down
|
||||
if err := waitPidsStop(execSessions, killContainerTimeout); err != nil {
|
||||
return errors.Wrapf(err, "failed to kill container %s exec sessions", ctr.ID())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkpointContainer checkpoints the given container
|
||||
func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
|
||||
label.SetSocketLabel(ctr.ProcessLabel())
|
||||
|
@ -3,6 +3,8 @@
|
||||
package libpod
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
@ -10,12 +12,17 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/cgroups"
|
||||
"github.com/containers/libpod/pkg/rootless"
|
||||
"github.com/containers/libpod/pkg/util"
|
||||
"github.com/containers/libpod/utils"
|
||||
pmount "github.com/containers/storage/pkg/mount"
|
||||
"github.com/coreos/go-systemd/activation"
|
||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
@ -179,3 +186,443 @@ func (r *OCIRuntime) conmonPackage() string {
|
||||
}
|
||||
return dpkgVersion(r.conmonPath)
|
||||
}
|
||||
|
||||
func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
|
||||
var stderrBuf bytes.Buffer
|
||||
|
||||
runtimeDir, err := util.GetRootlessRuntimeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parentPipe, childPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating socket pair")
|
||||
}
|
||||
|
||||
childStartPipe, parentStartPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating socket pair for start pipe")
|
||||
}
|
||||
|
||||
defer parentPipe.Close()
|
||||
defer parentStartPipe.Close()
|
||||
|
||||
args := []string{}
|
||||
if r.cgroupManager == SystemdCgroupsManager {
|
||||
args = append(args, "-s")
|
||||
}
|
||||
args = append(args, "-c", ctr.ID())
|
||||
args = append(args, "-u", ctr.ID())
|
||||
args = append(args, "-r", r.path)
|
||||
args = append(args, "-b", ctr.bundlePath())
|
||||
args = append(args, "-p", filepath.Join(ctr.state.RunDir, "pidfile"))
|
||||
args = append(args, "-l", ctr.LogPath())
|
||||
args = append(args, "--exit-dir", r.exitsDir)
|
||||
if ctr.config.ConmonPidFile != "" {
|
||||
args = append(args, "--conmon-pidfile", ctr.config.ConmonPidFile)
|
||||
}
|
||||
if len(ctr.config.ExitCommand) > 0 {
|
||||
args = append(args, "--exit-command", ctr.config.ExitCommand[0])
|
||||
for _, arg := range ctr.config.ExitCommand[1:] {
|
||||
args = append(args, []string{"--exit-command-arg", arg}...)
|
||||
}
|
||||
}
|
||||
args = append(args, "--socket-dir-path", r.socketsDir)
|
||||
if ctr.config.Spec.Process.Terminal {
|
||||
args = append(args, "-t")
|
||||
} else if ctr.config.Stdin {
|
||||
args = append(args, "-i")
|
||||
}
|
||||
if r.logSizeMax >= 0 {
|
||||
args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
|
||||
}
|
||||
if r.noPivot {
|
||||
args = append(args, "--no-pivot")
|
||||
}
|
||||
|
||||
logLevel := logrus.GetLevel()
|
||||
args = append(args, "--log-level", logLevel.String())
|
||||
|
||||
if logLevel == logrus.DebugLevel {
|
||||
logrus.Debugf("%s messages will be logged to syslog", r.conmonPath)
|
||||
args = append(args, "--syslog")
|
||||
}
|
||||
|
||||
if restoreOptions != nil {
|
||||
args = append(args, "--restore", ctr.CheckpointPath())
|
||||
if restoreOptions.TCPEstablished {
|
||||
args = append(args, "--restore-arg", "--tcp-established")
|
||||
}
|
||||
}
|
||||
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"args": args,
|
||||
}).Debugf("running conmon: %s", r.conmonPath)
|
||||
|
||||
cmd := exec.Command(r.conmonPath, args...)
|
||||
cmd.Dir = ctr.bundlePath()
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
// TODO this is probably a really bad idea for some uses
|
||||
// Make this configurable
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if ctr.config.Spec.Process.Terminal {
|
||||
cmd.Stderr = &stderrBuf
|
||||
}
|
||||
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe, childStartPipe)
|
||||
// 0, 1 and 2 are stdin, stdout and stderr
|
||||
cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_USERNS_CONFIGURED=%s", os.Getenv("_CONTAINERS_USERNS_CONFIGURED")))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%s", os.Getenv("_CONTAINERS_ROOTLESS_UID")))
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("HOME=%s", os.Getenv("HOME")))
|
||||
|
||||
if r.reservePorts && !ctr.config.NetMode.IsSlirp4netns() {
|
||||
ports, err := bindPorts(ctr.config.PortMappings)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Leak the port we bound in the conmon process. These fd's won't be used
|
||||
// by the container and conmon will keep the ports busy so that another
|
||||
// process cannot use them.
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, ports...)
|
||||
}
|
||||
|
||||
if ctr.config.NetMode.IsSlirp4netns() {
|
||||
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create rootless network sync pipe")
|
||||
}
|
||||
// Leak one end in conmon, the other one will be leaked into slirp4netns
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, ctr.rootlessSlirpSyncW)
|
||||
}
|
||||
|
||||
if notify, ok := os.LookupEnv("NOTIFY_SOCKET"); ok {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("NOTIFY_SOCKET=%s", notify))
|
||||
}
|
||||
if listenfds, ok := os.LookupEnv("LISTEN_FDS"); ok {
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("LISTEN_FDS=%s", listenfds), "LISTEN_PID=1")
|
||||
fds := activation.Files(false)
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, fds...)
|
||||
}
|
||||
if selinux.GetEnabled() {
|
||||
// Set the label of the conmon process to be level :s0
|
||||
// This will allow the container processes to talk to fifo-files
|
||||
// passed into the container by conmon
|
||||
var (
|
||||
plabel string
|
||||
con selinux.Context
|
||||
)
|
||||
plabel, err = selinux.CurrentLabel()
|
||||
if err != nil {
|
||||
childPipe.Close()
|
||||
return errors.Wrapf(err, "Failed to get current SELinux label")
|
||||
}
|
||||
|
||||
con, err = selinux.NewContext(plabel)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to get new context from SELinux label")
|
||||
}
|
||||
|
||||
runtime.LockOSThread()
|
||||
if con["level"] != "s0" && con["level"] != "" {
|
||||
con["level"] = "s0"
|
||||
if err = label.SetProcessLabel(con.Get()); err != nil {
|
||||
runtime.UnlockOSThread()
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = cmd.Start()
|
||||
// Ignore error returned from SetProcessLabel("") call,
|
||||
// can't recover.
|
||||
label.SetProcessLabel("")
|
||||
runtime.UnlockOSThread()
|
||||
} else {
|
||||
err = cmd.Start()
|
||||
}
|
||||
if err != nil {
|
||||
childPipe.Close()
|
||||
return err
|
||||
}
|
||||
defer cmd.Wait()
|
||||
|
||||
// We don't need childPipe on the parent side
|
||||
childPipe.Close()
|
||||
childStartPipe.Close()
|
||||
|
||||
// Move conmon to specified cgroup
|
||||
if err := r.moveConmonToCgroup(ctr, cgroupParent, cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* We set the cgroup, now the child can start creating children */
|
||||
someData := []byte{0}
|
||||
_, err = parentStartPipe.Write(someData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
/* Wait for initial setup and fork, and reap child */
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if err2 := r.deleteContainer(ctr); err2 != nil {
|
||||
logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait to get container pid from conmon
|
||||
type syncStruct struct {
|
||||
si *syncInfo
|
||||
err error
|
||||
}
|
||||
ch := make(chan syncStruct)
|
||||
go func() {
|
||||
var si *syncInfo
|
||||
rdr := bufio.NewReader(parentPipe)
|
||||
b, err := rdr.ReadBytes('\n')
|
||||
if err != nil {
|
||||
ch <- syncStruct{err: err}
|
||||
}
|
||||
if err := json.Unmarshal(b, &si); err != nil {
|
||||
ch <- syncStruct{err: err}
|
||||
return
|
||||
}
|
||||
ch <- syncStruct{si: si}
|
||||
}()
|
||||
|
||||
select {
|
||||
case ss := <-ch:
|
||||
if ss.err != nil {
|
||||
return errors.Wrapf(ss.err, "error reading container (probably exited) json message")
|
||||
}
|
||||
logrus.Debugf("Received container pid: %d", ss.si.Pid)
|
||||
if ss.si.Pid == -1 {
|
||||
if ss.si.Message != "" {
|
||||
return errors.Wrapf(ErrInternal, "container create failed: %s", ss.si.Message)
|
||||
}
|
||||
return errors.Wrapf(ErrInternal, "container create failed")
|
||||
}
|
||||
ctr.state.PID = ss.si.Pid
|
||||
case <-time.After(ContainerCreateTimeout):
|
||||
return errors.Wrapf(ErrInternal, "container creation timeout")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait for a container which has been sent a signal to stop
|
||||
func waitContainerStop(ctr *Container, timeout time.Duration) error {
|
||||
done := make(chan struct{})
|
||||
chControl := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-chControl:
|
||||
return
|
||||
default:
|
||||
// Check if the process is still around
|
||||
err := unix.Kill(ctr.state.PID, 0)
|
||||
if err == unix.ESRCH {
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
close(chControl)
|
||||
logrus.Debugf("container %s did not die within timeout %d", ctr.ID(), timeout)
|
||||
return errors.Errorf("container %s did not die within timeout", ctr.ID())
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for a set of given PIDs to stop
|
||||
func waitPidsStop(pids []int, timeout time.Duration) error {
|
||||
done := make(chan struct{})
|
||||
chControl := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-chControl:
|
||||
return
|
||||
default:
|
||||
allClosed := true
|
||||
for _, pid := range pids {
|
||||
if err := unix.Kill(pid, 0); err != unix.ESRCH {
|
||||
allClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if allClosed {
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
return nil
|
||||
case <-time.After(timeout):
|
||||
close(chControl)
|
||||
return errors.Errorf("given PIDs did not die within timeout")
|
||||
}
|
||||
}
|
||||
|
||||
// stopContainer stops a container, first using its given stop signal (or
// SIGTERM if no signal was specified), then using SIGKILL.
// Timeout is given in seconds. If timeout is 0, the graceful phase is
// skipped and the container is immediately killed with SIGKILL.
// Does not set finished time for container; assumes you will run updateStatus
// after to pull the exit code.
func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error {
	logrus.Debugf("Stopping container %s (PID %d)", ctr.ID(), ctr.state.PID)

	// Ping the container to see if it's alive.
	// If it's not, it's already stopped, return.
	err := unix.Kill(ctr.state.PID, 0)
	if err == unix.ESRCH {
		return nil
	}

	stopSignal := ctr.config.StopSignal
	if stopSignal == 0 {
		// No stop signal configured for this container — default to SIGTERM.
		stopSignal = uint(syscall.SIGTERM)
	}

	// Graceful phase: send the stop signal and wait up to `timeout` seconds.
	if timeout > 0 {
		if err := r.killContainer(ctr, stopSignal); err != nil {
			// Is the container gone?
			// If so, it probably died between the first check and
			// our sending the signal.
			// The container is stopped, so exit cleanly.
			err := unix.Kill(ctr.state.PID, 0)
			if err == unix.ESRCH {
				return nil
			}

			return err
		}

		if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil {
			logrus.Warnf("Timed out stopping container %s, resorting to SIGKILL", ctr.ID())
		} else {
			// No error, the container is dead.
			return nil
		}
	}

	// Forceful phase: escalate to SIGKILL via the OCI runtime's kill command.
	var args []string
	if rootless.IsRootless() {
		// we don't use --all for rootless containers as the OCI runtime might use
		// the cgroups to determine the PIDs, but for rootless containers there is
		// not any.
		args = []string{"kill", ctr.ID(), "KILL"}
	} else {
		args = []string{"kill", "--all", ctr.ID(), "KILL"}
	}

	runtimeDir, err := util.GetRootlessRuntimeDir()
	if err != nil {
		return err
	}
	env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
	if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, args...); err != nil {
		// Again, check if the container is gone. If it is, exit cleanly.
		err := unix.Kill(ctr.state.PID, 0)
		if err == unix.ESRCH {
			return nil
		}

		return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID())
	}

	// Give runtime a few seconds to make it happen.
	if err := waitContainerStop(ctr, killContainerTimeout); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// execStopContainer stops all active exec sessions in a container
|
||||
// It will also stop all other processes in the container. It is only intended
|
||||
// to be used to assist in cleanup when removing a container.
|
||||
// SIGTERM is used by default to stop processes. If SIGTERM fails, SIGKILL will be used.
|
||||
func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error {
|
||||
// Do we have active exec sessions?
|
||||
if len(ctr.state.ExecSessions) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a list of active exec sessions
|
||||
execSessions := []int{}
|
||||
for _, session := range ctr.state.ExecSessions {
|
||||
pid := session.PID
|
||||
// Ping the PID with signal 0 to see if it still exists
|
||||
if err := unix.Kill(pid, 0); err == unix.ESRCH {
|
||||
continue
|
||||
}
|
||||
|
||||
execSessions = append(execSessions, pid)
|
||||
}
|
||||
|
||||
// All the sessions may be dead
|
||||
// If they are, just return
|
||||
if len(execSessions) == 0 {
|
||||
return nil
|
||||
}
|
||||
runtimeDir, err := util.GetRootlessRuntimeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env := []string{fmt.Sprintf("XDG_RUNTIME_DIR=%s", runtimeDir)}
|
||||
|
||||
// If timeout is 0, just use SIGKILL
|
||||
if timeout > 0 {
|
||||
// Stop using SIGTERM by default
|
||||
// Use SIGSTOP after a timeout
|
||||
logrus.Debugf("Killing all processes in container %s with SIGTERM", ctr.ID())
|
||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "TERM"); err != nil {
|
||||
return errors.Wrapf(err, "error sending SIGTERM to container %s processes", ctr.ID())
|
||||
}
|
||||
|
||||
// Wait for all processes to stop
|
||||
if err := waitPidsStop(execSessions, time.Duration(timeout)*time.Second); err != nil {
|
||||
logrus.Warnf("Timed out stopping container %s exec sessions", ctr.ID())
|
||||
} else {
|
||||
// No error, all exec sessions are dead
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Send SIGKILL
|
||||
logrus.Debugf("Killing all processes in container %s with SIGKILL", ctr.ID())
|
||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, env, r.path, "kill", "--all", ctr.ID(), "KILL"); err != nil {
|
||||
return errors.Wrapf(err, "error sending SIGKILL to container %s processes", ctr.ID())
|
||||
}
|
||||
|
||||
// Give the processes a few seconds to go down
|
||||
if err := waitPidsStop(execSessions, killContainerTimeout); err != nil {
|
||||
return errors.Wrapf(err, "failed to kill container %s exec sessions", ctr.ID())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -26,3 +26,15 @@ func (r *OCIRuntime) pathPackage() string {
|
||||
// conmonPackage reports packaging information for the conmon binary.
// Not available on this (unsupported) platform, so it always returns "".
func (r *OCIRuntime) conmonPackage() string {
	return ""
}
|
||||
|
||||
// createOCIContainer is a stub for unsupported platforms; it always
// returns ErrOSNotSupported.
func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, restoreOptions *ContainerCheckpointOptions) (err error) {
	return ErrOSNotSupported
}
|
||||
|
||||
// execStopContainer is a stub for unsupported platforms; it always
// returns ErrOSNotSupported.
func (r *OCIRuntime) execStopContainer(ctr *Container, timeout uint) error {
	return ErrOSNotSupported
}
|
||||
|
||||
// stopContainer is a stub for unsupported platforms; it always
// returns ErrOSNotSupported.
func (r *OCIRuntime) stopContainer(ctr *Container, timeout uint) error {
	return ErrOSNotSupported
}
|
||||
|
1
pkg/adapter/runtime_remote_supported.go
Normal file
1
pkg/adapter/runtime_remote_supported.go
Normal file
@ -0,0 +1 @@
|
||||
package adapter
|
@ -2,16 +2,12 @@ package adapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
|
||||
"github.com/containers/libpod/libpod"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
)
|
||||
|
||||
@ -19,83 +15,6 @@ import (
|
||||
// RawTtyFormatter is the logrus formatter installed while the terminal is
// in raw mode (see the SetFormatter call in StartAttachCtr). Its Format
// method is defined elsewhere in this package.
type RawTtyFormatter struct {
}
|
||||
|
||||
// StartAttachCtr starts and (if required) attaches to a container.
// stdout/stderr/stdin may each be nil to skip attaching that stream.
// When startContainer is false, the container is only attached to.
func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error {
	// Channel over which terminal resize events are delivered to the
	// attach session.
	resize := make(chan remotecommand.TerminalSize)

	haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))

	// Check if we are attached to a terminal. If we are, generate resize
	// events, and set the terminal to raw mode
	if haveTerminal && ctr.Spec().Process.Terminal {
		logrus.Debugf("Handling terminal attach")

		subCtx, cancel := context.WithCancel(ctx)
		defer cancel()

		resizeTty(subCtx, resize)

		oldTermState, err := term.SaveState(os.Stdin.Fd())
		if err != nil {
			return errors.Wrapf(err, "unable to save terminal state")
		}

		// Use a raw-tty-aware log formatter while the terminal is raw.
		logrus.SetFormatter(&RawTtyFormatter{})
		term.SetRawTerminal(os.Stdin.Fd())

		// Put the terminal back however the function exits.
		defer restoreTerminal(oldTermState)
	}

	streams := new(libpod.AttachStreams)
	streams.OutputStream = stdout
	streams.ErrorStream = stderr
	streams.InputStream = stdin
	streams.AttachOutput = true
	streams.AttachError = true
	streams.AttachInput = true

	// A nil stream disables attaching the corresponding channel.
	if stdout == nil {
		logrus.Debugf("Not attaching to stdout")
		streams.AttachOutput = false
	}
	if stderr == nil {
		logrus.Debugf("Not attaching to stderr")
		streams.AttachError = false
	}
	if stdin == nil {
		logrus.Debugf("Not attaching to stdin")
		streams.AttachInput = false
	}

	if !startContainer {
		if sigProxy {
			ProxySignals(ctr)
		}

		// Attach-only path; blocks until the attach session ends.
		return ctr.Attach(streams, detachKeys, resize)
	}

	attachChan, err := ctr.StartAndAttach(ctx, streams, detachKeys, resize, recursive)
	if err != nil {
		return err
	}

	if sigProxy {
		ProxySignals(ctr)
	}

	// With neither stdout nor stderr attached, emit the container ID.
	if stdout == nil && stderr == nil {
		fmt.Printf("%s\n", ctr.ID())
	}

	// Block until the attach goroutine reports completion.
	err = <-attachChan
	if err != nil {
		return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
	}

	return nil
}
|
||||
|
||||
// getResize returns a TerminalSize command matching stdin's current
|
||||
// size on success, and nil on errors.
|
||||
func getResize() *remotecommand.TerminalSize {
|
||||
|
91
pkg/adapter/terminal_linux.go
Normal file
91
pkg/adapter/terminal_linux.go
Normal file
@ -0,0 +1,91 @@
|
||||
package adapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containers/libpod/libpod"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
)
|
||||
|
||||
// StartAttachCtr starts and (if required) attaches to a container
|
||||
func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error {
|
||||
resize := make(chan remotecommand.TerminalSize)
|
||||
|
||||
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
|
||||
|
||||
// Check if we are attached to a terminal. If we are, generate resize
|
||||
// events, and set the terminal to raw mode
|
||||
if haveTerminal && ctr.Spec().Process.Terminal {
|
||||
logrus.Debugf("Handling terminal attach")
|
||||
|
||||
subCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
resizeTty(subCtx, resize)
|
||||
|
||||
oldTermState, err := term.SaveState(os.Stdin.Fd())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to save terminal state")
|
||||
}
|
||||
|
||||
logrus.SetFormatter(&RawTtyFormatter{})
|
||||
term.SetRawTerminal(os.Stdin.Fd())
|
||||
|
||||
defer restoreTerminal(oldTermState)
|
||||
}
|
||||
|
||||
streams := new(libpod.AttachStreams)
|
||||
streams.OutputStream = stdout
|
||||
streams.ErrorStream = stderr
|
||||
streams.InputStream = stdin
|
||||
streams.AttachOutput = true
|
||||
streams.AttachError = true
|
||||
streams.AttachInput = true
|
||||
|
||||
if stdout == nil {
|
||||
logrus.Debugf("Not attaching to stdout")
|
||||
streams.AttachOutput = false
|
||||
}
|
||||
if stderr == nil {
|
||||
logrus.Debugf("Not attaching to stderr")
|
||||
streams.AttachError = false
|
||||
}
|
||||
if stdin == nil {
|
||||
logrus.Debugf("Not attaching to stdin")
|
||||
streams.AttachInput = false
|
||||
}
|
||||
|
||||
if !startContainer {
|
||||
if sigProxy {
|
||||
ProxySignals(ctr)
|
||||
}
|
||||
|
||||
return ctr.Attach(streams, detachKeys, resize)
|
||||
}
|
||||
|
||||
attachChan, err := ctr.StartAndAttach(ctx, streams, detachKeys, resize, recursive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sigProxy {
|
||||
ProxySignals(ctr)
|
||||
}
|
||||
|
||||
if stdout == nil && stderr == nil {
|
||||
fmt.Printf("%s\n", ctr.ID())
|
||||
}
|
||||
|
||||
err = <-attachChan
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error attaching to container %s", ctr.ID())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -244,3 +244,9 @@ func makeThrottleArray(throttleInput []string, rateType int) ([]spec.LinuxThrott
|
||||
}
|
||||
return ltds, nil
|
||||
}
|
||||
|
||||
func getStatFromPath(path string) (unix.Stat_t, error) {
|
||||
s := unix.Stat_t{}
|
||||
err := unix.Stat(path, &s)
|
||||
return s, err
|
||||
}
|
||||
|
@ -19,7 +19,6 @@ import (
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Type constants
|
||||
@ -643,9 +642,3 @@ func NatToOCIPortBindings(ports nat.PortMap) ([]ocicni.PortMapping, error) {
|
||||
func (c *CreateConfig) AddPrivilegedDevices(g *generate.Generator) error {
|
||||
return c.addPrivilegedDevices(g)
|
||||
}
|
||||
|
||||
func getStatFromPath(path string) (unix.Stat_t, error) {
|
||||
s := unix.Stat_t{}
|
||||
err := unix.Stat(path, &s)
|
||||
return s, err
|
||||
}
|
||||
|
@ -6,12 +6,10 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/libpod/pkg/rootless"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@ -187,76 +185,6 @@ var (
|
||||
rootlessRuntimeDir string
|
||||
)
|
||||
|
||||
// GetRootlessRuntimeDir returns the runtime directory when running as non root
|
||||
func GetRootlessRuntimeDir() (string, error) {
|
||||
var rootlessRuntimeDirError error
|
||||
|
||||
rootlessRuntimeDirOnce.Do(func() {
|
||||
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||
uid := fmt.Sprintf("%d", rootless.GetRootlessUID())
|
||||
if runtimeDir == "" {
|
||||
tmpDir := filepath.Join("/run", "user", uid)
|
||||
os.MkdirAll(tmpDir, 0700)
|
||||
st, err := os.Stat(tmpDir)
|
||||
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
|
||||
runtimeDir = tmpDir
|
||||
}
|
||||
}
|
||||
if runtimeDir == "" {
|
||||
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("run-%s", uid))
|
||||
os.MkdirAll(tmpDir, 0700)
|
||||
st, err := os.Stat(tmpDir)
|
||||
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
|
||||
runtimeDir = tmpDir
|
||||
}
|
||||
}
|
||||
if runtimeDir == "" {
|
||||
home := os.Getenv("HOME")
|
||||
if home == "" {
|
||||
rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
|
||||
return
|
||||
}
|
||||
resolvedHome, err := filepath.EvalSymlinks(home)
|
||||
if err != nil {
|
||||
rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home)
|
||||
return
|
||||
}
|
||||
runtimeDir = filepath.Join(resolvedHome, "rundir")
|
||||
}
|
||||
rootlessRuntimeDir = runtimeDir
|
||||
})
|
||||
|
||||
if rootlessRuntimeDirError != nil {
|
||||
return "", rootlessRuntimeDirError
|
||||
}
|
||||
return rootlessRuntimeDir, nil
|
||||
}
|
||||
|
||||
// GetRootlessDirInfo returns the parent path of where the storage for containers and
|
||||
// volumes will be in rootless mode
|
||||
func GetRootlessDirInfo() (string, string, error) {
|
||||
rootlessRuntime, err := GetRootlessRuntimeDir()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
dataDir := os.Getenv("XDG_DATA_HOME")
|
||||
if dataDir == "" {
|
||||
home := os.Getenv("HOME")
|
||||
if home == "" {
|
||||
return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
|
||||
}
|
||||
// runc doesn't like symlinks in the rootfs path, and at least
|
||||
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
|
||||
resolvedHome, err := filepath.EvalSymlinks(home)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "cannot resolve %s", home)
|
||||
}
|
||||
dataDir = filepath.Join(resolvedHome, ".local", "share")
|
||||
}
|
||||
return dataDir, rootlessRuntime, nil
|
||||
}
|
||||
|
||||
// tomlOptionsConfig holds the `mount_program` option decoded from a TOML
// configuration file. NOTE(review): presumably this maps the storage
// config's options table — confirm against the decoding call site.
type tomlOptionsConfig struct {
	// MountProgram is the path to an external mount helper binary.
	MountProgram string `toml:"mount_program"`
}
|
||||
|
60
pkg/util/utils_supported.go
Normal file
60
pkg/util/utils_supported.go
Normal file
@ -0,0 +1,60 @@
|
||||
// +build linux darwin
|
||||
|
||||
package util
|
||||
|
||||
// TODO once rootless function is consolidated under libpod, we
|
||||
// should work to take darwin from this
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/containers/libpod/pkg/rootless"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// GetRootlessRuntimeDir returns the runtime directory when running as non root
|
||||
func GetRootlessRuntimeDir() (string, error) {
|
||||
var rootlessRuntimeDirError error
|
||||
|
||||
rootlessRuntimeDirOnce.Do(func() {
|
||||
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||
uid := fmt.Sprintf("%d", rootless.GetRootlessUID())
|
||||
if runtimeDir == "" {
|
||||
tmpDir := filepath.Join("/run", "user", uid)
|
||||
os.MkdirAll(tmpDir, 0700)
|
||||
st, err := os.Stat(tmpDir)
|
||||
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
|
||||
runtimeDir = tmpDir
|
||||
}
|
||||
}
|
||||
if runtimeDir == "" {
|
||||
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("run-%s", uid))
|
||||
os.MkdirAll(tmpDir, 0700)
|
||||
st, err := os.Stat(tmpDir)
|
||||
if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
|
||||
runtimeDir = tmpDir
|
||||
}
|
||||
}
|
||||
if runtimeDir == "" {
|
||||
home := os.Getenv("HOME")
|
||||
if home == "" {
|
||||
rootlessRuntimeDirError = fmt.Errorf("neither XDG_RUNTIME_DIR nor HOME was set non-empty")
|
||||
return
|
||||
}
|
||||
resolvedHome, err := filepath.EvalSymlinks(home)
|
||||
if err != nil {
|
||||
rootlessRuntimeDirError = errors.Wrapf(err, "cannot resolve %s", home)
|
||||
return
|
||||
}
|
||||
runtimeDir = filepath.Join(resolvedHome, "rundir")
|
||||
}
|
||||
rootlessRuntimeDir = runtimeDir
|
||||
})
|
||||
|
||||
if rootlessRuntimeDirError != nil {
|
||||
return "", rootlessRuntimeDirError
|
||||
}
|
||||
return rootlessRuntimeDir, nil
|
||||
}
|
12
pkg/util/utils_windows.go
Normal file
12
pkg/util/utils_windows.go
Normal file
@ -0,0 +1,12 @@
|
||||
// +build windows
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// GetRootlessRuntimeDir returns the runtime directory when running as non root.
// Rootless mode has no Windows implementation, so this stub always errors.
func GetRootlessRuntimeDir() (string, error) {
	return "", errors.New("this function is not implemented for windows")
}
|
@ -9,8 +9,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
systemdDbus "github.com/coreos/go-systemd/dbus"
|
||||
"github.com/godbus/dbus"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@ -55,37 +53,6 @@ func StatusToExitCode(status int) int {
|
||||
return ((status) & 0xff00) >> 8
|
||||
}
|
||||
|
||||
// RunUnderSystemdScope adds the specified pid to a systemd scope
|
||||
func RunUnderSystemdScope(pid int, slice string, unitName string) error {
|
||||
var properties []systemdDbus.Property
|
||||
conn, err := systemdDbus.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
properties = append(properties, systemdDbus.PropSlice(slice))
|
||||
properties = append(properties, newProp("PIDs", []uint32{uint32(pid)}))
|
||||
properties = append(properties, newProp("Delegate", true))
|
||||
properties = append(properties, newProp("DefaultDependencies", false))
|
||||
ch := make(chan string)
|
||||
_, err = conn.StartTransientUnit(unitName, "replace", properties, ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Block until job is started
|
||||
<-ch
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// newProp wraps a name/value pair as a systemd D-Bus unit property.
func newProp(name string, units interface{}) systemdDbus.Property {
	return systemdDbus.Property{
		Name:  name,
		Value: dbus.MakeVariant(units),
	}
}
|
||||
|
||||
// ErrDetach is an error indicating that the user manually detached from the
// container. It is a sentinel value: callers can compare against it to tell
// a deliberate detach apart from a genuine attach failure.
var ErrDetach = errors.New("detached from container")
|
||||
|
39
utils/utils_supported.go
Normal file
39
utils/utils_supported.go
Normal file
@ -0,0 +1,39 @@
|
||||
// +build linux darwin
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
systemdDbus "github.com/coreos/go-systemd/dbus"
|
||||
"github.com/godbus/dbus"
|
||||
)
|
||||
|
||||
// RunUnderSystemdScope adds the specified pid to a systemd scope
|
||||
func RunUnderSystemdScope(pid int, slice string, unitName string) error {
|
||||
var properties []systemdDbus.Property
|
||||
conn, err := systemdDbus.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
properties = append(properties, systemdDbus.PropSlice(slice))
|
||||
properties = append(properties, newProp("PIDs", []uint32{uint32(pid)}))
|
||||
properties = append(properties, newProp("Delegate", true))
|
||||
properties = append(properties, newProp("DefaultDependencies", false))
|
||||
ch := make(chan string)
|
||||
_, err = conn.StartTransientUnit(unitName, "replace", properties, ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Block until job is started
|
||||
<-ch
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// newProp wraps a name/value pair as a systemd D-Bus unit property.
func newProp(name string, units interface{}) systemdDbus.Property {
	return systemdDbus.Property{
		Name:  name,
		Value: dbus.MakeVariant(units),
	}
}
|
9
utils/utils_windows.go
Normal file
9
utils/utils_windows.go
Normal file
@ -0,0 +1,9 @@
|
||||
// +build windows
|
||||
|
||||
package utils
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// RunUnderSystemdScope is a stub: systemd does not exist on Windows, so
// this always returns an error.
func RunUnderSystemdScope(pid int, slice string, unitName string) error {
	return errors.New("not implemented for windows")
}
|
@ -93,8 +93,8 @@ k8s.io/api kubernetes-1.10.13-beta.0 https://github.com/kubernetes/api
|
||||
k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apimachinery
|
||||
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
|
||||
github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
|
||||
github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199
|
||||
github.com/containers/buildah v1.8.0
|
||||
github.com/varlink/go 64e07fabffa33e385817b41971cf2674f692f391
|
||||
github.com/containers/buildah 34e7eba408282e890e61395b6d97e58b88e14d25
|
||||
# TODO: Gotty has not been updated since 2012. Can we find replacement?
|
||||
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
|
||||
github.com/fsouza/go-dockerclient v1.3.0
|
||||
|
2
vendor/github.com/containers/buildah/buildah.go
generated
vendored
2
vendor/github.com/containers/buildah/buildah.go
generated
vendored
@ -26,7 +26,7 @@ const (
|
||||
Package = "buildah"
|
||||
// Version for the Package. Bump version in contrib/rpm/buildah.spec
|
||||
// too.
|
||||
Version = "1.8.0"
|
||||
Version = "1.9.0-dev"
|
||||
// The value we use to identify what type of information, currently a
|
||||
// serialized Builder structure, we are using as per-container state.
|
||||
// This should only be changed when we make incompatible changes to
|
||||
|
@ -24,6 +24,22 @@ func init() {
|
||||
reexec.Register(symlinkModifiedTime, resolveSymlinkTimeModified)
|
||||
}
|
||||
|
||||
// resolveSymlink uses a child subprocess to resolve any symlinks in filename
// in the context of rootdir. (Vendored buildah code — behavior unchanged.)
func resolveSymlink(rootdir, filename string) (string, error) {
	// The child process expects a chroot and one path that
	// will be consulted relative to the chroot directory and evaluated
	// for any symbolic links present.
	cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename)
	// CombinedOutput carries either the resolved path or, on failure,
	// whatever the child printed (used to enrich the error below).
	output, err := cmd.CombinedOutput()
	if err != nil {
		return "", errors.Wrapf(err, string(output))
	}

	// Hand back the resolved symlink, will be filename if a symlink is not found
	return string(output), nil
}
|
||||
|
||||
// main() for resolveSymlink()'s subprocess.
|
||||
func resolveChrootedSymlinks() {
|
||||
status := 0
|
||||
@ -55,22 +71,6 @@ func resolveChrootedSymlinks() {
|
||||
os.Exit(status)
|
||||
}
|
||||
|
||||
// resolveSymlink uses a child subprocess to resolve any symlinks in filename
// in the context of rootdir. (Vendored buildah code — behavior unchanged.)
func resolveSymlink(rootdir, filename string) (string, error) {
	// The child process expects a chroot and one path that
	// will be consulted relative to the chroot directory and evaluated
	// for any symbolic links present.
	cmd := reexec.Command(symlinkChrootedCommand, rootdir, filename)
	// CombinedOutput carries either the resolved path or, on failure,
	// whatever the child printed (used to enrich the error below).
	output, err := cmd.CombinedOutput()
	if err != nil {
		return "", errors.Wrapf(err, string(output))
	}

	// Hand back the resolved symlink, will be filename if a symlink is not found
	return string(output), nil
}
|
||||
|
||||
// main() for grandparent subprocess. Its main job is to shuttle stdio back
|
||||
// and forth, managing a pseudo-terminal if we want one, for our child, the
|
||||
// parent subprocess.
|
13
vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go
generated
vendored
Normal file
13
vendor/github.com/containers/buildah/imagebuildah/chroot_symlink_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// +build !linux
|
||||
|
||||
package imagebuildah
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
func resolveSymlink(rootdir, filename string) (string, error) {
|
||||
return "", errors.New("function not supported on non-linux systems")
|
||||
}
|
||||
|
||||
func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
|
||||
return false, errors.New("function not supported on non-linux systems")
|
||||
}
|
12
vendor/github.com/containers/buildah/pkg/parse/parse.go
generated
vendored
12
vendor/github.com/containers/buildah/pkg/parse/parse.go
generated
vendored
@ -22,7 +22,6 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -39,14 +38,9 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
|
||||
memorySwap int64
|
||||
err error
|
||||
)
|
||||
rlim := unix.Rlimit{Cur: 1048576, Max: 1048576}
|
||||
defaultLimits := []string{}
|
||||
if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil {
|
||||
defaultLimits = append(defaultLimits, fmt.Sprintf("nofile=%d:%d", rlim.Cur, rlim.Max))
|
||||
}
|
||||
if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
|
||||
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
|
||||
}
|
||||
|
||||
defaultLimits := getDefaultProcessLimits()
|
||||
|
||||
memVal, _ := c.Flags().GetString("memory")
|
||||
if memVal != "" {
|
||||
memoryLimit, err = units.RAMInBytes(memVal)
|
||||
|
20
vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
generated
vendored
Normal file
20
vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// +build linux darwin
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func getDefaultProcessLimits() []string {
|
||||
rlim := unix.Rlimit{Cur: 1048576, Max: 1048576}
|
||||
defaultLimits := []string{}
|
||||
if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil {
|
||||
defaultLimits = append(defaultLimits, fmt.Sprintf("nofile=%d:%d", rlim.Cur, rlim.Max))
|
||||
}
|
||||
if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
|
||||
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
|
||||
}
|
||||
return defaultLimits
|
||||
}
|
7
vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
generated
vendored
Normal file
7
vendor/github.com/containers/buildah/pkg/parse/parse_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// +build !linux,!darwin
|
||||
|
||||
package parse
|
||||
|
||||
func getDefaultProcessLimits() []string {
|
||||
return []string{}
|
||||
}
|
2020
vendor/github.com/containers/buildah/run.go
generated
vendored
2020
vendor/github.com/containers/buildah/run.go
generated
vendored
File diff suppressed because it is too large
Load Diff
2022
vendor/github.com/containers/buildah/run_linux.go
generated
vendored
2022
vendor/github.com/containers/buildah/run_linux.go
generated
vendored
File diff suppressed because it is too large
Load Diff
11
vendor/github.com/containers/buildah/run_unsupport.go
generated
vendored
11
vendor/github.com/containers/buildah/run_unsupport.go
generated
vendored
@ -1,11 +0,0 @@
|
||||
// +build !linux
|
||||
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func setChildProcess() error {
|
||||
return errors.New("function not supported on non-linux systems")
|
||||
}
|
20
vendor/github.com/containers/buildah/run_unsupported.go
generated
vendored
Normal file
20
vendor/github.com/containers/buildah/run_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// +build !linux
|
||||
|
||||
package buildah
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func setChildProcess() error {
|
||||
return errors.New("function not supported on non-linux systems")
|
||||
}
|
||||
|
||||
func runUsingRuntimeMain() {}
|
||||
|
||||
func (b *Builder) Run(command []string, options RunOptions) error {
|
||||
return errors.New("function not supported on non-linux systems")
|
||||
}
|
||||
func DefaultNamespaceOptions() (NamespaceOptions, error) {
|
||||
return NamespaceOptions{}, errors.New("function not supported on non-linux systems")
|
||||
}
|
4
vendor/github.com/varlink/go/varlink/bridge_windows.go
generated
vendored
4
vendor/github.com/varlink/go/varlink/bridge_windows.go
generated
vendored
@ -44,8 +44,8 @@ func NewBridge(bridge string) (*Connection, error) {
|
||||
}
|
||||
c.conn = PipeCon{nil, cmd, &r, &w}
|
||||
c.address = ""
|
||||
c.reader = bufio.NewReader(r)
|
||||
c.writer = bufio.NewWriter(w)
|
||||
c.Reader = bufio.NewReader(r)
|
||||
c.Writer = bufio.NewWriter(w)
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
|
19
vendor/github.com/varlink/go/varlink/call.go
generated
vendored
19
vendor/github.com/varlink/go/varlink/call.go
generated
vendored
@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -14,36 +15,38 @@ import (
|
||||
type Call struct {
|
||||
*bufio.Reader
|
||||
*bufio.Writer
|
||||
in *serviceCall
|
||||
Conn *net.Conn
|
||||
Request *[]byte
|
||||
In *serviceCall
|
||||
Continues bool
|
||||
Upgrade bool
|
||||
}
|
||||
|
||||
// WantsMore indicates if the calling client accepts more than one reply to this method call.
|
||||
func (c *Call) WantsMore() bool {
|
||||
return c.in.More
|
||||
return c.In.More
|
||||
}
|
||||
|
||||
// WantsUpgrade indicates that the calling client wants the connection to be upgraded.
|
||||
func (c *Call) WantsUpgrade() bool {
|
||||
return c.in.Upgrade
|
||||
return c.In.Upgrade
|
||||
}
|
||||
|
||||
// IsOneway indicate that the calling client does not expect a reply.
|
||||
func (c *Call) IsOneway() bool {
|
||||
return c.in.Oneway
|
||||
return c.In.Oneway
|
||||
}
|
||||
|
||||
// GetParameters retrieves the method call parameters.
|
||||
func (c *Call) GetParameters(p interface{}) error {
|
||||
if c.in.Parameters == nil {
|
||||
if c.In.Parameters == nil {
|
||||
return fmt.Errorf("empty parameters")
|
||||
}
|
||||
return json.Unmarshal(*c.in.Parameters, p)
|
||||
return json.Unmarshal(*c.In.Parameters, p)
|
||||
}
|
||||
|
||||
func (c *Call) sendMessage(r *serviceReply) error {
|
||||
if c.in.Oneway {
|
||||
if c.In.Oneway {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -75,7 +78,7 @@ func (c *Call) Reply(parameters interface{}) error {
|
||||
})
|
||||
}
|
||||
|
||||
if !c.in.More {
|
||||
if !c.In.More {
|
||||
return fmt.Errorf("call did not set more, it does not expect continues")
|
||||
}
|
||||
|
||||
|
105
vendor/github.com/varlink/go/varlink/service.go
generated
vendored
105
vendor/github.com/varlink/go/varlink/service.go
generated
vendored
@ -74,7 +74,7 @@ func (s *Service) getInterfaceDescription(c Call, name string) error {
|
||||
return c.replyGetInterfaceDescription(description)
|
||||
}
|
||||
|
||||
func (s *Service) handleMessage(reader *bufio.Reader, writer *bufio.Writer, request []byte) error {
|
||||
func (s *Service) HandleMessage(conn *net.Conn, reader *bufio.Reader, writer *bufio.Writer, request []byte) error {
|
||||
var in serviceCall
|
||||
|
||||
err := json.Unmarshal(request, &in)
|
||||
@ -84,9 +84,11 @@ func (s *Service) handleMessage(reader *bufio.Reader, writer *bufio.Writer, requ
|
||||
}
|
||||
|
||||
c := Call{
|
||||
Conn: conn,
|
||||
Reader: reader,
|
||||
Writer: writer,
|
||||
in: &in,
|
||||
In: &in,
|
||||
Request: &request,
|
||||
}
|
||||
|
||||
r := strings.LastIndex(in.Method, ".")
|
||||
@ -131,7 +133,7 @@ func (s *Service) handleConnection(conn net.Conn, wg *sync.WaitGroup) {
|
||||
break
|
||||
}
|
||||
|
||||
err = s.handleMessage(reader, writer, request[:len(request)-1])
|
||||
err = s.HandleMessage(&conn, reader, writer, request[:len(request)-1])
|
||||
if err != nil {
|
||||
// FIXME: report error
|
||||
//fmt.Fprintf(os.Stderr, "handleMessage: %v", err)
|
||||
@ -179,25 +181,36 @@ func (s *Service) parseAddress(address string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getListener(protocol string, address string) (net.Listener, error) {
|
||||
func (s *Service) GetListener() (*net.Listener, error) {
|
||||
s.mutex.Lock()
|
||||
l := s.listener
|
||||
s.mutex.Unlock()
|
||||
return &l, nil
|
||||
}
|
||||
|
||||
func (s *Service) setListener() error {
|
||||
l := activationListener()
|
||||
if l == nil {
|
||||
if protocol == "unix" && address[0] != '@' {
|
||||
os.Remove(address)
|
||||
if s.protocol == "unix" && s.address[0] != '@' {
|
||||
os.Remove(s.address)
|
||||
}
|
||||
|
||||
var err error
|
||||
l, err = net.Listen(protocol, address)
|
||||
l, err = net.Listen(s.protocol, s.address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
if protocol == "unix" && address[0] != '@' {
|
||||
if s.protocol == "unix" && s.address[0] != '@' {
|
||||
l.(*net.UnixListener).SetUnlinkOnClose(true)
|
||||
}
|
||||
}
|
||||
|
||||
return l, nil
|
||||
s.mutex.Lock()
|
||||
s.listener = l
|
||||
s.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) refreshTimeout(timeout time.Duration) error {
|
||||
@ -216,26 +229,84 @@ func (s *Service) refreshTimeout(timeout time.Duration) error {
|
||||
}
|
||||
|
||||
// Listen starts a Service.
|
||||
func (s *Service) Listen(address string, timeout time.Duration) error {
|
||||
var wg sync.WaitGroup
|
||||
defer func() { s.teardown(); wg.Wait() }()
|
||||
|
||||
func (s *Service) Bind(address string) error {
|
||||
s.mutex.Lock()
|
||||
if s.running {
|
||||
s.mutex.Unlock()
|
||||
return fmt.Errorf("Listen(): already running")
|
||||
return fmt.Errorf("Init(): already running")
|
||||
}
|
||||
s.mutex.Unlock()
|
||||
|
||||
s.parseAddress(address)
|
||||
|
||||
l, err := getListener(s.protocol, s.address)
|
||||
err := s.setListener()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Listen starts a Service.
|
||||
func (s *Service) Listen(address string, timeout time.Duration) error {
|
||||
var wg sync.WaitGroup
|
||||
defer func() { s.teardown(); wg.Wait() }()
|
||||
|
||||
err := s.Bind(address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.mutex.Lock()
|
||||
s.listener = l
|
||||
s.running = true
|
||||
l := s.listener
|
||||
s.mutex.Unlock()
|
||||
|
||||
for s.running {
|
||||
if timeout != 0 {
|
||||
if err := s.refreshTimeout(timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
if err.(net.Error).Timeout() {
|
||||
s.mutex.Lock()
|
||||
if s.conncounter == 0 {
|
||||
s.mutex.Unlock()
|
||||
return ServiceTimeoutError{}
|
||||
}
|
||||
s.mutex.Unlock()
|
||||
continue
|
||||
}
|
||||
if !s.running {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
s.mutex.Lock()
|
||||
s.conncounter++
|
||||
s.mutex.Unlock()
|
||||
wg.Add(1)
|
||||
go s.handleConnection(conn, &wg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Listen starts a Service.
|
||||
func (s *Service) DoListen(timeout time.Duration) error {
|
||||
var wg sync.WaitGroup
|
||||
defer func() { s.teardown(); wg.Wait() }()
|
||||
|
||||
s.mutex.Lock()
|
||||
l := s.listener
|
||||
s.mutex.Unlock()
|
||||
|
||||
if l == nil {
|
||||
return fmt.Errorf("No listener set")
|
||||
}
|
||||
|
||||
s.mutex.Lock()
|
||||
s.running = true
|
||||
s.mutex.Unlock()
|
||||
|
||||
|
26
vendor/github.com/varlink/go/varlink/varlink_test.go
generated
vendored
26
vendor/github.com/varlink/go/varlink/varlink_test.go
generated
vendored
@ -31,7 +31,7 @@ func TestService(t *testing.T) {
|
||||
r := bufio.NewReader(&br)
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
if err := service.handleMessage(r, w, []byte{0}); err == nil {
|
||||
if err := service.HandleMessage(nil, r, w, []byte{0}); err == nil {
|
||||
t.Fatal("HandleMessage returned non-error")
|
||||
}
|
||||
})
|
||||
@ -42,7 +42,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"foo.GetInterfaceDescription" fdgdfg}`)
|
||||
if err := service.handleMessage(r, w, msg); err == nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err == nil {
|
||||
t.Fatal("HandleMessage returned no error on invalid json")
|
||||
}
|
||||
})
|
||||
@ -53,7 +53,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"foo.GetInterfaceDescription"}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatal("HandleMessage returned error on wrong interface")
|
||||
}
|
||||
expect(t, `{"parameters":{"interface":"foo"},"error":"org.varlink.service.InterfaceNotFound"}`+"\000",
|
||||
@ -66,7 +66,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"InvalidMethod"}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatal("HandleMessage returned error on invalid method")
|
||||
}
|
||||
expect(t, `{"parameters":{"parameter":"method"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
|
||||
@ -79,7 +79,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.varlink.service.WrongMethod"}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatal("HandleMessage returned error on wrong method")
|
||||
}
|
||||
expect(t, `{"parameters":{"method":"WrongMethod"},"error":"org.varlink.service.MethodNotFound"}`+"\000",
|
||||
@ -92,7 +92,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters": null}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"parameters":{"parameter":"parameters"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
|
||||
@ -105,7 +105,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{}}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"parameters":{"parameter":"interface"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
|
||||
@ -118,7 +118,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{"interface":"foo"}}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"parameters":{"parameter":"interface"},"error":"org.varlink.service.InvalidParameter"}`+"\000",
|
||||
@ -131,7 +131,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.varlink.service.GetInterfaceDescription","parameters":{"interface":"org.varlink.service"}}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"parameters":{"description":"# The Varlink Service Interface is provided by every varlink service. It\n# describes the service and the interfaces it implements.\ninterface org.varlink.service\n\n# Get a list of all the interfaces a service provides and information\n# about the implementation.\nmethod GetInfo() -\u003e (\n vendor: string,\n product: string,\n version: string,\n url: string,\n interfaces: []string\n)\n\n# Get the description of an interface that is implemented by this service.\nmethod GetInterfaceDescription(interface: string) -\u003e (description: string)\n\n# The requested interface was not found.\nerror InterfaceNotFound (interface: string)\n\n# The requested method was not found\nerror MethodNotFound (method: string)\n\n# The interface defines the requested method, but the service does not\n# implement it.\nerror MethodNotImplemented (method: string)\n\n# One of the passed parameters is invalid.\nerror InvalidParameter (parameter: string)"}}`+"\000",
|
||||
@ -144,7 +144,7 @@ func TestService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.varlink.service.GetInfo"}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"parameters":{"vendor":"Varlink","product":"Varlink Test","version":"1","url":"https://github.com/varlink/go/varlink","interfaces":["org.varlink.service"]}}`+"\000",
|
||||
@ -224,7 +224,7 @@ func TestMoreService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.example.test.Pingf"}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"parameters":{"method":"Pingf"},"error":"org.varlink.service.MethodNotImplemented"}`+"\000",
|
||||
@ -237,7 +237,7 @@ func TestMoreService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.example.test.PingError", "more" : true}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"error":"org.example.test.PingError"}`+"\000",
|
||||
@ -249,7 +249,7 @@ func TestMoreService(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := bufio.NewWriter(&b)
|
||||
msg := []byte(`{"method":"org.example.test.Ping", "more" : true}`)
|
||||
if err := service.handleMessage(r, w, msg); err != nil {
|
||||
if err := service.HandleMessage(nil, r, w, msg); err != nil {
|
||||
t.Fatalf("HandleMessage returned error: %v", err)
|
||||
}
|
||||
expect(t, `{"continues":true}`+"\000"+`{"continues":true}`+"\000"+`{}`+"\000",
|
||||
|
Reference in New Issue
Block a user