mirror of https://github.com/containers/podman.git (synced 2025-10-19 20:23:08 +08:00)
Clear up fragments of the old api
As everything is being moved over to the new container API, remove the files that depended on the old API.

Signed-off-by: umohnani8 <umohnani@redhat.com>
Closes: #116
Approved by: rhatdan
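The kpod ps filter code in the first hunks below shows the shape of the change: helpers that took []*oci.Container now take []*libpod.Container, and a container's creation time is read from its config instead of a CreatedAt() method. A minimal sketch of the new-style lookup, assuming only the libpod methods that actually appear in the diff (ID(), Name(), Config().CreatedTime); the helper name findCreated is hypothetical:

package kpod

import (
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/projectatomic/libpod/libpod"
)

// findCreated mirrors the post-change lookup used by the "before"/"since"
// filters: match a container by ID prefix or exact name, then read its
// creation time from the container config (formerly ctr.CreatedAt() on the
// old oci.Container type).
func findCreated(containers []*libpod.Container, ref string) (time.Time, error) {
	for _, ctr := range containers {
		if strings.HasPrefix(ctr.ID(), ref) || ctr.Name() == ref {
			return ctr.Config().CreatedTime, nil
		}
	}
	return time.Time{}, errors.Errorf("no such container %q", ref)
}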
@@ -15,7 +15,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/projectatomic/libpod/cmd/kpod/formats"
 	"github.com/projectatomic/libpod/libpod"
-	"github.com/projectatomic/libpod/oci"
 	"github.com/urfave/cli"
 	"k8s.io/apimachinery/pkg/fields"
 )
@@ -491,7 +490,7 @@ func getJSONOutput(containers []*libpod.Container, nSpace bool) ([]psJSONParams,
 			Names: cc.Name,
 			Labels: cc.Labels,
 			Mounts: cc.Spec.Mounts,
-			ContainerRunning: conState.String() == oci.ContainerStateRunning,
+			ContainerRunning: conState == libpod.ContainerStateRunning,
 			Namespaces: ns,
 		}
 		psOutput = append(psOutput, params)
@@ -605,7 +604,7 @@ type FilterParamsPS struct {
 }

 // parseFilter takes a filter string and a list of containers and filters it
-func parseFilter(filter string, containers []*oci.Container) (*FilterParamsPS, error) {
+func parseFilter(filter string, containers []*libpod.Container) (*FilterParamsPS, error) {
 	params := new(FilterParamsPS)
 	allFilters := strings.Split(filter, ",")

@@ -630,13 +629,13 @@ func parseFilter(filter string, containers []*oci.Container) (*FilterParamsPS, e
 			params.ancestor = pair[1]
 		case "before":
 			if ctr, err := findContainer(containers, pair[1]); err == nil {
-				params.before = ctr.CreatedAt()
+				params.before = ctr.Config().CreatedTime
 			} else {
 				return nil, errors.Wrapf(err, "no such container %q", pair[1])
 			}
 		case "since":
 			if ctr, err := findContainer(containers, pair[1]); err == nil {
-				params.before = ctr.CreatedAt()
+				params.before = ctr.Config().CreatedTime
 			} else {
 				return nil, errors.Wrapf(err, "no such container %q", pair[1])
 			}
@@ -650,7 +649,7 @@ func parseFilter(filter string, containers []*oci.Container) (*FilterParamsPS, e
 }

 // findContainer finds a container with a specific name or id from a list of containers
-func findContainer(containers []*oci.Container, ref string) (*oci.Container, error) {
+func findContainer(containers []*libpod.Container, ref string) (*libpod.Container, error) {
 	for _, ctr := range containers {
 		if strings.HasPrefix(ctr.ID(), ref) || ctr.Name() == ref {
 			return ctr, nil

@@ -6,7 +6,6 @@ import (

 	"github.com/BurntSushi/toml"
 	"github.com/opencontainers/selinux/go-selinux"
-	"github.com/projectatomic/libpod/oci"
 )

 // Default paths if none are specified
@@ -21,9 +20,9 @@ const (
 	apparmorProfileName = "crio-default"
 	cniConfigDir = "/etc/cni/net.d/"
 	cniBinDir = "/opt/cni/bin/"
-	cgroupManager = oci.CgroupfsCgroupsManager
+	cgroupManager = "" //oci.CgroupfsCgroupsManager
 	lockPath = "/run/crio.lock"
-	containerExitsDir = oci.ContainerExitsDir
+	containerExitsDir = "" //oci.ContainerExitsDir
 )

 // Config represents the entire set of configuration values that can be set for
@@ -1,157 +0,0 @@
-package libkpod
-
-import (
-	"fmt"
-
-	cstorage "github.com/containers/storage"
-	"github.com/pkg/errors"
-	"github.com/projectatomic/libpod/libkpod/sandbox"
-	"github.com/projectatomic/libpod/oci"
-	"github.com/projectatomic/libpod/pkg/registrar"
-)
-
-// GetStorageContainer searches for a container with the given name or ID in the given store
-func (c *ContainerServer) GetStorageContainer(container string) (*cstorage.Container, error) {
-	ociCtr, err := c.LookupContainer(container)
-	if err != nil {
-		return nil, err
-	}
-	return c.store.Container(ociCtr.ID())
-}
-
-// GetContainerTopLayerID gets the ID of the top layer of the given container
-func (c *ContainerServer) GetContainerTopLayerID(containerID string) (string, error) {
-	ctr, err := c.GetStorageContainer(containerID)
-	if err != nil {
-		return "", err
-	}
-	return ctr.LayerID, nil
-}
-
-// GetContainerRwSize Gets the size of the mutable top layer of the container
-func (c *ContainerServer) GetContainerRwSize(containerID string) (int64, error) {
-	container, err := c.store.Container(containerID)
-	if err != nil {
-		return 0, err
-	}
-
-	// Get the size of the top layer by calculating the size of the diff
-	// between the layer and its parent. The top layer of a container is
-	// the only RW layer, all others are immutable
-	layer, err := c.store.Layer(container.LayerID)
-	if err != nil {
-		return 0, err
-	}
-	return c.store.DiffSize(layer.Parent, layer.ID)
-}
-
-// GetContainerRootFsSize gets the size of the container's root filesystem
-// A container FS is split into two parts. The first is the top layer, a
-// mutable layer, and the rest is the RootFS: the set of immutable layers
-// that make up the image on which the container is based
-func (c *ContainerServer) GetContainerRootFsSize(containerID string) (int64, error) {
-	container, err := c.store.Container(containerID)
-	if err != nil {
-		return 0, err
-	}
-
-	// Ignore the size of the top layer. The top layer is a mutable RW layer
-	// and is not considered a part of the rootfs
-	rwLayer, err := c.store.Layer(container.LayerID)
-	if err != nil {
-		return 0, err
-	}
-	layer, err := c.store.Layer(rwLayer.Parent)
-	if err != nil {
-		return 0, err
-	}
-
-	size := int64(0)
-	for layer.Parent != "" {
-		layerSize, err := c.store.DiffSize(layer.Parent, layer.ID)
-		if err != nil {
-			return size, errors.Wrapf(err, "getting diffsize of layer %q and its parent %q", layer.ID, layer.Parent)
-		}
-		size += layerSize
-		layer, err = c.store.Layer(layer.Parent)
-		if err != nil {
-			return 0, err
-		}
-	}
-	// Get the size of the last layer. Has to be outside of the loop
-	// because the parent of the last layer is "", andlstore.Get("")
-	// will return an error
-	layerSize, err := c.store.DiffSize(layer.Parent, layer.ID)
-	return size + layerSize, err
-}
-
-// GetContainerFromRequest gets an oci container matching the specified full or partial id
-func (c *ContainerServer) GetContainerFromRequest(cid string) (*oci.Container, error) {
-	if cid == "" {
-		return nil, fmt.Errorf("container ID should not be empty")
-	}
-
-	containerID, err := c.ctrIDIndex.Get(cid)
-	if err != nil {
-		return nil, fmt.Errorf("container with ID starting with %s not found: %v", cid, err)
-	}
-
-	ctr := c.GetContainer(containerID)
-	if ctr == nil {
-		return nil, fmt.Errorf("specified container not found: %s", containerID)
-	}
-	return ctr, nil
-}
-
-func (c *ContainerServer) getSandboxFromRequest(pid string) (*sandbox.Sandbox, error) {
-	if pid == "" {
-		return nil, fmt.Errorf("pod ID should not be empty")
-	}
-
-	podID, err := c.podIDIndex.Get(pid)
-	if err != nil {
-		return nil, fmt.Errorf("pod with ID starting with %s not found: %v", pid, err)
-	}
-
-	sb := c.GetSandbox(podID)
-	if sb == nil {
-		return nil, fmt.Errorf("specified pod not found: %s", podID)
-	}
-	return sb, nil
-}
-
-// LookupContainer returns the container with the given name or full or partial id
-func (c *ContainerServer) LookupContainer(idOrName string) (*oci.Container, error) {
-	if idOrName == "" {
-		return nil, fmt.Errorf("container ID or name should not be empty")
-	}
-
-	ctrID, err := c.ctrNameIndex.Get(idOrName)
-	if err != nil {
-		if err == registrar.ErrNameNotReserved {
-			ctrID = idOrName
-		} else {
-			return nil, err
-		}
-	}
-
-	return c.GetContainerFromRequest(ctrID)
-}
-
-// LookupSandbox returns the pod sandbox with the given name or full or partial id
-func (c *ContainerServer) LookupSandbox(idOrName string) (*sandbox.Sandbox, error) {
-	if idOrName == "" {
-		return nil, fmt.Errorf("container ID or name should not be empty")
-	}
-
-	podID, err := c.podNameIndex.Get(idOrName)
-	if err != nil {
-		if err == registrar.ErrNameNotReserved {
-			podID = idOrName
-		} else {
-			return nil, err
-		}
-	}
-
-	return c.getSandboxFromRequest(podID)
-}
@@ -1,775 +0,0 @@
-package libkpod
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sync"
-	"time"
-
-	"github.com/containers/image/types"
-	cstorage "github.com/containers/storage"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/truncindex"
-	"github.com/opencontainers/runc/libcontainer"
-	rspec "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/opencontainers/selinux/go-selinux"
-	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/pkg/errors"
-	"github.com/projectatomic/libpod/libkpod/sandbox"
-	"github.com/projectatomic/libpod/oci"
-	"github.com/projectatomic/libpod/pkg/annotations"
-	"github.com/projectatomic/libpod/pkg/registrar"
-	"github.com/projectatomic/libpod/pkg/storage"
-	"github.com/sirupsen/logrus"
-	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
-)
-
-// ContainerServer implements the ImageServer
-type ContainerServer struct {
-	runtime *oci.Runtime
-	store cstorage.Store
-	storageImageServer storage.ImageServer
-	storageRuntimeServer storage.RuntimeServer
-	updateLock sync.RWMutex
-	ctrNameIndex *registrar.Registrar
-	ctrIDIndex *truncindex.TruncIndex
-	podNameIndex *registrar.Registrar
-	podIDIndex *truncindex.TruncIndex
-	hooks map[string]HookParams
-
-	imageContext *types.SystemContext
-	stateLock sync.Locker
-	state *containerServerState
-	config *Config
-}
-
-// Runtime returns the oci runtime for the ContainerServer
-func (c *ContainerServer) Runtime() *oci.Runtime {
-	return c.runtime
-}
-
-// Hooks returns the oci hooks for the ContainerServer
-func (c *ContainerServer) Hooks() map[string]HookParams {
-	return c.hooks
-}
-
-// Store returns the Store for the ContainerServer
-func (c *ContainerServer) Store() cstorage.Store {
-	return c.store
-}
-
-// StorageImageServer returns the ImageServer for the ContainerServer
-func (c *ContainerServer) StorageImageServer() storage.ImageServer {
-	return c.storageImageServer
-}
-
-// CtrNameIndex returns the Registrar for the ContainerServer
-func (c *ContainerServer) CtrNameIndex() *registrar.Registrar {
-	return c.ctrNameIndex
-}
-
-// CtrIDIndex returns the TruncIndex for the ContainerServer
-func (c *ContainerServer) CtrIDIndex() *truncindex.TruncIndex {
-	return c.ctrIDIndex
-}
-
-// PodNameIndex returns the index of pod names
-func (c *ContainerServer) PodNameIndex() *registrar.Registrar {
-	return c.podNameIndex
-}
-
-// PodIDIndex returns the index of pod IDs
-func (c *ContainerServer) PodIDIndex() *truncindex.TruncIndex {
-	return c.podIDIndex
-}
-
-// ImageContext returns the SystemContext for the ContainerServer
-func (c *ContainerServer) ImageContext() *types.SystemContext {
-	return c.imageContext
-}
-
-// Config gets the configuration for the ContainerServer
-func (c *ContainerServer) Config() *Config {
-	return c.config
-}
-
-// StorageRuntimeServer gets the runtime server for the ContainerServer
-func (c *ContainerServer) StorageRuntimeServer() storage.RuntimeServer {
-	return c.storageRuntimeServer
-}
-
-// New creates a new ContainerServer with options provided
-func New(config *Config) (*ContainerServer, error) {
-	store, err := cstorage.GetStore(cstorage.StoreOptions{
-		RunRoot: config.RunRoot,
-		GraphRoot: config.Root,
-		GraphDriverName: config.Storage,
-		GraphDriverOptions: config.StorageOptions,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	imageService, err := storage.GetImageService(store, config.DefaultTransport, config.InsecureRegistries, config.Registries)
-	if err != nil {
-		return nil, err
-	}
-
-	storageRuntimeService := storage.GetRuntimeService(imageService, config.PauseImage)
-	if err != nil {
-		return nil, err
-	}
-
-	runtime, err := oci.New(config.Runtime, config.RuntimeUntrustedWorkload, config.DefaultWorkloadTrust, config.Conmon, config.ConmonEnv, config.CgroupManager, config.ContainerExitsDir, config.LogSizeMax, config.NoPivot)
-	if err != nil {
-		return nil, err
-	}
-
-	var lock sync.Locker
-	if config.FileLocking {
-		fileLock, err := cstorage.GetLockfile(lockPath)
-		if err != nil {
-			return nil, fmt.Errorf("error obtaining lockfile: %v", err)
-		}
-		lock = fileLock
-	} else {
-		lock = new(sync.Mutex)
-	}
-
-	hooks := make(map[string]HookParams)
-	// If hooks directory is set in config use it
-	if config.HooksDirPath != "" {
-		if err := readHooks(config.HooksDirPath, hooks); err != nil {
-			return nil, err
-		}
-		// If user overrode default hooks, this means it is in a test, so don't
-		// use OverrideHooksDirPath
-		if config.HooksDirPath == DefaultHooksDirPath {
-			if err := readHooks(OverrideHooksDirPath, hooks); err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	return &ContainerServer{
-		runtime: runtime,
-		store: store,
-		storageImageServer: imageService,
-		storageRuntimeServer: storageRuntimeService,
-		ctrNameIndex: registrar.NewRegistrar(),
-		ctrIDIndex: truncindex.NewTruncIndex([]string{}),
-		podNameIndex: registrar.NewRegistrar(),
-		podIDIndex: truncindex.NewTruncIndex([]string{}),
-		imageContext: &types.SystemContext{SignaturePolicyPath: config.SignaturePolicyPath},
-		hooks: hooks,
-		stateLock: lock,
-		state: &containerServerState{
-			containers: oci.NewMemoryStore(),
-			infraContainers: oci.NewMemoryStore(),
-			sandboxes: make(map[string]*sandbox.Sandbox),
-			processLevels: make(map[string]int),
-		},
-		config: config,
-	}, nil
-}
-
-// Update makes changes to the server's state (lists of pods and containers) to
-// reflect the list of pods and containers that are stored on disk, possibly
-// having been modified by other parties
-func (c *ContainerServer) Update() error {
-	c.updateLock.Lock()
-	defer c.updateLock.Unlock()
-
-	containers, err := c.store.Containers()
-	if err != nil && !os.IsNotExist(errors.Cause(err)) {
-		logrus.Warnf("could not read containers and sandboxes: %v", err)
-		return err
-	}
-	newPods := map[string]*storage.RuntimeContainerMetadata{}
-	oldPods := map[string]string{}
-	removedPods := map[string]string{}
-	newPodContainers := map[string]*storage.RuntimeContainerMetadata{}
-	oldPodContainers := map[string]string{}
-	removedPodContainers := map[string]string{}
-	for _, container := range containers {
-		if c.HasSandbox(container.ID) {
-			// FIXME: do we need to reload/update any info about the sandbox?
-			oldPods[container.ID] = container.ID
-			oldPodContainers[container.ID] = container.ID
-			continue
-		}
-		if c.GetContainer(container.ID) != nil {
-			// FIXME: do we need to reload/update any info about the container?
-			oldPodContainers[container.ID] = container.ID
-			continue
-		}
-		// not previously known, so figure out what it is
-		metadata, err2 := c.storageRuntimeServer.GetContainerMetadata(container.ID)
-		if err2 != nil {
-			logrus.Errorf("error parsing metadata for %s: %v, ignoring", container.ID, err2)
-			continue
-		}
-		if metadata.Pod {
-			newPods[container.ID] = &metadata
-		} else {
-			newPodContainers[container.ID] = &metadata
-		}
-	}
-	c.ctrIDIndex.Iterate(func(id string) {
-		if _, ok := oldPodContainers[id]; !ok {
-			// this container's ID wasn't in the updated list -> removed
-			removedPodContainers[id] = id
-		} else {
-			ctr := c.GetContainer(id)
-			if ctr != nil {
-				// if the container exists, update its state
-				c.ContainerStateFromDisk(c.GetContainer(id))
-			}
-		}
-	})
-	for removedPodContainer := range removedPodContainers {
-		// forget this container
-		ctr := c.GetContainer(removedPodContainer)
-		if ctr == nil {
-			logrus.Warnf("bad state when getting container removed %+v", removedPodContainer)
-			continue
-		}
-		c.ReleaseContainerName(ctr.Name())
-		c.RemoveContainer(ctr)
-		if err = c.ctrIDIndex.Delete(ctr.ID()); err != nil {
-			return err
-		}
-		logrus.Debugf("forgetting removed pod container %s", ctr.ID())
-	}
-	c.PodIDIndex().Iterate(func(id string) {
-		if _, ok := oldPods[id]; !ok {
-			// this pod's ID wasn't in the updated list -> removed
-			removedPods[id] = id
-		}
-	})
-	for removedPod := range removedPods {
-		// forget this pod
-		sb := c.GetSandbox(removedPod)
-		if sb == nil {
-			logrus.Warnf("bad state when getting pod to remove %+v", removedPod)
-			continue
-		}
-		podInfraContainer := sb.InfraContainer()
-		c.ReleaseContainerName(podInfraContainer.Name())
-		c.RemoveContainer(podInfraContainer)
-		if err = c.ctrIDIndex.Delete(podInfraContainer.ID()); err != nil {
-			return err
-		}
-		sb.RemoveInfraContainer()
-		c.ReleasePodName(sb.Name())
-		c.RemoveSandbox(sb.ID())
-		if err = c.podIDIndex.Delete(sb.ID()); err != nil {
-			return err
-		}
-		logrus.Debugf("forgetting removed pod %s", sb.ID())
-	}
-	for sandboxID := range newPods {
-		// load this pod
-		if err = c.LoadSandbox(sandboxID); err != nil {
-			logrus.Warnf("could not load new pod sandbox %s: %v, ignoring", sandboxID, err)
-		} else {
-			logrus.Debugf("loaded new pod sandbox %s", sandboxID, err)
-		}
-	}
-	for containerID := range newPodContainers {
-		// load this container
-		if err = c.LoadContainer(containerID); err != nil {
-			logrus.Warnf("could not load new sandbox container %s: %v, ignoring", containerID, err)
-		} else {
-			logrus.Debugf("loaded new pod container %s", containerID, err)
-		}
-	}
-	return nil
-}
-
-// LoadSandbox loads a sandbox from the disk into the sandbox store
-func (c *ContainerServer) LoadSandbox(id string) error {
-	config, err := c.store.FromContainerDirectory(id, "config.json")
-	if err != nil {
-		return err
-	}
-	var m rspec.Spec
-	if err = json.Unmarshal(config, &m); err != nil {
-		return err
-	}
-	labels := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations[annotations.Labels]), &labels); err != nil {
-		return err
-	}
-	name := m.Annotations[annotations.Name]
-	name, err = c.ReservePodName(id, name)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if err != nil {
-			c.ReleasePodName(name)
-		}
-	}()
-	var metadata pb.PodSandboxMetadata
-	if err = json.Unmarshal([]byte(m.Annotations[annotations.Metadata]), &metadata); err != nil {
-		return err
-	}
-
-	ip := m.Annotations[annotations.IP]
-
-	processLabel, mountLabel, err := label.InitLabels(label.DupSecOpt(m.Process.SelinuxLabel))
-	if err != nil {
-		return err
-	}
-
-	kubeAnnotations := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
-		return err
-	}
-
-	privileged := isTrue(m.Annotations[annotations.PrivilegedRuntime])
-	trusted := isTrue(m.Annotations[annotations.TrustedSandbox])
-
-	sb, err := sandbox.New(id, name, m.Annotations[annotations.KubeName], filepath.Dir(m.Annotations[annotations.LogPath]), "", labels, kubeAnnotations, processLabel, mountLabel, &metadata, m.Annotations[annotations.ShmPath], "", privileged, trusted, m.Annotations[annotations.ResolvPath], "", nil)
-	if err != nil {
-		return err
-	}
-	sb.AddHostnamePath(m.Annotations[annotations.HostnamePath])
-	sb.AddIP(ip)
-
-	// We add a netNS only if we can load a permanent one.
-	// Otherwise, the sandbox will live in the host namespace.
-	netNsPath, err := configNetNsPath(m)
-	if err == nil {
-		nsErr := sb.NetNsJoin(netNsPath, sb.Name())
-		// If we can't load the networking namespace
-		// because it's closed, we just set the sb netns
-		// pointer to nil. Otherwise we return an error.
-		if nsErr != nil && nsErr != sandbox.ErrClosedNetNS {
-			return nsErr
-		}
-	}
-
-	c.AddSandbox(sb)
-
-	defer func() {
-		if err != nil {
-			c.RemoveSandbox(sb.ID())
-		}
-	}()
-
-	sandboxPath, err := c.store.ContainerRunDirectory(id)
-	if err != nil {
-		return err
-	}
-
-	sandboxDir, err := c.store.ContainerDirectory(id)
-	if err != nil {
-		return err
-	}
-
-	cname, err := c.ReserveContainerName(m.Annotations[annotations.ContainerID], m.Annotations[annotations.ContainerName])
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if err != nil {
-			c.ReleaseContainerName(cname)
-		}
-	}()
-
-	created, err := time.Parse(time.RFC3339Nano, m.Annotations[annotations.Created])
-	if err != nil {
-		return err
-	}
-
-	scontainer, err := oci.NewContainer(m.Annotations[annotations.ContainerID], cname, sandboxPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, m.Annotations, kubeAnnotations, "", "", "", nil, id, false, false, false, privileged, trusted, sandboxDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
-	if err != nil {
-		return err
-	}
-	scontainer.SetSpec(&m)
-	scontainer.SetMountPoint(m.Annotations[annotations.MountPoint])
-
-	if m.Annotations[annotations.Volumes] != "" {
-		containerVolumes := []oci.ContainerVolume{}
-		if err = json.Unmarshal([]byte(m.Annotations[annotations.Volumes]), &containerVolumes); err != nil {
-			return fmt.Errorf("failed to unmarshal container volumes: %v", err)
-		}
-		if containerVolumes != nil {
-			for _, cv := range containerVolumes {
-				scontainer.AddVolume(cv)
-			}
-		}
-	}
-
-	c.ContainerStateFromDisk(scontainer)
-
-	if err = label.ReserveLabel(processLabel); err != nil {
-		return err
-	}
-	sb.SetInfraContainer(scontainer)
-	if err = c.ctrIDIndex.Add(scontainer.ID()); err != nil {
-		return err
-	}
-	if err = c.podIDIndex.Add(id); err != nil {
-		return err
-	}
-	return nil
-}
-
-func configNetNsPath(spec rspec.Spec) (string, error) {
-	for _, ns := range spec.Linux.Namespaces {
-		if ns.Type != rspec.NetworkNamespace {
-			continue
-		}
-
-		if ns.Path == "" {
-			return "", fmt.Errorf("empty networking namespace")
-		}
-
-		return ns.Path, nil
-	}
-
-	return "", fmt.Errorf("missing networking namespace")
-}
-
-// LoadContainer loads a container from the disk into the container store
-func (c *ContainerServer) LoadContainer(id string) error {
-	config, err := c.store.FromContainerDirectory(id, "config.json")
-	if err != nil {
-		return err
-	}
-	var m rspec.Spec
-	if err = json.Unmarshal(config, &m); err != nil {
-		return err
-	}
-	labels := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations[annotations.Labels]), &labels); err != nil {
-		return err
-	}
-	name := m.Annotations[annotations.Name]
-	name, err = c.ReserveContainerName(id, name)
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		if err != nil {
-			c.ReleaseContainerName(name)
-		}
-	}()
-
-	var metadata pb.ContainerMetadata
-	if err = json.Unmarshal([]byte(m.Annotations[annotations.Metadata]), &metadata); err != nil {
-		return err
-	}
-	sb := c.GetSandbox(m.Annotations[annotations.SandboxID])
-	if sb == nil {
-		return fmt.Errorf("could not get sandbox with id %s, skipping", m.Annotations[annotations.SandboxID])
-	}
-
-	tty := isTrue(m.Annotations[annotations.TTY])
-	stdin := isTrue(m.Annotations[annotations.Stdin])
-	stdinOnce := isTrue(m.Annotations[annotations.StdinOnce])
-
-	containerPath, err := c.store.ContainerRunDirectory(id)
-	if err != nil {
-		return err
-	}
-
-	containerDir, err := c.store.ContainerDirectory(id)
-	if err != nil {
-		return err
-	}
-
-	img, ok := m.Annotations[annotations.Image]
-	if !ok {
-		img = ""
-	}
-
-	imgName, ok := m.Annotations[annotations.ImageName]
-	if !ok {
-		imgName = ""
-	}
-
-	imgRef, ok := m.Annotations[annotations.ImageRef]
-	if !ok {
-		imgRef = ""
-	}
-
-	kubeAnnotations := make(map[string]string)
-	if err = json.Unmarshal([]byte(m.Annotations[annotations.Annotations]), &kubeAnnotations); err != nil {
-		return err
-	}
-
-	created, err := time.Parse(time.RFC3339Nano, m.Annotations[annotations.Created])
-	if err != nil {
-		return err
-	}
-
-	ctr, err := oci.NewContainer(id, name, containerPath, m.Annotations[annotations.LogPath], sb.NetNs(), labels, m.Annotations, kubeAnnotations, img, imgName, imgRef, &metadata, sb.ID(), tty, stdin, stdinOnce, sb.Privileged(), sb.Trusted(), containerDir, created, m.Annotations["org.opencontainers.image.stopSignal"])
-	if err != nil {
-		return err
-	}
-	ctr.SetSpec(&m)
-	ctr.SetMountPoint(m.Annotations[annotations.MountPoint])
-
-	c.ContainerStateFromDisk(ctr)
-
-	c.AddContainer(ctr)
-	return c.ctrIDIndex.Add(id)
-}
-
-func isTrue(annotaton string) bool {
-	return annotaton == "true"
-}
-
-// ContainerStateFromDisk retrieves information on the state of a running container
-// from the disk
-func (c *ContainerServer) ContainerStateFromDisk(ctr *oci.Container) error {
-	if err := ctr.FromDisk(); err != nil {
-		return err
-	}
-	// ignore errors, this is a best effort to have up-to-date info about
-	// a given container before its state gets stored
-	c.runtime.UpdateStatus(ctr)
-
-	return nil
-}
-
-// ContainerStateToDisk writes the container's state information to a JSON file
-// on disk
-func (c *ContainerServer) ContainerStateToDisk(ctr *oci.Container) error {
-	// ignore errors, this is a best effort to have up-to-date info about
-	// a given container before its state gets stored
-	c.Runtime().UpdateStatus(ctr)
-
-	jsonSource, err := ioutils.NewAtomicFileWriter(ctr.StatePath(), 0644)
-	if err != nil {
-		return err
-	}
-	defer jsonSource.Close()
-	enc := json.NewEncoder(jsonSource)
-	return enc.Encode(c.runtime.ContainerStatus(ctr))
-}
-
-// ReserveContainerName holds a name for a container that is being created
-func (c *ContainerServer) ReserveContainerName(id, name string) (string, error) {
-	if err := c.ctrNameIndex.Reserve(name, id); err != nil {
-		if err == registrar.ErrNameReserved {
-			id, err := c.ctrNameIndex.Get(name)
-			if err != nil {
-				logrus.Warnf("conflict, ctr name %q already reserved", name)
-				return "", err
-			}
-			return "", fmt.Errorf("conflict, name %q already reserved for ctr %q", name, id)
-		}
-		return "", fmt.Errorf("error reserving ctr name %s", name)
-	}
-	return name, nil
-}
-
-// ReleaseContainerName releases a container name from the index so that it can
-// be used by other containers
-func (c *ContainerServer) ReleaseContainerName(name string) {
-	c.ctrNameIndex.Release(name)
-}
-
-// ReservePodName holds a name for a pod that is being created
-func (c *ContainerServer) ReservePodName(id, name string) (string, error) {
-	if err := c.podNameIndex.Reserve(name, id); err != nil {
-		if err == registrar.ErrNameReserved {
-			id, err := c.podNameIndex.Get(name)
-			if err != nil {
-				logrus.Warnf("conflict, pod name %q already reserved", name)
-				return "", err
-			}
-			return "", fmt.Errorf("conflict, name %q already reserved for pod %q", name, id)
-		}
-		return "", fmt.Errorf("error reserving pod name %q", name)
-	}
-	return name, nil
-}
-
-// ReleasePodName releases a pod name from the index so it can be used by other
-// pods
-func (c *ContainerServer) ReleasePodName(name string) {
-	c.podNameIndex.Release(name)
-}
-
-// Shutdown attempts to shut down the server's storage cleanly
-func (c *ContainerServer) Shutdown() error {
-	_, err := c.store.Shutdown(false)
-	if err != nil && errors.Cause(err) != cstorage.ErrLayerUsedByContainer {
-		return err
-	}
-	return nil
-}
-
-type containerServerState struct {
-	containers oci.ContainerStorer
-	infraContainers oci.ContainerStorer
-	sandboxes map[string]*sandbox.Sandbox
-	// processLevels The number of sandboxes using the same SELinux MCS level. Need to release MCS Level, when count reaches 0
-	processLevels map[string]int
-}
-
-// AddContainer adds a container to the container state store
-func (c *ContainerServer) AddContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	sandbox := c.state.sandboxes[ctr.Sandbox()]
-	sandbox.AddContainer(ctr)
-	c.state.containers.Add(ctr.ID(), ctr)
-}
-
-// AddInfraContainer adds a container to the container state store
-func (c *ContainerServer) AddInfraContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	c.state.infraContainers.Add(ctr.ID(), ctr)
-}
-
-// GetContainer returns a container by its ID
-func (c *ContainerServer) GetContainer(id string) *oci.Container {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	return c.state.containers.Get(id)
-}
-
-// GetInfraContainer returns a container by its ID
-func (c *ContainerServer) GetInfraContainer(id string) *oci.Container {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	return c.state.infraContainers.Get(id)
-}
-
-// HasContainer checks if a container exists in the state
-func (c *ContainerServer) HasContainer(id string) bool {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	ctr := c.state.containers.Get(id)
-	return ctr != nil
-}
-
-// RemoveContainer removes a container from the container state store
-func (c *ContainerServer) RemoveContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	sbID := ctr.Sandbox()
-	sb := c.state.sandboxes[sbID]
-	sb.RemoveContainer(ctr)
-	c.state.containers.Delete(ctr.ID())
-}
-
-// RemoveInfraContainer removes a container from the container state store
-func (c *ContainerServer) RemoveInfraContainer(ctr *oci.Container) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	c.state.infraContainers.Delete(ctr.ID())
-}
-
-// listContainers returns a list of all containers stored by the server state
-func (c *ContainerServer) listContainers() []*oci.Container {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	return c.state.containers.List()
-}
-
-// ListContainers returns a list of all containers stored by the server state
-// that match the given filter function
-func (c *ContainerServer) ListContainers(filters ...func(*oci.Container) bool) ([]*oci.Container, error) {
-	containers := c.listContainers()
-	if len(filters) == 0 {
-		return containers, nil
-	}
-	filteredContainers := make([]*oci.Container, 0, len(containers))
-	for _, container := range containers {
-		for _, filter := range filters {
-			if filter(container) {
-				filteredContainers = append(filteredContainers, container)
-			}
-		}
-	}
-	return filteredContainers, nil
-}
-
-// AddSandbox adds a sandbox to the sandbox state store
-func (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	c.state.sandboxes[sb.ID()] = sb
-	c.state.processLevels[selinux.NewContext(sb.ProcessLabel())["level"]]++
-}
-
-// GetSandbox returns a sandbox by its ID
-func (c *ContainerServer) GetSandbox(id string) *sandbox.Sandbox {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	return c.state.sandboxes[id]
-}
-
-// GetSandboxContainer returns a sandbox's infra container
-func (c *ContainerServer) GetSandboxContainer(id string) *oci.Container {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	sb, ok := c.state.sandboxes[id]
-	if !ok {
-		return nil
-	}
-	return sb.InfraContainer()
-}
-
-// HasSandbox checks if a sandbox exists in the state
-func (c *ContainerServer) HasSandbox(id string) bool {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	_, ok := c.state.sandboxes[id]
-	return ok
-}
-
-// RemoveSandbox removes a sandbox from the state store
-func (c *ContainerServer) RemoveSandbox(id string) {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	processLabel := c.state.sandboxes[id].ProcessLabel()
-	delete(c.state.sandboxes, id)
-	level := selinux.NewContext(processLabel)["level"]
-	c.state.processLevels[level]--
-	if c.state.processLevels[level] == 0 {
-		label.ReleaseLabel(processLabel)
-		delete(c.state.processLevels, level)
-	}
-}
-
-// ListSandboxes lists all sandboxes in the state store
-func (c *ContainerServer) ListSandboxes() []*sandbox.Sandbox {
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-	sbArray := make([]*sandbox.Sandbox, 0, len(c.state.sandboxes))
-	for _, sb := range c.state.sandboxes {
-		sbArray = append(sbArray, sb)
-	}
-
-	return sbArray
-}
-
-// LibcontainerStats gets the stats for the container with the given id from runc/libcontainer
-func (c *ContainerServer) LibcontainerStats(ctr *oci.Container) (*libcontainer.Stats, error) {
-	// TODO: make this not hardcoded
-	// was: c.runtime.Path(ociContainer) but that returns /usr/bin/runc - how do we get /run/runc?
-	// runroot is /var/run/runc
-	// Hardcoding probably breaks ClearContainers compatibility
-	factory, err := loadFactory("/run/runc")
-	if err != nil {
-		return nil, err
-	}
-	container, err := factory.Load(ctr.ID())
-	if err != nil {
-		return nil, err
-	}
-	return container.Stats()
-}
@@ -1,45 +0,0 @@
-package libkpod
-
-import (
-	"github.com/docker/docker/pkg/signal"
-	"github.com/pkg/errors"
-	"github.com/projectatomic/libpod/oci"
-	"github.com/projectatomic/libpod/utils"
-	"os"
-	"syscall"
-)
-
-// Reverse lookup signal string from its map
-func findStringInSignalMap(killSignal syscall.Signal) (string, error) {
-	for k, v := range signal.SignalMap {
-		if v == killSignal {
-			return k, nil
-		}
-	}
-	return "", errors.Errorf("unable to convert signal to string")
-
-}
-
-// ContainerKill sends the user provided signal to the containers primary process.
-func (c *ContainerServer) ContainerKill(container string, killSignal syscall.Signal) (string, error) { // nolint
-	ctr, err := c.LookupContainer(container)
-	if err != nil {
-		return "", errors.Wrapf(err, "failed to find container %s", container)
-	}
-	c.runtime.UpdateStatus(ctr)
-	cStatus := c.runtime.ContainerStatus(ctr)
-
-	// If the container is not running, error and move on.
-	if cStatus.Status != oci.ContainerStateRunning {
-		return "", errors.Errorf("cannot kill container %s: it is not running", container)
-	}
-	signalString, err := findStringInSignalMap(killSignal)
-	if err != nil {
-		return "", err
-	}
-	if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, c.runtime.Path(ctr), "kill", ctr.ID(), signalString); err != nil {
-		return "", err
-	}
-	c.ContainerStateToDisk(ctr)
-	return ctr.ID(), nil
-}
@@ -1,114 +0,0 @@
-package libkpod
-
-import (
-	"encoding/json"
-	"path/filepath"
-
-	"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
-
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/opencontainers/runtime-tools/generate"
-	"github.com/projectatomic/libpod/oci"
-	"github.com/projectatomic/libpod/pkg/annotations"
-)
-
-const configFile = "config.json"
-
-// ContainerRename renames the given container
-func (c *ContainerServer) ContainerRename(container, name string) error {
-	ctr, err := c.LookupContainer(container)
-	if err != nil {
-		return err
-	}
-
-	oldName := ctr.Name()
-	_, err = c.ReserveContainerName(ctr.ID(), name)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if err != nil {
-			c.ReleaseContainerName(name)
-		} else {
-			c.ReleaseContainerName(oldName)
-		}
-	}()
-
-	// Update state.json
-	if err = c.updateStateName(ctr, name); err != nil {
-		return err
-	}
-
-	// Update config.json
-	configRuntimePath := filepath.Join(ctr.BundlePath(), configFile)
-	if err = updateConfigName(configRuntimePath, name); err != nil {
-		return err
-	}
-	configStoragePath := filepath.Join(ctr.Dir(), configFile)
-	if err = updateConfigName(configStoragePath, name); err != nil {
-		return err
-	}
-
-	// Update containers.json
-	if err = c.store.SetNames(ctr.ID(), []string{name}); err != nil {
-		return err
-	}
-	return nil
-}
-
-func updateConfigName(configPath, name string) error {
-	specgen, err := generate.NewFromFile(configPath)
-	if err != nil {
-		return err
-	}
-	specgen.AddAnnotation(annotations.Name, name)
-	specgen.AddAnnotation(annotations.Metadata, updateMetadata(specgen.Spec().Annotations, name))
-
-	return specgen.SaveToFile(configPath, generate.ExportOptions{})
-}
-
-func (c *ContainerServer) updateStateName(ctr *oci.Container, name string) error {
-	ctr.State().Annotations[annotations.Name] = name
-	ctr.State().Annotations[annotations.Metadata] = updateMetadata(ctr.State().Annotations, name)
-	// This is taken directly from c.ContainerStateToDisk(), which can't be used because of the call to UpdateStatus() in the first line
-	jsonSource, err := ioutils.NewAtomicFileWriter(ctr.StatePath(), 0644)
-	if err != nil {
-		return err
-	}
-	defer jsonSource.Close()
-	enc := json.NewEncoder(jsonSource)
-	return enc.Encode(c.runtime.ContainerStatus(ctr))
-}
-
-// Attempts to update a metadata annotation
-func updateMetadata(specAnnotations map[string]string, name string) string {
-	oldMetadata := specAnnotations[annotations.Metadata]
-	containerType := specAnnotations[annotations.ContainerType]
-	if containerType == "container" {
-		metadata := runtime.ContainerMetadata{}
-		err := json.Unmarshal([]byte(oldMetadata), metadata)
-		if err != nil {
-			return oldMetadata
-		}
-		metadata.Name = name
-		m, err := json.Marshal(metadata)
-		if err != nil {
-			return oldMetadata
-		}
-		return string(m)
-	} else if containerType == "sandbox" {
-		metadata := runtime.PodSandboxMetadata{}
-		err := json.Unmarshal([]byte(oldMetadata), metadata)
-		if err != nil {
-			return oldMetadata
-		}
-		metadata.Name = name
-		m, err := json.Marshal(metadata)
-		if err != nil {
-			return oldMetadata
-		}
-		return string(m)
-	} else {
-		return specAnnotations[annotations.Metadata]
-	}
-}
@ -1,484 +0,0 @@
|
|||||||
package sandbox
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/containernetworking/plugins/pkg/ns"
|
|
||||||
"github.com/docker/docker/pkg/mount"
|
|
||||||
"github.com/docker/docker/pkg/symlink"
|
|
||||||
"github.com/projectatomic/libpod/oci"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
|
||||||
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NetNs handles data pertaining a network namespace
|
|
||||||
type NetNs struct {
|
|
||||||
sync.Mutex
|
|
||||||
ns ns.NetNS
|
|
||||||
symlink *os.File
|
|
||||||
closed bool
|
|
||||||
restored bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ns *NetNs) symlinkCreate(name string) error {
|
|
||||||
b := make([]byte, 4)
|
|
||||||
_, randErr := rand.Reader.Read(b)
|
|
||||||
if randErr != nil {
|
|
||||||
return randErr
|
|
||||||
}
|
|
||||||
|
|
||||||
nsName := fmt.Sprintf("%s-%x", name, b)
|
|
||||||
symlinkPath := filepath.Join(NsRunDir, nsName)
|
|
||||||
|
|
||||||
if err := os.Symlink(ns.ns.Path(), symlinkPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
fd, err := os.Open(symlinkPath)
|
|
||||||
if err != nil {
|
|
||||||
if removeErr := os.RemoveAll(symlinkPath); removeErr != nil {
|
|
||||||
return removeErr
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ns.symlink = fd
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ns *NetNs) symlinkRemove() error {
|
|
||||||
if err := ns.symlink.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return os.RemoveAll(ns.symlink.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
func isSymbolicLink(path string) (bool, error) {
|
|
||||||
fi, err := os.Lstat(path)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fi.Mode()&os.ModeSymlink == os.ModeSymlink, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetNsGet returns the NetNs associated with the given nspath and name
|
|
||||||
func NetNsGet(nspath, name string) (*NetNs, error) {
|
|
||||||
if err := ns.IsNSorErr(nspath); err != nil {
|
|
||||||
return nil, ErrClosedNetNS
|
|
||||||
}
|
|
||||||
|
|
||||||
symlink, symlinkErr := isSymbolicLink(nspath)
|
|
||||||
if symlinkErr != nil {
|
|
||||||
return nil, symlinkErr
|
|
||||||
}
|
|
||||||
|
|
||||||
var resolvedNsPath string
|
|
||||||
if symlink {
|
|
||||||
path, err := os.Readlink(nspath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
resolvedNsPath = path
|
|
||||||
} else {
|
|
||||||
resolvedNsPath = nspath
|
|
||||||
}
|
|
||||||
|
|
||||||
netNS, err := ns.GetNS(resolvedNsPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
netNs := &NetNs{ns: netNS, closed: false, restored: true}
|
|
||||||
|
|
||||||
if symlink {
|
|
||||||
fd, err := os.Open(nspath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
netNs.symlink = fd
|
|
||||||
} else {
|
|
||||||
if err := netNs.symlinkCreate(name); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return netNs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// HostNetNsPath returns the current network namespace for the host
|
|
||||||
func HostNetNsPath() (string, error) {
|
|
||||||
netNS, err := ns.GetCurrentNS()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer netNS.Close()
|
|
||||||
return netNS.Path(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sandbox contains data surrounding kubernetes sandboxes on the server
|
|
||||||
type Sandbox struct {
|
|
||||||
id string
|
|
||||||
namespace string
|
|
||||||
// OCI pod name (eg "<namespace>-<name>-<attempt>")
|
|
||||||
name string
|
|
||||||
// Kubernetes pod name (eg, "<name>")
|
|
||||||
kubeName string
|
|
||||||
logDir string
|
|
||||||
labels fields.Set
|
|
||||||
annotations map[string]string
|
|
||||||
infraContainer *oci.Container
|
|
||||||
containers oci.ContainerStorer
|
|
||||||
processLabel string
|
|
||||||
mountLabel string
|
|
||||||
netns *NetNs
|
|
||||||
metadata *pb.PodSandboxMetadata
|
|
||||||
shmPath string
|
|
||||||
cgroupParent string
|
|
||||||
privileged bool
|
|
||||||
trusted bool
|
|
||||||
resolvPath string
|
|
||||||
hostnamePath string
|
|
||||||
hostname string
|
|
||||||
portMappings []*hostport.PortMapping
|
|
||||||
stopped bool
|
|
||||||
// ipv4 or ipv6 cache
|
|
||||||
ip string
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultShmSize is the default shm size
|
|
||||||
DefaultShmSize = 64 * 1024 * 1024
|
|
||||||
// NsRunDir is the default directory in which running network namespaces
|
|
||||||
// are stored
|
|
||||||
NsRunDir = "/var/run/netns"
|
|
||||||
// PodInfraCommand is the default command when starting a pod infrastructure
|
|
||||||
// container
|
|
||||||
PodInfraCommand = "/pause"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrIDEmpty is the erro returned when the id of the sandbox is empty
|
|
||||||
ErrIDEmpty = errors.New("PodSandboxId should not be empty")
|
|
||||||
// ErrClosedNetNS is the error returned when the network namespace of the
|
|
||||||
// sandbox is closed
|
|
||||||
ErrClosedNetNS = errors.New("PodSandbox networking namespace is closed")
|
|
||||||
)
|
|
||||||
|
|
||||||
// New creates and populates a new pod sandbox
|
|
||||||
// New sandboxes have no containers, no infra container, and no network namespaces associated with them
|
|
||||||
// An infra container must be attached before the sandbox is added to the state
|
|
||||||
func New(id, namespace, name, kubeName, logDir string, labels, annotations map[string]string, processLabel, mountLabel string, metadata *pb.PodSandboxMetadata, shmPath, cgroupParent string, privileged, trusted bool, resolvPath, hostname string, portMappings []*hostport.PortMapping) (*Sandbox, error) {
|
|
||||||
sb := new(Sandbox)
|
|
||||||
sb.id = id
|
|
||||||
sb.namespace = namespace
|
|
||||||
sb.name = name
|
|
||||||
sb.kubeName = kubeName
|
|
||||||
sb.logDir = logDir
|
|
||||||
sb.labels = labels
|
|
||||||
sb.annotations = annotations
|
|
||||||
sb.containers = oci.NewMemoryStore()
|
|
||||||
sb.processLabel = processLabel
|
|
||||||
sb.mountLabel = mountLabel
|
|
||||||
sb.metadata = metadata
|
|
||||||
sb.shmPath = shmPath
|
|
||||||
sb.cgroupParent = cgroupParent
|
|
||||||
sb.privileged = privileged
|
|
||||||
sb.trusted = trusted
|
|
||||||
sb.resolvPath = resolvPath
|
|
||||||
sb.hostname = hostname
|
|
||||||
sb.portMappings = portMappings
|
|
||||||
|
|
||||||
return sb, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddIP stores the ip in the sandbox
|
|
||||||
func (s *Sandbox) AddIP(ip string) {
|
|
||||||
s.ip = ip
|
|
||||||
}
|
|
||||||
|
|
||||||
// IP returns the ip of the sandbox
|
|
||||||
func (s *Sandbox) IP() string {
|
|
||||||
return s.ip
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the id of the sandbox
|
|
||||||
func (s *Sandbox) ID() string {
|
|
||||||
return s.id
|
|
||||||
}
|
|
||||||
|
|
||||||
// Namespace returns the namespace for the sandbox
|
|
||||||
func (s *Sandbox) Namespace() string {
|
|
||||||
return s.namespace
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the sandbox
|
|
||||||
func (s *Sandbox) Name() string {
|
|
||||||
return s.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// KubeName returns the kubernetes name for the sandbox
|
|
||||||
func (s *Sandbox) KubeName() string {
|
|
||||||
return s.kubeName
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogDir returns the location of the logging directory for the sandbox
|
|
||||||
func (s *Sandbox) LogDir() string {
|
|
||||||
return s.logDir
|
|
||||||
}
|
|
||||||
|
|
||||||
// Labels returns the labels associated with the sandbox
|
|
||||||
func (s *Sandbox) Labels() fields.Set {
|
|
||||||
return s.labels
|
|
||||||
}
|
|
||||||
|
|
||||||
// Annotations returns a list of annotations for the sandbox
|
|
||||||
func (s *Sandbox) Annotations() map[string]string {
|
|
||||||
return s.annotations
|
|
||||||
}
|
|
||||||
|
|
||||||
// InfraContainer returns the infrastructure container for the sandbox
|
|
||||||
func (s *Sandbox) InfraContainer() *oci.Container {
|
|
||||||
return s.infraContainer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Containers returns the ContainerStorer that contains information on all
|
|
||||||
// of the containers in the sandbox
|
|
||||||
func (s *Sandbox) Containers() oci.ContainerStorer {
|
|
||||||
return s.containers
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProcessLabel returns the process label for the sandbox
|
|
||||||
func (s *Sandbox) ProcessLabel() string {
|
|
||||||
return s.processLabel
|
|
||||||
}
|
|
||||||
|
|
||||||
// MountLabel returns the mount label for the sandbox
|
|
||||||
func (s *Sandbox) MountLabel() string {
|
|
||||||
return s.mountLabel
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata returns a set of metadata about the sandbox
|
|
||||||
func (s *Sandbox) Metadata() *pb.PodSandboxMetadata {
|
|
||||||
return s.metadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShmPath returns the shm path of the sandbox
|
|
||||||
func (s *Sandbox) ShmPath() string {
|
|
||||||
return s.shmPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// CgroupParent returns the cgroup parent of the sandbox
|
|
||||||
func (s *Sandbox) CgroupParent() string {
|
|
||||||
return s.cgroupParent
|
|
||||||
}
|
|
||||||
|
|
||||||
// Privileged returns whether or not the containers in the sandbox are
|
|
||||||
// privileged containers
|
|
||||||
func (s *Sandbox) Privileged() bool {
|
|
||||||
return s.privileged
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trusted returns whether or not the containers in the sandbox are trusted
|
|
||||||
func (s *Sandbox) Trusted() bool {
|
|
||||||
return s.trusted
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolvPath returns the resolv path for the sandbox
|
|
||||||
func (s *Sandbox) ResolvPath() string {
|
|
||||||
return s.resolvPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddHostnamePath adds the hostname path to the sandbox
|
|
||||||
func (s *Sandbox) AddHostnamePath(hostname string) {
|
|
||||||
s.hostnamePath = hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
// HostnamePath retrieves the hostname path from a sandbox
|
|
||||||
func (s *Sandbox) HostnamePath() string {
|
|
||||||
return s.hostnamePath
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hostname returns the hostname of the sandbox
|
|
||||||
func (s *Sandbox) Hostname() string {
|
|
||||||
return s.hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
// PortMappings returns a list of port mappings between the host and the sandbox
|
|
||||||
func (s *Sandbox) PortMappings() []*hostport.PortMapping {
|
|
||||||
return s.portMappings
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddContainer adds a container to the sandbox
|
|
||||||
func (s *Sandbox) AddContainer(c *oci.Container) {
|
|
||||||
s.containers.Add(c.Name(), c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContainer retrieves a container from the sandbox
|
|
||||||
func (s *Sandbox) GetContainer(name string) *oci.Container {
|
|
||||||
return s.containers.Get(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveContainer deletes a container from the sandbox
|
|
||||||
func (s *Sandbox) RemoveContainer(c *oci.Container) {
|
|
||||||
s.containers.Delete(c.Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetInfraContainer sets the infrastructure container of a sandbox
|
|
||||||
// Attempts to set the infrastructure container after one is already present will throw an error
|
|
||||||
func (s *Sandbox) SetInfraContainer(infraCtr *oci.Container) error {
|
|
||||||
if s.infraContainer != nil {
|
|
||||||
return fmt.Errorf("sandbox already has an infra container")
|
|
||||||
} else if infraCtr == nil {
|
|
||||||
return fmt.Errorf("must provide non-nil infra container")
|
|
||||||
}
|
|
||||||
|
|
||||||
s.infraContainer = infraCtr
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveInfraContainer removes the infrastructure container of a sandbox
|
|
||||||
func (s *Sandbox) RemoveInfraContainer() {
|
|
||||||
s.infraContainer = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetNs retrieves the network namespace of the sandbox
|
|
||||||
// If the sandbox uses the host namespace, nil is returned
|
|
||||||
func (s *Sandbox) NetNs() ns.NetNS {
|
|
||||||
if s.netns == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.netns.ns
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetNsPath returns the path to the network namespace of the sandbox.
|
|
||||||
// If the sandbox uses the host namespace, an empty string is returned
|
|
||||||
func (s *Sandbox) NetNsPath() string {
|
|
||||||
if s.netns == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.netns.symlink.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetNsCreate creates a new network namespace for the sandbox
|
|
||||||
func (s *Sandbox) NetNsCreate() error {
|
|
||||||
if s.netns != nil {
|
|
||||||
return fmt.Errorf("net NS already created")
|
|
||||||
}
|
|
||||||
|
|
||||||
netNS, err := ns.NewNS()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
s.netns = &NetNs{
|
|
||||||
ns: netNS,
|
|
||||||
closed: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.netns.symlinkCreate(s.name); err != nil {
|
|
||||||
logrus.Warnf("Could not create nentns symlink %v", err)
|
|
||||||
|
|
||||||
if err1 := s.netns.ns.Close(); err1 != nil {
|
|
||||||
return err1
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStopped sets the sandbox state to stopped.
|
|
||||||
// This should be set after a stop operation succeeds
|
|
||||||
// so that subsequent stops can return fast.
|
|
||||||
func (s *Sandbox) SetStopped() {
|
|
||||||
s.stopped = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stopped returns whether the sandbox state has been
|
|
||||||
// set to stopped.
|
|
||||||
func (s *Sandbox) Stopped() bool {
|
|
||||||
return s.stopped
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetNsJoin attempts to join the sandbox to an existing network namespace
|
|
||||||
// This will fail if the sandbox is already part of a network namespace
|
|
||||||
func (s *Sandbox) NetNsJoin(nspath, name string) error {
|
|
||||||
if s.netns != nil {
|
|
||||||
return fmt.Errorf("sandbox already has a network namespace, cannot join another")
|
|
||||||
}
|
|
||||||
|
|
||||||
netNS, err := NetNsGet(nspath, name)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
s.netns = netNS
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetNsRemove removes the network namespace associated with the sandbox
|
|
||||||
func (s *Sandbox) NetNsRemove() error {
|
|
||||||
if s.netns == nil {
|
|
||||||
logrus.Warn("no networking namespace")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
s.netns.Lock()
|
|
||||||
defer s.netns.Unlock()
|
|
||||||
|
|
||||||
if s.netns.closed {
|
|
||||||
// netNsRemove() can be called multiple
|
|
||||||
// times without returning an error.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.netns.symlinkRemove(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.netns.ns.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.netns.restored {
|
|
||||||
// we got namespaces in the form of
|
|
||||||
// /var/run/netns/cni-0d08effa-06eb-a963-f51a-e2b0eceffc5d
|
|
||||||
// but /var/run on most systems is symlinked to /run so we first resolve
|
|
||||||
// the symlink and then try and see if it's mounted
|
|
||||||
fp, err := symlink.FollowSymlinkInScope(s.netns.ns.Path(), "/")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if mounted, err := mount.Mounted(fp); err == nil && mounted {
|
|
||||||
if err := unix.Unmount(fp, unix.MNT_DETACH); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.RemoveAll(s.netns.ns.Path()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s.netns.closed = true
|
|
||||||
return nil
|
|
||||||
}
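The constructor and the namespace/infra helpers above imply a fixed bring-up order: build the sandbox, give it a network namespace, then attach exactly one infra container. The sketch below only illustrates that flow; the `sandbox` package name, the import paths and every argument value (`meta`, `infraCtr`, paths, labels) are placeholders, not values taken from this change.

// Hypothetical wiring of a new sandbox; all values are illustrative.
func setupSandbox(meta *pb.PodSandboxMetadata, infraCtr *oci.Container) (*sandbox.Sandbox, error) {
	sb, err := sandbox.New("sb-id", "default", "k8s_mypod_sandbox", "mypod",
		"/var/log/pods/mypod", map[string]string{"app": "web"}, map[string]string{},
		"", "", meta, "/dev/shm", "/kubepods", false, true,
		"/etc/resolv.conf", "mypod", nil)
	if err != nil {
		return nil, err
	}
	if err := sb.NetNsCreate(); err != nil { // fresh netns plus its /var/run/netns symlink
		return nil, err
	}
	if err := sb.SetInfraContainer(infraCtr); err != nil { // may be set exactly once
		return nil, err
	}
	// Workload containers are registered later with sb.AddContainer(ctr),
	// keyed by container name in the sandbox's memory store.
	return sb, nil
}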
|
|
111 libkpod/stats.go
@ -1,111 +0,0 @@
|
|||||||
package libkpod
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/opencontainers/runc/libcontainer"
|
|
||||||
"github.com/projectatomic/libpod/oci"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ContainerStats contains the statistics information for a running container
|
|
||||||
type ContainerStats struct {
|
|
||||||
Container string
|
|
||||||
CPU float64
|
|
||||||
cpuNano uint64
|
|
||||||
systemNano uint64
|
|
||||||
MemUsage uint64
|
|
||||||
MemLimit uint64
|
|
||||||
MemPerc float64
|
|
||||||
NetInput uint64
|
|
||||||
NetOutput uint64
|
|
||||||
BlockInput uint64
|
|
||||||
BlockOutput uint64
|
|
||||||
PIDs uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetContainerStats gets the running stats for a given container
|
|
||||||
func (c *ContainerServer) GetContainerStats(ctr *oci.Container, previousStats *ContainerStats) (*ContainerStats, error) {
|
|
||||||
previousCPU := previousStats.cpuNano
|
|
||||||
previousSystem := previousStats.systemNano
|
|
||||||
libcontainerStats, err := c.LibcontainerStats(ctr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
cgroupStats := libcontainerStats.CgroupStats
|
|
||||||
stats := new(ContainerStats)
|
|
||||||
stats.Container = ctr.ID()
|
|
||||||
stats.CPU = calculateCPUPercent(libcontainerStats, previousCPU, previousSystem)
|
|
||||||
stats.MemUsage = cgroupStats.MemoryStats.Usage.Usage
|
|
||||||
stats.MemLimit = getMemLimit(cgroupStats.MemoryStats.Usage.Limit)
|
|
||||||
stats.MemPerc = float64(stats.MemUsage) / float64(stats.MemLimit)
|
|
||||||
stats.PIDs = cgroupStats.PidsStats.Current
|
|
||||||
stats.BlockInput, stats.BlockOutput = calculateBlockIO(libcontainerStats)
|
|
||||||
stats.NetInput, stats.NetOutput = getContainerNetIO(libcontainerStats)
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadFactory(root string) (libcontainer.Factory, error) {
|
|
||||||
abs, err := filepath.Abs(root)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
cgroupManager := libcontainer.Cgroupfs
|
|
||||||
return libcontainer.New(abs, cgroupManager, libcontainer.CriuPath(""))
|
|
||||||
}
|
|
||||||
|
|
||||||
// getMemLimit returns the memory limit for a given cgroup
|
|
||||||
// If the configured memory limit is larger than the total memory on the system, the
|
|
||||||
// physical system memory size is returned
|
|
||||||
func getMemLimit(cgroupLimit uint64) uint64 {
|
|
||||||
si := &syscall.Sysinfo_t{}
|
|
||||||
err := syscall.Sysinfo(si)
|
|
||||||
if err != nil {
|
|
||||||
return cgroupLimit
|
|
||||||
}
|
|
||||||
|
|
||||||
physicalLimit := uint64(si.Totalram)
|
|
||||||
if cgroupLimit > physicalLimit {
|
|
||||||
return physicalLimit
|
|
||||||
}
|
|
||||||
return cgroupLimit
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the total number of bytes transmitted and received for the given container stats
|
|
||||||
func getContainerNetIO(stats *libcontainer.Stats) (received uint64, transmitted uint64) {
|
|
||||||
for _, iface := range stats.Interfaces {
|
|
||||||
received += iface.RxBytes
|
|
||||||
transmitted += iface.TxBytes
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func calculateCPUPercent(stats *libcontainer.Stats, previousCPU, previousSystem uint64) float64 {
|
|
||||||
var (
|
|
||||||
cpuPercent = 0.0
|
|
||||||
cpuDelta = float64(stats.CgroupStats.CpuStats.CpuUsage.TotalUsage - previousCPU)
|
|
||||||
systemDelta = float64(uint64(time.Now().UnixNano()) - previousSystem)
|
|
||||||
)
|
|
||||||
if systemDelta > 0.0 && cpuDelta > 0.0 {
|
|
||||||
// gets a ratio of container cpu usage total, multiplies it by the number of cores (4 cores running
|
|
||||||
// at 100% utilization should be 400% utilization), and multiplies that by 100 to get a percentage
|
|
||||||
cpuPercent = (cpuDelta / systemDelta) * float64(len(stats.CgroupStats.CpuStats.CpuUsage.PercpuUsage)) * 100
|
|
||||||
}
|
|
||||||
return cpuPercent
|
|
||||||
}
|
|
||||||
|
|
||||||
func calculateBlockIO(stats *libcontainer.Stats) (read uint64, write uint64) {
|
|
||||||
for _, blkIOEntry := range stats.CgroupStats.BlkioStats.IoServiceBytesRecursive {
|
|
||||||
switch strings.ToLower(blkIOEntry.Op) {
|
|
||||||
case "read":
|
|
||||||
read += blkIOEntry.Value
|
|
||||||
case "write":
|
|
||||||
write += blkIOEntry.Value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
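calculateCPUPercent above is a delta ratio: CPU time consumed since the previous sample divided by the wall-clock time elapsed, scaled by the number of per-CPU counters and expressed as a percentage. A self-contained sketch of the same arithmetic with made-up numbers, assuming nothing beyond the standard library:

package main

import "fmt"

// cpuPercent mirrors the calculation above: the container's share of elapsed
// time, scaled by the CPU count, as a percentage (4 busy cores read as 400%).
func cpuPercent(cpuDeltaNs, systemDeltaNs float64, numCPUs int) float64 {
	if systemDeltaNs > 0.0 && cpuDeltaNs > 0.0 {
		return (cpuDeltaNs / systemDeltaNs) * float64(numCPUs) * 100
	}
	return 0.0
}

func main() {
	// 0.5s of CPU used over a 1s window on a 4-CPU machine:
	// (0.5 / 1.0) * 4 * 100 = 200, i.e. two cores fully busy on average.
	fmt.Println(cpuPercent(0.5e9, 1.0e9, 4)) // 200
}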
|
|
@ -1,36 +0,0 @@
package libkpod

import (
	"github.com/pkg/errors"
	"github.com/projectatomic/libpod/oci"
	"golang.org/x/net/context"
)

// ContainerStop stops a running container with a grace period (i.e., timeout).
func (c *ContainerServer) ContainerStop(ctx context.Context, container string, timeout int64) (string, error) {
	ctr, err := c.LookupContainer(container)
	if err != nil {
		return "", errors.Wrapf(err, "failed to find container %s", container)
	}
	ctrID := ctr.ID()

	cStatus := c.runtime.ContainerStatus(ctr)
	switch cStatus.Status {
	case oci.ContainerStatePaused:
		return "", errors.Errorf("cannot stop paused container %s", ctrID)
	default:
		if cStatus.Status != oci.ContainerStateStopped {
			if err := c.runtime.StopContainer(ctx, ctr, timeout); err != nil {
				return "", errors.Wrapf(err, "failed to stop container %s", ctrID)
			}
			if err := c.storageRuntimeServer.StopContainer(ctrID); err != nil {
				return "", errors.Wrapf(err, "failed to unmount container %s", ctrID)
			}
		}
	}

	c.ContainerStateToDisk(ctr)

	return ctrID, nil
}
@ -1,42 +0,0 @@
package libkpod

import (
	"github.com/pkg/errors"
	"github.com/projectatomic/libpod/oci"
	"k8s.io/apimachinery/pkg/util/wait"
)

func isStopped(c *ContainerServer, ctr *oci.Container) bool {
	c.runtime.UpdateStatus(ctr)
	cStatus := c.runtime.ContainerStatus(ctr)
	if cStatus.Status == oci.ContainerStateStopped {
		return true
	}
	return false
}

// ContainerWait waits until the given container has stopped and returns its exit code.
func (c *ContainerServer) ContainerWait(container string) (int32, error) {
	ctr, err := c.LookupContainer(container)
	if err != nil {
		return 0, errors.Wrapf(err, "failed to find container %s", container)
	}

	err = wait.PollImmediateInfinite(1,
		func() (bool, error) {
			if !isStopped(c, ctr) {
				return false, nil
			} else { // nolint
				return true, nil // nolint
			} // nolint
		},
	)

	if err != nil {
		return 0, err
	}
	exitCode := ctr.State().ExitCode
	c.ContainerStateToDisk(ctr)
	return exitCode, nil
}
260 oci/container.go
@ -1,260 +0,0 @@
|
|||||||
package oci
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containernetworking/plugins/pkg/ns"
|
|
||||||
"github.com/docker/docker/pkg/signal"
|
|
||||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
"k8s.io/apimachinery/pkg/fields"
|
|
||||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultStopSignal = "TERM"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Container represents a runtime container.
|
|
||||||
type Container struct {
|
|
||||||
id string
|
|
||||||
name string
|
|
||||||
logPath string
|
|
||||||
labels fields.Set
|
|
||||||
annotations fields.Set
|
|
||||||
crioAnnotations fields.Set
|
|
||||||
image string
|
|
||||||
sandbox string
|
|
||||||
netns ns.NetNS
|
|
||||||
terminal bool
|
|
||||||
stdin bool
|
|
||||||
stdinOnce bool
|
|
||||||
privileged bool
|
|
||||||
trusted bool
|
|
||||||
state *ContainerState
|
|
||||||
metadata *pb.ContainerMetadata
|
|
||||||
opLock sync.Locker
|
|
||||||
// this is the /var/run/storage/... directory, erased on reboot
|
|
||||||
bundlePath string
|
|
||||||
// this is the /var/lib/storage/... directory
|
|
||||||
dir string
|
|
||||||
stopSignal string
|
|
||||||
imageName string
|
|
||||||
imageRef string
|
|
||||||
volumes []ContainerVolume
|
|
||||||
mountPoint string
|
|
||||||
spec *specs.Spec
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerVolume is a bind mount for the container.
|
|
||||||
type ContainerVolume struct {
|
|
||||||
ContainerPath string `json:"container_path"`
|
|
||||||
HostPath string `json:"host_path"`
|
|
||||||
Readonly bool `json:"readonly"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerState represents the status of a container.
|
|
||||||
type ContainerState struct {
|
|
||||||
specs.State
|
|
||||||
Created time.Time `json:"created"`
|
|
||||||
Started time.Time `json:"started,omitempty"`
|
|
||||||
Finished time.Time `json:"finished,omitempty"`
|
|
||||||
ExitCode int32 `json:"exitCode,omitempty"`
|
|
||||||
OOMKilled bool `json:"oomKilled,omitempty"`
|
|
||||||
Error string `json:"error,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContainer creates a container object.
|
|
||||||
func NewContainer(id string, name string, bundlePath string, logPath string, netns ns.NetNS, labels map[string]string, crioAnnotations map[string]string, annotations map[string]string, image string, imageName string, imageRef string, metadata *pb.ContainerMetadata, sandbox string, terminal bool, stdin bool, stdinOnce bool, privileged bool, trusted bool, dir string, created time.Time, stopSignal string) (*Container, error) {
|
|
||||||
state := &ContainerState{}
|
|
||||||
state.Created = created
|
|
||||||
c := &Container{
|
|
||||||
id: id,
|
|
||||||
name: name,
|
|
||||||
bundlePath: bundlePath,
|
|
||||||
logPath: logPath,
|
|
||||||
labels: labels,
|
|
||||||
sandbox: sandbox,
|
|
||||||
netns: netns,
|
|
||||||
terminal: terminal,
|
|
||||||
stdin: stdin,
|
|
||||||
stdinOnce: stdinOnce,
|
|
||||||
privileged: privileged,
|
|
||||||
trusted: trusted,
|
|
||||||
metadata: metadata,
|
|
||||||
annotations: annotations,
|
|
||||||
crioAnnotations: crioAnnotations,
|
|
||||||
image: image,
|
|
||||||
imageName: imageName,
|
|
||||||
imageRef: imageRef,
|
|
||||||
dir: dir,
|
|
||||||
state: state,
|
|
||||||
stopSignal: stopSignal,
|
|
||||||
opLock: new(sync.Mutex),
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSpec loads the OCI spec in the container struct
|
|
||||||
func (c *Container) SetSpec(s *specs.Spec) {
|
|
||||||
c.spec = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Spec returns a copy of the spec for the container
|
|
||||||
func (c *Container) Spec() specs.Spec {
|
|
||||||
return *c.spec
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStopSignal returns the container's own stop signal configured from the
|
|
||||||
// image configuration or the default one.
|
|
||||||
func (c *Container) GetStopSignal() string {
|
|
||||||
if c.stopSignal == "" {
|
|
||||||
return defaultStopSignal
|
|
||||||
}
|
|
||||||
cleanSignal := strings.TrimPrefix(strings.ToUpper(c.stopSignal), "SIG")
|
|
||||||
_, ok := signal.SignalMap[cleanSignal]
|
|
||||||
if !ok {
|
|
||||||
return defaultStopSignal
|
|
||||||
}
|
|
||||||
return cleanSignal
|
|
||||||
}
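GetStopSignal upper-cases the configured value, strips a leading "SIG", and falls back to TERM whenever the result is not in docker's signal map. The sketch below mirrors that normalization against a small stand-in table (the table is an assumption; the real code consults signal.SignalMap):

package main

import (
	"fmt"
	"strings"
)

// known stands in for docker's signal.SignalMap consulted by GetStopSignal.
var known = map[string]bool{"TERM": true, "KILL": true, "INT": true, "HUP": true}

func normalizeStopSignal(s string) string {
	const def = "TERM"
	if s == "" {
		return def
	}
	clean := strings.TrimPrefix(strings.ToUpper(s), "SIG")
	if !known[clean] {
		return def
	}
	return clean
}

func main() {
	fmt.Println(normalizeStopSignal("sigkill")) // KILL
	fmt.Println(normalizeStopSignal("WINCH"))   // TERM (not in the stand-in table)
	fmt.Println(normalizeStopSignal(""))        // TERM (default)
}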
|
|
||||||
|
|
||||||
// FromDisk restores container's state from disk
|
|
||||||
func (c *Container) FromDisk() error {
|
|
||||||
jsonSource, err := os.Open(c.StatePath())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer jsonSource.Close()
|
|
||||||
|
|
||||||
dec := json.NewDecoder(jsonSource)
|
|
||||||
return dec.Decode(c.state)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StatePath returns the containers state.json path
|
|
||||||
func (c *Container) StatePath() string {
|
|
||||||
return filepath.Join(c.dir, "state.json")
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreatedAt returns the container creation time
|
|
||||||
func (c *Container) CreatedAt() time.Time {
|
|
||||||
return c.state.Created
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the container.
|
|
||||||
func (c *Container) Name() string {
|
|
||||||
return c.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the id of the container.
|
|
||||||
func (c *Container) ID() string {
|
|
||||||
return c.id
|
|
||||||
}
|
|
||||||
|
|
||||||
// BundlePath returns the bundlePath of the container.
|
|
||||||
func (c *Container) BundlePath() string {
|
|
||||||
return c.bundlePath
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogPath returns the log path of the container.
|
|
||||||
func (c *Container) LogPath() string {
|
|
||||||
return c.logPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// Labels returns the labels of the container.
|
|
||||||
func (c *Container) Labels() map[string]string {
|
|
||||||
return c.labels
|
|
||||||
}
|
|
||||||
|
|
||||||
// Annotations returns the annotations of the container.
|
|
||||||
func (c *Container) Annotations() map[string]string {
|
|
||||||
return c.annotations
|
|
||||||
}
|
|
||||||
|
|
||||||
// CrioAnnotations returns the crio annotations of the container.
|
|
||||||
func (c *Container) CrioAnnotations() map[string]string {
|
|
||||||
return c.crioAnnotations
|
|
||||||
}
|
|
||||||
|
|
||||||
// Image returns the image of the container.
|
|
||||||
func (c *Container) Image() string {
|
|
||||||
return c.image
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageName returns the image name of the container.
|
|
||||||
func (c *Container) ImageName() string {
|
|
||||||
return c.imageName
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageRef returns the image ref of the container.
|
|
||||||
func (c *Container) ImageRef() string {
|
|
||||||
return c.imageRef
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sandbox returns the sandbox name of the container.
|
|
||||||
func (c *Container) Sandbox() string {
|
|
||||||
return c.sandbox
|
|
||||||
}
|
|
||||||
|
|
||||||
// Dir returns the dir of the container
|
|
||||||
func (c *Container) Dir() string {
|
|
||||||
return c.dir
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetNsPath returns the path to the network namespace of the container.
|
|
||||||
func (c *Container) NetNsPath() (string, error) {
|
|
||||||
if c.state == nil {
|
|
||||||
return "", fmt.Errorf("container state is not populated")
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.netns == nil {
|
|
||||||
return fmt.Sprintf("/proc/%d/ns/net", c.state.Pid), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.netns.Path(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata returns the metadata of the container.
|
|
||||||
func (c *Container) Metadata() *pb.ContainerMetadata {
|
|
||||||
return c.metadata
|
|
||||||
}
|
|
||||||
|
|
||||||
// State returns the state of the running container
|
|
||||||
func (c *Container) State() *ContainerState {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
return c.state
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddVolume adds a volume to list of container volumes.
|
|
||||||
func (c *Container) AddVolume(v ContainerVolume) {
|
|
||||||
c.volumes = append(c.volumes, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Volumes returns the list of container volumes.
|
|
||||||
func (c *Container) Volumes() []ContainerVolume {
|
|
||||||
return c.volumes
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMountPoint sets the container mount point
|
|
||||||
func (c *Container) SetMountPoint(mp string) {
|
|
||||||
c.mountPoint = mp
|
|
||||||
}
|
|
||||||
|
|
||||||
// MountPoint returns the container mount point
|
|
||||||
func (c *Container) MountPoint() string {
|
|
||||||
return c.mountPoint
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetState sets the container state
|
|
||||||
//
|
|
||||||
// XXX: DO NOT EVER USE THIS, THIS IS JUST USEFUL FOR MOCKING!!!
|
|
||||||
func (c *Container) SetState(state *ContainerState) {
|
|
||||||
c.state = state
|
|
||||||
}
|
|
@ -1,14 +0,0 @@
// +build !arm,!386

package oci

import (
	"os"
	"syscall"
	"time"
)

func getFinishedTime(fi os.FileInfo) time.Time {
	st := fi.Sys().(*syscall.Stat_t)
	return time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
}
@ -1,14 +0,0 @@
// +build arm 386

package oci

import (
	"os"
	"syscall"
	"time"
)

func getFinishedTime(fi os.FileInfo) time.Time {
	st := fi.Sys().(*syscall.Stat_t)
	return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
}
@ -1,31 +0,0 @@
package oci

import "sort"

// History is a convenience type for storing a list of containers,
// sorted by creation date in descending order.
type History []*Container

// Len returns the number of containers in the history.
func (history *History) Len() int {
	return len(*history)
}

// Less compares two containers and returns true if the second one
// was created before the first one.
func (history *History) Less(i, j int) bool {
	containers := *history
	// FIXME: state access should be serialized
	return containers[j].state.Created.Before(containers[i].state.Created)
}

// Swap switches containers i and j positions in the history.
func (history *History) Swap(i, j int) {
	containers := *history
	containers[i], containers[j] = containers[j], containers[i]
}

// sort orders the history by creation date in descending order.
func (history *History) sort() {
	sort.Sort(history)
}
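Because Less is reversed, sorting a History puts the most recently created container first. A package-internal sketch (the helper name is hypothetical, and the containers are assumed to have been built by NewContainer so their state is populated):

// newestFirst returns the containers ordered newest-first using History's sort.
func newestFirst(ctrs []*Container) []*Container {
	h := History(ctrs)
	h.sort() // sort.Sort with the reversed Less => descending by state.Created
	return h
}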
@ -1,92 +0,0 @@
|
|||||||
package oci
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
// memoryStore implements a Store in memory.
|
|
||||||
type memoryStore struct {
|
|
||||||
s map[string]*Container
|
|
||||||
sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMemoryStore initializes a new memory store.
|
|
||||||
func NewMemoryStore() ContainerStorer {
|
|
||||||
return &memoryStore{
|
|
||||||
s: make(map[string]*Container),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add appends a new container to the memory store.
|
|
||||||
// It overrides the id if it existed before.
|
|
||||||
func (c *memoryStore) Add(id string, cont *Container) {
|
|
||||||
c.Lock()
|
|
||||||
c.s[id] = cont
|
|
||||||
c.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a container from the store by id.
|
|
||||||
func (c *memoryStore) Get(id string) *Container {
|
|
||||||
c.RLock()
|
|
||||||
res := c.s[id]
|
|
||||||
c.RUnlock()
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes a container from the store by id.
|
|
||||||
func (c *memoryStore) Delete(id string) {
|
|
||||||
c.Lock()
|
|
||||||
delete(c.s, id)
|
|
||||||
c.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// List returns a sorted list of containers from the store.
|
|
||||||
// The containers are ordered by creation date.
|
|
||||||
func (c *memoryStore) List() []*Container {
|
|
||||||
containers := History(c.all())
|
|
||||||
containers.sort()
|
|
||||||
return containers
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the number of containers in the store.
|
|
||||||
func (c *memoryStore) Size() int {
|
|
||||||
c.RLock()
|
|
||||||
defer c.RUnlock()
|
|
||||||
return len(c.s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// First returns the first container found in the store by a given filter.
|
|
||||||
func (c *memoryStore) First(filter StoreFilter) *Container {
|
|
||||||
for _, cont := range c.all() {
|
|
||||||
if filter(cont) {
|
|
||||||
return cont
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyAll calls the reducer function with every container in the store.
|
|
||||||
// This operation is asynchronous in the memory store.
|
|
||||||
// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
|
|
||||||
func (c *memoryStore) ApplyAll(apply StoreReducer) {
|
|
||||||
wg := new(sync.WaitGroup)
|
|
||||||
for _, cont := range c.all() {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(container *Container) {
|
|
||||||
apply(container)
|
|
||||||
wg.Done()
|
|
||||||
}(cont)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *memoryStore) all() []*Container {
|
|
||||||
c.RLock()
|
|
||||||
containers := make([]*Container, 0, len(c.s))
|
|
||||||
for _, cont := range c.s {
|
|
||||||
containers = append(containers, cont)
|
|
||||||
}
|
|
||||||
c.RUnlock()
|
|
||||||
return containers
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ ContainerStorer = &memoryStore{}
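A package-internal sketch of driving the store above; the helper name is hypothetical and the two containers are assumed to come from NewContainer elsewhere:

func demoStore(web, db *Container) {
	store := NewMemoryStore()
	store.Add(web.Name(), web)
	store.Add(db.Name(), db)

	_ = store.Get(web.Name()) // lookup uses the same key that was passed to Add

	// First returns an arbitrary matching container, not necessarily the oldest.
	_ = store.First(func(c *Container) bool {
		return c.State().Status == ContainerStateRunning
	})

	// ApplyAll fans out one goroutine per container and waits for all of them,
	// so the reducer must not mutate the store itself.
	store.ApplyAll(func(c *Container) { _ = c.ID() })
}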
|
|
748 oci/oci.go
@ -1,748 +0,0 @@
|
|||||||
package oci
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containerd/cgroups"
|
|
||||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
|
||||||
"github.com/projectatomic/libpod/utils"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
kwait "k8s.io/apimachinery/pkg/util/wait"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ContainerStateCreated represents the created state of a container
|
|
||||||
ContainerStateCreated = "created"
|
|
||||||
// ContainerStatePaused represents the paused state of a container
|
|
||||||
ContainerStatePaused = "paused"
|
|
||||||
// ContainerStateRunning represents the running state of a container
|
|
||||||
ContainerStateRunning = "running"
|
|
||||||
// ContainerStateStopped represents the stopped state of a container
|
|
||||||
ContainerStateStopped = "stopped"
|
|
||||||
// ContainerCreateTimeout represents the value of container creating timeout
|
|
||||||
ContainerCreateTimeout = 240 * time.Second
|
|
||||||
|
|
||||||
// CgroupfsCgroupsManager represents cgroupfs native cgroup manager
|
|
||||||
CgroupfsCgroupsManager = "cgroupfs"
|
|
||||||
// SystemdCgroupsManager represents systemd native cgroup manager
|
|
||||||
SystemdCgroupsManager = "systemd"
|
|
||||||
// ContainerExitsDir is the location of container exit dirs
|
|
||||||
ContainerExitsDir = "/var/run/crio/exits"
|
|
||||||
// ContainerAttachSocketDir is the location for container attach sockets
|
|
||||||
ContainerAttachSocketDir = "/var/run/crio"
|
|
||||||
|
|
||||||
// killContainerTimeout is the timeout that we wait for the container to
|
|
||||||
// be SIGKILLed.
|
|
||||||
killContainerTimeout = 2 * time.Minute
|
|
||||||
)
|
|
||||||
|
|
||||||
// New creates a new Runtime with options provided
|
|
||||||
func New(runtimeTrustedPath string,
|
|
||||||
runtimeUntrustedPath string,
|
|
||||||
trustLevel string,
|
|
||||||
conmonPath string,
|
|
||||||
conmonEnv []string,
|
|
||||||
cgroupManager string,
|
|
||||||
containerExitsDir string,
|
|
||||||
logSizeMax int64,
|
|
||||||
noPivot bool) (*Runtime, error) {
|
|
||||||
r := &Runtime{
|
|
||||||
name: filepath.Base(runtimeTrustedPath),
|
|
||||||
trustedPath: runtimeTrustedPath,
|
|
||||||
untrustedPath: runtimeUntrustedPath,
|
|
||||||
trustLevel: trustLevel,
|
|
||||||
conmonPath: conmonPath,
|
|
||||||
conmonEnv: conmonEnv,
|
|
||||||
cgroupManager: cgroupManager,
|
|
||||||
containerExitsDir: containerExitsDir,
|
|
||||||
logSizeMax: logSizeMax,
|
|
||||||
noPivot: noPivot,
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Runtime stores the information about an OCI runtime
|
|
||||||
type Runtime struct {
|
|
||||||
name string
|
|
||||||
trustedPath string
|
|
||||||
untrustedPath string
|
|
||||||
trustLevel string
|
|
||||||
conmonPath string
|
|
||||||
conmonEnv []string
|
|
||||||
cgroupManager string
|
|
||||||
containerExitsDir string
|
|
||||||
logSizeMax int64
|
|
||||||
noPivot bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncInfo is used to return data from monitor process to daemon
|
|
||||||
type syncInfo struct {
|
|
||||||
Pid int `json:"pid"`
|
|
||||||
Message string `json:"message,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// exitCodeInfo is used to return the monitored process exit code to the daemon
|
|
||||||
type exitCodeInfo struct {
|
|
||||||
ExitCode int32 `json:"exit_code"`
|
|
||||||
Message string `json:"message,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name returns the name of the OCI Runtime
|
|
||||||
func (r *Runtime) Name() string {
|
|
||||||
return r.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Path returns the full path of the OCI Runtime executable.
|
|
||||||
// Depending if the container is privileged and/or trusted,
|
|
||||||
// this will return either the trusted or untrusted runtime path.
|
|
||||||
func (r *Runtime) Path(c *Container) string {
|
|
||||||
if !c.trusted {
|
|
||||||
// We have an explicitly untrusted container.
|
|
||||||
if c.privileged {
|
|
||||||
logrus.Warnf("Running an untrusted but privileged container")
|
|
||||||
return r.trustedPath
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.untrustedPath != "" {
|
|
||||||
return r.untrustedPath
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.trustedPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// Our container is trusted. Let's look at the configured trust level.
|
|
||||||
if r.trustLevel == "trusted" {
|
|
||||||
return r.trustedPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// Our container is trusted, but we are running untrusted.
|
|
||||||
// We will use the untrusted container runtime if it's set
|
|
||||||
// and if it's not a privileged container.
|
|
||||||
if c.privileged || r.untrustedPath == "" {
|
|
||||||
return r.trustedPath
|
|
||||||
}
|
|
||||||
|
|
||||||
return r.untrustedPath
|
|
||||||
}
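Path's trusted/untrusted selection is easiest to read as a few concrete cases. The package-internal sketch below assumes both runtime paths are configured, that the daemon-level trust level is "untrusted", and that fmt is imported; the binary paths and helper name are placeholders:

func pathExamples() {
	r := &Runtime{trustedPath: "/usr/bin/runc", untrustedPath: "/usr/bin/runsc", trustLevel: "untrusted"}

	fmt.Println(r.Path(&Container{trusted: false, privileged: false})) // /usr/bin/runsc
	fmt.Println(r.Path(&Container{trusted: false, privileged: true}))  // /usr/bin/runc (privileged always gets the trusted runtime)
	fmt.Println(r.Path(&Container{trusted: true, privileged: false}))  // /usr/bin/runsc (daemon-level trust is "untrusted")
	fmt.Println(r.Path(&Container{trusted: true, privileged: true}))   // /usr/bin/runc
}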
|
|
||||||
|
|
||||||
// Version returns the version of the OCI Runtime
|
|
||||||
func (r *Runtime) Version() (string, error) {
|
|
||||||
runtimeVersion, err := getOCIVersion(r.trustedPath, "-v")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return runtimeVersion, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getOCIVersion(name string, args ...string) (string, error) {
|
|
||||||
out, err := utils.ExecCmd(name, args...)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
firstLine := out[:strings.Index(out, "\n")]
|
|
||||||
v := firstLine[strings.LastIndex(firstLine, " ")+1:]
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateContainer creates a container.
|
|
||||||
func (r *Runtime) CreateContainer(c *Container, cgroupParent string) (err error) {
|
|
||||||
var stderrBuf bytes.Buffer
|
|
||||||
parentPipe, childPipe, err := newPipe()
|
|
||||||
childStartPipe, parentStartPipe, err := newPipe()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error creating socket pair: %v", err)
|
|
||||||
}
|
|
||||||
defer parentPipe.Close()
|
|
||||||
defer parentStartPipe.Close()
|
|
||||||
|
|
||||||
var args []string
|
|
||||||
if r.cgroupManager == SystemdCgroupsManager {
|
|
||||||
args = append(args, "-s")
|
|
||||||
}
|
|
||||||
args = append(args, "-c", c.id)
|
|
||||||
args = append(args, "-u", c.id)
|
|
||||||
args = append(args, "-r", r.Path(c))
|
|
||||||
args = append(args, "-b", c.bundlePath)
|
|
||||||
args = append(args, "-p", filepath.Join(c.bundlePath, "pidfile"))
|
|
||||||
args = append(args, "-l", c.logPath)
|
|
||||||
args = append(args, "--exit-dir", r.containerExitsDir)
|
|
||||||
args = append(args, "--socket-dir-path", ContainerAttachSocketDir)
|
|
||||||
if r.logSizeMax >= 0 {
|
|
||||||
args = append(args, "--log-size-max", fmt.Sprintf("%v", r.logSizeMax))
|
|
||||||
}
|
|
||||||
if r.noPivot {
|
|
||||||
args = append(args, "--no-pivot")
|
|
||||||
}
|
|
||||||
if c.terminal {
|
|
||||||
args = append(args, "-t")
|
|
||||||
} else if c.stdin {
|
|
||||||
args = append(args, "-i")
|
|
||||||
}
|
|
||||||
logrus.WithFields(logrus.Fields{
|
|
||||||
"args": args,
|
|
||||||
}).Debugf("running conmon: %s", r.conmonPath)
|
|
||||||
|
|
||||||
cmd := exec.Command(r.conmonPath, args...)
|
|
||||||
cmd.Dir = c.bundlePath
|
|
||||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
|
||||||
Setpgid: true,
|
|
||||||
}
|
|
||||||
cmd.Stdin = os.Stdin
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
if c.terminal {
|
|
||||||
cmd.Stderr = &stderrBuf
|
|
||||||
}
|
|
||||||
cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe, childStartPipe)
|
|
||||||
// 0, 1 and 2 are stdin, stdout and stderr
|
|
||||||
cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
|
|
||||||
cmd.Env = append(cmd.Env, fmt.Sprintf("_OCI_STARTPIPE=%d", 4))
|
|
||||||
|
|
||||||
err = cmd.Start()
|
|
||||||
if err != nil {
|
|
||||||
childPipe.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't need childPipe on the parent side
|
|
||||||
childPipe.Close()
|
|
||||||
childStartPipe.Close()
|
|
||||||
|
|
||||||
// Move conmon to specified cgroup
|
|
||||||
if r.cgroupManager == SystemdCgroupsManager {
|
|
||||||
logrus.Infof("Running conmon under slice %s and unitName %s", cgroupParent, createUnitName("crio-conmon", c.id))
|
|
||||||
if err = utils.RunUnderSystemdScope(cmd.Process.Pid, cgroupParent, createUnitName("crio-conmon", c.id)); err != nil {
|
|
||||||
logrus.Warnf("Failed to add conmon to systemd sandbox cgroup: %v", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
control, err := cgroups.New(cgroups.V1, cgroups.StaticPath(filepath.Join(cgroupParent, "/crio-conmon-"+c.id)), &rspec.LinuxResources{})
|
|
||||||
if err != nil {
|
|
||||||
logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
|
|
||||||
} else {
|
|
||||||
// Here we should defer a crio-conmon- cgroup hierarchy deletion, but it will
|
|
||||||
// always fail as conmon's pid is still there.
|
|
||||||
// Fortunately, kubelet takes care of deleting this for us, so the leak will
|
|
||||||
// only happen in the corner case where one does a manual deletion of the container
|
|
||||||
// through e.g. runc. This should be handled by implementing a conmon monitoring
|
|
||||||
// routine that does the cgroup cleanup once conmon is terminated.
|
|
||||||
if err := control.Add(cgroups.Process{Pid: cmd.Process.Pid}); err != nil {
|
|
||||||
logrus.Warnf("Failed to add conmon to cgroupfs sandbox cgroup: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* We set the cgroup, now the child can start creating children */
|
|
||||||
someData := []byte{0}
|
|
||||||
_, err = parentStartPipe.Write(someData)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Wait for initial setup and fork, and reap child */
|
|
||||||
err = cmd.Wait()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// We will delete all container resources if creation fails
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
r.DeleteContainer(c)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Wait to get container pid from conmon
|
|
||||||
type syncStruct struct {
|
|
||||||
si *syncInfo
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
ch := make(chan syncStruct)
|
|
||||||
go func() {
|
|
||||||
var si *syncInfo
|
|
||||||
if err = json.NewDecoder(parentPipe).Decode(&si); err != nil {
|
|
||||||
ch <- syncStruct{err: err}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ch <- syncStruct{si: si}
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case ss := <-ch:
|
|
||||||
if ss.err != nil {
|
|
||||||
return fmt.Errorf("error reading container (probably exited) json message: %v", ss.err)
|
|
||||||
}
|
|
||||||
logrus.Debugf("Received container pid: %d", ss.si.Pid)
|
|
||||||
if ss.si.Pid == -1 {
|
|
||||||
if ss.si.Message != "" {
|
|
||||||
logrus.Errorf("Container creation error: %s", ss.si.Message)
|
|
||||||
return fmt.Errorf("container create failed: %s", ss.si.Message)
|
|
||||||
}
|
|
||||||
logrus.Errorf("Container creation failed")
|
|
||||||
return fmt.Errorf("container create failed")
|
|
||||||
}
|
|
||||||
case <-time.After(ContainerCreateTimeout):
|
|
||||||
logrus.Errorf("Container creation timeout (%v)", ContainerCreateTimeout)
|
|
||||||
return fmt.Errorf("create container timeout")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func createUnitName(prefix string, name string) string {
|
|
||||||
return fmt.Sprintf("%s-%s.scope", prefix, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartContainer starts a container.
|
|
||||||
func (r *Runtime) StartContainer(c *Container) error {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "start", c.id); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.state.Started = time.Now()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecSyncResponse is returned from ExecSync.
|
|
||||||
type ExecSyncResponse struct {
|
|
||||||
Stdout []byte
|
|
||||||
Stderr []byte
|
|
||||||
ExitCode int32
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecSyncError wraps command's streams, exit code and error on ExecSync error.
|
|
||||||
type ExecSyncError struct {
|
|
||||||
Stdout bytes.Buffer
|
|
||||||
Stderr bytes.Buffer
|
|
||||||
ExitCode int32
|
|
||||||
Err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e ExecSyncError) Error() string {
|
|
||||||
return fmt.Sprintf("command error: %+v, stdout: %s, stderr: %s, exit code %d", e.Err, e.Stdout.Bytes(), e.Stderr.Bytes(), e.ExitCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareExec() (pidFile, parentPipe, childPipe *os.File, err error) {
|
|
||||||
parentPipe, childPipe, err = os.Pipe()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
pidFile, err = ioutil.TempFile("", "pidfile")
|
|
||||||
if err != nil {
|
|
||||||
parentPipe.Close()
|
|
||||||
childPipe.Close()
|
|
||||||
return nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseLog(log []byte) (stdout, stderr []byte) {
|
|
||||||
// Split the log on newlines, which is what separates entries.
|
|
||||||
lines := bytes.SplitAfter(log, []byte{'\n'})
|
|
||||||
for _, line := range lines {
|
|
||||||
// Ignore empty lines.
|
|
||||||
if len(line) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// The format of log lines is "DATE pipe REST".
|
|
||||||
parts := bytes.SplitN(line, []byte{' '}, 3)
|
|
||||||
if len(parts) < 3 {
|
|
||||||
// Ignore the line if it's formatted incorrectly, but complain
|
|
||||||
// about it so it can be debugged.
|
|
||||||
logrus.Warnf("hit invalid log format: %q", string(line))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
pipe := string(parts[1])
|
|
||||||
content := parts[2]
|
|
||||||
|
|
||||||
switch pipe {
|
|
||||||
case "stdout":
|
|
||||||
stdout = append(stdout, content...)
|
|
||||||
case "stderr":
|
|
||||||
stderr = append(stderr, content...)
|
|
||||||
default:
|
|
||||||
// Complain about unknown pipes.
|
|
||||||
logrus.Warnf("hit invalid log format [unknown pipe %s]: %q", pipe, string(line))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return stdout, stderr
|
|
||||||
}
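parseLog relies on conmon writing one entry per line in the form "DATE pipe REST", so each line is split on its first two spaces. A self-contained illustration of that split (the timestamp is made up):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// One well-formed entry in the "DATE pipe REST" shape parseLog expects.
	line := []byte("2018-01-01T00:00:00.000000000Z stdout hello world\n")
	parts := bytes.SplitN(line, []byte{' '}, 3)
	fmt.Printf("pipe=%s content=%q\n", parts[1], parts[2])
	// Output: pipe=stdout content="hello world\n"
}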
|
|
||||||
|
|
||||||
// ExecSync execs a command in a container and returns its stdout, stderr and exit code.
|
|
||||||
func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp *ExecSyncResponse, err error) {
|
|
||||||
pidFile, parentPipe, childPipe, err := prepareExec()
|
|
||||||
if err != nil {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
defer parentPipe.Close()
|
|
||||||
defer func() {
|
|
||||||
if e := os.Remove(pidFile.Name()); e != nil {
|
|
||||||
logrus.Warnf("could not remove temporary PID file %s", pidFile.Name())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
logFile, err := ioutil.TempFile("", "crio-log-"+c.id)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logPath := logFile.Name()
|
|
||||||
defer func() {
|
|
||||||
logFile.Close()
|
|
||||||
os.RemoveAll(logPath)
|
|
||||||
}()
|
|
||||||
|
|
||||||
f, err := ioutil.TempFile("", "exec-process")
|
|
||||||
if err != nil {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(f.Name())
|
|
||||||
|
|
||||||
var args []string
|
|
||||||
args = append(args, "-c", c.id)
|
|
||||||
args = append(args, "-r", r.Path(c))
|
|
||||||
args = append(args, "-p", pidFile.Name())
|
|
||||||
args = append(args, "-e")
|
|
||||||
if c.terminal {
|
|
||||||
args = append(args, "-t")
|
|
||||||
}
|
|
||||||
if timeout > 0 {
|
|
||||||
args = append(args, "-T")
|
|
||||||
args = append(args, fmt.Sprintf("%d", timeout))
|
|
||||||
}
|
|
||||||
args = append(args, "-l", logPath)
|
|
||||||
args = append(args, "--socket-dir-path", ContainerAttachSocketDir)
|
|
||||||
|
|
||||||
pspec := c.Spec().Process
|
|
||||||
pspec.Env = append(pspec.Env, r.conmonEnv...)
|
|
||||||
pspec.Args = command
|
|
||||||
processJSON, err := json.Marshal(pspec)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ioutil.WriteFile(f.Name(), processJSON, 0644); err != nil {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
args = append(args, "--exec-process-spec", f.Name())
|
|
||||||
|
|
||||||
cmd := exec.Command(r.conmonPath, args...)
|
|
||||||
|
|
||||||
var stdoutBuf, stderrBuf bytes.Buffer
|
|
||||||
cmd.Stdout = &stdoutBuf
|
|
||||||
cmd.Stderr = &stderrBuf
|
|
||||||
cmd.ExtraFiles = append(cmd.ExtraFiles, childPipe)
|
|
||||||
// 0, 1 and 2 are stdin, stdout and stderr
|
|
||||||
cmd.Env = append(r.conmonEnv, fmt.Sprintf("_OCI_SYNCPIPE=%d", 3))
|
|
||||||
|
|
||||||
err = cmd.Start()
|
|
||||||
if err != nil {
|
|
||||||
childPipe.Close()
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
Stdout: stdoutBuf,
|
|
||||||
Stderr: stderrBuf,
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't need childPipe on the parent side
|
|
||||||
childPipe.Close()
|
|
||||||
|
|
||||||
err = cmd.Wait()
|
|
||||||
if err != nil {
|
|
||||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
|
||||||
if status, ok := exitErr.Sys().(unix.WaitStatus); ok {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
Stdout: stdoutBuf,
|
|
||||||
Stderr: stderrBuf,
|
|
||||||
ExitCode: int32(status.ExitStatus()),
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
Stdout: stdoutBuf,
|
|
||||||
Stderr: stderrBuf,
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var ec *exitCodeInfo
|
|
||||||
if err := json.NewDecoder(parentPipe).Decode(&ec); err != nil {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
Stdout: stdoutBuf,
|
|
||||||
Stderr: stderrBuf,
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logrus.Infof("Received container exit code: %v, message: %s", ec.ExitCode, ec.Message)
|
|
||||||
|
|
||||||
if ec.ExitCode == -1 {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
Stdout: stdoutBuf,
|
|
||||||
Stderr: stderrBuf,
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: fmt.Errorf(ec.Message),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The actual logged output is not the same as stdoutBuf and stderrBuf,
|
|
||||||
// which are used for getting error information. For the actual
|
|
||||||
// ExecSyncResponse we have to read the logfile.
|
|
||||||
// XXX: Currently runC dups the same console over both stdout and stderr,
|
|
||||||
// so we can't differentiate between the two.
|
|
||||||
|
|
||||||
logBytes, err := ioutil.ReadFile(logPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ExecSyncError{
|
|
||||||
Stdout: stdoutBuf,
|
|
||||||
Stderr: stderrBuf,
|
|
||||||
ExitCode: -1,
|
|
||||||
Err: err,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We have to parse the log output into {stdout, stderr} buffers.
|
|
||||||
stdoutBytes, stderrBytes := parseLog(logBytes)
|
|
||||||
return &ExecSyncResponse{
|
|
||||||
Stdout: stdoutBytes,
|
|
||||||
Stderr: stderrBytes,
|
|
||||||
ExitCode: ec.ExitCode,
|
|
||||||
}, nil
|
|
||||||
}
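A hedged sketch of calling ExecSync from code that already holds a *Runtime and a created *Container; the command, the five-second timeout, and the helper and package names are illustrative only:

package caller

import (
	"fmt"

	"github.com/projectatomic/libpod/oci"
)

// runInContainer is a hypothetical caller; r and c come from runtime setup elsewhere.
func runInContainer(r *oci.Runtime, c *oci.Container) error {
	resp, err := r.ExecSync(c, []string{"/bin/ls", "/"}, 5)
	if err != nil {
		// On failure ExecSync returns an ExecSyncError carrying the captured
		// stdout/stderr and the exit code alongside the underlying error.
		return err
	}
	if resp.ExitCode != 0 {
		return fmt.Errorf("command failed with %d: %s", resp.ExitCode, resp.Stderr)
	}
	fmt.Printf("%s", resp.Stdout)
	return nil
}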
|
|
||||||
|
|
||||||
func waitContainerStop(ctx context.Context, c *Container, timeout time.Duration) error {
|
|
||||||
done := make(chan struct{})
|
|
||||||
// we could potentially re-use "done" channel to exit the loop on timeout
|
|
||||||
// but we use another channel "chControl" so that we never run into the
// case where the "done" channel is closed in the "default" select case and we also
|
|
||||||
// reach the timeout in the select below. If that happens we could raise
|
|
||||||
// a panic closing a closed channel so better be safe and use another new
|
|
||||||
// channel just to control the loop.
|
|
||||||
chControl := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-chControl:
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
// Check if the process is still around
|
|
||||||
err := unix.Kill(c.state.Pid, 0)
|
|
||||||
if err == unix.ESRCH {
|
|
||||||
close(done)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
time.Sleep(100 * time.Millisecond)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
return nil
|
|
||||||
case <-ctx.Done():
|
|
||||||
close(chControl)
|
|
||||||
return ctx.Err()
|
|
||||||
case <-time.After(timeout):
|
|
||||||
close(chControl)
|
|
||||||
err := unix.Kill(c.state.Pid, unix.SIGKILL)
|
|
||||||
if err != nil && err != unix.ESRCH {
|
|
||||||
return fmt.Errorf("failed to kill process: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.state.Finished = time.Now()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopContainer stops a container. Timeout is given in seconds.
|
|
||||||
func (r *Runtime) StopContainer(ctx context.Context, c *Container, timeout int64) error {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
|
|
||||||
// Check if the process is around before sending a signal
|
|
||||||
err := unix.Kill(c.state.Pid, 0)
|
|
||||||
if err == unix.ESRCH {
|
|
||||||
c.state.Finished = time.Now()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if timeout > 0 {
|
|
||||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", c.id, c.GetStopSignal()); err != nil {
|
|
||||||
return fmt.Errorf("failed to stop container %s, %v", c.id, err)
|
|
||||||
}
|
|
||||||
err = waitContainerStop(ctx, c, time.Duration(timeout)*time.Second)
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
logrus.Warnf("Stop container %q timed out: %v", c.ID(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.Path(c), "kill", "--all", c.id, "KILL"); err != nil {
|
|
||||||
return fmt.Errorf("failed to stop container %s, %v", c.id, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return waitContainerStop(ctx, c, killContainerTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteContainer deletes a container.
|
|
||||||
func (r *Runtime) DeleteContainer(c *Container) error {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
_, err := utils.ExecCmd(r.Path(c), "delete", "--force", c.id)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStartFailed sets the container state appropriately after a start failure
|
|
||||||
func (r *Runtime) SetStartFailed(c *Container, err error) {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
// adjust finished and started times
|
|
||||||
c.state.Finished, c.state.Started = c.state.Created, c.state.Created
|
|
||||||
c.state.Error = err.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateStatus refreshes the status of the container.
|
|
||||||
func (r *Runtime) UpdateStatus(c *Container) error {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
out, err := exec.Command(r.Path(c), "state", c.id).CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
// there are many code paths that could lead to have a bad state in the
|
|
||||||
// underlying runtime.
|
|
||||||
// On any error like a container went away or we rebooted and containers
|
|
||||||
// went away we do not error out stopping kubernetes to recover.
|
|
||||||
// We always populate the fields below so kube can restart/reschedule
|
|
||||||
// containers failing.
|
|
||||||
c.state.Status = ContainerStateStopped
|
|
||||||
c.state.Finished = time.Now()
|
|
||||||
c.state.ExitCode = 255
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(&c.state); err != nil {
|
|
||||||
return fmt.Errorf("failed to decode container status for %s: %s", c.id, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.state.Status == ContainerStateStopped {
|
|
||||||
exitFilePath := filepath.Join(r.containerExitsDir, c.id)
|
|
||||||
var fi os.FileInfo
|
|
||||||
err = kwait.ExponentialBackoff(
|
|
||||||
kwait.Backoff{
|
|
||||||
Duration: 500 * time.Millisecond,
|
|
||||||
Factor: 1.2,
|
|
||||||
Steps: 6,
|
|
||||||
},
|
|
||||||
func() (bool, error) {
|
|
||||||
var err error
|
|
||||||
fi, err = os.Stat(exitFilePath)
|
|
||||||
if err != nil {
|
|
||||||
// wait longer
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
logrus.Warnf("failed to find container exit file: %v", err)
|
|
||||||
c.state.ExitCode = -1
|
|
||||||
} else {
|
|
||||||
c.state.Finished = getFinishedTime(fi)
|
|
||||||
statusCodeStr, err := ioutil.ReadFile(exitFilePath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read exit file: %v", err)
|
|
||||||
}
|
|
||||||
statusCode, err := strconv.Atoi(string(statusCodeStr))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("status code conversion failed: %v", err)
|
|
||||||
}
|
|
||||||
c.state.ExitCode = int32(statusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
oomFilePath := filepath.Join(c.bundlePath, "oom")
|
|
||||||
if _, err = os.Stat(oomFilePath); err == nil {
|
|
||||||
c.state.OOMKilled = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerStatus returns the state of a container.
|
|
||||||
func (r *Runtime) ContainerStatus(c *Container) *ContainerState {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
return c.state
|
|
||||||
}
|
|
||||||
|
|
||||||
// newPipe creates a unix socket pair for communication
|
|
||||||
func newPipe() (parent *os.File, child *os.File, err error) {
|
|
||||||
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
|
|
||||||
}
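newPipe hands the child end of an AF_LOCAL socketpair to conmon through cmd.ExtraFiles, which makes it file descriptor 3 in the child (after stdin, stdout and stderr) and is why _OCI_SYNCPIPE is set to 3 above. A minimal standalone sketch of the same pattern, with a shell command standing in for conmon:

package main

import (
	"fmt"
	"os"
	"os/exec"

	"golang.org/x/sys/unix"
)

func newPipe() (parent, child *os.File, err error) {
	fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
	if err != nil {
		return nil, nil, err
	}
	return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
}

func main() {
	parent, child, err := newPipe()
	if err != nil {
		panic(err)
	}
	defer parent.Close()

	// The first ExtraFiles entry becomes fd 3 in the child process.
	cmd := exec.Command("sh", "-c", "echo ready >&3")
	cmd.ExtraFiles = []*os.File{child}
	cmd.Env = append(os.Environ(), "_OCI_SYNCPIPE=3")
	if err := cmd.Run(); err != nil {
		panic(err)
	}
	child.Close() // the parent no longer needs the child end once the child has exited

	buf := make([]byte, 64)
	n, _ := parent.Read(buf)
	fmt.Printf("child wrote: %s", buf[:n])
}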
|
|
||||||
|
|
||||||
// RuntimeReady checks if the runtime is up and ready to accept
|
|
||||||
// basic containers, e.g. a container that only needs host networking.
|
|
||||||
func (r *Runtime) RuntimeReady() (bool, error) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkReady checks if the runtime network is up and ready to
|
|
||||||
// accept containers which require container networking.
|
|
||||||
func (r *Runtime) NetworkReady() (bool, error) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PauseContainer pauses a container.
|
|
||||||
func (r *Runtime) PauseContainer(c *Container) error {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
_, err := utils.ExecCmd(r.Path(c), "pause", c.id)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnpauseContainer unpauses a container.
|
|
||||||
func (r *Runtime) UnpauseContainer(c *Container) error {
|
|
||||||
c.opLock.Lock()
|
|
||||||
defer c.opLock.Unlock()
|
|
||||||
_, err := utils.ExecCmd(r.Path(c), "resume", c.id)
|
|
||||||
return err
|
|
||||||
}
|
|
27 oci/store.go
@ -1,27 +0,0 @@
package oci

// StoreFilter defines a function to filter
// containers in the store.
type StoreFilter func(*Container) bool

// StoreReducer defines a function to
// manipulate containers in the store.
type StoreReducer func(*Container)

// ContainerStorer defines an interface that any container store must implement.
type ContainerStorer interface {
	// Add appends a new container to the store.
	Add(string, *Container)
	// Get returns a container from the store by the identifier it was stored with.
	Get(string) *Container
	// Delete removes a container from the store by the identifier it was stored with.
	Delete(string)
	// List returns a list of containers from the store.
	List() []*Container
	// Size returns the number of containers in the store.
	Size() int
	// First returns the first container found in the store by a given filter.
	First(StoreFilter) *Container
	// ApplyAll calls the reducer function with every container in the store.
	ApplyAll(StoreReducer)
}