Mirror of https://github.com/containers/podman.git
Prune Server package. Convert to new github location.
Signed-off-by: Matthew Heon <matthew.heon@gmail.com>

@@ -1,4 +1,4 @@
-# Contributing to CRI-O
+# Contributing to Libpod
 
 We'd love to have you join the community! Below summarizes the processes
 that we follow.
@@ -13,7 +13,7 @@ that we follow.
 ## Reporting Issues
 
 Before reporting an issue, check our backlog of
-[open issues](https://github.com/kubernetes-incubator/cri-o/issues)
+[open issues](https://github.com/projectatomic/libpod/issues)
 to see if someone else has already reported it. If so, feel free to add
 your scenario, or additional information, to the discussion. Or simply
 "subscribe" to it to be notified when it is updated.
@@ -120,9 +120,9 @@ IRC group on `irc.freenode.net` called `cri-o`
 that has been setup.
 
 For discussions around issues/bugs and features, you can use the github
-[issues](https://github.com/kubernetes-incubator/cri-o/issues)
+[issues](https://github.com/projectatomic/libpod/issues)
 and
-[PRs](https://github.com/kubernetes-incubator/cri-o/pulls)
+[PRs](https://github.com/projectatomic/libpod/pulls)
 tracking system.
 
 <!--
@@ -112,6 +112,6 @@ RUN mkdir -p /etc/containers
 COPY test/policy.json /etc/containers/policy.json
 COPY test/redhat_sigstore.yaml /etc/containers/registries.d/registry.access.redhat.com.yaml
 
-WORKDIR /go/src/github.com/kubernetes-incubator/cri-o
+WORKDIR /go/src/github.com/projectatomic/libpod
 
-ADD . /go/src/github.com/kubernetes-incubator/cri-o
+ADD . /go/src/github.com/projectatomic/libpod
 Makefile | 4 ++--
@@ -1,6 +1,6 @@
 GO ?= go
 EPOCH_TEST_COMMIT ?= 1cc5a27
-PROJECT := github.com/kubernetes-incubator/cri-o
+PROJECT := github.com/projectatomic/libpod
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
 CRIO_IMAGE := crio_dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
@@ -17,7 +17,7 @@ BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
 OCIUMOUNTINSTALLDIR=$(PREFIX)/share/oci-umount/oci-umount.d
 
 SELINUXOPT ?= $(shell test -x /usr/sbin/selinuxenabled && selinuxenabled && echo -Z)
-PACKAGES ?= $(shell go list -tags "${BUILDTAGS}" ./... | grep -v github.com/kubernetes-incubator/cri-o/vendor)
+PACKAGES ?= $(shell go list -tags "${BUILDTAGS}" ./... | grep -v github.com/projectatomic/libpod/vendor)
 
 COMMIT_NO := $(shell git rev-parse HEAD 2> /dev/null || true)
 GIT_COMMIT := $(if $(shell git status --porcelain --untracked-files=no),"${COMMIT_NO}-dirty","${COMMIT_NO}")
@@ -9,9 +9,8 @@ import (
 	is "github.com/containers/image/storage"
 	"github.com/containers/storage"
 	"github.com/fatih/camelcase"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
-	"github.com/kubernetes-incubator/cri-o/libpod"
-	"github.com/kubernetes-incubator/cri-o/server"
+	"github.com/projectatomic/libpod/libkpod"
+	"github.com/projectatomic/libpod/libpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -20,6 +19,8 @@ var (
 	stores = make(map[storage.Store]struct{})
 )
 
+const CrioConfigPath = "/etc/crio/crio.conf"
+
 func getStore(c *libkpod.Config) (storage.Store, error) {
 	options := storage.DefaultStoreOptions
 	options.GraphRoot = c.Root
@@ -65,8 +66,8 @@ func getConfig(c *cli.Context) (*libkpod.Config, error) {
 	var configFile string
 	if c.GlobalIsSet("config") {
 		configFile = c.GlobalString("config")
-	} else if _, err := os.Stat(server.CrioConfigPath); err == nil {
-		configFile = server.CrioConfigPath
+	} else if _, err := os.Stat(CrioConfigPath); err == nil {
+		configFile = CrioConfigPath
 	}
 	// load and merge the configfile from the commandline or use
 	// the default crio config file
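
The hunk group above is where the "Prune Server package" half of this commit shows up: kpod stops importing the cri-o `server` package and declares `CrioConfigPath` locally instead. Below is a minimal sketch of the resulting config-file selection; `resolveConfigFile` is a hypothetical helper written only for illustration and stands in for the real `getConfig` logic, which reads the flag from a `*cli.Context`.

```go
package main

import (
	"fmt"
	"os"
)

// CrioConfigPath is now a local constant rather than an import from the
// removed server package.
const CrioConfigPath = "/etc/crio/crio.conf"

// resolveConfigFile mirrors the selection shown in the hunk: an explicit
// --config value wins; otherwise the default path is used only if it exists.
func resolveConfigFile(explicit string) string {
	if explicit != "" {
		return explicit
	}
	if _, err := os.Stat(CrioConfigPath); err == nil {
		return CrioConfigPath
	}
	return ""
}

func main() {
	// With no explicit flag value, fall back to the default path if present.
	fmt.Println(resolveConfigFile(""))
}
```
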
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/containers/storage/pkg/archive"
-	"github.com/kubernetes-incubator/cri-o/cmd/kpod/formats"
+	"github.com/projectatomic/libpod/cmd/kpod/formats"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -8,7 +8,7 @@ import (
 
 	"github.com/containers/image/types"
 	units "github.com/docker/go-units"
-	"github.com/kubernetes-incubator/cri-o/cmd/kpod/formats"
+	"github.com/projectatomic/libpod/cmd/kpod/formats"
 	"github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
@@ -9,9 +9,9 @@ import (
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/docker/go-units"
-	"github.com/kubernetes-incubator/cri-o/cmd/kpod/formats"
-	"github.com/kubernetes-incubator/cri-o/libpod"
-	"github.com/kubernetes-incubator/cri-o/libpod/common"
+	"github.com/projectatomic/libpod/cmd/kpod/formats"
+	"github.com/projectatomic/libpod/libpod"
+	"github.com/projectatomic/libpod/libpod/common"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
@@ -8,7 +8,7 @@ import (
 	"runtime"
 
 	"github.com/docker/docker/pkg/system"
-	"github.com/kubernetes-incubator/cri-o/cmd/kpod/formats"
+	"github.com/projectatomic/libpod/cmd/kpod/formats"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -1,9 +1,9 @@
 package main
 
 import (
-	"github.com/kubernetes-incubator/cri-o/cmd/kpod/formats"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
-	"github.com/kubernetes-incubator/cri-o/libpod/images"
+	"github.com/projectatomic/libpod/cmd/kpod/formats"
+	"github.com/projectatomic/libpod/libkpod"
+	"github.com/projectatomic/libpod/libpod/images"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -5,7 +5,7 @@ import (
 	"os"
 
 	"github.com/docker/docker/pkg/signal"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -5,7 +5,7 @@ import (
 	"io/ioutil"
 	"os"
 
-	"github.com/kubernetes-incubator/cri-o/libpod"
+	"github.com/projectatomic/libpod/libpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -9,7 +9,7 @@ import (
 
 	"github.com/containers/image/docker"
 	"github.com/containers/image/pkg/docker/config"
-	"github.com/kubernetes-incubator/cri-o/libpod/common"
+	"github.com/projectatomic/libpod/libpod/common"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"golang.org/x/crypto/ssh/terminal"
@@ -4,7 +4,7 @@ import (
 	"fmt"
 
 	"github.com/containers/image/pkg/docker/config"
-	"github.com/kubernetes-incubator/cri-o/libpod/common"
+	"github.com/projectatomic/libpod/libpod/common"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -4,7 +4,7 @@ import (
 	js "encoding/json"
 	"fmt"
 
-	of "github.com/kubernetes-incubator/cri-o/cmd/kpod/formats"
+	of "github.com/projectatomic/libpod/cmd/kpod/formats"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -2,7 +2,7 @@ package main
 
 import (
 	"fmt"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"os"
@@ -15,9 +15,9 @@ import (
 
 	"k8s.io/apimachinery/pkg/fields"
 
-	"github.com/kubernetes-incubator/cri-o/cmd/kpod/formats"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
-	"github.com/kubernetes-incubator/cri-o/oci"
+	"github.com/projectatomic/libpod/cmd/kpod/formats"
+	"github.com/projectatomic/libpod/libkpod"
+	"github.com/projectatomic/libpod/oci"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -8,8 +8,8 @@ import (
 	"golang.org/x/crypto/ssh/terminal"
 
 	"github.com/containers/image/types"
-	"github.com/kubernetes-incubator/cri-o/libpod"
-	"github.com/kubernetes-incubator/cri-o/libpod/common"
+	"github.com/projectatomic/libpod/libpod"
+	"github.com/projectatomic/libpod/libpod/common"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
@@ -7,8 +7,8 @@ import (
 
 	"github.com/containers/image/types"
 	"github.com/containers/storage/pkg/archive"
-	"github.com/kubernetes-incubator/cri-o/libpod"
-	"github.com/kubernetes-incubator/cri-o/libpod/common"
+	"github.com/projectatomic/libpod/libpod"
+	"github.com/projectatomic/libpod/libpod/common"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"golang.org/x/crypto/ssh/terminal"
@@ -1,7 +1,7 @@
 package main
 
 import (
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -3,7 +3,7 @@ package main
 import (
 	"fmt"
 
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"golang.org/x/net/context"
@@ -4,7 +4,7 @@ import (
 	"io"
 	"os"
 
-	"github.com/kubernetes-incubator/cri-o/libpod"
+	"github.com/projectatomic/libpod/libpod"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/urfave/cli"
@@ -11,8 +11,8 @@ import (
 	"github.com/docker/go-units"
 
 	tm "github.com/buger/goterm"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
-	"github.com/kubernetes-incubator/cri-o/oci"
+	"github.com/projectatomic/libpod/libkpod"
+	"github.com/projectatomic/libpod/oci"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"golang.org/x/net/context"
@@ -3,7 +3,7 @@ package main
 import (
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/storage"
-	"github.com/kubernetes-incubator/cri-o/libpod"
+	"github.com/projectatomic/libpod/libpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -2,7 +2,7 @@ package main
 
 import (
 	"fmt"
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 	"os"
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/kubernetes-incubator/cri-o/libkpod"
+	"github.com/projectatomic/libpod/libkpod"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -1,14 +0,0 @@
-.PHONY: dist
-dist: crio.spec
-	spectool -g crio.spec
-
-.PHONY: rpm
-rpm: dist
-	rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \
-		--define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -ba crio.spec
-
-all: rpm
-
-clean:
-	rm -f *rpm *gz
-	rm -rf x86_64
@@ -1,76 +0,0 @@
-%define debug_package %{nil}
-%global provider github
-%global provider_tld com
-%global project kubernetes-incubator
-%global repo cri-o
-%global Name crio
-# https://github.com/kubernetes-incubator/cri-o
-%global provider_prefix %{provider}.%{provider_tld}/%{project}/%{repo}
-%global import_path %{provider_prefix}
-%global commit 8ba639952a95f2e24cc98987689138b67545576c
-%global shortcommit %(c=%{commit}; echo ${c:0:7})
-
-Name: %{Name}
-Version: 0.0.1
-Release: 1.git%{shortcommit}%{?dist}
-Summary: Kubelet Container Runtime Interface (CRI) for OCI runtimes.
-Group: Applications/Text
-License: Apache 2.0
-URL: https://%{provider_prefix}
-Source0: https://%{provider_prefix}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz
-Provides: %{repo}
-
-BuildRequires: golang-github-cpuguy83-go-md2man
-
-%description
-The crio package provides an implementation of the
-Kubelet Container Runtime Interface (CRI) using OCI conformant runtimes.
-
-crio provides following functionalities:
-
-Support multiple image formats including the existing Docker image format
-Support for multiple means to download images including trust & image verification
-Container image management (managing image layers, overlay filesystems, etc)
-Container process lifecycle management
-Monitoring and logging required to satisfy the CRI
-Resource isolation as required by the CRI
-
-%prep
-%setup -q -n %{repo}-%{commit}
-
-%build
-make all
-
-%install
-%make_install
-%make_install install.systemd
-
-#define license tag if not already defined
-%{!?_licensedir:%global license %doc}
-%files
-%{_bindir}/crio
-%{_bindir}/crioctl
-%{_mandir}/man5/crio.conf.5*
-%{_mandir}/man8/crio.8*
-%{_sysconfdir}/crio.conf
-%{_sysconfdir}/seccomp.json
-%dir /%{_libexecdir}/crio
-/%{_libexecdir}/crio/conmon
-/%{_libexecdir}/crio/pause
-%{_unitdir}/crio.service
-%doc README.md
-%license LICENSE
-%dir /usr/share/oci-umount/oci-umount.d
-/usr/share/oci-umount/oci-umount.d/cri-umount.conf
-
-
-%preun
-%systemd_preun %{Name}
-
-%postun
-%systemd_postun_with_restart %{Name}
-
-%changelog
-* Mon Oct 31 2016 Dan Walsh <dwalsh@redhat.com> - 0.0.1
-- Initial RPM release
-
@@ -1,14 +0,0 @@
-[Unit]
-Description=Shutdown CRIO containers before shutting down the system
-Wants=crio.service
-After=crio.service
-Documentation=man:crio(8)
-
-[Service]
-Type=oneshot
-ExecStart=/usr/bin/true
-ExecStop=mkdir -p /var/lib/crio; touch /var/lib/crio/crio.shutdown
-RemainAfterExit=yes
-
-[Install]
-WantedBy=multi-user.target
@@ -1,24 +0,0 @@
-[Unit]
-Description=Open Container Initiative Daemon
-Documentation=https://github.com/kubernetes-incubator/cri-o
-After=network-online.target
-
-[Service]
-Type=notify
-EnvironmentFile=-/etc/sysconfig/crio-storage
-EnvironmentFile=-/etc/sysconfig/crio-network
-Environment=GOTRACEBACK=crash
-ExecStart=/usr/local/bin/crio \
-          $CRIO_STORAGE_OPTIONS \
-          $CRIO_NETWORK_OPTIONS
-ExecReload=/bin/kill -s HUP $MAINPID
-TasksMax=infinity
-LimitNOFILE=1048576
-LimitNPROC=1048576
-LimitCORE=infinity
-OOMScoreAdjust=-999
-TimeoutStartSec=0
-Restart=on-abnormal
-
-[Install]
-WantedBy=multi-user.target
@@ -1,21 +0,0 @@
-# Fedora and RHEL Integration and End-to-End Tests
-
-This directory contains playbooks to set up for and run the integration and
-end-to-end tests for CRI-O on RHEL and Fedora hosts. Two entrypoints exist:
-
-- `main.yml`: sets up the machine and runs tests
-- `results.yml`: gathers test output to `/tmp/artifacts`
-
-When running `main.yml`, three tags are present:
-
-- `setup`: run all tasks to set up the system for testing
-- `e2e`: build CRI-O from source and run Kubernetes node E2Es
-- `integration`: build CRI-O from source and run the local integration suite
-
-The playbooks assume the following things about your system:
-
-- on RHEL, the server and extras repos are configured and certs are present
-- `ansible` is installed and the host is boot-strapped to allow `ansible` to run against it
-- the `$GOPATH` is set and present for all shells (*e.g.* written in `/etc/environment`)
-- CRI-O is checked out to the correct state at `${GOPATH}/src/github.com/kubernetes-incubator/cri-o`
-- the user running the playbook has access to passwordless `sudo`
|
||||
# config file for ansible -- http://ansible.com/
|
||||
# ==============================================
|
||||
|
||||
# nearly all parameters can be overridden in ansible-playbook
|
||||
# or with command line flags. ansible will read ANSIBLE_CONFIG,
|
||||
# ansible.cfg in the current working directory, .ansible.cfg in
|
||||
# the home directory or /etc/ansible/ansible.cfg, whichever it
|
||||
# finds first
|
||||
|
||||
[defaults]
|
||||
|
||||
# some basic default values...
|
||||
|
||||
#inventory = inventory
|
||||
#library = /usr/share/my_modules/
|
||||
#remote_tmp = $HOME/.ansible/tmp
|
||||
#local_tmp = .ansible/tmp
|
||||
#forks = 5
|
||||
forks = 10
|
||||
#poll_interval = 15
|
||||
#sudo_user = root
|
||||
#ask_sudo_pass = True
|
||||
ask_sudo_pass = False
|
||||
#ask_pass = True
|
||||
ask_pass = False
|
||||
#transport = smart
|
||||
#remote_port = 22
|
||||
#module_lang = C
|
||||
#module_set_locale = True
|
||||
|
||||
# plays will gather facts by default, which contain information about
|
||||
# the remote system.
|
||||
#
|
||||
# smart - gather by default, but don't regather if already gathered
|
||||
# implicit - gather by default, turn off with gather_facts: False
|
||||
# explicit - do not gather by default, must say gather_facts: True
|
||||
#gathering = implicit
|
||||
gathering = smart
|
||||
|
||||
# by default retrieve all facts subsets
|
||||
# all - gather all subsets
|
||||
# network - gather min and network facts
|
||||
# hardware - gather hardware facts (longest facts to retrieve)
|
||||
# virtual - gather min and virtual facts
|
||||
# facter - import facts from facter
|
||||
# ohai - import facts from ohai
|
||||
# You can combine them using comma (ex: network,virtual)
|
||||
# You can negate them using ! (ex: !hardware,!facter,!ohai)
|
||||
# A minimal set of facts is always gathered.
|
||||
gather_subset = network
|
||||
|
||||
# additional paths to search for roles in, colon separated
|
||||
# N/B: This depends on how ansible is called
|
||||
#roles_path = $WORKSPACE/kommandir_workspace/roles
|
||||
|
||||
# uncomment this to disable SSH key host checking
|
||||
#host_key_checking = False
|
||||
host_key_checking = False
|
||||
|
||||
# change the default callback
|
||||
#stdout_callback = skippy
|
||||
# enable additional callbacks
|
||||
#callback_whitelist = timer, mail
|
||||
|
||||
# Determine whether includes in tasks and handlers are "static" by
|
||||
# default. As of 2.0, includes are dynamic by default. Setting these
|
||||
# values to True will make includes behave more like they did in the
|
||||
# 1.x versions.
|
||||
task_includes_static = True
|
||||
handler_includes_static = True
|
||||
|
||||
# change this for alternative sudo implementations
|
||||
#sudo_exe = sudo
|
||||
|
||||
# What flags to pass to sudo
|
||||
# WARNING: leaving out the defaults might create unexpected behaviours
|
||||
#sudo_flags = -H -S -n
|
||||
|
||||
# SSH timeout
|
||||
#timeout = 10
|
||||
|
||||
# default user to use for playbooks if user is not specified
|
||||
# (/usr/bin/ansible will use current user as default)
|
||||
#remote_user = root
|
||||
remote_user = root
|
||||
|
||||
# logging is off by default unless this path is defined
|
||||
# if so defined, consider logrotate
|
||||
log_path = $ARTIFACTS/main.log
|
||||
|
||||
# default module name for /usr/bin/ansible
|
||||
#module_name = command
|
||||
|
||||
# use this shell for commands executed under sudo
|
||||
# you may need to change this to bin/bash in rare instances
|
||||
# if sudo is constrained
|
||||
# executable = /bin/sh
|
||||
|
||||
# if inventory variables overlap, does the higher precedence one win
|
||||
# or are hash values merged together? The default is 'replace' but
|
||||
# this can also be set to 'merge'.
|
||||
hash_behaviour = replace
|
||||
|
||||
# by default, variables from roles will be visible in the global variable
|
||||
# scope. To prevent this, the following option can be enabled, and only
|
||||
# tasks and handlers within the role will see the variables there
|
||||
private_role_vars = False
|
||||
|
||||
# list any Jinja2 extensions to enable here:
|
||||
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
|
||||
|
||||
# if set, always use this private key file for authentication, same as
|
||||
# if passing --private-key to ansible or ansible-playbook
|
||||
#private_key_file = /path/to/file
|
||||
|
||||
# If set, configures the path to the Vault password file as an alternative to
|
||||
# specifying --vault-password-file on the command line.
|
||||
#vault_password_file = /path/to/vault_password_file
|
||||
|
||||
# format of string {{ ansible_managed }} available within Jinja2
|
||||
# templates indicates to users editing templates files will be replaced.
|
||||
# replacing {file}, {host} and {uid} and strftime codes with proper values.
|
||||
#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
|
||||
# This short version is better used in templates as it won't flag the file as changed every run.
|
||||
#ansible_managed = Ansible managed: {file} on {host}
|
||||
|
||||
# by default, ansible-playbook will display "Skipping [host]" if it determines a task
|
||||
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
|
||||
# messages. NOTE: the task header will still be shown regardless of whether or not the
|
||||
# task is skipped.
|
||||
#display_skipped_hosts = True
|
||||
display_skipped_hosts = False
|
||||
|
||||
# by default, if a task in a playbook does not include a name: field then
|
||||
# ansible-playbook will construct a header that includes the task's action but
|
||||
# not the task's args. This is a security feature because ansible cannot know
|
||||
# if the *module* considers an argument to be no_log at the time that the
|
||||
# header is printed. If your environment doesn't have a problem securing
|
||||
# stdout from ansible-playbook (or you have manually specified no_log in your
|
||||
# playbook on all of the tasks where you have secret information) then you can
|
||||
# safely set this to True to get more informative messages.
|
||||
display_args_to_stdout = False
|
||||
|
||||
# by default (as of 1.3), Ansible will raise errors when attempting to dereference
|
||||
# Jinja2 variables that are not set in templates or action lines. Uncomment this line
|
||||
# to revert the behavior to pre-1.3.
|
||||
#error_on_undefined_vars = False
|
||||
|
||||
# by default (as of 1.6), Ansible may display warnings based on the configuration of the
|
||||
# system running ansible itself. This may include warnings about 3rd party packages or
|
||||
# other conditions that should be resolved if possible.
|
||||
# to disable these warnings, set the following value to False:
|
||||
system_warnings = False
|
||||
|
||||
# by default (as of 1.4), Ansible may display deprecation warnings for language
|
||||
# features that should no longer be used and will be removed in future versions.
|
||||
# to disable these warnings, set the following value to False:
|
||||
deprecation_warnings = False
|
||||
|
||||
# (as of 1.8), Ansible can optionally warn when usage of the shell and
|
||||
# command module appear to be simplified by using a default Ansible module
|
||||
# instead. These warnings can be silenced by adjusting the following
|
||||
# setting or adding warn=yes or warn=no to the end of the command line
|
||||
# parameter string. This will for example suggest using the git module
|
||||
# instead of shelling out to the git command.
|
||||
command_warnings = False
|
||||
|
||||
|
||||
# set plugin path directories here, separate with colons
|
||||
#action_plugins = /usr/share/ansible/plugins/action
|
||||
#callback_plugins = /usr/share/ansible/plugins/callback
|
||||
#connection_plugins = /usr/share/ansible/plugins/connection
|
||||
#lookup_plugins = /usr/share/ansible/plugins/lookup
|
||||
#vars_plugins = /usr/share/ansible/plugins/vars
|
||||
#filter_plugins = /usr/share/ansible/plugins/filter
|
||||
#test_plugins = /usr/share/ansible/plugins/test
|
||||
#strategy_plugins = /usr/share/ansible/plugins/strategy
|
||||
|
||||
# Most callbacks shipped with Ansible are disabled by default
|
||||
# and need to be whitelisted in your ansible.cfg file in order to function.
|
||||
callback_whitelist = default
|
||||
|
||||
# by default callbacks are not loaded for /bin/ansible, enable this if you
|
||||
# want, for example, a notification or logging callback to also apply to
|
||||
# /bin/ansible runs
|
||||
#bin_ansible_callbacks = False
|
||||
|
||||
|
||||
# don't like cows? that's unfortunate.
|
||||
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
|
||||
#nocows = 1
|
||||
|
||||
# set which cowsay stencil you'd like to use by default. When set to 'random',
|
||||
# a random stencil will be selected for each task. The selection will be filtered
|
||||
# against the `cow_whitelist` option below.
|
||||
#cow_selection = default
|
||||
#cow_selection = random
|
||||
|
||||
# when using the 'random' option for cowsay, stencils will be restricted to this list.
|
||||
# it should be formatted as a comma-separated list with no spaces between names.
|
||||
# NOTE: line continuations here are for formatting purposes only, as the INI parser
|
||||
# in python does not support them.
|
||||
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
|
||||
# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
|
||||
# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
|
||||
|
||||
# don't like colors either?
|
||||
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
|
||||
nocolor = 0
|
||||
|
||||
# if set to a persistent type (not 'memory', for example 'redis') fact values
|
||||
# from previous runs in Ansible will be stored. This may be useful when
|
||||
# wanting to use, for example, IP information from one group of servers
|
||||
# without having to talk to them in the same playbook run to get their
|
||||
# current IP information.
|
||||
#fact_caching = memory
|
||||
|
||||
# retry files
|
||||
# When a playbook fails by default a .retry file will be created in ~/
|
||||
# You can disable this feature by setting retry_files_enabled to False
|
||||
# and you can change the location of the files by setting retry_files_save_path
|
||||
|
||||
#retry_files_enabled = False
|
||||
retry_files_enabled = False
|
||||
|
||||
# squash actions
|
||||
# Ansible can optimise actions that call modules with list parameters
|
||||
# when looping. Instead of calling the module once per with_ item, the
|
||||
# module is called once with all items at once. Currently this only works
|
||||
# under limited circumstances, and only with parameters named 'name'.
|
||||
squash_actions = apk,apt,dnf,package,pacman,pkgng,yum,zypper
|
||||
|
||||
# prevents logging of task data, off by default
|
||||
#no_log = False
|
||||
|
||||
# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
|
||||
no_target_syslog = True
|
||||
|
||||
# controls whether Ansible will raise an error or warning if a task has no
|
||||
# choice but to create world readable temporary files to execute a module on
|
||||
# the remote machine. This option is False by default for security. Users may
|
||||
# turn this on to have behaviour more like Ansible prior to 2.1.x. See
|
||||
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
|
||||
# for more secure ways to fix this than enabling this option.
|
||||
#allow_world_readable_tmpfiles = False
|
||||
|
||||
# controls the compression level of variables sent to
|
||||
# worker processes. At the default of 0, no compression
|
||||
# is used. This value must be an integer from 0 to 9.
|
||||
#var_compression_level = 9
|
||||
|
||||
# controls what compression method is used for new-style ansible modules when
|
||||
# they are sent to the remote system. The compression types depend on having
|
||||
# support compiled into both the controller's python and the client's python.
|
||||
# The names should match with the python Zipfile compression types:
|
||||
# * ZIP_STORED (no compression. available everywhere)
|
||||
# * ZIP_DEFLATED (uses zlib, the default)
|
||||
# These values may be set per host via the ansible_module_compression inventory
|
||||
# variable
|
||||
#module_compression = 'ZIP_DEFLATED'
|
||||
|
||||
# This controls the cutoff point (in bytes) on --diff for files
|
||||
# set to 0 for unlimited (RAM may suffer!).
|
||||
#max_diff_size = 1048576
|
||||
|
||||
[privilege_escalation]
|
||||
#become=True
|
||||
#become_method=sudo
|
||||
#become_user=root
|
||||
become_user=root
|
||||
#become_ask_pass=False
|
||||
|
||||
[paramiko_connection]
|
||||
|
||||
# uncomment this line to cause the paramiko connection plugin to not record new host
|
||||
# keys encountered. Increases performance on new host additions. Setting works independently of the
|
||||
# host key checking setting above.
|
||||
#record_host_keys=False
|
||||
|
||||
# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
|
||||
# line to disable this behaviour.
|
||||
#pty=False
|
||||
|
||||
[ssh_connection]
|
||||
|
||||
# ssh arguments to use
|
||||
# Leaving off ControlPersist will result in poor performance, so use
|
||||
# paramiko on older platforms rather than removing it
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o PreferredAuthentications=publickey -o ConnectTimeout=13
|
||||
|
||||
# The path to use for the ControlPath sockets. This defaults to
|
||||
# "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with
|
||||
# very long hostnames or very long path names (caused by long user names or
|
||||
# deeply nested home directories) this can exceed the character limit on
|
||||
# file socket names (108 characters for most platforms). In that case, you
|
||||
# may wish to shorten the string below.
|
||||
#
|
||||
# Example:
|
||||
# control_path = %(directory)s/%%h-%%r
|
||||
#control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
|
||||
|
||||
# Enabling pipelining reduces the number of SSH operations required to
|
||||
# execute a module on the remote server. This can result in a significant
|
||||
# performance improvement when enabled, however when using "sudo:" you must
|
||||
# first disable 'requiretty' in /etc/sudoers
|
||||
#
|
||||
# By default, this option is disabled to preserve compatibility with
|
||||
# sudoers configurations that have requiretty (the default on many distros).
|
||||
#
|
||||
#pipelining = False
|
||||
pipelining=True
|
||||
|
||||
# if True, make ansible use scp if the connection type is ssh
|
||||
# (default is sftp)
|
||||
#scp_if_ssh = True
|
||||
|
||||
# if False, sftp will not use batch mode to transfer files. This may cause some
|
||||
# types of file transfer failures impossible to catch however, and should
|
||||
# only be disabled if your sftp version has problems with batch mode
|
||||
#sftp_batch_mode = False
|
||||
|
||||
[accelerate]
|
||||
#accelerate_port = 5099
|
||||
#accelerate_timeout = 30
|
||||
#accelerate_connect_timeout = 5.0
|
||||
|
||||
# The daemon timeout is measured in minutes. This time is measured
|
||||
# from the last activity to the accelerate daemon.
|
||||
#accelerate_daemon_timeout = 30
|
||||
|
||||
# If set to yes, accelerate_multi_key will allow multiple
|
||||
# private keys to be uploaded to it, though each user must
|
||||
# have access to the system via SSH to add a new key. The default
|
||||
# is "no".
|
||||
#accelerate_multi_key = yes
|
||||
|
||||
[selinux]
|
||||
# file systems that require special treatment when dealing with security context
|
||||
# the default behaviour that copies the existing context or uses the user default
|
||||
# needs to be changed to use the file system dependent context.
|
||||
#special_context_filesystems=nfs,vboxsf,fuse,ramfs
|
||||
|
||||
# Set this to yes to allow libvirt_lxc connections to work without SELinux.
|
||||
#libvirt_lxc_noseclabel = yes
|
||||
|
||||
[colors]
|
||||
#highlight = white
|
||||
#verbose = blue
|
||||
#warn = bright purple
|
||||
#error = red
|
||||
#debug = dark gray
|
||||
#deprecate = purple
|
||||
#skip = cyan
|
||||
#unreachable = red
|
||||
#ok = green
|
||||
#changed = yellow
|
||||
#diff_add = green
|
||||
#diff_remove = red
|
||||
#diff_lines = cyan
|
@@ -1,17 +0,0 @@
----
-
-- name: clone bats source repo
-  git:
-    repo: "https://github.com/sstephenson/bats.git"
-    dest: "{{ ansible_env.GOPATH }}/src/github.com/sstephenson/bats"
-
-- name: install bats
-  command: "./install.sh /usr/local"
-  args:
-    chdir: "{{ ansible_env.GOPATH }}/src/github.com/sstephenson/bats"
-
-- name: link bats
-  file:
-    src: /usr/local/bin/bats
-    dest: /usr/bin/bats
-    state: link
@ -1,79 +0,0 @@
|
||||
---
|
||||
|
||||
- name: stat the expected cri-o directory
|
||||
stat:
|
||||
path: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
|
||||
register: dir_stat
|
||||
|
||||
- name: expect cri-o to be cloned already
|
||||
fail:
|
||||
msg: "Expected cri-o to be cloned at {{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o but it wasn't!"
|
||||
when: not dir_stat.stat.exists
|
||||
|
||||
- name: install cri-o tools
|
||||
make:
|
||||
target: install.tools
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
|
||||
|
||||
- name: build cri-o
|
||||
make:
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
|
||||
|
||||
- name: install cri-o
|
||||
make:
|
||||
target: install
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
|
||||
|
||||
- name: install cri-o systemd files
|
||||
make:
|
||||
target: install.systemd
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
|
||||
|
||||
- name: install cri-o config
|
||||
make:
|
||||
target: install.config
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
|
||||
|
||||
- name: install configs
|
||||
copy:
|
||||
src: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o/{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
remote_src: yes
|
||||
with_items:
|
||||
- src: contrib/cni/10-crio-bridge.conf
|
||||
dest: /etc/cni/net.d/10-crio-bridge.conf
|
||||
- src: contrib/cni/99-loopback.conf
|
||||
dest: /etc/cni/net.d/99-loopback.conf
|
||||
- src: test/redhat_sigstore.yaml
|
||||
dest: /etc/containers/registries.d/registry.access.redhat.com.yaml
|
||||
|
||||
- name: run with overlay
|
||||
replace:
|
||||
regexp: 'storage_driver = ""'
|
||||
replace: 'storage_driver = "overlay"'
|
||||
name: /etc/crio/crio.conf
|
||||
backup: yes
|
||||
|
||||
- name: run with systemd cgroup manager
|
||||
replace:
|
||||
regexp: 'cgroup_manager = "cgroupfs"'
|
||||
replace: 'cgroup_manager = "systemd"'
|
||||
name: /etc/crio/crio.conf
|
||||
backup: yes
|
||||
|
||||
- name: add docker.io default registry
|
||||
lineinfile:
|
||||
dest: /etc/crio/crio.conf
|
||||
line: '"docker.io"'
|
||||
insertafter: 'registries = \['
|
||||
regexp: 'docker\.io'
|
||||
state: present
|
||||
|
||||
- name: add overlay storage opts on RHEL/CentOS
|
||||
lineinfile:
|
||||
dest: /etc/crio/crio.conf
|
||||
line: '"overlay.override_kernel_check=1"'
|
||||
insertafter: 'storage_option = \['
|
||||
regexp: 'overlay\.override_kernel_check=1'
|
||||
state: present
|
||||
when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS'
|
@@ -1,16 +0,0 @@
----
-
-- name: clone cri-tools source repo
-  git:
-    repo: "https://github.com/kubernetes-incubator/cri-tools.git"
-    dest: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-tools"
-    version: "16e6fe4d7199c5689db4630a9330e6a8a12cecd1"
-
-- name: install crictl
-  command: "/usr/bin/go install github.com/kubernetes-incubator/cri-tools/cmd/crictl"
-
-- name: link crictl
-  file:
-    src: "{{ ansible_env.GOPATH }}/bin/crictl"
-    dest: /usr/bin/crictl
-    state: link
@ -1,63 +0,0 @@
|
||||
---
|
||||
|
||||
- name: clone kubernetes source repo
|
||||
git:
|
||||
repo: "https://github.com/runcom/kubernetes.git"
|
||||
dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
|
||||
version: "cri-o-node-e2e-patched"
|
||||
|
||||
- name: install etcd
|
||||
command: "hack/install-etcd.sh"
|
||||
args:
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
|
||||
|
||||
- name: build kubernetes
|
||||
make:
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
|
||||
|
||||
- name: Add custom cluster service file for the e2e testing
|
||||
copy:
|
||||
dest: /etc/systemd/system/customcluster.service
|
||||
content: |
|
||||
[Unit]
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
[Service]
|
||||
WorkingDirectory={{ ansible_env.GOPATH }}/src/k8s.io/kubernetes
|
||||
ExecStart=/usr/local/bin/createcluster.sh
|
||||
User=root
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
- name: Add create cluster background script for e2e testing
|
||||
copy:
|
||||
dest: /usr/local/bin/createcluster.sh
|
||||
content: |
|
||||
#!/bin/bash
|
||||
|
||||
export PATH=/usr/local/go/bin:/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/root/bin:{{ ansible_env.GOPATH }}/bin:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/third_party/etcd:{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/_output/local/bin/linux/amd64/
|
||||
export CONTAINER_RUNTIME=remote
|
||||
export CGROUP_DRIVER=systemd
|
||||
export CONTAINER_RUNTIME_ENDPOINT='/var/run/crio.sock --runtime-request-timeout=5m'
|
||||
export ALLOW_SECURITY_CONTEXT=","
|
||||
export ALLOW_PRIVILEGED=1
|
||||
export DNS_SERVER_IP={{ ansible_eth0.ipv4.address }}
|
||||
export API_HOST={{ ansible_eth0.ipv4.address }}
|
||||
export API_HOST_IP={{ ansible_eth0.ipv4.address }}
|
||||
export KUBE_ENABLE_CLUSTER_DNS=true
|
||||
./hack/local-up-cluster.sh
|
||||
mode: "u=rwx,g=rwx,o=x"
|
||||
|
||||
- name: Set kubernetes_provider to be local
|
||||
lineinfile:
|
||||
dest: /etc/environment
|
||||
line: 'KUBERNETES_PROVIDER=local'
|
||||
regexp: 'KUBERNETES_PROVIDER='
|
||||
state: present
|
||||
|
||||
- name: Set KUBECONFIG
|
||||
lineinfile:
|
||||
dest: /etc/environment
|
||||
line: 'KUBECONFIG=/var/run/kubernetes/admin.kubeconfig'
|
||||
regexp: 'KUBECONFIG='
|
||||
state: present
|
@ -1,50 +0,0 @@
|
||||
---
|
||||
|
||||
- name: clone plugins source repo
|
||||
git:
|
||||
repo: "https://github.com/containernetworking/plugins.git"
|
||||
dest: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
|
||||
version: "dcf7368eeab15e2affc6256f0bb1e84dd46a34de"
|
||||
|
||||
- name: build plugins
|
||||
command: "./build.sh"
|
||||
args:
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
|
||||
|
||||
- name: install plugins
|
||||
copy:
|
||||
src: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins/bin/{{ item }}"
|
||||
dest: "/opt/cni/bin"
|
||||
mode: "o=rwx,g=rx,o=rx"
|
||||
remote_src: yes
|
||||
with_items:
|
||||
- bridge
|
||||
- dhcp
|
||||
- flannel
|
||||
- host-local
|
||||
- ipvlan
|
||||
- loopback
|
||||
- macvlan
|
||||
- ptp
|
||||
- sample
|
||||
- tuning
|
||||
- vlan
|
||||
|
||||
- name: clone runcom plugins source repo
|
||||
git:
|
||||
repo: "https://github.com/runcom/plugins.git"
|
||||
dest: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
|
||||
version: "custom-bridge"
|
||||
force: yes
|
||||
|
||||
- name: build plugins
|
||||
command: "./build.sh"
|
||||
args:
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins"
|
||||
|
||||
- name: install custom bridge
|
||||
copy:
|
||||
src: "{{ ansible_env.GOPATH }}/src/github.com/containernetworking/plugins/bin/bridge"
|
||||
dest: "/opt/cni/bin/bridge-custom"
|
||||
mode: "o=rwx,g=rx,o=rx"
|
||||
remote_src: yes
|
@@ -1,23 +0,0 @@
----
-
-- name: clone runc source repo
-  git:
-    repo: "https://github.com/opencontainers/runc.git"
-    dest: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
-    version: "84a082bfef6f932de921437815355186db37aeb1"
-
-- name: build runc
-  make:
-    params: BUILDTAGS="seccomp selinux"
-    chdir: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
-
-- name: install runc
-  make:
-    target: "install"
-    chdir: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc"
-
-- name: link runc
-  file:
-    src: /usr/local/sbin/runc
-    dest: /usr/bin/runc
-    state: link
@ -1,156 +0,0 @@
|
||||
'''Plugin to override the default output logic.'''
|
||||
|
||||
# upstream: https://gist.github.com/cliffano/9868180
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
# For some reason this has to be done
|
||||
import imp
|
||||
import os
|
||||
|
||||
ANSIBLE_PATH = imp.find_module('ansible')[1]
|
||||
DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
|
||||
DEFAULT_MODULE = imp.load_source(
|
||||
'ansible.plugins.callback.default',
|
||||
DEFAULT_PATH
|
||||
)
|
||||
|
||||
try:
|
||||
from ansible.plugins.callback import CallbackBase
|
||||
BASECLASS = CallbackBase
|
||||
except ImportError: # < ansible 2.1
|
||||
BASECLASS = DEFAULT_MODULE.CallbackModule
|
||||
|
||||
|
||||
class CallbackModule(DEFAULT_MODULE.CallbackModule): # pylint: disable=too-few-public-methods,no-init
|
||||
'''
|
||||
Override for the default callback module.
|
||||
|
||||
Render std err/out outside of the rest of the result which it prints with
|
||||
indentation.
|
||||
'''
|
||||
CALLBACK_VERSION = 2.0
|
||||
CALLBACK_TYPE = 'stdout'
|
||||
CALLBACK_NAME = 'default'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
# pylint: disable=non-parent-init-called
|
||||
BASECLASS.__init__(self, *args, **kwargs)
|
||||
self.failed_task = []
|
||||
self.result_file = os.environ.get('AHT_RESULT_FILE')
|
||||
|
||||
def _dump_results(self, result):
|
||||
'''Return the text to output for a result.'''
|
||||
result['_ansible_verbose_always'] = True
|
||||
|
||||
save = {}
|
||||
for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
|
||||
if key in result:
|
||||
save[key] = result.pop(key)
|
||||
|
||||
output = BASECLASS._dump_results(self, result) # pylint: disable=protected-access
|
||||
|
||||
for key in ['stdout', 'stderr', 'msg']:
|
||||
if key in save and save[key]:
|
||||
output += '\n\n%s:\n---\n%s\n---' % (key.upper(), save[key])
|
||||
|
||||
for key, value in save.items():
|
||||
result[key] = value
|
||||
|
||||
return output
|
||||
|
||||
def v2_runner_on_unreachable(self, result):
|
||||
self.failed_task = result
|
||||
|
||||
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
|
||||
self._print_task_banner(result._task)
|
||||
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
|
||||
else:
|
||||
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
|
||||
|
||||
def v2_runner_on_failed(self,result, ignore_errors=False):
|
||||
if ignore_errors is not True:
|
||||
# Sets environment variable for test failures for use in playboks.
|
||||
# Handlers tasks can conditionalize themselves using this variable
|
||||
# to run only on failure.
|
||||
os.environ["AHT_FAILURE"] = "1"
|
||||
|
||||
# Save last failure
|
||||
self.failed_task = result
|
||||
|
||||
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
|
||||
self._print_task_banner(result._task)
|
||||
|
||||
delegated_vars = result._result.get('_ansible_delegated_vars', None)
|
||||
if 'exception' in result._result:
|
||||
if self._display.verbosity < 3:
|
||||
# extract just the actual error message from the exception text
|
||||
error = result._result['exception'].strip().split('\n')[-1]
|
||||
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
|
||||
else:
|
||||
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
|
||||
|
||||
self._display.display(msg, color=C.COLOR_ERROR)
|
||||
|
||||
if result._task.loop and 'results' in result._result:
|
||||
self._process_items(result)
|
||||
|
||||
else:
|
||||
if delegated_vars:
|
||||
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
else:
|
||||
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
|
||||
|
||||
if ignore_errors:
|
||||
self._display.display("...ignoring", color=C.COLOR_SKIP)
|
||||
|
||||
def v2_playbook_on_stats(self, stats):
|
||||
self._display.banner("PLAY RECAP")
|
||||
|
||||
hosts = sorted(stats.processed.keys())
|
||||
for h in hosts:
|
||||
t = stats.summarize(h)
|
||||
|
||||
self._display.display(u"%s : %s %s %s %s" % (
|
||||
hostcolor(h, t),
|
||||
colorize(u'ok', t['ok'], C.COLOR_OK),
|
||||
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
|
||||
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
|
||||
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
|
||||
screen_only=True
|
||||
)
|
||||
|
||||
self._display.display(u"%s : %s %s %s %s" % (
|
||||
hostcolor(h, t, False),
|
||||
colorize(u'ok', t['ok'], None),
|
||||
colorize(u'changed', t['changed'], None),
|
||||
colorize(u'unreachable', t['unreachable'], None),
|
||||
colorize(u'failed', t['failures'], None)),
|
||||
log_only=True
|
||||
)
|
||||
|
||||
self._display.display("", screen_only=True)
|
||||
# Save result to file if environment variable exists
|
||||
if self.result_file is not None:
|
||||
if self.failed_task:
|
||||
with open(self.result_file, 'w') as f:
|
||||
f.write("PLAY: %s\n%s\n%s" % (self._play, \
|
||||
self.failed_task._task, \
|
||||
self._dump_results(self.failed_task._result)))
|
||||
else:
|
||||
open(self.result_file, 'w').close()
|
@ -1,57 +0,0 @@
|
||||
---
|
||||
|
||||
- name: enable and start CRI-O
|
||||
systemd:
|
||||
name: crio
|
||||
state: started
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
|
||||
- name: update the server address for the custom cluster
|
||||
lineinfile:
|
||||
dest: /usr/local/bin/createcluster.sh
|
||||
line: "export {{ item }}={{ ansible_eth0.ipv4.address }}"
|
||||
regexp: "^export {{ item }}="
|
||||
state: present
|
||||
with_items:
|
||||
- DNS_SERVER_IP
|
||||
- API_HOST
|
||||
- API_HOST_IP
|
||||
|
||||
- name: enable and start the custom cluster
|
||||
systemd:
|
||||
name: customcluster.service
|
||||
state: started
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
|
||||
- name: wait for the cluster to be running
|
||||
command: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/_output/bin/kubectl get service kubernetes --namespace default"
|
||||
register: kube_poll
|
||||
until: kube_poll | succeeded
|
||||
retries: 100
|
||||
delay: 30
|
||||
|
||||
- name: ensure directory exists for e2e reports
|
||||
file:
|
||||
path: "{{ artifacts }}"
|
||||
state: directory
|
||||
|
||||
- name: Buffer the e2e testing command to workaround Ansible YAML folding "feature"
|
||||
set_fact:
|
||||
e2e_shell_cmd: >
|
||||
/usr/bin/go run hack/e2e.go
|
||||
--test
|
||||
--test_args="-host=https://{{ ansible_default_ipv4.address }}:6443
|
||||
--ginkgo.focus=\[Conformance\]
|
||||
--report-dir={{ artifacts }}"
|
||||
&> {{ artifacts }}/e2e.log
|
||||
# Fix vim syntax hilighting: "
|
||||
|
||||
- name: disable SELinux
|
||||
command: setenforce 0
|
||||
|
||||
- name: run e2e tests
|
||||
shell: "{{ e2e_shell_cmd | regex_replace('\\s+', ' ') }}"
|
||||
args:
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes"
|
@ -1,51 +0,0 @@
|
||||
---
|
||||
|
||||
- name: fetch Golang
|
||||
unarchive:
|
||||
remote_src: yes
|
||||
src: https://storage.googleapis.com/golang/go1.8.4.linux-amd64.tar.gz
|
||||
dest: /usr/local
|
||||
|
||||
- name: link go toolchain
|
||||
file:
|
||||
src: "/usr/local/go/bin/{{ item }}"
|
||||
dest: "/usr/bin/{{ item }}"
|
||||
state: link
|
||||
with_items:
|
||||
- go
|
||||
- gofmt
|
||||
- godoc
|
||||
|
||||
- name: ensure user profile exists
|
||||
file:
|
||||
path: "{{ ansible_user_dir }}/.profile"
|
||||
state: touch
|
||||
|
||||
- name: set up PATH for Go toolchain and built binaries
|
||||
lineinfile:
|
||||
dest: "{{ ansible_user_dir }}/.profile"
|
||||
line: 'PATH={{ ansible_env.PATH }}:{{ ansible_env.GOPATH }}/bin:/usr/local/go/bin'
|
||||
regexp: '^PATH='
|
||||
state: present
|
||||
|
||||
- name: set up directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
with_items:
|
||||
- "{{ ansible_env.GOPATH }}/src/github.com/containernetworking"
|
||||
- "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator"
|
||||
- "{{ ansible_env.GOPATH }}/src/github.com/k8s.io"
|
||||
- "{{ ansible_env.GOPATH }}/src/github.com/sstephenson"
|
||||
- "{{ ansible_env.GOPATH }}/src/github.com/opencontainers"
|
||||
|
||||
- name: install Go tools and dependencies
|
||||
shell: /usr/bin/go get -u "github.com/{{ item }}"
|
||||
with_items:
|
||||
- tools/godep
|
||||
- onsi/ginkgo/ginkgo
|
||||
- onsi/gomega
|
||||
- cloudflare/cfssl/cmd/...
|
||||
- jteeuwen/go-bindata/go-bindata
|
||||
- vbatts/git-validation
|
||||
- cpuguy83/go-md2man
|
@ -1,58 +0,0 @@
|
||||
- hosts: all
|
||||
remote_user: root
|
||||
vars_files:
|
||||
- "{{ playbook_dir }}/vars.yml"
|
||||
tags:
|
||||
- setup
|
||||
tasks:
|
||||
- name: set up the system
|
||||
include: system.yml
|
||||
|
||||
- name: install Golang tools
|
||||
include: golang.yml
|
||||
|
||||
- name: clone build and install bats
|
||||
include: "build/bats.yml"
|
||||
|
||||
- name: clone build and install cri-tools
|
||||
include: "build/cri-tools.yml"
|
||||
|
||||
- name: clone build and install kubernetes
|
||||
include: "build/kubernetes.yml"
|
||||
|
||||
- name: clone build and install runc
|
||||
include: "build/runc.yml"
|
||||
|
||||
- name: clone build and install networking plugins
|
||||
include: "build/plugins.yml"
|
||||
|
||||
- hosts: all
|
||||
remote_user: root
|
||||
vars_files:
|
||||
- "{{ playbook_dir }}/vars.yml"
|
||||
tags:
|
||||
- integration
|
||||
- e2e
|
||||
tasks:
|
||||
- name: clone build and install cri-o
|
||||
include: "build/cri-o.yml"
|
||||
|
||||
- hosts: all
|
||||
remote_user: root
|
||||
vars_files:
|
||||
- "{{ playbook_dir }}/vars.yml"
|
||||
tags:
|
||||
- integration
|
||||
tasks:
|
||||
- name: run cri-o integration tests
|
||||
include: test.yml
|
||||
|
||||
- hosts: all
|
||||
remote_user: root
|
||||
vars_files:
|
||||
- "{{ playbook_dir }}/vars.yml"
|
||||
tags:
|
||||
- e2e
|
||||
tasks:
|
||||
- name: run k8s e2e tests
|
||||
include: e2e.yml
|
@ -1,62 +0,0 @@
|
||||
---
|
||||
# vim-syntax: ansible
|
||||
|
||||
- hosts: '{{ hosts | default("all") }}'
|
||||
vars_files:
|
||||
- "{{ playbook_dir }}/vars.yml"
|
||||
vars:
|
||||
_result_filepaths: [] # do not use
|
||||
_dstfnbuff: [] # do not use
|
||||
tasks:
|
||||
- name: The crio_integration_filepath is required
|
||||
tags:
|
||||
- integration
|
||||
set_fact:
|
||||
_result_filepaths: "{{ _result_filepaths + [crio_integration_filepath] }}"
|
||||
|
||||
- name: The crio_node_e2e_filepath is required
|
||||
tags:
|
||||
- e2e
|
||||
set_fact:
|
||||
_result_filepaths: "{{ _result_filepaths + [crio_node_e2e_filepath] }}"
|
||||
|
||||
- name: Verify expectations
|
||||
assert:
|
||||
that:
|
||||
- 'result_dest_basedir | default(False, True)'
|
||||
- '_result_filepaths | default(False, True)'
|
||||
- '_dstfnbuff == []'
|
||||
- 'results_fetched is undefined'
|
||||
|
||||
- name: Results directory exists
|
||||
file:
|
||||
path: "{{ result_dest_basedir }}"
|
||||
state: directory
|
||||
delegate_to: localhost
|
||||
|
||||
- name: destination file paths are buffered for overwrite-checking and jUnit conversion
|
||||
set_fact:
|
||||
_dstfnbuff: >
|
||||
{{ _dstfnbuff |
|
||||
union( [result_dest_basedir ~ "/" ~ inventory_hostname ~ "/" ~ item | basename] ) }}
|
||||
with_items: '{{ _result_filepaths }}'
|
||||
|
||||
- name: Overwriting existing results assumed very very bad
|
||||
fail:
|
||||
msg: "Cowardly refusing to overwrite {{ item }}"
|
||||
when: item | exists
|
||||
delegate_to: localhost
|
||||
with_items: '{{ _dstfnbuff }}'
|
||||
|
||||
# fetch module doesn't support directories
|
||||
- name: Retrieve results from all hosts
|
||||
synchronize:
|
||||
checksum: True # Don't rely on date/time being in sync
|
||||
archive: False # Don't bother with permissions or times
|
||||
copy_links: True # We want files, not links to files
|
||||
recursive: True
|
||||
mode: pull
|
||||
dest: '{{ result_dest_basedir }}/{{ inventory_hostname }}/' # must end in /
|
||||
src: '{{ item }}'
|
||||
register: results_fetched
|
||||
with_items: '{{ _result_filepaths }}'
|
@ -1,117 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Make sure we have all required packages
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- container-selinux
|
||||
- curl
|
||||
- device-mapper-devel
|
||||
- expect
|
||||
- findutils
|
||||
- gcc
|
||||
- git
|
||||
- glib2-devel
|
||||
- glibc-devel
|
||||
- glibc-static
|
||||
- gpgme-devel
|
||||
- hostname
|
||||
- iproute
|
||||
- iptables
|
||||
- krb5-workstation
|
||||
- libassuan-devel
|
||||
- libffi-devel
|
||||
- libgpg-error-devel
|
||||
- libguestfs-tools
|
||||
- libseccomp-devel
|
||||
- libvirt-client
|
||||
- libvirt-python
|
||||
- libxml2-devel
|
||||
- libxslt-devel
|
||||
- make
|
||||
- mlocate
|
||||
- nfs-utils
|
||||
- nmap-ncat
|
||||
- oci-register-machine
|
||||
- oci-systemd-hook
|
||||
- oci-umount
|
||||
- openssl
|
||||
- openssl-devel
|
||||
- ostree-devel
|
||||
- pkgconfig
|
||||
- python
|
||||
- python2-boto
|
||||
- python2-crypto
|
||||
- python-devel
|
||||
- python-virtualenv
|
||||
- PyYAML
|
||||
- redhat-rpm-config
|
||||
- rpcbind
|
||||
- rsync
|
||||
- sed
|
||||
- skopeo-containers
|
||||
- socat
|
||||
- tar
|
||||
- wget
|
||||
async: 600
|
||||
poll: 10
|
||||
|
||||
- name: Add Btrfs for Fedora
|
||||
package:
|
||||
name: "{{ item }}"
|
||||
state: present
|
||||
with_items:
|
||||
- btrfs-progs-devel
|
||||
when: ansible_distribution in ['Fedora']
|
||||
|
||||
- name: Update all packages
|
||||
package:
|
||||
name: '*'
|
||||
state: latest
|
||||
async: 600
|
||||
poll: 10
|
||||
|
||||
- name: Setup swap to prevent kernel firing off the OOM killer
|
||||
shell: |
|
||||
truncate -s 8G /root/swap && \
|
||||
export SWAPDEV=$(losetup --show -f /root/swap | head -1) && \
|
||||
mkswap $SWAPDEV && \
|
||||
swapon $SWAPDEV && \
|
||||
swapon --show
|
||||
|
||||
- name: ensure directories exist as needed
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
with_items:
|
||||
- /opt/cni/bin
|
||||
- /etc/cni/net.d
|
||||
|
||||
- name: set sysctl vm.overcommit_memory=1 for CentOS
|
||||
sysctl:
|
||||
name: vm.overcommit_memory
|
||||
state: present
|
||||
value: 1
|
||||
when: ansible_distribution == 'CentOS'
|
||||
|
||||
- name: inject hostname into /etc/hosts
|
||||
lineinfile:
|
||||
dest: /etc/hosts
|
||||
line: '{{ ansible_default_ipv4.address }} {{ ansible_nodename }}'
|
||||
insertafter: 'EOF'
|
||||
regexp: '{{ ansible_default_ipv4.address }}\s+{{ ansible_nodename }}'
|
||||
state: present
|
||||
|
||||
- name: Flush the iptables
|
||||
command: iptables -F
|
||||
|
||||
- name: Enable localnet routing
|
||||
command: sysctl -w net.ipv4.conf.all.route_localnet=1
|
||||
|
||||
- name: Add masquerade for localhost
|
||||
command: iptables -t nat -I POSTROUTING -s 127.0.0.1 ! -d 127.0.0.1 -j MASQUERADE
|
||||
|
||||
- name: Update the kernel cmdline to include quota support
|
||||
command: grubby --update-kernel=ALL --args="rootflags=pquota"
|
||||
when: ansible_distribution in ['RedHat', 'CentOS']
|
@ -1,25 +0,0 @@
|
||||
---
|
||||
|
||||
- name: Make testing output verbose so it can be converted to xunit
|
||||
lineinfile:
|
||||
dest: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes/hack/make-rules/test.sh"
|
||||
line: ' go test -v "${goflags[@]:+${goflags[@]}}" \'
|
||||
regexp: ' go test \"\$'
|
||||
state: present
|
||||
|
||||
- name: set extra storage options
|
||||
set_fact:
|
||||
extra_storage_opts: " --storage-opt overlay.override_kernel_check=1"
|
||||
when: ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS'
|
||||
|
||||
- name: ensure directory exists for e2e reports
|
||||
file:
|
||||
path: "{{ artifacts }}"
|
||||
state: directory
|
||||
|
||||
- name: run integration tests
|
||||
shell: "CGROUP_MANAGER=cgroupfs STORAGE_OPTIONS='--storage-driver=overlay{{ extra_storage_opts | default('') }}' make localintegration >& {{ artifacts }}/testout.txt"
|
||||
args:
|
||||
chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o"
|
||||
async: 5400
|
||||
poll: 30
|
@ -1,8 +0,0 @@
|
||||
---
|
||||
|
||||
# For results.yml Paths use rsync 'source' conventions
|
||||
artifacts: "/tmp/artifacts" # Base-directory for collection
|
||||
crio_integration_filepath: "{{ artifacts }}/testout.txt"
|
||||
crio_node_e2e_filepath: "{{ artifacts }}/junit_01.xml"
|
||||
result_dest_basedir: '{{ lookup("env","WORKSPACE") |
|
||||
default(playbook_dir, True) }}/artifacts'
|
@ -1,54 +0,0 @@
|
||||
# Pip requirements file for Ansible-based integration-testing environment.
|
||||
# Intended to be utilized by venv-ansible-playbook.sh script
|
||||
#
|
||||
# N/B: Hashes are required here | versions frozen for stability
|
||||
|
||||
ansible==2.3.1.0 --hash=sha256:cd4b8f53720fcd0c351156b840fdd15ecfbec22c951b5406ec503de49d40b9f5
|
||||
|
||||
asn1crypto==0.22.0 --hash=sha256:d232509fefcfcdb9a331f37e9c9dc20441019ad927c7d2176cf18ed5da0ba097 \
|
||||
--hash=sha256:cbbadd640d3165ab24b06ef25d1dca09a3441611ac15f6a6b452474fdf0aed1a
|
||||
|
||||
bcrypt==3.1.3 --hash=sha256:05b35b9842b009b44496fa5433ce462f69966291e50fbd471dbb427f399f748f \
|
||||
--hash=sha256:6645c8d0ad845308de3eb9be98b6fd22a46ec5412bfc664a423e411cdd8f5488
|
||||
|
||||
cffi==1.10.0 --hash=sha256:c49187260043bd4c1d6a52186f9774f17d9b1da0a406798ebf4bfc12da166ade \
|
||||
--hash=sha256:b3b02911eb1f6ada203b0763ba924234629b51586f72a21faacc638269f4ced5
|
||||
|
||||
cryptography==1.9 --hash=sha256:5518337022718029e367d982642f3e3523541e098ad671672a90b82474c84882
|
||||
|
||||
enum34==1.1.6 --hash=sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79 \
|
||||
--hash=sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1
|
||||
|
||||
idna==2.5 --hash=sha256:cc19709fd6d0cbfed39ea875d29ba6d4e22c0cebc510a76d6302a28385e8bb70 \
|
||||
--hash=sha256:3cb5ce08046c4e3a560fc02f138d0ac63e00f8ce5901a56b32ec8b7994082aab
|
||||
|
||||
ipaddress==1.0.18 --hash=sha256:d34cf15d95ce9a734560f7400a8bd2ac2606f378e2a1d0eadbf1c98707e7c74a \
|
||||
--hash=sha256:5d8534c8e185f2d8a1fda1ef73f2c8f4b23264e8e30063feeb9511d492a413e1
|
||||
|
||||
Jinja2==2.9.6 --hash=sha256:2231bace0dfd8d2bf1e5d7e41239c06c9e0ded46e70cc1094a0aa64b0afeb054 \
|
||||
--hash=sha256:ddaa01a212cd6d641401cb01b605f4a4d9f37bfc93043d7f760ec70fb99ff9ff
|
||||
|
||||
MarkupSafe==1.0 --hash=sha256:a6be69091dac236ea9c6bc7d012beab42010fa914c459791d627dad4910eb665
|
||||
|
||||
paramiko==2.2.1 --hash=sha256:9c9402377ba8594889aab1e44a13b78eda685eb2145dc00b2353b4fbb25088cf \
|
||||
--hash=sha256:ff94ae65379914ec3c960de731381f49092057b6dd1d24d18842ead5a2eb2277
|
||||
|
||||
pyasn1==0.2.3 --hash=sha256:0439b9bd518418260c2641a571f0e07fce4370cab13b68f19b5e023306c03cad \
|
||||
--hash=sha256:738c4ebd88a718e700ee35c8d129acce2286542daa80a82823a7073644f706ad
|
||||
|
||||
pycparser==2.17 --hash=sha256:0aac31e917c24cb3357f5a4d5566f2cc91a19ca41862f6c3c22dc60a629673b6
|
||||
|
||||
pycrypto==2.6.1 --hash=sha256:f2ce1e989b272cfcb677616763e0a2e7ec659effa67a88aa92b3a65528f60a3c
|
||||
|
||||
PyNaCl==1.1.2 --hash=sha256:57314a7bad4bd39501dc622942f9921923673e52e126b0fc4f0214b5d25d619a \
|
||||
--hash=sha256:32f52b754abf07c319c04ce16905109cab44b0e7f7c79497431d3b2000f8af8c
|
||||
|
||||
PyYAML==3.12 --hash=sha256:592766c6303207a20efc445587778322d7f73b161bd994f227adaa341ba212ab
|
||||
|
||||
six==1.10.0 --hash=sha256:0ff78c403d9bccf5a425a6d31a12aa6b47f1c21ca4dc2573a7e2f32a97335eb1 \
|
||||
--hash=sha256:105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a
|
||||
|
||||
virtualenv==15.1.0 --hash=sha256:39d88b533b422825d644087a21e78c45cf5af0ef7a99a1fc9fbb7b481e5c85b0 \
|
||||
--hash=sha256:02f8102c2436bb03b3ee6dede1919d1dac8a427541652e5ec95171ec8adbc93a
|
||||
|
||||
pip==9.0.1 --hash=sha256:690b762c0a8460c303c089d5d0be034fb15a5ea2b75bdf565f40421f542fefb0
|
@ -1,106 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# example usage
|
||||
# $ ./venv-ansible-playbook.sh \
|
||||
# -i 192.168.169.170 \
|
||||
# --private-key=/path/to/key \
|
||||
# --extra-vars "pullrequest=42" \
|
||||
# --extra-vars "commit=abcd1234" \
|
||||
# --user root \
|
||||
# --verbose \
|
||||
# $PWD/crio-integration-playbook.yaml
|
||||
|
||||
# All errors are fatal
|
||||
set -e
|
||||
|
||||
SCRIPT_PATH=`realpath $(dirname $0)`
|
||||
REQUIREMENTS="$SCRIPT_PATH/requirements.txt"
|
||||
|
||||
echo
|
||||
|
||||
if ! type -P virtualenv &> /dev/null
|
||||
then
|
||||
echo "Could not find required 'virtualenv' binary installed on system."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$#" -lt "1" ]
|
||||
then
|
||||
echo "No ansible-playbook command-line options specified."
|
||||
echo "usage: $0 -i whatever --private-key=something --extra-vars foo=bar playbook.yml"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Avoid dirtying up repository, keep execution bits confined to a known location
|
||||
if [ -z "$WORKSPACE" ] || [ ! -d "$WORKSPACE" ]
|
||||
then
|
||||
export WORKSPACE="$(mktemp -d)"
|
||||
echo "Using temporary \$WORKSPACE=\"$WORKSPACE\" for execution environment."
|
||||
echo "Directory will be removed upon exit. Export this variable with path"
|
||||
echo "to an existing directory to preserve contents."
|
||||
trap 'rm -rf "$WORKSPACE"' EXIT
|
||||
else
|
||||
echo "Using existing \$WORKSPACE=\"$WORKSPACE\" for execution environment."
|
||||
echo "Directory will be left as-is upon exit."
|
||||
# Don't recycle cache, next job may have different requirements
|
||||
trap 'rm -rf "$PIPCACHE"' EXIT
|
||||
fi
|
||||
|
||||
# Create a directory to contain logs and test artifacts
|
||||
export ARTIFACTS=$(mkdir -pv $WORKSPACE/artifacts | tail -1 | cut -d \' -f 2)
|
||||
[ -d "$ARTIFACTS" ] || exit 3
|
||||
|
||||
# All command failures from now on are fatal
|
||||
set -e
|
||||
echo
|
||||
echo "Bootstrapping trusted virtual environment, this may take a few minutes, depending on networking."
|
||||
echo "(logs: \"$ARTIFACTS/crio_venv_setup_log.txt\")"
|
||||
echo
|
||||
|
||||
|
||||
(
|
||||
set -x
|
||||
cd "$WORKSPACE"
|
||||
# When running more than once, make it fast by skipping the bootstrap
|
||||
if [ ! -d "./.cri-o_venv" ]; then
|
||||
# N/B: local system's virtualenv binary - uncontrolled version fixed below
|
||||
virtualenv --no-site-packages --python=python2.7 ./.venvbootstrap
|
||||
# Set up paths to install/operate out of $WORKSPACE/.venvbootstrap
|
||||
source ./.venvbootstrap/bin/activate
|
||||
# N/B: local system's pip binary - uncontrolled version fixed below
|
||||
# pip may not support --cache-dir; force its location into $WORKSPACE the ugly way
|
||||
OLD_HOME="$HOME"
|
||||
export HOME="$WORKSPACE"
|
||||
export PIPCACHE="$WORKSPACE/.cache/pip"
|
||||
pip install --force-reinstall --upgrade pip==9.0.1
|
||||
# Undo --cache-dir workaround
|
||||
export HOME="$OLD_HOME"
|
||||
# Install fixed, trusted, hashed versions of all requirements (including pip and virtualenv)
|
||||
pip --cache-dir="$PIPCACHE" install --require-hashes \
|
||||
--requirement "$SCRIPT_PATH/requirements.txt"
|
||||
|
||||
# Setup trusted virtualenv using hashed binary from requirements.txt
|
||||
./.venvbootstrap/bin/virtualenv --no-site-packages --python=python2.7 ./.cri-o_venv
|
||||
# Exit untrusted virtualenv
|
||||
deactivate
|
||||
fi
|
||||
# Enter trusted virtualenv
|
||||
source ./.cri-o_venv/bin/activate
|
||||
# Upgrade stock-pip to support hashes
|
||||
pip install --force-reinstall --cache-dir="$PIPCACHE" --upgrade pip==9.0.1
|
||||
# Re-install from cache but validate all hashes (including on pip itself)
|
||||
pip --cache-dir="$PIPCACHE" install --require-hashes \
|
||||
--requirement "$SCRIPT_PATH/requirements.txt"
|
||||
# Remove temporary bootstrap virtualenv
|
||||
rm -rf ./.venvbootstrap
|
||||
# Exit trusted virtualenv
|
||||
|
||||
) &> $ARTIFACTS/crio_venv_setup_log.txt;
|
||||
|
||||
echo
|
||||
echo "Executing \"$WORKSPACE/.cri-o_venv/bin/ansible-playbook $@\""
|
||||
echo
|
||||
|
||||
# Execute command-line arguments under virtualenv
|
||||
source ${WORKSPACE}/.cri-o_venv/bin/activate
|
||||
${WORKSPACE}/.cri-o_venv/bin/ansible-playbook $@
|
105
kubernetes.md
@ -1,105 +0,0 @@
|
||||
# Running CRI-O on kubernetes cluster
|
||||
|
||||
## Switching runtime from docker to CRI-O
|
||||
|
||||
In a standard docker kubernetes cluster, the kubelet runs on each node as a systemd service and takes care of communication between the runtime and the api service.
|
||||
It is responsible for starting microservice pods (such as `kube-proxy`, `kubedns`, etc. - these can differ between ways of deploying k8s) and user pods.
|
||||
Configuration of kubelet determines which runtime is used and in what way.
|
||||
|
||||
The kubelet itself is executed in a docker container (as we can see in `kubelet.service`), but, importantly, **it is not** a kubernetes pod (at least for now),
|
||||
so we can keep the kubelet running inside a container (or directly on the host) and, regardless of this, run pods in the runtime of our choice.
|
||||
|
||||
Below, you can find instructions on how to switch one or more nodes of a running kubernetes cluster from docker to CRI-O.
|
||||
|
||||
### Preparing crio
|
||||
|
||||
You must prepare and install `crio` on each node you would like to switch. Here's the list of files that must be provided:
|
||||
|
||||
| File path | Description | Location |
|
||||
|--------------------------------------------|----------------------------|-----------------------------------------------------|
|
||||
| `/etc/crio/crio.conf` | crio configuration | Generated on cri-o `make install` |
|
||||
| `/etc/crio/seccomp.conf` | seccomp config | Example stored in cri-o repository |
|
||||
| `/etc/containers/policy.json` | containers policy | Example stored in cri-o repository |
|
||||
| `/bin/{crio, runc}` | `crio` and `runc` binaries | Built from cri-o repository |
|
||||
| `/usr/local/libexec/crio/conmon` | `conmon` binary | Built from cri-o repository |
|
||||
| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugins binaries | Can be built from sources `containernetworking/cni` |
|
||||
| `/etc/cni/net.d/10-mynet.conf` | Network config | Example stored in [README file](README.md) |
|
||||
|
||||
The `crio` binary can be executed directly on the host, inside a container, or in any other way.
|
||||
However, the recommended way is to set it up as a systemd service.
|
||||
Here's an example unit file:
|
||||
|
||||
```
|
||||
# cat /etc/systemd/system/crio.service
|
||||
[Unit]
|
||||
Description=CRI-O daemon
|
||||
Documentation=https://github.com/kubernetes-incubator/cri-o
|
||||
|
||||
[Service]
|
||||
ExecStart=/bin/crio --runtime /bin/runc --log /root/crio.log --log-level debug
|
||||
Restart=always
|
||||
RestartSec=10s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
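After installing the unit file, reload systemd and enable the service so that crio is started on boot. This is the standard systemd workflow rather than anything crio-specific; starting the service itself is covered later in this document:

```
# systemctl daemon-reload
# systemctl enable crio
```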
|
||||
### Preparing kubelet
|
||||
First, you need to stop the kubelet service running on the node:
|
||||
```
|
||||
# systemctl stop kubelet
|
||||
```
|
||||
and stop all kubelet docker containers that are still running.
|
||||
|
||||
```
|
||||
# docker stop $(docker ps | grep k8s_ | awk '{print $1}')
|
||||
```
|
||||
|
||||
We have to make sure that `kubelet.service` starts after `crio.service`.
|
||||
This can be done by adding `crio.service` to the `Wants=` section in `/etc/systemd/system/kubelet.service`:
|
||||
|
||||
```
|
||||
# cat /etc/systemd/system/kubelet.service | grep Wants
|
||||
Wants=docker.socket crio.service
|
||||
```
|
||||
|
||||
If you'd like to change the way kubelet is started (e.g. directly on the host instead of in a docker container), you can change it here, but, as mentioned, it's not necessary.
|
||||
|
||||
|
||||
Kubelet parameters are stored in the `/etc/kubernetes/kubelet.env` file.
|
||||
```
|
||||
# cat /etc/kubernetes/kubelet.env | grep KUBELET_ARGS
|
||||
KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests
|
||||
--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0
|
||||
--cluster_dns=10.233.0.3 --cluster_domain=cluster.local
|
||||
--resolv-conf=/etc/resolv.conf --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
|
||||
--require-kubeconfig"
|
||||
```
|
||||
|
||||
You need to add the following parameters to `KUBELET_ARGS` (an illustrative result is shown after this list):
|
||||
* `--experimental-cri=true` - Use the Container Runtime Interface. This will be true by default starting with the kubernetes 1.6 release.
|
||||
* `--container-runtime=remote` - Use remote runtime with provided socket.
|
||||
* `--container-runtime-endpoint=/var/run/crio.sock` - Socket for the remote runtime (the default `crio` socket location).
|
||||
* `--runtime-request-timeout=10m` - Optional but useful. Some requests, especially pulling huge images, may take longer than the default (2 minutes) and would otherwise cause an error.
|
||||
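For illustration only, here is roughly what the entry from the example above might look like after adding these flags; the exact set of pre-existing arguments depends on your deployment:

```
KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests
--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0
--cluster_dns=10.233.0.3 --cluster_domain=cluster.local
--resolv-conf=/etc/resolv.conf --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
--require-kubeconfig
--experimental-cri=true --container-runtime=remote
--container-runtime-endpoint=/var/run/crio.sock
--runtime-request-timeout=10m"
```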
|
||||
Kubelet is prepared now.
|
||||
|
||||
## Flannel network
|
||||
If your cluster uses the flannel network, your network configuration should look like this:
|
||||
```
|
||||
# cat /etc/cni/net.d/10-mynet.conf
|
||||
{
|
||||
"name": "mynet",
|
||||
"type": "flannel"
|
||||
}
|
||||
```
|
||||
Then, kubelet will take its parameters from `/run/flannel/subnet.env` - a file generated by the flannel kubelet microservice (an illustrative example of this file is shown below).
|
||||
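For reference, this file holds the per-node subnet assignment handed out by flannel and typically contains variables such as `FLANNEL_SUBNET` and `FLANNEL_MTU`; the values below are placeholders for illustration, your cluster will have its own:

```
# cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.233.64.0/18
FLANNEL_SUBNET=10.233.64.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true
```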
|
||||
## Starting kubelet with CRI-O
|
||||
Start crio first, then kubelet. If you created the `crio` service:
|
||||
```
|
||||
# systemctl start crio
|
||||
# systemctl start kubelet
|
||||
```
|
||||
|
||||
You can follow the progress of node preparation using `kubectl get nodes` or `kubectl get pods --all-namespaces` on the kubernetes master.
|
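Once the node reports `Ready`, you can also check which runtime it registered with, for example via `kubectl get nodes -o wide`; whether a container runtime column is shown depends on your kubectl and kubernetes versions, so treat this as an illustrative check rather than guaranteed output:

```
# kubectl get nodes -o wide
```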
@ -5,7 +5,7 @@ import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
)
|
||||
|
||||
|
@ -4,9 +4,9 @@ import (
|
||||
"fmt"
|
||||
|
||||
cstorage "github.com/containers/storage"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/kubernetes-incubator/cri-o/pkg/registrar"
|
||||
"github.com/projectatomic/libpod/libkpod/sandbox"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/projectatomic/libpod/pkg/registrar"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -8,9 +8,9 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/driver"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/images"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/libpod/driver"
|
||||
"github.com/projectatomic/libpod/libpod/images"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -12,11 +12,11 @@ import (
|
||||
cstorage "github.com/containers/storage"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/truncindex"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/kubernetes-incubator/cri-o/pkg/annotations"
|
||||
"github.com/kubernetes-incubator/cri-o/pkg/registrar"
|
||||
"github.com/kubernetes-incubator/cri-o/pkg/storage"
|
||||
"github.com/projectatomic/libpod/libkpod/sandbox"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/projectatomic/libpod/pkg/annotations"
|
||||
"github.com/projectatomic/libpod/pkg/registrar"
|
||||
"github.com/projectatomic/libpod/pkg/storage"
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
|
@ -2,8 +2,8 @@ package libkpod
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/kubernetes-incubator/cri-o/utils"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/projectatomic/libpod/utils"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"syscall"
|
||||
|
@ -1,7 +1,7 @@
|
||||
package libkpod
|
||||
|
||||
import (
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -4,7 +4,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
@ -7,8 +7,8 @@ import (
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/kubernetes-incubator/cri-o/pkg/annotations"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/projectatomic/libpod/pkg/annotations"
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
)
|
||||
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
"github.com/containernetworking/plugins/pkg/ns"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
|
@ -7,7 +7,7 @@ import (
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/opencontainers/runc/libcontainer"
|
||||
)
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
package libkpod
|
||||
|
||||
import (
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
@ -1,7 +1,7 @@
|
||||
package libkpod
|
||||
|
||||
import (
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/projectatomic/libpod/oci"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
@ -10,7 +10,7 @@ import (
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
crioAnnotations "github.com/kubernetes-incubator/cri-o/pkg/annotations"
|
||||
crioAnnotations "github.com/projectatomic/libpod/pkg/annotations"
|
||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -2,7 +2,7 @@ package libpod
|
||||
|
||||
import (
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/layers"
|
||||
"github.com/projectatomic/libpod/libpod/layers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -9,7 +9,7 @@ import (
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/driver"
|
||||
"github.com/projectatomic/libpod/libpod/driver"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -2,7 +2,7 @@ package libpod
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/pkg/truncindex"
|
||||
"github.com/kubernetes-incubator/cri-o/pkg/registrar"
|
||||
"github.com/projectatomic/libpod/pkg/registrar"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -18,7 +18,7 @@ import (
|
||||
|
||||
// TODO import these functions into libpod and remove the import
|
||||
// Trying to keep libpod from depending on CRI-O code
|
||||
"github.com/kubernetes-incubator/cri-o/utils"
|
||||
"github.com/projectatomic/libpod/utils"
|
||||
)
|
||||
|
||||
// OCI code is undergoing heavy rewrite
|
||||
|
@ -22,7 +22,7 @@ import (
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/kubernetes-incubator/cri-o/libpod/common"
|
||||
"github.com/projectatomic/libpod/libpod/common"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ociv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -14,7 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containerd/cgroups"
|
||||
"github.com/kubernetes-incubator/cri-o/utils"
|
||||
"github.com/projectatomic/libpod/utils"
|
||||
rspec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
|
@ -1,89 +0,0 @@
|
||||
// +build apparmor
|
||||
|
||||
package apparmor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
binary = "apparmor_parser"
|
||||
)
|
||||
|
||||
// GetVersion returns the version of apparmor_parser, encoded as a single number (see parseVersion).
|
||||
func GetVersion() (int, error) {
|
||||
output, err := cmd("", "--version")
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return parseVersion(output)
|
||||
}
|
||||
|
||||
// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to
|
||||
// replace the profile.
|
||||
func LoadProfile(profilePath string) error {
|
||||
_, err := cmd("", "-r", profilePath)
|
||||
return err
|
||||
}
|
||||
|
||||
// cmd runs `apparmor_parser` with the passed arguments.
|
||||
func cmd(dir string, arg ...string) (string, error) {
|
||||
c := exec.Command(binary, arg...)
|
||||
c.Dir = dir
|
||||
|
||||
output, err := c.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
|
||||
}
|
||||
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
// parseVersion takes the output from `apparmor_parser --version` and returns
|
||||
// a representation of the {major, minor, patch} version as a single number of
|
||||
// the form MMmmPPP (e.g. 2.9.1 becomes 209001).
|
||||
func parseVersion(output string) (int, error) {
|
||||
// output is in the form of the following:
|
||||
// AppArmor parser version 2.9.1
|
||||
// Copyright (C) 1999-2008 Novell Inc.
|
||||
// Copyright 2009-2012 Canonical Ltd.
|
||||
|
||||
lines := strings.SplitN(output, "\n", 2)
|
||||
words := strings.Split(lines[0], " ")
|
||||
version := words[len(words)-1]
|
||||
|
||||
// split by major minor version
|
||||
v := strings.Split(version, ".")
|
||||
if len(v) == 0 || len(v) > 3 {
|
||||
return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
|
||||
}
|
||||
|
||||
// Default the versions to 0.
|
||||
var majorVersion, minorVersion, patchLevel int
|
||||
|
||||
majorVersion, err := strconv.Atoi(v[0])
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if len(v) > 1 {
|
||||
minorVersion, err = strconv.Atoi(v[1])
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
if len(v) > 2 {
|
||||
patchLevel, err = strconv.Atoi(v[2])
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
|
||||
// major*10^5 + minor*10^3 + patch*10^0
|
||||
numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
|
||||
return numericVersion, nil
|
||||
}
|
@ -1,14 +0,0 @@
|
||||
package apparmor
|
||||
|
||||
const (
|
||||
// DefaultApparmorProfile is the name of the default apparmor profile.
|
||||
DefaultApparmorProfile = "crio-default"
|
||||
|
||||
// ContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container profile.
|
||||
ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
|
||||
|
||||
// ProfileRuntimeDefault is he profile specifying the runtime default.
|
||||
ProfileRuntimeDefault = "runtime/default"
|
||||
// ProfileNamePrefix is the prefix for specifying profiles loaded on the node.
|
||||
ProfileNamePrefix = "localhost/"
|
||||
)
|
@ -1,145 +0,0 @@
|
||||
// +build apparmor
|
||||
|
||||
package apparmor
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/utils/templates"
|
||||
"github.com/opencontainers/runc/libcontainer/apparmor"
|
||||
)
|
||||
|
||||
const (
|
||||
// profileDirectory is the file store for apparmor profiles and macros.
|
||||
profileDirectory = "/etc/apparmor.d"
|
||||
)
|
||||
|
||||
// profileData holds information about the given profile for generation.
|
||||
type profileData struct {
|
||||
// Name is the profile name.
|
||||
Name string
|
||||
// Imports defines the apparmor functions to import, before defining the profile.
|
||||
Imports []string
|
||||
// InnerImports defines the apparmor functions to import in the profile.
|
||||
InnerImports []string
|
||||
// Version is the {major, minor, patch} version of apparmor_parser as a single number.
|
||||
Version int
|
||||
}
|
||||
|
||||
// EnsureDefaultApparmorProfile loads the default apparmor profile if it is not already loaded.
|
||||
func EnsureDefaultApparmorProfile() error {
|
||||
if apparmor.IsEnabled() {
|
||||
loaded, err := IsLoaded(DefaultApparmorProfile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", DefaultApparmorProfile, err)
|
||||
}
|
||||
|
||||
// Nothing to do.
|
||||
if loaded {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load the profile.
|
||||
if err := InstallDefault(DefaultApparmorProfile); err != nil {
|
||||
return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", DefaultApparmorProfile)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEnabled returns true if apparmor is enabled for the host.
|
||||
func IsEnabled() bool {
|
||||
return apparmor.IsEnabled()
|
||||
}
|
||||
|
||||
// GetProfileNameFromPodAnnotations gets the name of the profile to use with the container from
|
||||
// pod annotations
|
||||
func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
|
||||
return annotations[ContainerAnnotationKeyPrefix+containerName]
|
||||
}
|
||||
|
||||
// InstallDefault generates a default profile in a temp directory determined by
|
||||
// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'.
|
||||
func InstallDefault(name string) error {
|
||||
p := profileData{
|
||||
Name: name,
|
||||
}
|
||||
|
||||
// Install to a temporary directory.
|
||||
f, err := ioutil.TempFile("", name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if err := p.generateDefault(f); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return LoadProfile(f.Name())
|
||||
}
|
||||
|
||||
// IsLoaded checks if a profile with the given name has been loaded into the
|
||||
// kernel.
|
||||
func IsLoaded(name string) (bool, error) {
|
||||
file, err := os.Open("/sys/kernel/security/apparmor/profiles")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
r := bufio.NewReader(file)
|
||||
for {
|
||||
p, err := r.ReadString('\n')
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if strings.HasPrefix(p, name+" ") {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// generateDefault creates an apparmor profile from ProfileData.
|
||||
func (p *profileData) generateDefault(out io.Writer) error {
|
||||
compiled, err := templates.NewParse("apparmor_profile", baseTemplate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if macroExists("tunables/global") {
|
||||
p.Imports = append(p.Imports, "#include <tunables/global>")
|
||||
} else {
|
||||
p.Imports = append(p.Imports, "@{PROC}=/proc/")
|
||||
}
|
||||
|
||||
if macroExists("abstractions/base") {
|
||||
p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
|
||||
}
|
||||
|
||||
ver, err := GetVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Version = ver
|
||||
|
||||
return compiled.Execute(out, p)
|
||||
}
|
||||
|
||||
// macroExists checks if the passed macro exists.
|
||||
func macroExists(m string) bool {
|
||||
_, err := os.Stat(path.Join(profileDirectory, m))
|
||||
return err == nil
|
||||
}
|
@ -1,18 +0,0 @@
|
||||
// +build !apparmor
|
||||
|
||||
package apparmor
|
||||
|
||||
// IsEnabled returns false when built without the apparmor build tag.
|
||||
func IsEnabled() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// EnsureDefaultApparmorProfile does nothing when built without the apparmor build tag.
|
||||
func EnsureDefaultApparmorProfile() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetProfileNameFromPodAnnotations returns an empty string when built without the apparmor build tag.
|
||||
func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
|
||||
return ""
|
||||
}
|
@ -1,45 +0,0 @@
|
||||
// +build apparmor
|
||||
|
||||
package apparmor
|
||||
|
||||
// baseTemplate defines the default apparmor profile for containers.
|
||||
const baseTemplate = `
|
||||
{{range $value := .Imports}}
|
||||
{{$value}}
|
||||
{{end}}
|
||||
|
||||
profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
|
||||
{{range $value := .InnerImports}}
|
||||
{{$value}}
|
||||
{{end}}
|
||||
|
||||
network,
|
||||
capability,
|
||||
file,
|
||||
umount,
|
||||
|
||||
deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
|
||||
# deny write to files not in /proc/<number>/** or /proc/sys/**
|
||||
deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
|
||||
deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
|
||||
deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/
|
||||
deny @{PROC}/sysrq-trigger rwklx,
|
||||
deny @{PROC}/mem rwklx,
|
||||
deny @{PROC}/kmem rwklx,
|
||||
deny @{PROC}/kcore rwklx,
|
||||
|
||||
deny mount,
|
||||
|
||||
deny /sys/[^f]*/** wklx,
|
||||
deny /sys/f[^s]*/** wklx,
|
||||
deny /sys/fs/[^c]*/** wklx,
|
||||
deny /sys/fs/c[^g]*/** wklx,
|
||||
deny /sys/fs/cg[^r]*/** wklx,
|
||||
deny /sys/firmware/** rwklx,
|
||||
deny /sys/kernel/security/** rwklx,
|
||||
|
||||
{{if ge .Version 208095}}
|
||||
ptrace (trace,read) peer={{.Name}},
|
||||
{{end}}
|
||||
}
|
||||
`
|
112
server/config.go
@ -1,112 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/kubernetes-incubator/cri-o/libkpod"
|
||||
)
|
||||
|
||||
// CrioConfigPath is the default location for the conf file
|
||||
const CrioConfigPath = "/etc/crio/crio.conf"
|
||||
|
||||
// Config represents the entire set of configuration values that can be set for
|
||||
// the server. This is intended to be loaded from a toml-encoded config file.
|
||||
type Config struct {
|
||||
libkpod.Config
|
||||
APIConfig
|
||||
}
|
||||
|
||||
// APIConfig represents the "crio.api" TOML config table.
|
||||
type APIConfig struct {
|
||||
// Listen is the path to the AF_LOCAL socket on which cri-o will listen.
|
||||
// This may support proto://addr formats later, but currently this is just
|
||||
// a path.
|
||||
Listen string `toml:"listen"`
|
||||
|
||||
// StreamAddress is the IP address on which the stream server will listen.
|
||||
StreamAddress string `toml:"stream_address"`
|
||||
|
||||
// StreamPort is the port on which the stream server will listen.
|
||||
StreamPort string `toml:"stream_port"`
|
||||
}
|
||||
|
||||
// tomlConfig is another way of looking at a Config, which is
|
||||
// TOML-friendly (it has all of the explicit tables). It's just used for
|
||||
// conversions.
|
||||
type tomlConfig struct {
|
||||
Crio struct {
|
||||
libkpod.RootConfig
|
||||
API struct{ APIConfig } `toml:"api"`
|
||||
Runtime struct{ libkpod.RuntimeConfig } `toml:"runtime"`
|
||||
Image struct{ libkpod.ImageConfig } `toml:"image"`
|
||||
Network struct{ libkpod.NetworkConfig } `toml:"network"`
|
||||
} `toml:"crio"`
|
||||
}
|
||||
|
||||
func (t *tomlConfig) toConfig(c *Config) {
|
||||
c.RootConfig = t.Crio.RootConfig
|
||||
c.APIConfig = t.Crio.API.APIConfig
|
||||
c.RuntimeConfig = t.Crio.Runtime.RuntimeConfig
|
||||
c.ImageConfig = t.Crio.Image.ImageConfig
|
||||
c.NetworkConfig = t.Crio.Network.NetworkConfig
|
||||
}
|
||||
|
||||
func (t *tomlConfig) fromConfig(c *Config) {
|
||||
t.Crio.RootConfig = c.RootConfig
|
||||
t.Crio.API.APIConfig = c.APIConfig
|
||||
t.Crio.Runtime.RuntimeConfig = c.RuntimeConfig
|
||||
t.Crio.Image.ImageConfig = c.ImageConfig
|
||||
t.Crio.Network.NetworkConfig = c.NetworkConfig
|
||||
}
|
||||
|
||||
// UpdateFromFile populates the Config from the TOML-encoded file at the given path.
|
||||
// Returns errors encountered when reading or parsing the files, or nil
|
||||
// otherwise.
|
||||
func (c *Config) UpdateFromFile(path string) error {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t := new(tomlConfig)
|
||||
t.fromConfig(c)
|
||||
|
||||
_, err = toml.Decode(string(data), t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.toConfig(c)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToFile outputs the given Config as a TOML-encoded file at the given path.
|
||||
// Returns errors encountered when generating or writing the file, or nil
|
||||
// otherwise.
|
||||
func (c *Config) ToFile(path string) error {
|
||||
var w bytes.Buffer
|
||||
e := toml.NewEncoder(&w)
|
||||
|
||||
t := new(tomlConfig)
|
||||
t.fromConfig(c)
|
||||
|
||||
if err := e.Encode(*t); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path, w.Bytes(), 0644)
|
||||
}
|
||||
|
||||
// DefaultConfig returns the default configuration for crio.
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Config: *libkpod.DefaultConfig(),
|
||||
APIConfig: APIConfig{
|
||||
Listen: "/var/run/crio.sock",
|
||||
StreamAddress: "",
|
||||
StreamPort: "10010",
|
||||
},
|
||||
}
|
||||
}
|
@ -1,147 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/kubernetes-incubator/cri-o/utils"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/sys/unix"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
/* Sync with stdpipe_t in conmon.c */
|
||||
const (
|
||||
AttachPipeStdin = 1
|
||||
AttachPipeStdout = 2
|
||||
AttachPipeStderr = 3
|
||||
)
|
||||
|
||||
// Attach prepares a streaming endpoint to attach to a running container.
|
||||
func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachResponse, error) {
|
||||
logrus.Debugf("AttachRequest %+v", req)
|
||||
|
||||
resp, err := s.GetAttach(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to prepare attach endpoint")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Attach endpoint for streaming.Runtime
|
||||
func (ss streamService) Attach(containerID string, inputStream io.Reader, outputStream, errorStream io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
c := ss.runtimeServer.GetContainer(containerID)
|
||||
|
||||
if c == nil {
|
||||
return fmt.Errorf("could not find container %q", containerID)
|
||||
}
|
||||
|
||||
if err := ss.runtimeServer.Runtime().UpdateStatus(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cState := ss.runtimeServer.Runtime().ContainerStatus(c)
|
||||
if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
|
||||
return fmt.Errorf("container is not created or running")
|
||||
}
|
||||
|
||||
controlPath := filepath.Join(c.BundlePath(), "ctl")
|
||||
controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open container ctl file: %v", err)
|
||||
}
|
||||
|
||||
kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
|
||||
logrus.Infof("Got a resize event: %+v", size)
|
||||
_, err := fmt.Fprintf(controlFile, "%d %d %d\n", 1, size.Height, size.Width)
|
||||
if err != nil {
|
||||
logrus.Infof("Failed to write to control file to resize terminal: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
attachSocketPath := filepath.Join(oci.ContainerAttachSocketDir, c.ID(), "attach")
|
||||
conn, err := net.DialUnix("unixpacket", nil, &net.UnixAddr{Name: attachSocketPath, Net: "unixpacket"})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to container %s attach socket: %v", c.ID(), err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
receiveStdout := make(chan error)
|
||||
if outputStream != nil || errorStream != nil {
|
||||
go func() {
|
||||
receiveStdout <- redirectResponseToOutputStreams(outputStream, errorStream, conn)
|
||||
}()
|
||||
}
|
||||
|
||||
stdinDone := make(chan error)
|
||||
go func() {
|
||||
var err error
|
||||
if inputStream != nil {
|
||||
_, err = utils.CopyDetachable(conn, inputStream, nil)
|
||||
conn.CloseWrite()
|
||||
}
|
||||
stdinDone <- err
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-receiveStdout:
|
||||
return err
|
||||
case err := <-stdinDone:
|
||||
if _, ok := err.(utils.DetachError); ok {
|
||||
return nil
|
||||
}
|
||||
if outputStream != nil || errorStream != nil {
|
||||
return <-receiveStdout
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, conn io.Reader) error {
|
||||
var err error
|
||||
buf := make([]byte, 8192+1) /* Sync with conmon STDIO_BUF_SIZE */
|
||||
|
||||
for {
|
||||
nr, er := conn.Read(buf)
|
||||
if nr > 0 {
|
||||
var dst io.Writer
|
||||
if buf[0] == AttachPipeStdout {
|
||||
dst = outputStream
|
||||
} else if buf[0] == AttachPipeStderr {
|
||||
dst = errorStream
|
||||
} else {
|
||||
logrus.Infof("Got unexpected attach type %+d", buf[0])
|
||||
}
|
||||
|
||||
if dst != nil {
|
||||
nw, ew := dst.Write(buf[1:nr])
|
||||
if ew != nil {
|
||||
err = ew
|
||||
break
|
||||
}
|
||||
if nr != nw+1 {
|
||||
err = io.ErrShortWrite
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if er == io.EOF {
|
||||
break
|
||||
}
|
||||
if er != nil {
|
||||
err = er
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
File diff suppressed because it is too large
@ -1,108 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
utilexec "k8s.io/kubernetes/pkg/util/exec"
|
||||
"k8s.io/kubernetes/pkg/util/term"
|
||||
)
|
||||
|
||||
// Exec prepares a streaming endpoint to execute a command in the container.
|
||||
func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
|
||||
logrus.Debugf("ExecRequest %+v", req)
|
||||
|
||||
resp, err := s.GetExec(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to prepare exec endpoint")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Exec endpoint for streaming.Runtime
|
||||
func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
|
||||
c := ss.runtimeServer.GetContainer(containerID)
|
||||
|
||||
if c == nil {
|
||||
return fmt.Errorf("could not find container %q", containerID)
|
||||
}
|
||||
|
||||
if err := ss.runtimeServer.Runtime().UpdateStatus(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cState := ss.runtimeServer.Runtime().ContainerStatus(c)
|
||||
if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
|
||||
return fmt.Errorf("container is not created or running")
|
||||
}
|
||||
|
||||
args := []string{"exec"}
|
||||
if tty {
|
||||
args = append(args, "-t")
|
||||
}
|
||||
args = append(args, c.ID())
|
||||
args = append(args, cmd...)
|
||||
execCmd := exec.Command(ss.runtimeServer.Runtime().Path(c), args...)
|
||||
var cmdErr error
|
||||
if tty {
|
||||
p, err := kubecontainer.StartPty(execCmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer p.Close()
|
||||
|
||||
// make sure to close the stdout stream
|
||||
defer stdout.Close()
|
||||
|
||||
kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
|
||||
term.SetSize(p.Fd(), size)
|
||||
})
|
||||
|
||||
if stdin != nil {
|
||||
go pools.Copy(p, stdin)
|
||||
}
|
||||
|
||||
if stdout != nil {
|
||||
go pools.Copy(stdout, p)
|
||||
}
|
||||
|
||||
cmdErr = execCmd.Wait()
|
||||
} else {
|
||||
if stdin != nil {
|
||||
// Use an os.Pipe here as it returns true *os.File objects.
|
||||
// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
|
||||
// the call below to execCmd.Run() can unblock because its Stdin is the read half
|
||||
// of the pipe.
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go pools.Copy(w, stdin)
|
||||
|
||||
execCmd.Stdin = r
|
||||
}
|
||||
if stdout != nil {
|
||||
execCmd.Stdout = stdout
|
||||
}
|
||||
if stderr != nil {
|
||||
execCmd.Stderr = stderr
|
||||
}
|
||||
|
||||
cmdErr = execCmd.Run()
|
||||
}
|
||||
|
||||
if exitErr, ok := cmdErr.(*exec.ExitError); ok {
|
||||
return &utilexec.ExitErrorWrapper{ExitError: exitErr}
|
||||
}
|
||||
return cmdErr
|
||||
}
|
@ -1,46 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// ExecSync runs a command in a container synchronously.
|
||||
func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) {
|
||||
logrus.Debugf("ExecSyncRequest %+v", req)
|
||||
c, err := s.GetContainerFromRequest(req.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = s.Runtime().UpdateStatus(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cState := s.Runtime().ContainerStatus(c)
|
||||
if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
|
||||
return nil, fmt.Errorf("container is not created or running")
|
||||
}
|
||||
|
||||
cmd := req.Cmd
|
||||
if cmd == nil {
|
||||
return nil, fmt.Errorf("exec command cannot be empty")
|
||||
}
|
||||
|
||||
execResp, err := s.Runtime().ExecSync(c, cmd, req.Timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp := &pb.ExecSyncResponse{
|
||||
Stdout: execResp.Stdout,
|
||||
Stderr: execResp.Stderr,
|
||||
ExitCode: execResp.ExitCode,
|
||||
}
|
||||
|
||||
logrus.Debugf("ExecSyncResponse: %+v", resp)
|
||||
return resp, nil
|
||||
}
|
@ -1,112 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// filterContainer returns whether passed container matches filtering criteria
|
||||
func filterContainer(c *pb.Container, filter *pb.ContainerFilter) bool {
|
||||
if filter != nil {
|
||||
if filter.State != nil {
|
||||
if c.State != filter.State.State {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if filter.LabelSelector != nil {
|
||||
sel := fields.SelectorFromSet(filter.LabelSelector)
|
||||
if !sel.Matches(fields.Set(c.Labels)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ListContainers lists all containers by filters.
|
||||
func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersRequest) (*pb.ListContainersResponse, error) {
|
||||
logrus.Debugf("ListContainersRequest %+v", req)
|
||||
var ctrs []*pb.Container
|
||||
filter := req.Filter
|
||||
ctrList, err := s.ContainerServer.ListContainers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Filter using container id and pod id first.
|
||||
if filter.Id != "" {
|
||||
id, err := s.CtrIDIndex().Get(filter.Id)
|
||||
if err != nil {
|
||||
// If we don't find a container ID with a filter, it should not
|
||||
// be considered an error. Log a warning and return an empty struct
|
||||
logrus.Warn("unable to find container ID %s", filter.Id)
|
||||
return &pb.ListContainersResponse{}, nil
|
||||
}
|
||||
c := s.ContainerServer.GetContainer(id)
|
||||
if c != nil {
|
||||
if filter.PodSandboxId != "" {
|
||||
if c.Sandbox() == filter.PodSandboxId {
|
||||
ctrList = []*oci.Container{c}
|
||||
} else {
|
||||
ctrList = []*oci.Container{}
|
||||
}
|
||||
|
||||
} else {
|
||||
ctrList = []*oci.Container{c}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if filter.PodSandboxId != "" {
|
||||
pod := s.ContainerServer.GetSandbox(filter.PodSandboxId)
|
||||
if pod == nil {
|
||||
ctrList = []*oci.Container{}
|
||||
} else {
|
||||
ctrList = pod.Containers().List()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, ctr := range ctrList {
|
||||
podSandboxID := ctr.Sandbox()
|
||||
cState := s.Runtime().ContainerStatus(ctr)
|
||||
created := cState.Created.UnixNano()
|
||||
rState := pb.ContainerState_CONTAINER_UNKNOWN
|
||||
cID := ctr.ID()
|
||||
img := &pb.ImageSpec{
|
||||
Image: ctr.Image(),
|
||||
}
|
||||
c := &pb.Container{
|
||||
Id: cID,
|
||||
PodSandboxId: podSandboxID,
|
||||
CreatedAt: created,
|
||||
Labels: ctr.Labels(),
|
||||
Metadata: ctr.Metadata(),
|
||||
Annotations: ctr.Annotations(),
|
||||
Image: img,
|
||||
}
|
||||
|
||||
switch cState.Status {
|
||||
case oci.ContainerStateCreated:
|
||||
rState = pb.ContainerState_CONTAINER_CREATED
|
||||
case oci.ContainerStateRunning:
|
||||
rState = pb.ContainerState_CONTAINER_RUNNING
|
||||
case oci.ContainerStateStopped:
|
||||
rState = pb.ContainerState_CONTAINER_EXITED
|
||||
}
|
||||
c.State = rState
|
||||
|
||||
// Filter by other criteria such as state and labels.
|
||||
if filterContainer(c, req.Filter) {
|
||||
ctrs = append(ctrs, c)
|
||||
}
|
||||
}
|
||||
|
||||
resp := &pb.ListContainersResponse{
|
||||
Containers: ctrs,
|
||||
}
|
||||
logrus.Debugf("ListContainersResponse: %+v", resp)
|
||||
return resp, nil
|
||||
}
|
@ -1,91 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
|
||||
func (s *Server) PortForward(ctx context.Context, req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
|
||||
logrus.Debugf("PortForwardRequest %+v", req)
|
||||
|
||||
resp, err := s.GetPortForward(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to prepare portforward endpoint")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (ss streamService) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
|
||||
c := ss.runtimeServer.GetSandboxContainer(podSandboxID)
|
||||
|
||||
if c == nil {
|
||||
return fmt.Errorf("could not find container for sandbox %q", podSandboxID)
|
||||
}
|
||||
|
||||
if err := ss.runtimeServer.Runtime().UpdateStatus(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cState := ss.runtimeServer.Runtime().ContainerStatus(c)
|
||||
if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
|
||||
return fmt.Errorf("container is not created or running")
|
||||
}
|
||||
|
||||
containerPid := cState.Pid
|
||||
socatPath, lookupErr := exec.LookPath("socat")
|
||||
if lookupErr != nil {
|
||||
return fmt.Errorf("unable to do port forwarding: socat not found")
|
||||
}
|
||||
|
||||
args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", socatPath, "-", fmt.Sprintf("TCP4:localhost:%d", port)}
|
||||
|
||||
nsenterPath, lookupErr := exec.LookPath("nsenter")
|
||||
if lookupErr != nil {
|
||||
return fmt.Errorf("unable to do port forwarding: nsenter not found")
|
||||
}
|
||||
|
||||
commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " "))
|
||||
logrus.Debugf("executing port forwarding command: %s", commandString)
|
||||
|
||||
command := exec.Command(nsenterPath, args...)
|
||||
command.Stdout = stream
|
||||
|
||||
stderr := new(bytes.Buffer)
|
||||
command.Stderr = stderr
|
||||
|
||||
// If we use Stdin, command.Run() won't return until the goroutine that's copying
|
||||
// from stream finishes. Unfortunately, if you have a client like telnet connected
|
||||
// via port forwarding, as long as the user's telnet client is connected to the user's
|
||||
// local listener that port forwarding sets up, the telnet session never exits. This
|
||||
// means that even if socat has finished running, command.Run() won't ever return
|
||||
// (because the client still has the connection and stream open).
|
||||
//
|
||||
// The workaround is to use StdinPipe(), as Wait() (called by Run()) closes the pipe
|
||||
// when the command (socat) exits.
|
||||
inPipe, err := command.StdinPipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to do port forwarding: error creating stdin pipe: %v", err)
|
||||
}
|
||||
go func() {
|
||||
pools.Copy(inPipe, stream)
|
||||
inPipe.Close()
|
||||
}()
|
||||
|
||||
if err := command.Run(); err != nil {
|
||||
return fmt.Errorf("%v: %s", err, stderr.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -1,20 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// RemoveContainer removes the container. If the container is running, the container
|
||||
// should be force removed.
|
||||
func (s *Server) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*pb.RemoveContainerResponse, error) {
|
||||
_, err := s.ContainerServer.Remove(ctx, req.ContainerId, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := &pb.RemoveContainerResponse{}
|
||||
logrus.Debugf("RemoveContainerResponse: %+v", resp)
|
||||
return resp, nil
|
||||
}
|
@ -1,43 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kubernetes-incubator/cri-o/oci"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// StartContainer starts the container.
|
||||
func (s *Server) StartContainer(ctx context.Context, req *pb.StartContainerRequest) (*pb.StartContainerResponse, error) {
|
||||
logrus.Debugf("StartContainerRequest %+v", req)
|
||||
c, err := s.GetContainerFromRequest(req.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state := s.Runtime().ContainerStatus(c)
|
||||
if state.Status != oci.ContainerStateCreated {
|
||||
return nil, fmt.Errorf("container %s is not in created state: %s", c.ID(), state.Status)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// if the call to StartContainer fails below we still want to fill
|
||||
// some fields of a container status. In particular, we're going to
|
||||
// adjust container started/finished time and set an error to be
|
||||
// returned in the Reason field for container status call.
|
||||
if err != nil {
|
||||
s.Runtime().SetStartFailed(c, err)
|
||||
}
|
||||
s.ContainerStateToDisk(c)
|
||||
}()
|
||||
|
||||
err = s.Runtime().StartContainer(c)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start container %s: %v", c.ID(), err)
|
||||
}
|
||||
|
||||
resp := &pb.StartContainerResponse{}
|
||||
logrus.Debugf("StartContainerResponse %+v", resp)
|
||||
return resp, nil
|
||||
}
|
@ -1,14 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// ContainerStats returns stats of the container. If the container does not
|
||||
// exist, the call returns an error.
|
||||
func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (*pb.ContainerStatsResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
@ -1,13 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
// ListContainerStats returns stats of all running containers.
|
||||
func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
@ -1,102 +0,0 @@
package server

import (
	"github.com/kubernetes-incubator/cri-o/oci"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

const (
	oomKilledReason = "OOMKilled"
	completedReason = "Completed"
	errorReason     = "Error"
)

// ContainerStatus returns status of the container.
func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusRequest) (*pb.ContainerStatusResponse, error) {
	logrus.Debugf("ContainerStatusRequest %+v", req)
	c, err := s.GetContainerFromRequest(req.ContainerId)
	if err != nil {
		return nil, err
	}

	containerID := c.ID()
	resp := &pb.ContainerStatusResponse{
		Status: &pb.ContainerStatus{
			Id:          containerID,
			Metadata:    c.Metadata(),
			Labels:      c.Labels(),
			Annotations: c.Annotations(),
			ImageRef:    c.ImageRef(),
		},
	}
	resp.Status.Image = &pb.ImageSpec{Image: c.ImageName()}

	mounts := []*pb.Mount{}
	for _, cv := range c.Volumes() {
		mounts = append(mounts, &pb.Mount{
			ContainerPath: cv.ContainerPath,
			HostPath:      cv.HostPath,
			Readonly:      cv.Readonly,
		})
	}
	resp.Status.Mounts = mounts

	cState := s.Runtime().ContainerStatus(c)
	rStatus := pb.ContainerState_CONTAINER_UNKNOWN

	imageName := c.Image()
	status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), imageName)
	if err != nil {
		return nil, err
	}

	resp.Status.ImageRef = status.ImageRef

	// If we defaulted to exit code -1 earlier then we attempt to
	// get the exit code from the exit file again.
	if cState.ExitCode == -1 {
		err := s.Runtime().UpdateStatus(c)
		if err != nil {
			logrus.Warnf("Failed to UpdateStatus of container %s: %v", c.ID(), err)
		}
		cState = s.Runtime().ContainerStatus(c)
	}

	switch cState.Status {
	case oci.ContainerStateCreated:
		rStatus = pb.ContainerState_CONTAINER_CREATED
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = created
	case oci.ContainerStateRunning:
		rStatus = pb.ContainerState_CONTAINER_RUNNING
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = created
		started := cState.Started.UnixNano()
		resp.Status.StartedAt = started
	case oci.ContainerStateStopped:
		rStatus = pb.ContainerState_CONTAINER_EXITED
		created := cState.Created.UnixNano()
		resp.Status.CreatedAt = created
		started := cState.Started.UnixNano()
		resp.Status.StartedAt = started
		finished := cState.Finished.UnixNano()
		resp.Status.FinishedAt = finished
		resp.Status.ExitCode = cState.ExitCode
		switch {
		case cState.OOMKilled:
			resp.Status.Reason = oomKilledReason
		case cState.ExitCode == 0:
			resp.Status.Reason = completedReason
		default:
			resp.Status.Reason = errorReason
			resp.Status.Message = cState.Error
		}
	}

	resp.Status.State = rStatus

	logrus.Debugf("ContainerStatusResponse: %+v", resp)
	return resp, nil
}
@ -1,19 +0,0 @@
package server

import (
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// StopContainer stops a running container with a grace period (i.e., timeout).
func (s *Server) StopContainer(ctx context.Context, req *pb.StopContainerRequest) (*pb.StopContainerResponse, error) {
	_, err := s.ContainerServer.ContainerStop(ctx, req.ContainerId, req.Timeout)
	if err != nil {
		return nil, err
	}

	resp := &pb.StopContainerResponse{}
	logrus.Debugf("StopContainerResponse %s: %+v", req.ContainerId, resp)
	return resp, nil
}
@ -1,11 +0,0 @@
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// UpdateRuntimeConfig updates the runtime configuration based on the given request.
func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeConfigRequest) (*pb.UpdateRuntimeConfigResponse, error) {
	return &pb.UpdateRuntimeConfigResponse{}, nil
}
@ -1,13 +0,0 @@
package server

import (
	"fmt"

	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// ImageFsInfo returns information of the filesystem that is used to store images.
func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (*pb.ImageFsInfoResponse, error) {
	return nil, fmt.Errorf("not implemented")
}
@ -1,41 +0,0 @@
package server

import (
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// ListImages lists existing images.
func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (*pb.ListImagesResponse, error) {
	logrus.Debugf("ListImagesRequest: %+v", req)
	filter := ""
	reqFilter := req.GetFilter()
	if reqFilter != nil {
		filterImage := reqFilter.GetImage()
		if filterImage != nil {
			filter = filterImage.Image
		}
	}
	results, err := s.StorageImageServer().ListImages(s.ImageContext(), filter)
	if err != nil {
		return nil, err
	}
	response := pb.ListImagesResponse{}
	for _, result := range results {
		if result.Size != nil {
			response.Images = append(response.Images, &pb.Image{
				Id:       result.ID,
				RepoTags: result.Names,
				Size_:    *result.Size,
			})
		} else {
			response.Images = append(response.Images, &pb.Image{
				Id:       result.ID,
				RepoTags: result.Names,
			})
		}
	}
	logrus.Debugf("ListImagesResponse: %+v", response)
	return &response, nil
}
@ -1,108 +0,0 @@
package server

import (
	"encoding/base64"
	"strings"

	"github.com/containers/image/copy"
	"github.com/containers/image/types"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// PullImage pulls an image with authentication config.
func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (*pb.PullImageResponse, error) {
	logrus.Debugf("PullImageRequest: %+v", req)
	// TODO: what else do we need here? (Signatures when the story isn't just pulling from docker://)
	image := ""
	img := req.GetImage()
	if img != nil {
		image = img.Image
	}

	var (
		images []string
		pulled string
		err    error
	)
	images, err = s.StorageImageServer().ResolveNames(image)
	if err != nil {
		return nil, err
	}
	for _, img := range images {
		var (
			username string
			password string
		)
		if req.GetAuth() != nil {
			username = req.GetAuth().Username
			password = req.GetAuth().Password
			if req.GetAuth().Auth != "" {
				username, password, err = decodeDockerAuth(req.GetAuth().Auth)
				if err != nil {
					logrus.Debugf("error decoding authentication for image %s: %v", img, err)
					continue
				}
			}
		}
		options := &copy.Options{
			SourceCtx: &types.SystemContext{},
		}
		// Specifying a username indicates the user intends to send authentication to the registry.
		if username != "" {
			options.SourceCtx = &types.SystemContext{
				DockerAuthConfig: &types.DockerAuthConfig{
					Username: username,
					Password: password,
				},
			}
		}

		var canPull bool
		canPull, err = s.StorageImageServer().CanPull(img, options)
		if err != nil && !canPull {
			logrus.Debugf("error checking image %s: %v", img, err)
			continue
		}

		// let's be smart, docker doesn't repull if image already exists.
		_, err = s.StorageImageServer().ImageStatus(s.ImageContext(), img)
		if err == nil {
			logrus.Debugf("image %s already in store, skipping pull", img)
			pulled = img
			break
		}

		_, err = s.StorageImageServer().PullImage(s.ImageContext(), img, options)
		if err != nil {
			logrus.Debugf("error pulling image %s: %v", img, err)
			continue
		}
		pulled = img
		break
	}
	if pulled == "" && err != nil {
		return nil, err
	}
	resp := &pb.PullImageResponse{
		ImageRef: pulled,
	}
	logrus.Debugf("PullImageResponse: %+v", resp)
	return resp, nil
}

// decodeDockerAuth decodes a base64-encoded "username:password" credential string.
func decodeDockerAuth(s string) (string, string, error) {
	decoded, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		// if it's invalid just skip, as docker does
		return "", "", nil
	}
	user := parts[0]
	password := strings.Trim(parts[1], "\x00")
	return user, password, nil
}
@ -1,52 +0,0 @@
package server

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// RemoveImage removes the image.
func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (*pb.RemoveImageResponse, error) {
	logrus.Debugf("RemoveImageRequest: %+v", req)
	image := ""
	img := req.GetImage()
	if img != nil {
		image = img.Image
	}
	if image == "" {
		return nil, fmt.Errorf("no image specified")
	}
	var (
		images  []string
		err     error
		deleted bool
	)
	images, err = s.StorageImageServer().ResolveNames(image)
	if err != nil {
		// This means we got an image ID
		if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") {
			images = append(images, image)
		} else {
			return nil, err
		}
	}
	for _, img := range images {
		err = s.StorageImageServer().UntagImage(s.ImageContext(), img)
		if err != nil {
			logrus.Debugf("error deleting image %s: %v", img, err)
			continue
		}
		deleted = true
		break
	}
	if !deleted && err != nil {
		return nil, err
	}
	resp := &pb.RemoveImageResponse{}
	logrus.Debugf("RemoveImageResponse: %+v", resp)
	return resp, nil
}
@ -1,53 +0,0 @@
package server

import (
	"fmt"
	"strings"

	"github.com/containers/storage"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// ImageStatus returns the status of the image.
func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (*pb.ImageStatusResponse, error) {
	logrus.Debugf("ImageStatusRequest: %+v", req)
	image := ""
	img := req.GetImage()
	if img != nil {
		image = img.Image
	}
	if image == "" {
		return nil, fmt.Errorf("no image specified")
	}
	images, err := s.StorageImageServer().ResolveNames(image)
	if err != nil {
		// This means we got an image ID
		if strings.Contains(err.Error(), "cannot specify 64-byte hexadecimal strings") {
			images = append(images, image)
		} else {
			return nil, err
		}
	}
	// match just the first registry as that's what kube meant
	image = images[0]
	status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), image)
	if err != nil {
		if errors.Cause(err) == storage.ErrImageUnknown {
			return &pb.ImageStatusResponse{}, nil
		}
		return nil, err
	}
	resp := &pb.ImageStatusResponse{
		Image: &pb.Image{
			Id:          status.ID,
			RepoTags:    status.Names,
			RepoDigests: status.Digests,
			Size_:       *status.Size,
		},
	}
	logrus.Debugf("ImageStatusResponse: %+v", resp)
	return resp, nil
}
@ -1,105 +0,0 @@
package server

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"

	"github.com/go-zoo/bone"
	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
	"github.com/kubernetes-incubator/cri-o/oci"
	"github.com/kubernetes-incubator/cri-o/types"
	"github.com/sirupsen/logrus"
)

func (s *Server) getInfo() types.CrioInfo {
	return types.CrioInfo{
		StorageDriver: s.config.Config.Storage,
		StorageRoot:   s.config.Config.Root,
		CgroupDriver:  s.config.Config.CgroupManager,
	}
}

var (
	errCtrNotFound     = errors.New("container not found")
	errCtrStateNil     = errors.New("container state is nil")
	errSandboxNotFound = errors.New("sandbox for container not found")
)

func (s *Server) getContainerInfo(id string, getContainerFunc func(id string) *oci.Container, getInfraContainerFunc func(id string) *oci.Container, getSandboxFunc func(id string) *sandbox.Sandbox) (types.ContainerInfo, error) {
	ctr := getContainerFunc(id)
	if ctr == nil {
		ctr = getInfraContainerFunc(id)
		if ctr == nil {
			return types.ContainerInfo{}, errCtrNotFound
		}
	}
	// TODO(mrunalp): should we call UpdateStatus()?
	ctrState := ctr.State()
	if ctrState == nil {
		return types.ContainerInfo{}, errCtrStateNil
	}
	sb := getSandboxFunc(ctr.Sandbox())
	if sb == nil {
		logrus.Debugf("can't find sandbox %s for container %s", ctr.Sandbox(), id)
		return types.ContainerInfo{}, errSandboxNotFound
	}
	return types.ContainerInfo{
		Name:            ctr.Name(),
		Pid:             ctrState.Pid,
		Image:           ctr.Image(),
		CreatedTime:     ctrState.Created.UnixNano(),
		Labels:          ctr.Labels(),
		Annotations:     ctr.Annotations(),
		CrioAnnotations: ctr.CrioAnnotations(),
		Root:            ctr.MountPoint(),
		LogPath:         ctr.LogPath(),
		Sandbox:         ctr.Sandbox(),
		IP:              sb.IP(),
	}, nil
}

// GetInfoMux returns the mux used to serve info requests
func (s *Server) GetInfoMux() *bone.Mux {
	mux := bone.New()

	mux.Get("/info", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ci := s.getInfo()
		js, err := json.Marshal(ci)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
	}))

	mux.Get("/containers/:id", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		containerID := bone.GetValue(req, "id")
		ci, err := s.getContainerInfo(containerID, s.GetContainer, s.getInfraContainer, s.getSandbox)
		if err != nil {
			switch err {
			case errCtrNotFound:
				http.Error(w, fmt.Sprintf("can't find the container with id %s", containerID), http.StatusNotFound)
			case errCtrStateNil:
				http.Error(w, fmt.Sprintf("can't find container state for container with id %s", containerID), http.StatusInternalServerError)
			case errSandboxNotFound:
				http.Error(w, fmt.Sprintf("can't find the sandbox for container id %s", containerID), http.StatusNotFound)
			default:
				http.Error(w, err.Error(), http.StatusInternalServerError)
			}
			return
		}
		js, err := json.Marshal(ci)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
	}))

	return mux
}
@ -1,235 +0,0 @@
package server

import (
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"

	"github.com/containernetworking/plugins/pkg/ns"
	"github.com/kubernetes-incubator/cri-o/libkpod"
	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
	"github.com/kubernetes-incubator/cri-o/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func TestGetInfo(t *testing.T) {
	c := libkpod.DefaultConfig()
	c.RootConfig.Storage = "afoobarstorage"
	c.RootConfig.Root = "afoobarroot"
	c.RuntimeConfig.CgroupManager = "systemd"
	apiConfig := APIConfig{}
	s := &Server{
		config: Config{*c, apiConfig},
	}
	ci := s.getInfo()
	if ci.CgroupDriver != "systemd" {
		t.Fatalf("expected 'systemd', got %q", ci.CgroupDriver)
	}
	if ci.StorageDriver != "afoobarstorage" {
		t.Fatalf("expected 'afoobarstorage', got %q", ci.StorageDriver)
	}
	if ci.StorageRoot != "afoobarroot" {
		t.Fatalf("expected 'afoobarroot', got %q", ci.StorageRoot)
	}
}

type mockNetNS struct {
}

func (ns mockNetNS) Close() error {
	return nil
}
func (ns mockNetNS) Fd() uintptr {
	ptr := new(uintptr)
	return *ptr
}
func (ns mockNetNS) Do(toRun func(ns.NetNS) error) error {
	return nil
}
func (ns mockNetNS) Set() error {
	return nil
}
func (ns mockNetNS) Path() string {
	return ""
}

func TestGetContainerInfo(t *testing.T) {
	s := &Server{}
	created := time.Now()
	labels := map[string]string{
		"io.kubernetes.container.name": "POD",
		"io.kubernetes.test2":          "value2",
		"io.kubernetes.test3":          "value3",
	}
	annotations := map[string]string{
		"io.kubernetes.test":  "value",
		"io.kubernetes.test1": "value1",
	}
	getContainerFunc := func(id string) *oci.Container {
		container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "imageName", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL")
		if err != nil {
			t.Fatal(err)
		}
		container.SetMountPoint("/var/foo/container")
		cstate := &oci.ContainerState{}
		cstate.State = specs.State{
			Pid: 42,
		}
		cstate.Created = created
		container.SetState(cstate)
		return container
	}
	getInfraContainerFunc := func(id string) *oci.Container {
		return nil
	}
	getSandboxFunc := func(id string) *sandbox.Sandbox {
		s := &sandbox.Sandbox{}
		s.AddIP("1.1.1.42")
		return s
	}
	ci, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc)
	if err != nil {
		t.Fatal(err)
	}
	if ci.CreatedTime != created.UnixNano() {
		t.Fatalf("expected same created time %d, got %d", created.UnixNano(), ci.CreatedTime)
	}
	if ci.Pid != 42 {
		t.Fatalf("expected pid 42, got %v", ci.Pid)
	}
	if ci.Name != "testname" {
		t.Fatalf("expected name testname, got %s", ci.Name)
	}
	if ci.Image != "imageName" {
		t.Fatalf("expected image name imageName, got %s", ci.Image)
	}
	if ci.Root != "/var/foo/container" {
		t.Fatalf("expected root to be /var/foo/container, got %s", ci.Root)
	}
	if ci.LogPath != "/container/logs" {
		t.Fatalf("expected log path to be /container/logs, got %s", ci.LogPath)
	}
	if ci.Sandbox != "testsandboxid" {
		t.Fatalf("expected sandbox to be testsandboxid, got %s", ci.Sandbox)
	}
	if ci.IP != "1.1.1.42" {
		t.Fatalf("expected ip 1.1.1.42, got %s", ci.IP)
	}
	if len(ci.Annotations) == 0 {
		t.Fatal("annotations are empty")
	}
	if len(ci.Labels) == 0 {
		t.Fatal("labels are empty")
	}
	if len(ci.Annotations) != len(annotations) {
		t.Fatalf("container info annotations len (%d) isn't the same as original annotations len (%d)", len(ci.Annotations), len(annotations))
	}
	if len(ci.Labels) != len(labels) {
		t.Fatalf("container info labels len (%d) isn't the same as original labels len (%d)", len(ci.Labels), len(labels))
	}
	var found bool
	for k, v := range annotations {
		found = false
		for key, value := range ci.Annotations {
			if k == key && v == value {
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("key %s with value %v wasn't in container info annotations", k, v)
		}
	}
	for k, v := range labels {
		found = false
		for key, value := range ci.Labels {
			if k == key && v == value {
				found = true
				break
			}
		}
		if !found {
			t.Fatalf("key %s with value %v wasn't in container info labels", k, v)
		}
	}
}

func TestGetContainerInfoCtrNotFound(t *testing.T) {
	s := &Server{}
	getContainerFunc := func(id string) *oci.Container {
		return nil
	}
	getInfraContainerFunc := func(id string) *oci.Container {
		return nil
	}
	getSandboxFunc := func(id string) *sandbox.Sandbox {
		return nil
	}
	_, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc)
	if err == nil {
		t.Fatal("expected an error but got nothing")
	}
	if err != errCtrNotFound {
		t.Fatalf("expected errCtrNotFound error, got %v", err)
	}
}

func TestGetContainerInfoCtrStateNil(t *testing.T) {
	s := &Server{}
	created := time.Now()
	labels := map[string]string{}
	annotations := map[string]string{}
	getContainerFunc := func(id string) *oci.Container {
		container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "imageName", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL")
		if err != nil {
			t.Fatal(err)
		}
		container.SetMountPoint("/var/foo/container")
		container.SetState(nil)
		return container
	}
	getInfraContainerFunc := func(id string) *oci.Container {
		return nil
	}
	getSandboxFunc := func(id string) *sandbox.Sandbox {
		s := &sandbox.Sandbox{}
		s.AddIP("1.1.1.42")
		return s
	}
	_, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc)
	if err == nil {
		t.Fatal("expected an error but got nothing")
	}
	if err != errCtrStateNil {
		t.Fatalf("expected errCtrStateNil error, got %v", err)
	}
}

func TestGetContainerInfoSandboxNotFound(t *testing.T) {
	s := &Server{}
	created := time.Now()
	labels := map[string]string{}
	annotations := map[string]string{}
	getContainerFunc := func(id string) *oci.Container {
		container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "imageName", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL")
		if err != nil {
			t.Fatal(err)
		}
		container.SetMountPoint("/var/foo/container")
		return container
	}
	getInfraContainerFunc := func(id string) *oci.Container {
		return nil
	}
	getSandboxFunc := func(id string) *sandbox.Sandbox {
		return nil
	}
	_, err := s.getContainerInfo("", getContainerFunc, getInfraContainerFunc, getSandboxFunc)
	if err == nil {
		t.Fatal("expected an error but got nothing")
	}
	if err != errSandboxNotFound {
		t.Fatalf("expected errSandboxNotFound error, got %v", err)
	}
}
@ -1,86 +0,0 @@
package server

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/stringid"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

const (
	kubePrefix    = "k8s"
	infraName     = "POD"
	nameDelimiter = "_"
)

func makeSandboxName(sandboxConfig *pb.PodSandboxConfig) string {
	return strings.Join([]string{
		kubePrefix,
		sandboxConfig.Metadata.Name,
		sandboxConfig.Metadata.Namespace,
		sandboxConfig.Metadata.Uid,
		fmt.Sprintf("%d", sandboxConfig.Metadata.Attempt),
	}, nameDelimiter)
}

func makeSandboxContainerName(sandboxConfig *pb.PodSandboxConfig) string {
	return strings.Join([]string{
		kubePrefix,
		infraName,
		sandboxConfig.Metadata.Name,
		sandboxConfig.Metadata.Namespace,
		sandboxConfig.Metadata.Uid,
		fmt.Sprintf("%d", sandboxConfig.Metadata.Attempt),
	}, nameDelimiter)
}

func makeContainerName(sandboxMetadata *pb.PodSandboxMetadata, containerConfig *pb.ContainerConfig) string {
	return strings.Join([]string{
		kubePrefix,
		containerConfig.Metadata.Name,
		sandboxMetadata.Name,
		sandboxMetadata.Namespace,
		sandboxMetadata.Uid,
		fmt.Sprintf("%d", containerConfig.Metadata.Attempt),
	}, nameDelimiter)
}

func (s *Server) generatePodIDandName(sandboxConfig *pb.PodSandboxConfig) (string, string, error) {
	var (
		err error
		id  = stringid.GenerateNonCryptoID()
	)
	if sandboxConfig.Metadata.Namespace == "" {
		return "", "", fmt.Errorf("cannot generate pod ID without namespace")
	}
	name, err := s.ReservePodName(id, makeSandboxName(sandboxConfig))
	if err != nil {
		return "", "", err
	}
	return id, name, err
}

func (s *Server) generateContainerIDandNameForSandbox(sandboxConfig *pb.PodSandboxConfig) (string, string, error) {
	var (
		err error
		id  = stringid.GenerateNonCryptoID()
	)
	name, err := s.ReserveContainerName(id, makeSandboxContainerName(sandboxConfig))
	if err != nil {
		return "", "", err
	}
	return id, name, err
}

func (s *Server) generateContainerIDandName(sandboxMetadata *pb.PodSandboxMetadata, containerConfig *pb.ContainerConfig) (string, string, error) {
	var (
		err error
		id  = stringid.GenerateNonCryptoID()
	)
	name, err := s.ReserveContainerName(id, makeContainerName(sandboxMetadata, containerConfig))
	if err != nil {
		return "", "", err
	}
	return id, name, err
}
@ -1,41 +0,0 @@
package server

import (
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// Status returns the status of the runtime
func (s *Server) Status(ctx context.Context, req *pb.StatusRequest) (*pb.StatusResponse, error) {

	// Deal with Runtime conditions
	runtimeReady, err := s.Runtime().RuntimeReady()
	if err != nil {
		return nil, err
	}
	networkReady, err := s.Runtime().NetworkReady()
	if err != nil {
		return nil, err
	}

	// Use vendored strings
	runtimeReadyConditionString := pb.RuntimeReady
	networkReadyConditionString := pb.NetworkReady

	resp := &pb.StatusResponse{
		Status: &pb.RuntimeStatus{
			Conditions: []*pb.RuntimeCondition{
				{
					Type:   runtimeReadyConditionString,
					Status: runtimeReady,
				},
				{
					Type:   networkReadyConditionString,
					Status: networkReady,
				},
			},
		},
	}

	return resp, nil
}
@ -1,94 +0,0 @@
package server

import (
	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
	"github.com/kubernetes-incubator/cri-o/oci"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"k8s.io/apimachinery/pkg/fields"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// filterSandbox returns whether the passed sandbox matches the filtering criteria
func filterSandbox(p *pb.PodSandbox, filter *pb.PodSandboxFilter) bool {
	if filter != nil {
		if filter.State != nil {
			if p.State != filter.State.State {
				return false
			}
		}
		if filter.LabelSelector != nil {
			sel := fields.SelectorFromSet(filter.LabelSelector)
			if !sel.Matches(fields.Set(p.Labels)) {
				return false
			}
		}
	}
	return true
}

// ListPodSandbox returns a list of SandBoxes.
func (s *Server) ListPodSandbox(ctx context.Context, req *pb.ListPodSandboxRequest) (*pb.ListPodSandboxResponse, error) {
	logrus.Debugf("ListPodSandboxRequest %+v", req)
	var pods []*pb.PodSandbox
	var podList []*sandbox.Sandbox
	for _, sb := range s.ContainerServer.ListSandboxes() {
		podList = append(podList, sb)
	}

	filter := req.Filter
	// Filter by pod id first.
	if filter != nil {
		if filter.Id != "" {
			id, err := s.PodIDIndex().Get(filter.Id)
			if err != nil {
				// Not finding an ID in a filtered list should not be considered
				// an error; it might have been deleted when stop was done.
				// Log and return an empty struct.
				logrus.Warnf("unable to find pod %s with filter", filter.Id)
				return &pb.ListPodSandboxResponse{}, nil
			}
			sb := s.getSandbox(id)
			if sb == nil {
				podList = []*sandbox.Sandbox{}
			} else {
				podList = []*sandbox.Sandbox{sb}
			}
		}
	}

	for _, sb := range podList {
		podInfraContainer := sb.InfraContainer()
		if podInfraContainer == nil {
			// this can't really happen, but if it does because of a bug
			// it's better not to panic
			continue
		}
		cState := s.Runtime().ContainerStatus(podInfraContainer)
		created := cState.Created.UnixNano()
		rStatus := pb.PodSandboxState_SANDBOX_NOTREADY
		if cState.Status == oci.ContainerStateRunning {
			rStatus = pb.PodSandboxState_SANDBOX_READY
		}

		pod := &pb.PodSandbox{
			Id:          sb.ID(),
			CreatedAt:   created,
			State:       rStatus,
			Labels:      sb.Labels(),
			Annotations: sb.Annotations(),
			Metadata:    sb.Metadata(),
		}

		// Filter by other criteria such as state and labels.
		if filterSandbox(pod, req.Filter) {
			pods = append(pods, pod)
		}
	}

	resp := &pb.ListPodSandboxResponse{
		Items: pods,
	}
	logrus.Debugf("ListPodSandboxResponse %+v", resp)
	return resp, nil
}
@ -1,70 +0,0 @@
package server

import (
	"fmt"
	"net"

	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
	"github.com/sirupsen/logrus"
	"k8s.io/kubernetes/pkg/kubelet/network/hostport"
)

// networkStart sets up the sandbox's network and returns the pod IP on success
// or an error
func (s *Server) networkStart(hostNetwork bool, sb *sandbox.Sandbox) (string, error) {
	if hostNetwork {
		return s.BindAddress(), nil
	}

	podNetwork := newPodNetwork(sb)
	err := s.netPlugin.SetUpPod(podNetwork)
	if err != nil {
		return "", fmt.Errorf("failed to create pod network sandbox %s(%s): %v", sb.Name(), sb.ID(), err)
	}

	var ip string
	if ip, err = s.netPlugin.GetPodNetworkStatus(podNetwork); err != nil {
		return "", fmt.Errorf("failed to get network status for pod sandbox %s(%s): %v", sb.Name(), sb.ID(), err)
	}

	if len(sb.PortMappings()) > 0 {
		ip4 := net.ParseIP(ip).To4()
		if ip4 == nil {
			return "", fmt.Errorf("failed to get valid ipv4 address for sandbox %s(%s)", sb.Name(), sb.ID())
		}

		if err = s.hostportManager.Add(sb.ID(), &hostport.PodPortMapping{
			Name:         sb.Name(),
			PortMappings: sb.PortMappings(),
			IP:           ip4,
			HostNetwork:  false,
		}, "lo"); err != nil {
			return "", fmt.Errorf("failed to add hostport mapping for sandbox %s(%s): %v", sb.Name(), sb.ID(), err)
		}
	}
	return ip, nil
}

// networkStop cleans up and removes a pod's network. It is best-effort and
// must call the network plugin even if the network namespace is already gone
func (s *Server) networkStop(hostNetwork bool, sb *sandbox.Sandbox) error {
	if !hostNetwork {
		if err := s.hostportManager.Remove(sb.ID(), &hostport.PodPortMapping{
			Name:         sb.Name(),
			PortMappings: sb.PortMappings(),
			HostNetwork:  false,
		}); err != nil {
			logrus.Warnf("failed to remove hostport for pod sandbox %s(%s): %v",
				sb.Name(), sb.ID(), err)
		}

		podNetwork := newPodNetwork(sb)
		if err := s.netPlugin.TearDownPod(podNetwork); err != nil {
			logrus.Warnf("failed to destroy network for pod sandbox %s(%s): %v",
				sb.Name(), sb.ID(), err)
		}
	}

	return nil
}
@ -1,98 +0,0 @@
package server

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/kubernetes-incubator/cri-o/libkpod/sandbox"
	"github.com/kubernetes-incubator/cri-o/oci"
	pkgstorage "github.com/kubernetes-incubator/cri-o/pkg/storage"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// RemovePodSandbox deletes the sandbox. If there are any running containers in the
// sandbox, they should be force deleted.
func (s *Server) RemovePodSandbox(ctx context.Context, req *pb.RemovePodSandboxRequest) (*pb.RemovePodSandboxResponse, error) {
	logrus.Debugf("RemovePodSandboxRequest %+v", req)
	sb, err := s.getPodSandboxFromRequest(req.PodSandboxId)
	if err != nil {
		if err == sandbox.ErrIDEmpty {
			return nil, err
		}

		// If the sandbox isn't found we just return an empty response to adhere
		// to the CRI interface which expects to not error out in not found
		// cases.
		resp := &pb.RemovePodSandboxResponse{}
		logrus.Warnf("could not get sandbox %s, it's probably been removed already: %v", req.PodSandboxId, err)
		return resp, nil
	}

	podInfraContainer := sb.InfraContainer()
	containers := sb.Containers().List()
	containers = append(containers, podInfraContainer)

	// Delete all the containers in the sandbox
	for _, c := range containers {
		if !sb.Stopped() {
			cState := s.Runtime().ContainerStatus(c)
			if cState.Status == oci.ContainerStateCreated || cState.Status == oci.ContainerStateRunning {
				if err := s.Runtime().StopContainer(ctx, c, 10); err != nil {
					// Assume container is already stopped
					logrus.Warnf("failed to stop container %s: %v", c.Name(), err)
				}
			}
		}

		if err := s.Runtime().DeleteContainer(c); err != nil {
			return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)
		}

		if c.ID() == podInfraContainer.ID() {
			continue
		}

		if err := s.StorageRuntimeServer().StopContainer(c.ID()); err != nil && err != storage.ErrContainerUnknown {
			// assume container already umounted
			logrus.Warnf("failed to stop container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)
		}
		if err := s.StorageRuntimeServer().DeleteContainer(c.ID()); err != nil && err != storage.ErrContainerUnknown {
			return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s: %v", c.Name(), sb.ID(), err)
		}

		s.ReleaseContainerName(c.Name())
		s.removeContainer(c)
		if err := s.CtrIDIndex().Delete(c.ID()); err != nil {
			return nil, fmt.Errorf("failed to delete container %s in pod sandbox %s from index: %v", c.Name(), sb.ID(), err)
		}
	}

	s.removeInfraContainer(podInfraContainer)

	// Remove the files related to the sandbox
	if err := s.StorageRuntimeServer().StopContainer(sb.ID()); err != nil && errors.Cause(err) != storage.ErrContainerUnknown {
		logrus.Warnf("failed to stop sandbox container in pod sandbox %s: %v", sb.ID(), err)
	}
	if err := s.StorageRuntimeServer().RemovePodSandbox(sb.ID()); err != nil && err != pkgstorage.ErrInvalidSandboxID {
		return nil, fmt.Errorf("failed to remove pod sandbox %s: %v", sb.ID(), err)
	}

	s.ReleaseContainerName(podInfraContainer.Name())
	if err := s.CtrIDIndex().Delete(podInfraContainer.ID()); err != nil {
		return nil, fmt.Errorf("failed to delete infra container %s in pod sandbox %s from index: %v", podInfraContainer.ID(), sb.ID(), err)
	}

	s.ReleasePodName(sb.Name())
	s.removeSandbox(sb.ID())
	if err := s.PodIDIndex().Delete(sb.ID()); err != nil {
		return nil, fmt.Errorf("failed to delete pod sandbox %s from index: %v", sb.ID(), err)
	}

	resp := &pb.RemovePodSandboxResponse{}
	logrus.Debugf("RemovePodSandboxResponse %+v", resp)
	return resp, nil
}
Some files were not shown because too many files have changed in this diff.