Update podman build to match buildah bud functionality

Add --label, --annotations, --idfile, --squash

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

Closes: #824
Approved by: TomSweeneyRedHat
This commit is contained in:
Daniel J Walsh
2018-05-23 14:15:54 -04:00
committed by Atomic Bot
parent d252fa710e
commit 915364034f
14 changed files with 1046 additions and 172 deletions

View File

@ -166,6 +166,10 @@ func buildCmd(c *cli.Context) error {
SystemContext: systemContext, SystemContext: systemContext,
CommonBuildOpts: commonOpts, CommonBuildOpts: commonOpts,
DefaultMountsFilePath: c.GlobalString("default-mounts-file"), DefaultMountsFilePath: c.GlobalString("default-mounts-file"),
IIDFile: c.String("iidfile"),
Squash: c.Bool("squash"),
Labels: c.StringSlice("label"),
Annotations: c.StringSlice("annotation"),
} }
if !c.Bool("quiet") { if !c.Bool("quiet") {

View File

@ -709,18 +709,36 @@ _podman_build() {
" "
local options_with_args=" local options_with_args="
--add-host
--annotation
--authfile --authfile
--build-arg --build-arg
--cert-dir --cert-dir
--cgroup-parent
--cpu-period
--cpu-quota
--cpu-shares
--cpuset-cpus
--cpuset-mems
--creds --creds
--file
-f -f
--file
--format --format
--iidfile
--label
-m
--memory
--memory-swap
--runtime --runtime
--runtime-flag --runtime-flag
--security-opt
--shm-size
--signature-policy --signature-policy
--tag
-t -t
--tag
--ulimit
-v
--volume
" "
local all_options="$options_with_args $boolean_options" local all_options="$options_with_args $boolean_options"

View File

@ -3,7 +3,7 @@
# podman-build "7" "December 2017" "podman" # podman-build "7" "December 2017" "podman"
## NAME ## NAME
podman-build - Build a container image using a Dockerfile. podman\-build - Build a container image using a Dockerfile.
## SYNOPSIS ## SYNOPSIS
**podman** **build** [*options* [...]] **context** **podman** **build** [*options* [...]] **context**
@ -27,9 +27,15 @@ Add a custom host-to-IP mapping (host:ip)
Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times. Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option can be set multiple times.
**--annotation** *annotation*
Add an image *annotation* (e.g. annotation=*value*) to the image metadata. Can be used multiple times.
Note: this information is not present in Docker image formats, so it is discarded when writing images in Docker formats.
**--authfile** *path* **--authfile** *path*
Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`. Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
**--build-arg** *arg=value* **--build-arg** *arg=value*
@ -39,15 +45,25 @@ instructions read from the Dockerfiles in the same way that environment
variables are, but which will not be added to environment variable list in the variables are, but which will not be added to environment variable list in the
resulting image's configuration. resulting image's configuration.
**--cache-from**
Images to utilize as potential cache sources. Podman does not currently support caching so this is a NOOP.
**--cert-dir** *path* **--cert-dir** *path*
Use certificates at *path* (*.crt, *.cert, *.key) to connect to the registry. Use certificates at *path* (\*.crt, \*.cert, \*.key) to connect to the registry.
Default certificates directory is _/etc/containers/certs.d_. Default certificates directory is _/etc/containers/certs.d_.
**--cgroup-parent**="" **--cgroup-parent**=""
Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
**--compress**
This option is added to be aligned with other containers CLIs.
Podman doesn't communicate with a daemon or a remote server.
Thus, compressing the data before sending it is irrelevant to Podman.
**--cpu-period**=*0* **--cpu-period**=*0*
Limit the CPU CFS (Completely Fair Scheduler) period Limit the CPU CFS (Completely Fair Scheduler) period
@ -62,7 +78,7 @@ Limit the container's CPU usage. By default, containers run with the full
CPU resource. This flag tells the kernel to restrict the container's CPU usage CPU resource. This flag tells the kernel to restrict the container's CPU usage
to the quota you specify. to the quota you specify.
**--cpu-shares**=*0* **--cpu-shares, -c**=*0*
CPU shares (relative weight) CPU shares (relative weight)
@ -117,7 +133,7 @@ The [username[:password]] to use to authenticate with the registry if required.
If one or both values are not supplied, a command line prompt will appear and the If one or both values are not supplied, a command line prompt will appear and the
value can be entered. The password is entered without echo. value can be entered. The password is entered without echo.
**-f, --file** *Dockerfile* **--file, -f** *Dockerfile*
Specifies a Dockerfile which contains instructions for building the image, Specifies a Dockerfile which contains instructions for building the image,
either a local file or an **http** or **https** URL. If more than one either a local file or an **http** or **https** URL. If more than one
@ -128,6 +144,10 @@ If a build context is not specified, and at least one Dockerfile is a
local file, the directory in which it resides will be used as the build local file, the directory in which it resides will be used as the build
context. context.
**--force-rm**
Always remove intermediate containers after a build. Podman does not currently support caching so this is a NOOP.
**--format** **--format**
Control the format for the built image's manifest and configuration data. Control the format for the built image's manifest and configuration data.
@ -144,8 +164,11 @@ Podman is not currently supported on Windows, and does not have a daemon.
If you want to override the container isolation you can choose a different If you want to override the container isolation you can choose a different
OCI Runtime, using the --runtime flag. OCI Runtime, using the --runtime flag.
**-m**, **--memory**="" **--label** *label*
Add an image *label* (e.g. label=*value*) to the image metadata. Can be used multiple times.
**--memory, -m**=""
Memory limit (format: <number>[<unit>], where unit = b, k, m or g) Memory limit (format: <number>[<unit>], where unit = b, k, m or g)
Allows you to constrain the memory available to a container. If the host Allows you to constrain the memory available to a container. If the host
@ -164,16 +187,31 @@ the value of --memory.
The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes), The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
**--no-cache**
Do not use caching for the container build. Podman does not currently support caching so this is a NOOP.
**--pull**
Pull the image if it is not present. If this flag is disabled (with
*--pull=false*) and the image is not present, the image will not be pulled.
Defaults to *true*.
**--pull-always** **--pull-always**
Pull the image even if a version of the image is already present. Pull the image even if a version of the image is already present.
**-q, --quiet** **--quiet, -q**
Suppress output messages which indicate which instruction is being processed, Suppress output messages which indicate which instruction is being processed,
and of progress when pulling images from a registry, and when writing the and of progress when pulling images from a registry, and when writing the
output image. output image.
**--rm**
Remove intermediate containers after a successful build. Podman does not currently support caching so this is a NOOP.
**--runtime** *path* **--runtime** *path*
The *path* to an alternate OCI-compatible runtime, which will be used to run The *path* to an alternate OCI-compatible runtime, which will be used to run
@ -181,7 +219,11 @@ commands specified by the **RUN** instruction.
**--runtime-flag** *flag* **--runtime-flag** *flag*
Adds global flags for the container runtime. Adds global flags for the container runtime. To list the supported flags, please
consult the manpages of the selected container runtime (`runc` is the default
runtime, the manpage to consult is `runc(8)`).
Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json`
to podman build, the option given would be `--runtime-flag log-format=json`.
**--security-opt**=[] **--security-opt**=[]
@ -205,26 +247,49 @@ Security Options
Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`. Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes). Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes).
If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
**--signature-policy** *signature-policy-file*
Path name of a signature policy file to use. It is not recommended that this **--signature-policy** *signaturepolicy*
Pathname of a signature policy file to use. It is not recommended that this
option be used, as the default behavior of using the system-wide default policy option be used, as the default behavior of using the system-wide default policy
(frequently */etc/containers/policy.json*) is most often preferred. (frequently */etc/containers/policy.json*) is most often preferred.
**-t, --tag** *imageName* **--squash**
Squash all of the new image's layers (including those inherited from a base image) into a single new layer.
**--tag, -t** *imageName*
Specifies the name which will be assigned to the resulting image if the build Specifies the name which will be assigned to the resulting image if the build
process completes successfully. process completes successfully.
If _imageName_ does not include a registry name, the registry name *localhost* will be prepended to the image name.
**--tls-verify** *bool-value* **--tls-verify** *bool-value*
Require HTTPS and verify certificates when talking to container registries (defaults to true) Require HTTPS and verify certificates when talking to container registries (defaults to true).
**--ulimit**=[] **--ulimit**=*type*:*soft-limit*[:*hard-limit*]
Ulimit options Specifies resource limits to apply to processes launched when processing `RUN` instructions.
This option can be specified multiple times. Recognized resource types
include:
"core": maximum core dump size (ulimit -c)
"cpu": maximum CPU time (ulimit -t)
"data": maximum size of a process's data segment (ulimit -d)
"fsize": maximum size of new files (ulimit -f)
"locks": maximum number of file locks (ulimit -x)
"memlock": maximum amount of locked memory (ulimit -l)
"msgqueue": maximum amount of data in message queues (ulimit -q)
"nice": niceness adjustment (nice -n, ulimit -e)
"nofile": maximum number of open files (ulimit -n)
"nproc": maximum number of processes (ulimit -u)
"rss": maximum size of a process's resident set (ulimit -m)
"rtprio": maximum real-time scheduling priority (ulimit -r)
"rttime": maximum amount of real-time execution between blocking syscalls
"sigpending": maximum number of pending signals (ulimit -i)
"stack": maximum stack size (ulimit -s)
**-v**|**--volume**[=*[HOST-DIR:CONTAINER-DIR[:OPTIONS]]*] **--volume, -v**[=*[HOST-DIR:CONTAINER-DIR[:OPTIONS]]*]
Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, podman Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, podman
bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the podman bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the podman
@ -236,12 +301,14 @@ Ulimit options
The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
must be an absolute path as well. Podman bind-mounts the `HOST-DIR` to the must be an absolute path as well. Podman bind-mounts the `HOST-DIR` to the
path you specify. For example, if you supply the `/foo` value, podman creates a bind-mount. path you specify. For example, if you supply `/foo` as the host path,
Podman copies the contents of `/foo` to the container filesystem on the host
and bind mounts that into the container.
You can specify multiple **-v** options to mount one or more mounts to a You can specify multiple **-v** options to mount one or more mounts to a
container. container.
You can add `:ro` or `:rw` suffix to a volume to mount it read-only or You can add the `:ro` or `:rw` suffix to a volume to mount it read-only or
read-write mode, respectively. By default, the volumes are mounted read-write. read-write mode, respectively. By default, the volumes are mounted read-write.
See examples. See examples.
@ -339,7 +406,13 @@ podman build --volume /home/test:/myvol:ro,Z -t imageName .
`podman build -f dev/Dockerfile https://10.10.10.1/podman/context.tar.gz` `podman build -f dev/Dockerfile https://10.10.10.1/podman/context.tar.gz`
Note: The supported compression formats are `xz`, `bzip2`, `gzip` and `identity` (no compression). Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression).
## Files
**registries.conf** (`/etc/containers/registries.conf`)
registries.conf is the configuration file which specifies which registries should be consulted when completing image names which do not include a registry or domain portion.
## SEE ALSO ## SEE ALSO
podman(1), buildah(1) podman(1), buildah(1)

View File

@ -88,7 +88,7 @@ k8s.io/kube-openapi 275e2ce91dec4c05a4094a7b1daee5560b555ac9 https://github.com/
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e https://github.com/kubernetes/utils
github.com/mrunalp/fileutils master github.com/mrunalp/fileutils master
github.com/varlink/go master github.com/varlink/go master
github.com/projectatomic/buildah 3e320b9ae4c3f4c9db8e7295b2dc7e114cca38c2 github.com/projectatomic/buildah 40325d3e31cae9b2332a7e61d715c0687b4ce8fa
github.com/Nvveen/Gotty master github.com/Nvveen/Gotty master
github.com/fsouza/go-dockerclient master github.com/fsouza/go-dockerclient master
github.com/openshift/imagebuilder master github.com/openshift/imagebuilder master

View File

@ -3,6 +3,7 @@ package buildah
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -35,11 +36,14 @@ const (
stateFile = Package + ".json" stateFile = Package + ".json"
) )
// PullPolicy takes the value PullIfMissing, PullAlways, or PullNever.
type PullPolicy int

const (
	// PullIfMissing is one of the values that BuilderOptions.PullPolicy
	// can take, signalling that the source image should be pulled from a
	// registry if a local copy of it is not already present.
	PullIfMissing PullPolicy = iota
	// PullAlways is one of the values that BuilderOptions.PullPolicy can
	// take, signalling that a fresh, possibly updated, copy of the image
	// should be pulled from a registry before the build proceeds.
	PullAlways
	// PullNever is one of the values that BuilderOptions.PullPolicy can
	// take, signalling that the source image should not be pulled from a
	// registry.
	PullNever
)

// String converts a PullPolicy into a string.
func (p PullPolicy) String() string {
	switch p {
	case PullIfMissing:
		return "PullIfMissing"
	case PullAlways:
		return "PullAlways"
	case PullNever:
		return "PullNever"
	}
	// Unknown values are reported rather than panicking, so callers can
	// safely format any integer that ended up in a PullPolicy field.
	return fmt.Sprintf("unrecognized policy %d", p)
}
// Builder objects are used to represent containers which are being used to // Builder objects are used to represent containers which are being used to
// build images. They also carry potential updates which will be applied to // build images. They also carry potential updates which will be applied to
// the image's configuration when the container's contents are used to build an // the image's configuration when the container's contents are used to build an
@ -95,9 +112,11 @@ type Builder struct {
// Image metadata and runtime settings, in multiple formats. // Image metadata and runtime settings, in multiple formats.
OCIv1 v1.Image `json:"ociv1,omitempty"` OCIv1 v1.Image `json:"ociv1,omitempty"`
Docker docker.V2Image `json:"docker,omitempty"` Docker docker.V2Image `json:"docker,omitempty"`
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format.
DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"` DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
CommonBuildOpts *CommonBuildOptions
CommonBuildOpts *CommonBuildOptions
} }
// BuilderInfo are used as objects to display container information // BuilderInfo are used as objects to display container information
@ -140,35 +159,54 @@ func GetBuildInfo(b *Builder) BuilderInfo {
} }
} }
// CommonBuildOptions are reseources that can be defined by flags for both buildah from and bud // CommonBuildOptions are resources that can be defined by flags for both buildah from and build-using-dockerfile
type CommonBuildOptions struct { type CommonBuildOptions struct {
// AddHost is the list of hostnames to add to the resolv.conf // AddHost is the list of hostnames to add to the resolv.conf
AddHost []string AddHost []string
//CgroupParent it the path to cgroups under which the cgroup for the container will be created. // CgroupParent is the path to cgroups under which the cgroup for the container will be created.
CgroupParent string CgroupParent string
//CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period // CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
CPUPeriod uint64 CPUPeriod uint64
//CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota // CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
CPUQuota int64 CPUQuota int64
//CPUShares (relative weight // CPUShares (relative weight
CPUShares uint64 CPUShares uint64
//CPUSetCPUs in which to allow execution (0-3, 0,1) // CPUSetCPUs in which to allow execution (0-3, 0,1)
CPUSetCPUs string CPUSetCPUs string
//CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. // CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
CPUSetMems string CPUSetMems string
//Memory limit // Memory is the upper limit (in bytes) on how much memory running containers can use.
Memory int64 Memory int64
//MemorySwap limit value equal to memory plus swap. // MemorySwap limits the amount of memory and swap together.
MemorySwap int64 MemorySwap int64
//SecruityOpts modify the way container security is running // LabelOpts is a slice of fields of an SELinux context, given in "field:pair" format, or "disable".
LabelOpts []string // Recognized field names are "role", "type", and "level".
LabelOpts []string
// SeccompProfilePath is the pathname of a seccomp profile.
SeccompProfilePath string SeccompProfilePath string
ApparmorProfile string // ApparmorProfile is the name of an apparmor profile.
//ShmSize is the shared memory size ApparmorProfile string
// ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory.
ShmSize string ShmSize string
//Ulimit options // Ulimit specifies resource limit options, in the form type:softlimit[:hardlimit].
// These types are recognized:
// "core": maximum core dump size (ulimit -c)
// "cpu": maximum CPU time (ulimit -t)
// "data": maximum size of a process's data segment (ulimit -d)
// "fsize": maximum size of new files (ulimit -f)
// "locks": maximum number of file locks (ulimit -x)
// "memlock": maximum amount of locked memory (ulimit -l)
// "msgqueue": maximum amount of data in message queues (ulimit -q)
// "nice": niceness adjustment (nice -n, ulimit -e)
// "nofile": maximum number of open files (ulimit -n)
// "nproc": maximum number of processes (ulimit -u)
// "rss": maximum size of a process's resident set (ulimit -m)
// "rtprio": maximum real-time scheduling priority (ulimit -r)
// "rttime": maximum amount of real-time execution between blocking syscalls
// "sigpending": maximum number of pending signals (ulimit -i)
// "stack": maximum stack size (ulimit -s)
Ulimit []string Ulimit []string
//Volumes to bind mount into the container // Volumes to bind mount into the container
Volumes []string Volumes []string
} }
@ -184,7 +222,7 @@ type BuilderOptions struct {
// PullPolicy decides whether or not we should pull the image that // PullPolicy decides whether or not we should pull the image that
// we're using as a base image. It should be PullIfMissing, // we're using as a base image. It should be PullIfMissing,
// PullAlways, or PullNever. // PullAlways, or PullNever.
PullPolicy int PullPolicy PullPolicy
// Registry is a value which is prepended to the image's name, if it // Registry is a value which is prepended to the image's name, if it
// needs to be pulled and the image name alone can not be resolved to a // needs to be pulled and the image name alone can not be resolved to a
// reference to a source image. No separator is implicitly added. // reference to a source image. No separator is implicitly added.
@ -209,7 +247,8 @@ type BuilderOptions struct {
// github.com/containers/image/types SystemContext to hold credentials // github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information. // and other authentication/authorization information.
SystemContext *types.SystemContext SystemContext *types.SystemContext
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format // DefaultMountsFilePath is the file path holding the mounts to be
// mounted in "host-path:container-path" format
DefaultMountsFilePath string DefaultMountsFilePath string
CommonBuildOpts *CommonBuildOptions CommonBuildOpts *CommonBuildOptions
} }

View File

@ -49,6 +49,9 @@ type CommitOptions struct {
SystemContext *types.SystemContext SystemContext *types.SystemContext
// IIDFile tells the builder to write the image ID to the specified file // IIDFile tells the builder to write the image ID to the specified file
IIDFile string IIDFile string
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
} }
// PushOptions can be used to alter how an image is copied somewhere. // PushOptions can be used to alter how an image is copied somewhere.
@ -100,7 +103,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Check if we're keeping everything in local storage. If so, we can take certain shortcuts. // Check if we're keeping everything in local storage. If so, we can take certain shortcuts.
_, destIsStorage := dest.Transport().(is.StoreTransport) _, destIsStorage := dest.Transport().(is.StoreTransport)
exporting := !destIsStorage exporting := !destIsStorage
src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Compression, options.HistoryTimestamp) src, err := b.makeImageRef(options.PreferredManifestType, exporting, options.Squash, options.Compression, options.HistoryTimestamp)
if err != nil { if err != nil {
return imgID, errors.Wrapf(err, "error computing layer digests and building metadata") return imgID, errors.Wrapf(err, "error computing layer digests and building metadata")
} }

View File

@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
@ -41,6 +42,8 @@ type containerImageRef struct {
compression archive.Compression compression archive.Compression
name reference.Named name reference.Named
names []string names []string
containerID string
mountLabel string
layerID string layerID string
oconfig []byte oconfig []byte
dconfig []byte dconfig []byte
@ -50,12 +53,15 @@ type containerImageRef struct {
annotations map[string]string annotations map[string]string
preferredManifestType string preferredManifestType string
exporting bool exporting bool
squash bool
} }
type containerImageSource struct { type containerImageSource struct {
path string path string
ref *containerImageRef ref *containerImageRef
store storage.Store store storage.Store
containerID string
mountLabel string
layerID string layerID string
names []string names []string
compression archive.Compression compression archive.Compression
@ -94,6 +100,124 @@ func expectedDockerDiffIDs(image docker.V2Image) int {
return expected return expected
} }
// Compute the media types which we need to attach to a layer, given the type of
// compression that we'll be applying.
func (i *containerImageRef) computeLayerMIMEType(what string) (omediaType, dmediaType string, err error) {
	// Default to the uncompressed media types; adjust only when compression applies.
	omediaType = v1.MediaTypeImageLayer
	dmediaType = docker.V2S2MediaTypeUncompressedLayer
	if i.compression == archive.Uncompressed {
		return omediaType, dmediaType, nil
	}
	switch i.compression {
	case archive.Gzip:
		omediaType = v1.MediaTypeImageLayerGzip
		dmediaType = docker.V2S2MediaTypeLayer
		logrus.Debugf("compressing %s with gzip", what)
	case archive.Bzip2:
		// Until the image specs define a media type for bzip2-compressed layers, even if we know
		// how to decompress them, we can't try to compress layers with bzip2.
		return "", "", errors.New("media type for bzip2-compressed layers is not defined")
	case archive.Xz:
		// Until the image specs define a media type for xz-compressed layers, even if we know
		// how to decompress them, we can't try to compress layers with xz.
		return "", "", errors.New("media type for xz-compressed layers is not defined")
	default:
		logrus.Debugf("compressing %s with unknown compressor(?)", what)
	}
	return omediaType, dmediaType, nil
}
// Extract the container's whole filesystem as if it were a single layer.
// The container is mounted for the duration of the read; unmounting happens
// in the returned ReadCloser's Close, so the caller MUST close the reader.
func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
// Mount the container's root filesystem so we can tar it up.
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
if err != nil {
return nil, errors.Wrapf(err, "error extracting container %q", i.containerID)
}
// No compression here: compression, if any, is applied later by the consumer.
tarOptions := &archive.TarOptions{
Compression: archive.Uncompressed,
}
rc, err := archive.TarWithOptions(mountPoint, tarOptions)
if err != nil {
return nil, errors.Wrapf(err, "error extracting container %q", i.containerID)
}
// Wrap the tar stream so that Close also unmounts the container.
return ioutils.NewReadCloserWrapper(rc, func() error {
err := rc.Close()
if err != nil {
err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
}
// Only surface the unmount error if closing the archive succeeded;
// a close failure takes precedence over an unmount failure.
if err2 := i.store.Unmount(i.containerID); err == nil {
if err2 != nil {
err2 = errors.Wrapf(err2, "error unmounting container %q", i.containerID)
}
err = err2
}
return err
}), nil
}
// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder.
// Returns the OCI image config and manifest plus the Docker schema2 image config
// and manifest, all with empty layer/diffID lists to be repopulated by the caller.
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
created := i.created
// Build an empty image, and then decode over it.
oimage := v1.Image{}
if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
}
// Always replace this value, since we're newer than our base image.
oimage.Created = &created
// Clear the list of diffIDs, since we always repopulate it.
oimage.RootFS.Type = docker.TypeLayers
oimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so that we can append
// entries to it.
if i.squash {
oimage.History = []v1.History{}
}
// Build an empty image, and then decode over it.
dimage := docker.V2Image{}
if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
}
// Always replace this value, since we're newer than our base image.
dimage.Created = created
// Clear the list of diffIDs, since we always repopulate it.
dimage.RootFS = &docker.V2S2RootFS{}
dimage.RootFS.Type = docker.TypeLayers
dimage.RootFS.DiffIDs = []digest.Digest{}
// Only clear the history if we're squashing, otherwise leave it be so that we can append
// entries to it.
if i.squash {
dimage.History = []docker.V2S2History{}
}
// Build empty manifests. The Layers lists will be populated later.
omanifest := v1.Manifest{
Versioned: specs.Versioned{
SchemaVersion: 2,
},
Config: v1.Descriptor{
MediaType: v1.MediaTypeImageConfig,
},
Layers: []v1.Descriptor{},
Annotations: i.annotations,
}
dmanifest := docker.V2S2Manifest{
V2Versioned: docker.V2Versioned{
SchemaVersion: 2,
MediaType: docker.V2S2MediaTypeManifest,
},
Config: docker.V2S2Descriptor{
MediaType: docker.V2S2MediaTypeImageConfig,
},
Layers: []docker.V2S2Descriptor{},
}
return oimage, omanifest, dimage, dmanifest, nil
}
func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) { func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) {
// Decide which type of manifest and configuration output we're going to provide. // Decide which type of manifest and configuration output we're going to provide.
manifestType := i.preferredManifestType manifestType := i.preferredManifestType
@ -109,11 +233,12 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "unable to read layer %q", layerID) return nil, errors.Wrapf(err, "unable to read layer %q", layerID)
} }
// Walk the list of parent layers, prepending each as we go. // Walk the list of parent layers, prepending each as we go. If we're squashing,
// stop at the layer ID of the top layer, which we won't really be using anyway.
for layer != nil { for layer != nil {
layers = append(append([]string{}, layerID), layers...) layers = append(append([]string{}, layerID), layers...)
layerID = layer.Parent layerID = layer.Parent
if layerID == "" { if layerID == "" || i.squash {
err = nil err = nil
break break
} }
@ -139,57 +264,25 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
} }
}() }()
// Build fresh copies of the configurations so that we don't mess with the values in the Builder // Build fresh copies of the configurations and manifest so that we don't mess with any
// object itself. // values in the Builder object itself.
oimage := v1.Image{} oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
err = json.Unmarshal(i.oconfig, &oimage)
if err != nil { if err != nil {
return nil, err return nil, err
} }
created := i.created
oimage.Created = &created
dimage := docker.V2Image{}
err = json.Unmarshal(i.dconfig, &dimage)
if err != nil {
return nil, err
}
dimage.Created = created
// Start building manifests.
omanifest := v1.Manifest{
Versioned: specs.Versioned{
SchemaVersion: 2,
},
Config: v1.Descriptor{
MediaType: v1.MediaTypeImageConfig,
},
Layers: []v1.Descriptor{},
Annotations: i.annotations,
}
dmanifest := docker.V2S2Manifest{
V2Versioned: docker.V2Versioned{
SchemaVersion: 2,
MediaType: docker.V2S2MediaTypeManifest,
},
Config: docker.V2S2Descriptor{
MediaType: docker.V2S2MediaTypeImageConfig,
},
Layers: []docker.V2S2Descriptor{},
}
oimage.RootFS.Type = docker.TypeLayers
oimage.RootFS.DiffIDs = []digest.Digest{}
dimage.RootFS = &docker.V2S2RootFS{}
dimage.RootFS.Type = docker.TypeLayers
dimage.RootFS.DiffIDs = []digest.Digest{}
// Extract each layer and compute its digests, both compressed (if requested) and uncompressed. // Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
for _, layerID := range layers { for _, layerID := range layers {
what := fmt.Sprintf("layer %q", layerID)
if i.squash {
what = fmt.Sprintf("container %q", i.containerID)
}
// The default layer media type assumes no compression. // The default layer media type assumes no compression.
omediaType := v1.MediaTypeImageLayer omediaType := v1.MediaTypeImageLayer
dmediaType := docker.V2S2MediaTypeUncompressedLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer
// If we're not re-exporting the data, reuse the blobsum and diff IDs. // If we're not re-exporting the data, and we're reusing layers individually, reuse
if !i.exporting && layerID != i.layerID { // the blobsum and diff IDs.
if !i.exporting && !i.squash && layerID != i.layerID {
layer, err2 := i.store.Layer(layerID) layer, err2 := i.store.Layer(layerID)
if err2 != nil { if err2 != nil {
return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
@ -218,40 +311,37 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
continue continue
} }
// Figure out if we need to change the media type, in case we're using compression. // Figure out if we need to change the media type, in case we're using compression.
if i.compression != archive.Uncompressed { omediaType, dmediaType, err = i.computeLayerMIMEType(what)
switch i.compression { if err != nil {
case archive.Gzip: return nil, err
omediaType = v1.MediaTypeImageLayerGzip
dmediaType = docker.V2S2MediaTypeLayer
logrus.Debugf("compressing layer %q with gzip", layerID)
case archive.Bzip2:
// Until the image specs define a media type for bzip2-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with bzip2.
return nil, errors.New("media type for bzip2-compressed layers is not defined")
case archive.Xz:
// Until the image specs define a media type for xz-compressed layers, even if we know
// how to decompress them, we can't try to compress layers with xz.
return nil, errors.New("media type for xz-compressed layers is not defined")
default:
logrus.Debugf("compressing layer %q with unknown compressor(?)", layerID)
}
} }
// Start reading the layer. // Start reading either the layer or the whole container rootfs.
noCompression := archive.Uncompressed noCompression := archive.Uncompressed
diffOptions := &storage.DiffOptions{ diffOptions := &storage.DiffOptions{
Compression: &noCompression, Compression: &noCompression,
} }
rc, err := i.store.Diff("", layerID, diffOptions) var rc io.ReadCloser
if err != nil { if i.squash {
return nil, errors.Wrapf(err, "error extracting layer %q", layerID) // Extract the root filesystem as a single layer.
rc, err = i.extractRootfs()
if err != nil {
return nil, err
}
defer rc.Close()
} else {
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
return nil, errors.Wrapf(err, "error extracting %s", what)
}
defer rc.Close()
} }
defer rc.Close()
srcHasher := digest.Canonical.Digester() srcHasher := digest.Canonical.Digester()
reader := io.TeeReader(rc, srcHasher.Hash()) reader := io.TeeReader(rc, srcHasher.Hash())
// Set up to write the possibly-recompressed blob. // Set up to write the possibly-recompressed blob.
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600) layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error opening file for layer %q", layerID) return nil, errors.Wrapf(err, "error opening file for %s", what)
} }
destHasher := digest.Canonical.Digester() destHasher := digest.Canonical.Digester()
counter := ioutils.NewWriteCounter(layerFile) counter := ioutils.NewWriteCounter(layerFile)
@ -259,26 +349,26 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Compress the layer, if we're recompressing it. // Compress the layer, if we're recompressing it.
writer, err := archive.CompressStream(multiWriter, i.compression) writer, err := archive.CompressStream(multiWriter, i.compression)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error compressing layer %q", layerID) return nil, errors.Wrapf(err, "error compressing %s", what)
} }
size, err := io.Copy(writer, reader) size, err := io.Copy(writer, reader)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) return nil, errors.Wrapf(err, "error storing %s to file", what)
} }
writer.Close() writer.Close()
layerFile.Close() layerFile.Close()
if i.compression == archive.Uncompressed { if i.compression == archive.Uncompressed {
if size != counter.Count { if size != counter.Count {
return nil, errors.Errorf("error storing layer %q to file: inconsistent layer size (copied %d, wrote %d)", layerID, size, counter.Count) return nil, errors.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
} }
} else { } else {
size = counter.Count size = counter.Count
} }
logrus.Debugf("layer %q size is %d bytes", layerID, size) logrus.Debugf("%s size is %d bytes", what, size)
// Rename the layer so that we can more easily find it by digest later. // Rename the layer so that we can more easily find it by digest later.
err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String())) err = os.Rename(filepath.Join(path, "layer"), filepath.Join(path, destHasher.Digest().String()))
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error storing layer %q to file", layerID) return nil, errors.Wrapf(err, "error storing %s to file", what)
} }
// Add a note in the manifest about the layer. The blobs are identified by their possibly- // Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests. // compressed blob digests.
@ -383,6 +473,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
path: path, path: path,
ref: i, ref: i,
store: i.store, store: i.store,
containerID: i.containerID,
mountLabel: i.mountLabel,
layerID: i.layerID, layerID: i.layerID,
names: i.names, names: i.names,
compression: i.compression, compression: i.compression,
@ -440,15 +532,15 @@ func (i *containerImageSource) Reference() types.ImageReference {
} }
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) { if instanceDigest != nil {
return nil, errors.Errorf("TODO") return nil, errors.Errorf("containerImageSource does not support manifest lists")
} }
return nil, nil return nil, nil
} }
func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil && *instanceDigest != digest.FromBytes(i.manifest) { if instanceDigest != nil {
return nil, "", errors.Errorf("TODO") return nil, "", errors.Errorf("containerImageSource does not support manifest lists")
} }
return i.manifest, i.manifestType, nil return i.manifest, i.manifestType, nil
} }
@ -488,7 +580,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo)
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
} }
func (b *Builder) makeImageRef(manifestType string, exporting bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) { func (b *Builder) makeImageRef(manifestType string, exporting bool, squash bool, compress archive.Compression, historyTimestamp *time.Time) (types.ImageReference, error) {
var name reference.Named var name reference.Named
container, err := b.store.Container(b.ContainerID) container, err := b.store.Container(b.ContainerID)
if err != nil { if err != nil {
@ -519,6 +611,8 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc
compression: compress, compression: compress,
name: name, name: name,
names: container.Names, names: container.Names,
containerID: container.ID,
mountLabel: b.MountLabel,
layerID: container.LayerID, layerID: container.LayerID,
oconfig: oconfig, oconfig: oconfig,
dconfig: dconfig, dconfig: dconfig,
@ -528,6 +622,7 @@ func (b *Builder) makeImageRef(manifestType string, exporting bool, compress arc
annotations: b.Annotations(), annotations: b.Annotations(),
preferredManifestType: manifestType, preferredManifestType: manifestType,
exporting: exporting, exporting: exporting,
squash: squash,
} }
return ref, nil return ref, nil
} }

View File

@ -51,7 +51,7 @@ type BuildOptions struct {
ContextDirectory string ContextDirectory string
// PullPolicy controls whether or not we pull images. It should be one // PullPolicy controls whether or not we pull images. It should be one
// of PullIfMissing, PullAlways, or PullNever. // of PullIfMissing, PullAlways, or PullNever.
PullPolicy int PullPolicy buildah.PullPolicy
// Registry is a value which is prepended to the image's name, if it // Registry is a value which is prepended to the image's name, if it
// needs to be pulled and the image name alone can not be resolved to a // needs to be pulled and the image name alone can not be resolved to a
// reference to a source image. No separator is implicitly added. // reference to a source image. No separator is implicitly added.
@ -113,6 +113,13 @@ type BuildOptions struct {
DefaultMountsFilePath string DefaultMountsFilePath string
// IIDFile tells the builder to write the image ID to the specified file // IIDFile tells the builder to write the image ID to the specified file
IIDFile string IIDFile string
// Squash tells the builder to produce an image with a single layer
// instead of with possibly more than one layer.
Squash bool
// Labels metadata for an image
Labels []string
// Annotation metadata for an image
Annotations []string
} }
// Executor is a buildah-based implementation of the imagebuilder.Executor // Executor is a buildah-based implementation of the imagebuilder.Executor
@ -124,7 +131,7 @@ type Executor struct {
store storage.Store store storage.Store
contextDir string contextDir string
builder *buildah.Builder builder *buildah.Builder
pullPolicy int pullPolicy buildah.PullPolicy
registry string registry string
transport string transport string
ignoreUnrecognizedInstructions bool ignoreUnrecognizedInstructions bool
@ -150,6 +157,9 @@ type Executor struct {
commonBuildOptions *buildah.CommonBuildOptions commonBuildOptions *buildah.CommonBuildOptions
defaultMountsFilePath string defaultMountsFilePath string
iidfile string iidfile string
squash bool
labels []string
annotations []string
} }
// withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME. // withName creates a new child executor that will be used whenever a COPY statement uses --from=NAME.
@ -482,6 +492,9 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
commonBuildOptions: options.CommonBuildOpts, commonBuildOptions: options.CommonBuildOpts,
defaultMountsFilePath: options.DefaultMountsFilePath, defaultMountsFilePath: options.DefaultMountsFilePath,
iidfile: options.IIDFile, iidfile: options.IIDFile,
squash: options.Squash,
labels: append([]string{}, options.Labels...),
annotations: append([]string{}, options.Annotations...),
} }
if exec.err == nil { if exec.err == nil {
exec.err = os.Stderr exec.err = os.Stderr
@ -673,6 +686,22 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder) (err er
for k, v := range config.Labels { for k, v := range config.Labels {
b.builder.SetLabel(k, v) b.builder.SetLabel(k, v)
} }
for _, labelSpec := range b.labels {
label := strings.SplitN(labelSpec, "=", 2)
if len(label) > 1 {
b.builder.SetLabel(label[0], label[1])
} else {
b.builder.SetLabel(label[0], "")
}
}
for _, annotationSpec := range b.annotations {
annotation := strings.SplitN(annotationSpec, "=", 2)
if len(annotation) > 1 {
b.builder.SetAnnotation(annotation[0], annotation[1])
} else {
b.builder.SetAnnotation(annotation[0], "")
}
}
if imageRef != nil { if imageRef != nil {
logName := transports.ImageName(imageRef) logName := transports.ImageName(imageRef)
logrus.Debugf("COMMIT %q", logName) logrus.Debugf("COMMIT %q", logName)
@ -692,6 +721,7 @@ func (b *Executor) Commit(ctx context.Context, ib *imagebuilder.Builder) (err er
ReportWriter: b.reportWriter, ReportWriter: b.reportWriter,
PreferredManifestType: b.outputFormat, PreferredManifestType: b.outputFormat,
IIDFile: b.iidfile, IIDFile: b.iidfile,
Squash: b.squash,
} }
imgID, err := b.builder.Commit(ctx, imageRef, options) imgID, err := b.builder.Commit(ctx, imageRef, options)
if err != nil { if err != nil {

View File

@ -11,6 +11,10 @@ import (
var ( var (
BudFlags = []cli.Flag{ BudFlags = []cli.Flag{
cli.StringSliceFlag{
Name: "annotation",
Usage: "Set metadata for an image (default [])",
},
cli.StringFlag{ cli.StringFlag{
Name: "authfile", Name: "authfile",
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json", Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
@ -53,6 +57,10 @@ var (
Name: "iidfile", Name: "iidfile",
Usage: "Write the image ID to the file", Usage: "Write the image ID to the file",
}, },
cli.StringSliceFlag{
Name: "label",
Usage: "Set metadata for an image (default [])",
},
cli.BoolFlag{ cli.BoolFlag{
Name: "no-cache", Name: "no-cache",
Usage: "Do not use caching for the container build. Buildah does not currently support caching so this is a NOOP.", Usage: "Do not use caching for the container build. Buildah does not currently support caching so this is a NOOP.",
@ -139,11 +147,11 @@ var (
}, },
cli.StringSliceFlag{ cli.StringSliceFlag{
Name: "security-opt", Name: "security-opt",
Usage: "security Options (default [])", Usage: "security options (default [])",
}, },
cli.StringFlag{ cli.StringFlag{
Name: "shm-size", Name: "shm-size",
Usage: "size of `/dev/shm`. The format is `<number><unit>`.", Usage: "size of '/dev/shm'. The format is `<number><unit>`.",
Value: "65536k", Value: "65536k",
}, },
cli.StringSliceFlag{ cli.StringSliceFlag{

View File

@ -88,7 +88,7 @@ func parseSecurityOpts(securityOpts []string, commonOpts *buildah.CommonBuildOpt
} }
con := strings.SplitN(opt, "=", 2) con := strings.SplitN(opt, "=", 2)
if len(con) != 2 { if len(con) != 2 {
return errors.Errorf("Invalid --security-opt 1: %q", opt) return errors.Errorf("Invalid --security-opt name=value pair: %q", opt)
} }
switch con[0] { switch con[0] {

View File

@ -60,10 +60,15 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
if err != nil { if err != nil {
return "", errors.Wrapf(err, "error loading manifest for %q", srcRef) return "", errors.Wrapf(err, "error loading manifest for %q", srcRef)
} }
// if index.json has no reference name, compute the image digest instead
if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" { if manifest.Annotations == nil || manifest.Annotations["org.opencontainers.image.ref.name"] == "" {
return "", errors.Errorf("error, archive doesn't have a name annotation. Cannot store image with no name") name, err = getImageDigest(ctx, srcRef, nil)
if err != nil {
return "", err
}
} else {
name = manifest.Annotations["org.opencontainers.image.ref.name"]
} }
name = manifest.Annotations["org.opencontainers.image.ref.name"]
case util.DirTransport: case util.DirTransport:
// supports pull from a directory // supports pull from a directory
name = split[1] name = split[1]

View File

@ -2,16 +2,23 @@ package buildah
import ( import (
"bufio" "bufio"
"bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"net"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"sync"
"syscall"
"time"
"github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/reexec"
"github.com/docker/docker/profiles/seccomp" "github.com/docker/docker/profiles/seccomp"
units "github.com/docker/go-units" units "github.com/docker/go-units"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
@ -22,6 +29,7 @@ import (
"github.com/projectatomic/libpod/pkg/secrets" "github.com/projectatomic/libpod/pkg/secrets"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal" "golang.org/x/crypto/ssh/terminal"
"golang.org/x/sys/unix"
) )
const ( const (
@ -29,12 +37,17 @@ const (
DefaultWorkingDir = "/" DefaultWorkingDir = "/"
// DefaultRuntime is the default command to use to run the container. // DefaultRuntime is the default command to use to run the container.
DefaultRuntime = "runc" DefaultRuntime = "runc"
// runUsingRuntimeCommand is a command we use as a key for reexec
runUsingRuntimeCommand = Package + "-runtime"
) )
// TerminalPolicy takes the value DefaultTerminal, WithoutTerminal, or WithTerminal.
type TerminalPolicy int
const ( const (
// DefaultTerminal indicates that this Run invocation should be // DefaultTerminal indicates that this Run invocation should be
// connected to a pseudoterminal if we're connected to a terminal. // connected to a pseudoterminal if we're connected to a terminal.
DefaultTerminal = iota DefaultTerminal TerminalPolicy = iota
// WithoutTerminal indicates that this Run invocation should NOT be // WithoutTerminal indicates that this Run invocation should NOT be
// connected to a pseudoterminal. // connected to a pseudoterminal.
WithoutTerminal WithoutTerminal
@ -43,6 +56,19 @@ const (
WithTerminal WithTerminal
) )
// String converts a TerminalPoliicy into a string.
func (t TerminalPolicy) String() string {
switch t {
case DefaultTerminal:
return "DefaultTerminal"
case WithoutTerminal:
return "WithoutTerminal"
case WithTerminal:
return "WithTerminal"
}
return fmt.Sprintf("unrecognized terminal setting %d", t)
}
// RunOptions can be used to alter how a command is run in the container. // RunOptions can be used to alter how a command is run in the container.
type RunOptions struct { type RunOptions struct {
// Hostname is the hostname we set for the running container. // Hostname is the hostname we set for the running container.
@ -72,7 +98,7 @@ type RunOptions struct {
// terminal is used if os.Stdout is connected to a terminal, but that // terminal is used if os.Stdout is connected to a terminal, but that
// decision can be overridden by specifying either WithTerminal or // decision can be overridden by specifying either WithTerminal or
// WithoutTerminal. // WithoutTerminal.
Terminal int Terminal TerminalPolicy
// Quiet tells the run to turn off output to stdout. // Quiet tells the run to turn off output to stdout.
Quiet bool Quiet bool
} }
@ -114,7 +140,7 @@ func addHostsToFile(hosts []string, filename string) error {
} }
func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error { func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator) error {
// RESOURCES - CPU // Resources - CPU
if commonOpts.CPUPeriod != 0 { if commonOpts.CPUPeriod != 0 {
g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod) g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod)
} }
@ -131,7 +157,7 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator)
g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems) g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems)
} }
// RESOURCES - MEMORY // Resources - Memory
if commonOpts.Memory != 0 { if commonOpts.Memory != 0 {
g.SetLinuxResourcesMemoryLimit(commonOpts.Memory) g.SetLinuxResourcesMemoryLimit(commonOpts.Memory)
} }
@ -139,22 +165,21 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator)
g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap) g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap)
} }
// cgroup membership
if commonOpts.CgroupParent != "" { if commonOpts.CgroupParent != "" {
g.SetLinuxCgroupsPath(commonOpts.CgroupParent) g.SetLinuxCgroupsPath(commonOpts.CgroupParent)
} }
// Other process resource limits
if err := addRlimits(commonOpts.Ulimit, g); err != nil { if err := addRlimits(commonOpts.Ulimit, g); err != nil {
return err return err
} }
if err := addHostsToFile(commonOpts.AddHost, "/etc/hosts"); err != nil {
return err
}
logrus.Debugln("Resources:", commonOpts) logrus.Debugf("Resources: %#v", commonOpts)
return nil return nil
} }
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles, builtinVolumes, volumeMounts []string, shmSize string) error { func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles map[string]string, builtinVolumes, volumeMounts []string, shmSize string) error {
// The passed-in mounts matter the most to us. // The passed-in mounts matter the most to us.
mounts := make([]specs.Mount, len(optionMounts)) mounts := make([]specs.Mount, len(optionMounts))
copy(mounts, optionMounts) copy(mounts, optionMounts)
@ -179,14 +204,14 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts
mounts = append(mounts, specMount) mounts = append(mounts, specMount)
} }
// Add bind mounts for important files, unless they conflict. // Add bind mounts for important files, unless they conflict.
for _, boundFile := range bindFiles { for dest, src := range bindFiles {
if haveMount(boundFile) { if haveMount(dest) {
// Already have something to mount there, so skip this one. // Already have something to mount there, so skip this one.
continue continue
} }
mounts = append(mounts, specs.Mount{ mounts = append(mounts, specs.Mount{
Source: boundFile, Source: src,
Destination: boundFile, Destination: dest,
Type: "bind", Type: "bind",
Options: []string{"rbind", "ro"}, Options: []string{"rbind", "ro"},
}) })
@ -293,6 +318,28 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts
return nil return nil
} }
// addNetworkConfig copies files from host and sets them up to bind mount into container
func (b *Builder) addNetworkConfig(rdir, hostPath string) (string, error) {
stat, err := os.Stat(hostPath)
if err != nil {
return "", errors.Wrapf(err, "stat %q failed", hostPath)
}
buf, err := ioutil.ReadFile(hostPath)
if err != nil {
return "", errors.Wrapf(err, "opening %q failed", hostPath)
}
cfile := filepath.Join(rdir, filepath.Base(hostPath))
if err := ioutil.WriteFile(cfile, buf, stat.Mode()); err != nil {
return "", errors.Wrapf(err, "opening %q failed", cfile)
}
if err = label.Relabel(cfile, b.MountLabel, false); err != nil {
return "", errors.Wrapf(err, "error relabeling %q in container %q", cfile, b.ContainerID)
}
return cfile, nil
}
// Run runs the specified command in the container's root filesystem. // Run runs the specified command in the container's root filesystem.
func (b *Builder) Run(command []string, options RunOptions) error { func (b *Builder) Run(command []string, options RunOptions) error {
var user specs.User var user specs.User
@ -399,10 +446,10 @@ func (b *Builder) Run(command []string, options RunOptions) error {
return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd) return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd)
} }
//Security Opts // Set the apparmor profile name.
g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile) g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile)
// HANDLE SECCOMP // Set the seccomp configuration using the specified profile name.
if b.CommonBuildOpts.SeccompProfilePath != "unconfined" { if b.CommonBuildOpts.SeccompProfilePath != "unconfined" {
if b.CommonBuildOpts.SeccompProfilePath != "" { if b.CommonBuildOpts.SeccompProfilePath != "" {
seccompProfile, err := ioutil.ReadFile(b.CommonBuildOpts.SeccompProfilePath) seccompProfile, err := ioutil.ReadFile(b.CommonBuildOpts.SeccompProfilePath)
@ -430,37 +477,580 @@ func (b *Builder) Run(command []string, options RunOptions) error {
Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
} }
g.AddMount(cgroupMnt) g.AddMount(cgroupMnt)
hostFile, err := b.addNetworkConfig(path, "/etc/hosts")
if err != nil {
return err
}
resolvFile, err := b.addNetworkConfig(path, "/etc/resolv.conf")
if err != nil {
return err
}
bindFiles := []string{"/etc/hosts", "/etc/resolv.conf"} if err := addHostsToFile(b.CommonBuildOpts.AddHost, hostFile); err != nil {
return err
}
bindFiles := map[string]string{
"/etc/hosts": hostFile,
"/etc/resolv.conf": resolvFile,
}
err = b.setupMounts(mountPoint, spec, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize) err = b.setupMounts(mountPoint, spec, options.Mounts, bindFiles, b.Volumes(), b.CommonBuildOpts.Volumes, b.CommonBuildOpts.ShmSize)
if err != nil { if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container") return errors.Wrapf(err, "error resolving mountpoints for container")
} }
return b.runUsingRuntimeSubproc(options, spec, mountPoint, path, Package+"-"+filepath.Base(path))
}
type runUsingRuntimeSubprocOptions struct {
Options RunOptions
Spec *specs.Spec
RootPath string
BundlePath string
ContainerName string
}
func (b *Builder) runUsingRuntimeSubproc(options RunOptions, spec *specs.Spec, rootPath, bundlePath, containerName string) (err error) {
var confwg sync.WaitGroup
config, conferr := json.Marshal(runUsingRuntimeSubprocOptions{
Options: options,
Spec: spec,
RootPath: rootPath,
BundlePath: bundlePath,
ContainerName: containerName,
})
if conferr != nil {
return errors.Wrapf(conferr, "error encoding configuration for %q", runUsingRuntimeCommand)
}
cmd := reexec.Command(runUsingRuntimeCommand)
cmd.Dir = bundlePath
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = append(os.Environ(), fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel()))
preader, pwriter, err := os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating configuration pipe")
}
confwg.Add(1)
go func() {
_, conferr = io.Copy(pwriter, bytes.NewReader(config))
confwg.Done()
}()
cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
defer preader.Close()
defer pwriter.Close()
err = cmd.Run()
confwg.Wait()
if err == nil {
return conferr
}
return err
}
func init() {
reexec.Register(runUsingRuntimeCommand, runUsingRuntimeMain)
}
func runUsingRuntimeMain() {
var options runUsingRuntimeSubprocOptions
// Set logging.
if level := os.Getenv("LOGLEVEL"); level != "" {
if ll, err := strconv.Atoi(level); err == nil {
logrus.SetLevel(logrus.Level(ll))
}
}
// Unpack our configuration.
confPipe := os.NewFile(3, "confpipe")
if confPipe == nil {
fmt.Fprintf(os.Stderr, "error reading options pipe\n")
os.Exit(1)
}
defer confPipe.Close()
if err := json.NewDecoder(confPipe).Decode(&options); err != nil {
fmt.Fprintf(os.Stderr, "error decoding options: %v\n", err)
os.Exit(1)
}
// Set ourselves up to read the container's exit status. We're doing this in a child process
// so that we won't mess with the setting in a caller of the library.
if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil {
fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err)
os.Exit(1)
}
// Run the container, start to finish.
status, err := runUsingRuntime(options.Options, options.Spec, options.RootPath, options.BundlePath, options.ContainerName)
if err != nil {
fmt.Fprintf(os.Stderr, "error running container: %v\n", err)
os.Exit(1)
}
// Pass the container's exit status back to the caller by exiting with the same status.
if status.Exited() {
os.Exit(status.ExitStatus())
} else if status.Signaled() {
fmt.Fprintf(os.Stderr, "container exited on %s\n", status.Signal())
os.Exit(1)
}
os.Exit(1)
}
func runUsingRuntime(options RunOptions, spec *specs.Spec, rootPath, bundlePath, containerName string) (wstatus unix.WaitStatus, err error) {
// Write the runtime configuration.
specbytes, err := json.Marshal(spec) specbytes, err := json.Marshal(spec)
if err != nil { if err != nil {
return err return 1, err
} }
err = ioutils.AtomicWriteFile(filepath.Join(path, "config.json"), specbytes, 0600) if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
if err != nil { return 1, errors.Wrapf(err, "error storing runtime configuration")
return errors.Wrapf(err, "error storing runtime configuration")
} }
logrus.Debugf("config = %v", string(specbytes)) logrus.Debugf("config = %v", string(specbytes))
// Decide which runtime to use.
runtime := options.Runtime runtime := options.Runtime
if runtime == "" { if runtime == "" {
runtime = DefaultRuntime runtime = DefaultRuntime
} }
args := append(options.Args, "run", "-b", path, Package+"-"+b.ContainerID)
cmd := exec.Command(runtime, args...) // Default to not specifying a console socket location.
cmd.Dir = mountPoint moreCreateArgs := func() []string { return nil }
cmd.Stdin = os.Stdin // Default to just passing down our stdio.
cmd.Stdout = os.Stdout getCreateStdio := func() (*os.File, *os.File, *os.File) { return os.Stdin, os.Stdout, os.Stderr }
if options.Quiet {
cmd.Stdout = nil // Figure out how we're doing stdio handling, and create pipes and sockets.
var stdio sync.WaitGroup
var consoleListener *net.UnixListener
stdioPipe := make([][]int, 3)
copyConsole := false
copyStdio := false
finishCopy := make([]int, 2)
if err = unix.Pipe(finishCopy); err != nil {
return 1, errors.Wrapf(err, "error creating pipe for notifying to stop stdio")
} }
cmd.Stderr = os.Stderr finishedCopy := make(chan struct{})
err = cmd.Run() if spec.Process != nil {
if spec.Process.Terminal {
copyConsole = true
// Create a listening socket for accepting the container's terminal's PTY master.
socketPath := filepath.Join(bundlePath, "console.sock")
consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"})
if err != nil {
return 1, errors.Wrapf(err, "error creating socket to receive terminal descriptor")
}
// Add console socket arguments.
moreCreateArgs = func() []string { return []string{"--console-socket", socketPath} }
} else {
copyStdio = true
// Create pipes to use for relaying stdio.
for i := range stdioPipe {
stdioPipe[i] = make([]int, 2)
if err = unix.Pipe(stdioPipe[i]); err != nil {
return 1, errors.Wrapf(err, "error creating pipe for container FD %d", i)
}
}
// Set stdio to our pipes.
getCreateStdio = func() (*os.File, *os.File, *os.File) {
stdin := os.NewFile(uintptr(stdioPipe[unix.Stdin][0]), "/dev/stdin")
stdout := os.NewFile(uintptr(stdioPipe[unix.Stdout][1]), "/dev/stdout")
stderr := os.NewFile(uintptr(stdioPipe[unix.Stderr][1]), "/dev/stderr")
return stdin, stdout, stderr
}
}
} else {
if options.Quiet {
// Discard stdout.
getCreateStdio = func() (*os.File, *os.File, *os.File) {
return os.Stdin, nil, os.Stderr
}
}
}
// Build the commands that we'll execute.
pidFile := filepath.Join(bundlePath, "pid")
args := append(append(append(options.Args, "create", "--bundle", bundlePath, "--pid-file", pidFile), moreCreateArgs()...), containerName)
create := exec.Command(runtime, args...)
create.Dir = bundlePath
stdin, stdout, stderr := getCreateStdio()
create.Stdin, create.Stdout, create.Stderr = stdin, stdout, stderr
if create.SysProcAttr == nil {
create.SysProcAttr = &syscall.SysProcAttr{}
}
runSetDeathSig(create)
args = append(options.Args, "start", containerName)
start := exec.Command(runtime, args...)
start.Dir = bundlePath
start.Stderr = os.Stderr
runSetDeathSig(start)
args = append(options.Args, "kill", containerName)
kill := exec.Command(runtime, args...)
kill.Dir = bundlePath
kill.Stderr = os.Stderr
runSetDeathSig(kill)
args = append(options.Args, "delete", containerName)
del := exec.Command(runtime, args...)
del.Dir = bundlePath
del.Stderr = os.Stderr
runSetDeathSig(del)
// Actually create the container.
err = create.Run()
if err != nil { if err != nil {
logrus.Debugf("error running runc %v: %v", spec.Process.Args, err) return 1, errors.Wrapf(err, "error creating container for %v", spec.Process.Args)
}
defer func() {
err2 := del.Run()
if err2 != nil {
if err == nil {
err = errors.Wrapf(err2, "error deleting container")
} else {
logrus.Infof("error deleting container: %v", err2)
}
}
}()
// Make sure we read the container's exit status when it exits.
pidValue, err := ioutil.ReadFile(pidFile)
if err != nil {
return 1, errors.Wrapf(err, "error reading pid from %q", pidFile)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
if err != nil {
return 1, errors.Wrapf(err, "error parsing pid %s as a number", string(pidValue))
}
var reaping sync.WaitGroup
reaping.Add(1)
go func() {
defer reaping.Done()
var err error
_, err = unix.Wait4(pid, &wstatus, 0, nil)
if err != nil {
wstatus = 0
logrus.Errorf("error waiting for container child process: %v\n", err)
}
}()
if copyStdio {
// We don't need the ends of the pipes that belong to the container.
stdin.Close()
if stdout != nil {
stdout.Close()
}
stderr.Close()
}
// Handle stdio for the container in the background.
stdio.Add(1)
go runCopyStdio(&stdio, copyStdio, stdioPipe, copyConsole, consoleListener, finishCopy, finishedCopy)
// Start the container.
err = start.Run()
if err != nil {
return 1, errors.Wrapf(err, "error starting container")
}
stopped := false
defer func() {
if !stopped {
err2 := kill.Run()
if err2 != nil {
if err == nil {
err = errors.Wrapf(err2, "error stopping container")
} else {
logrus.Infof("error stopping container: %v", err2)
}
}
}
}()
// Wait for the container to exit.
for {
now := time.Now()
var state specs.State
args = append(options.Args, "state", containerName)
stat := exec.Command(runtime, args...)
stat.Dir = bundlePath
stat.Stderr = os.Stderr
stateOutput, stateErr := stat.Output()
if stateErr != nil {
return 1, errors.Wrapf(stateErr, "error reading container state")
}
if err = json.Unmarshal(stateOutput, &state); err != nil {
return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput))
}
switch state.Status {
case "running":
case "stopped":
stopped = true
default:
return 1, errors.Errorf("container status unexpectedly changed to %q", state.Status)
}
if stopped {
break
}
select {
case <-finishedCopy:
stopped = true
case <-time.After(time.Until(now.Add(100 * time.Millisecond))):
continue
}
if stopped {
break
}
}
// Close the writing end of the stop-handling-stdio notification pipe.
unix.Close(finishCopy[1])
// Wait for the stdio copy goroutine to flush.
stdio.Wait()
// Wait until we finish reading the exit status.
reaping.Wait()
return wstatus, nil
}
// runCopyStdio relays data between our own stdio (or the container
// terminal's PTY master, received over consoleListener) and the container,
// until the pipe at finishCopy[0] is closed or every relayed descriptor has
// hung up.  It is run as a goroutine; it calls stdio.Done() and sends on
// finishedCopy when it returns, and it closes its ends of the stdio pipes
// on the way out.
func runCopyStdio(stdio *sync.WaitGroup, copyStdio bool, stdioPipe [][]int, copyConsole bool, consoleListener *net.UnixListener, finishCopy []int, finishedCopy chan struct{}) {
	defer func() {
		unix.Close(finishCopy[0])
		if copyStdio {
			// Close the ends of the pipes that this goroutine was using.
			unix.Close(stdioPipe[unix.Stdin][1])
			unix.Close(stdioPipe[unix.Stdout][0])
			unix.Close(stdioPipe[unix.Stderr][0])
		}
		stdio.Done()
		finishedCopy <- struct{}{}
	}()
	// If we're not doing I/O handling, we're done.
	if !copyConsole && !copyStdio {
		return
	}
	terminalFD := -1
	if copyConsole {
		// Accept a connection over our listening socket.
		fd, err := runAcceptTerminal(consoleListener)
		if err != nil {
			logrus.Errorf("%v", err)
			return
		}
		terminalFD = fd
		// Set our terminal's mode to raw, to pass handling of special
		// terminal input to the terminal in the container.
		state, err := terminal.MakeRaw(unix.Stdin)
		if err != nil {
			logrus.Warnf("error setting terminal state: %v", err)
		} else {
			defer func() {
				if err = terminal.Restore(unix.Stdin, state); err != nil {
					logrus.Errorf("unable to restore terminal state: %v", err)
				}
			}()
			// FIXME - if we're connected to a terminal, we should be
			// passing the updated terminal size down when we receive a
			// SIGWINCH.
		}
	}
	// Track how many descriptors we're expecting data from.
	reading := 0
	// Map describing where data on an incoming descriptor should go.
	relayMap := make(map[int]int)
	// Map describing incoming descriptors.
	relayDesc := make(map[int]string)
	// Buffers.
	relayBuffer := make(map[int]*bytes.Buffer)
	if copyConsole {
		// Input from our stdin, output from the terminal descriptor.
		relayMap[unix.Stdin] = terminalFD
		relayDesc[unix.Stdin] = "stdin"
		relayBuffer[unix.Stdin] = new(bytes.Buffer)
		relayMap[terminalFD] = unix.Stdout
		relayDesc[terminalFD] = "container terminal output"
		relayBuffer[terminalFD] = new(bytes.Buffer)
		reading = 2
	}
	if copyStdio {
		// Input from our stdin, output from the stdout and stderr pipes.
		relayMap[unix.Stdin] = stdioPipe[unix.Stdin][1]
		relayDesc[unix.Stdin] = "stdin"
		relayBuffer[unix.Stdin] = new(bytes.Buffer)
		relayMap[stdioPipe[unix.Stdout][0]] = unix.Stdout
		relayDesc[stdioPipe[unix.Stdout][0]] = "container stdout"
		relayBuffer[stdioPipe[unix.Stdout][0]] = new(bytes.Buffer)
		relayMap[stdioPipe[unix.Stderr][0]] = unix.Stderr
		relayDesc[stdioPipe[unix.Stderr][0]] = "container stderr"
		relayBuffer[stdioPipe[unix.Stderr][0]] = new(bytes.Buffer)
		reading = 3
	}
	// Set our reading descriptors to non-blocking, so that the relay loop
	// below never stalls in read(2) after poll(2) reports readiness.
	for fd := range relayMap {
		if err := unix.SetNonblock(fd, true); err != nil {
			logrus.Errorf("error setting %s to nonblocking: %v", relayDesc[fd], err)
			return
		}
	}
	// Pass data back and forth.
	for {
		// Start building the list of descriptors to poll.
		pollFds := make([]unix.PollFd, 0, reading+1)
		// Poll for a notification that we should stop handling stdio.
		pollFds = append(pollFds, unix.PollFd{Fd: int32(finishCopy[0]), Events: unix.POLLIN | unix.POLLHUP})
		// Poll on our reading descriptors.
		for rfd := range relayMap {
			pollFds = append(pollFds, unix.PollFd{Fd: int32(rfd), Events: unix.POLLIN | unix.POLLHUP})
		}
		buf := make([]byte, 8192)
		// Wait for new data from any input descriptor, or a notification that we're done.
		nevents, err := unix.Poll(pollFds, -1)
		if err != nil {
			if errno, isErrno := err.(syscall.Errno); isErrno {
				switch errno {
				case syscall.EINTR:
					// Interrupted by a signal; just poll again.
					continue
				default:
					logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err)
					return
				}
			} else {
				logrus.Errorf("unable to wait for stdio/terminal data to relay: %v", err)
				return
			}
		}
		if nevents == 0 {
			// Blocking poll (timeout -1) should never return zero events.
			logrus.Errorf("unexpected no data, no error waiting for terminal data to relay")
			return
		}
		var removes []int
		for _, pollFd := range pollFds {
			// If this descriptor's just been closed from the other end, mark it for
			// removal from the set that we're checking for.
			if pollFd.Revents&unix.POLLHUP == unix.POLLHUP {
				removes = append(removes, int(pollFd.Fd))
			}
			// If the EPOLLIN flag isn't set, then there's no data to be read from this descriptor.
			if pollFd.Revents&unix.POLLIN == 0 {
				// If we're using pipes and it's our stdin, close the writing end
				// of the corresponding pipe.
				if copyStdio && int(pollFd.Fd) == unix.Stdin {
					unix.Close(stdioPipe[unix.Stdin][1])
					stdioPipe[unix.Stdin][1] = -1
				}
				continue
			}
			// Copy whatever we read to wherever it needs to be sent.
			readFD := int(pollFd.Fd)
			writeFD, needToRelay := relayMap[readFD]
			if needToRelay {
				n, err := unix.Read(readFD, buf)
				if err != nil {
					if errno, isErrno := err.(syscall.Errno); isErrno {
						switch errno {
						default:
							logrus.Errorf("unable to read %s: %v", relayDesc[readFD], err)
						case syscall.EINTR, syscall.EAGAIN:
							// Transient conditions; retry on the next poll.
						}
					} else {
						logrus.Errorf("unable to wait for %s data to relay: %v", relayDesc[readFD], err)
					}
					continue
				}
				// If it's zero-length on our stdin and we're
				// using pipes, it's an EOF, so close the stdin
				// pipe's writing end.
				if n == 0 && copyStdio && int(pollFd.Fd) == unix.Stdin {
					unix.Close(stdioPipe[unix.Stdin][1])
					stdioPipe[unix.Stdin][1] = -1
				}
				if n > 0 {
					// Buffer the data in case we're blocked on where they need to go.
					relayBuffer[readFD].Write(buf[:n])
					// Try to drain the buffer.
					n, err = unix.Write(writeFD, relayBuffer[readFD].Bytes())
					if err != nil {
						logrus.Errorf("unable to write %s: %v", relayDesc[readFD], err)
						return
					}
					// Consume only what was actually written; the rest stays
					// buffered for a later attempt.
					relayBuffer[readFD].Next(n)
				}
			}
		}
		// Remove any descriptors which we don't need to poll any more from the poll descriptor list.
		for _, remove := range removes {
			delete(relayMap, remove)
			reading--
		}
		if reading == 0 {
			// We have no more open descriptors to read, so we can stop now.
			return
		}
		// If the we-can-return pipe had anything for us, we're done.
		for _, pollFd := range pollFds {
			if int(pollFd.Fd) == finishCopy[0] && pollFd.Revents != 0 {
				// The pipe is closed, indicating that we can stop now.
				return
			}
		}
	}
}
// runAcceptTerminal accepts a single connection on consoleListener and
// receives the descriptor of the container terminal's PTY master, which the
// runtime sends to us as SCM_RIGHTS ancillary data over the socket.  On
// success, it resizes the pseudoterminal to match our controlling terminal
// and returns the descriptor; the caller owns it and must close it.  The
// listener is closed before returning in all cases.
func runAcceptTerminal(consoleListener *net.UnixListener) (int, error) {
	defer consoleListener.Close()
	c, err := consoleListener.AcceptUnix()
	if err != nil {
		return -1, errors.Wrapf(err, "error accepting socket descriptor connection")
	}
	defer c.Close()
	// Expect a control message over our new connection.
	b := make([]byte, 8192)
	oob := make([]byte, 8192)
	n, oobn, _, _, err := c.ReadMsgUnix(b, oob)
	if err != nil {
		// NOTE: the message previously ended in a bare "%v" with no matching
		// argument, which rendered as "%!v(MISSING)"; Wrapf already appends
		// the wrapped error's text.
		return -1, errors.Wrapf(err, "error reading socket descriptor")
	}
	if n > 0 {
		logrus.Debugf("socket descriptor is for %q", string(b[:n]))
	}
	if oobn > len(oob) {
		return -1, errors.Errorf("too much out-of-bounds data (%d bytes)", oobn)
	}
	// Parse the control message.
	scm, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return -1, errors.Wrapf(err, "error parsing out-of-bound data as a socket control message")
	}
	logrus.Debugf("control messages: %v", scm)
	// Expect to get a descriptor.
	terminalFD := -1
	for i := range scm {
		fds, err := unix.ParseUnixRights(&scm[i])
		if err != nil {
			// Same printf-verb fix as above: no stray "%v".
			return -1, errors.Wrapf(err, "error parsing unix rights control message")
		}
		logrus.Debugf("fds: %v", fds)
		if len(fds) == 0 {
			continue
		}
		// Keep the first descriptor we're handed; ignore any extras.
		terminalFD = fds[0]
		break
	}
	if terminalFD == -1 {
		return -1, errors.Errorf("unable to read terminal descriptor")
	}
	// Set the pseudoterminal's size to match our own.  Failure here is not
	// fatal; the terminal is still usable at its default size.
	winsize, err := unix.IoctlGetWinsize(unix.Stdin, unix.TIOCGWINSZ)
	if err != nil {
		logrus.Warnf("error reading size of controlling terminal: %v", err)
		return terminalFD, nil
	}
	err = unix.IoctlSetWinsize(terminalFD, unix.TIOCSWINSZ, winsize)
	if err != nil {
		logrus.Warnf("error setting size of container pseudoterminal: %v", err)
	}
	return terminalFD, nil
}
func runSetDeathSig(cmd *exec.Cmd) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
if cmd.SysProcAttr.Pdeathsig == 0 {
cmd.SysProcAttr.Pdeathsig = syscall.SIGTERM
} }
return err
} }

View File

@ -70,7 +70,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
} }
} }
// If the image is from a different transport // If the image includes a transport's name as a prefix, use it as-is.
split := strings.SplitN(name, ":", 2) split := strings.SplitN(name, ":", 2)
if len(split) == 2 { if len(split) == 2 {
if _, ok := Transports[split[0]]; ok { if _, ok := Transports[split[0]]; ok {
@ -91,8 +91,16 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
// If this domain can cause us to insert something in the middle, check if that happened. // If this domain can cause us to insert something in the middle, check if that happened.
repoPath := reference.Path(named) repoPath := reference.Path(named)
domain := reference.Domain(named) domain := reference.Domain(named)
tag := ""
if tagged, ok := named.(reference.Tagged); ok {
tag = ":" + tagged.Tag()
}
digest := ""
if digested, ok := named.(reference.Digested); ok {
digest = "@" + digested.Digest().String()
}
defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/" defaultPrefix := RegistryDefaultPathPrefix[reference.Domain(named)] + "/"
if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):]) == name { if strings.HasPrefix(repoPath, defaultPrefix) && path.Join(domain, repoPath[len(defaultPrefix):])+tag+digest == name {
// Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with. // Yup, parsing just inserted a bit in the middle, so there was a domain name there to begin with.
return []string{name} return []string{name}
} }

View File

@ -1,27 +1,32 @@
github.com/BurntSushi/toml master
github.com/Nvveen/Gotty master
github.com/blang/semver master github.com/blang/semver master
github.com/BurntSushi/toml master
github.com/containerd/continuity master
github.com/containernetworking/cni v0.6.0
github.com/containers/image master github.com/containers/image master
github.com/containers/storage master github.com/containers/storage 0b8ab959bba614a4f88bb3791dbc078c3d47f259
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9 github.com/docker/docker b8571fd81c7d2223c9ecbf799c693e3ef1daaea9
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/engine-api master github.com/docker/engine-api master
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20 github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/fsouza/go-dockerclient master github.com/fsouza/go-dockerclient master
github.com/ghodss/yaml master github.com/ghodss/yaml master
github.com/gogo/protobuf master
github.com/golang/glog master github.com/golang/glog master
github.com/gorilla/context master github.com/gorilla/context master
github.com/gorilla/mux master github.com/gorilla/mux master
github.com/hashicorp/errwrap master
github.com/hashicorp/go-cleanhttp master github.com/hashicorp/go-cleanhttp master
github.com/hashicorp/go-multierror master
github.com/imdario/mergo master github.com/imdario/mergo master
github.com/mattn/go-runewidth master github.com/mattn/go-runewidth master
github.com/mattn/go-shellwords master github.com/mattn/go-shellwords master
github.com/mistifyio/go-zfs master github.com/mistifyio/go-zfs master
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
github.com/mtrmac/gpgme master github.com/mtrmac/gpgme master
github.com/Nvveen/Gotty master
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
github.com/opencontainers/image-spec v1.0.0 github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc master github.com/opencontainers/runc master
@ -32,11 +37,16 @@ github.com/openshift/imagebuilder master
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/pborman/uuid master github.com/pborman/uuid master
github.com/pkg/errors master github.com/pkg/errors master
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/projectatomic/libpod e686269da34ed4208f4ed517c0587ab38e8eaf2c
github.com/sirupsen/logrus master github.com/sirupsen/logrus master
github.com/syndtr/gocapability master github.com/syndtr/gocapability master
github.com/tchap/go-patricia master github.com/tchap/go-patricia master
github.com/urfave/cli master github.com/urfave/cli master
github.com/vbatts/tar-split v0.10.2 github.com/vbatts/tar-split v0.10.2
github.com/xeipuuv/gojsonpointer master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonschema master
golang.org/x/crypto master golang.org/x/crypto master
golang.org/x/net master golang.org/x/net master
golang.org/x/sys master golang.org/x/sys master
@ -46,12 +56,3 @@ gopkg.in/yaml.v2 cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
k8s.io/apimachinery master k8s.io/apimachinery master
k8s.io/client-go master k8s.io/client-go master
k8s.io/kubernetes master k8s.io/kubernetes master
github.com/hashicorp/go-multierror master
github.com/hashicorp/errwrap master
github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/containerd/continuity master
github.com/gogo/protobuf master
github.com/xeipuuv/gojsonpointer master
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/projectatomic/libpod master