update buildah to latest and use new network stack

Make sure buildah uses the new network stack.

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
Paul Holzinger
2022-01-06 14:50:12 +01:00
parent 495884b319
commit 0151e10b62
77 changed files with 2786 additions and 1122 deletions


@ -54,6 +54,10 @@ Add a custom host-to-IP mapping (host:ip)
Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** option
can be set multiple times.
#### **--all-platforms**
Instead of building for a set of platforms specified using the **--platform** option, inspect the build's base images, and build for all of the platforms for which they are all available. Stages that use *scratch* as a starting point can not be inspected, so at least one non-*scratch* stage must be present for detection to work usefully.
#### **--annotation**=*annotation*
Add an image *annotation* (e.g. annotation=*value*) to the image metadata. Can
@ -123,24 +127,19 @@ Path to cgroups under which the cgroup for the container will be created. If the
path is not absolute, the path is considered to be relative to the cgroups path
of the init process. Cgroups will be created if they do not already exist.
#### **--cgroupns**=*how*
Sets the configuration for cgroup namespaces when handling `RUN` instructions.
The configured value can be "" (the empty string) or "private" to indicate
that a new cgroup namespace should be created, or it can be "host" to indicate
that the cgroup namespace in which `buildah` itself is being run should be reused.
#### **--compress**
This option is added to be aligned with other containers CLIs.
Podman doesn't communicate with a daemon or a remote server.
Thus, compressing the data before sending it is irrelevant to Podman. (This option is not available with the remote Podman client)
#### **--cni-config-dir**=*directory*
Location of CNI configuration files which will dictate which plugins will be
used to configure network interfaces and routing for containers created for
handling `RUN` instructions, if those containers will be run in their own
network namespaces, and networking is not disabled.
#### **--cni-plugin-path**=*directory[:directory[:directory[...]]]*
List of directories in which the CNI plugins which will be used for configuring
network namespaces can be found.
#### **--cpu-period**=*limit*
Set the CPU period for the Completely Fair Scheduler (CFS), which is a
@ -625,6 +624,10 @@ types include:
"sigpending": maximum number of pending signals (ulimit -i)
"stack": maximum stack size (ulimit -s)
#### **--unsetenv** *env*
Unset environment variables from the final image.
#### **--userns**=*how*
Sets the configuration for user namespaces when handling `RUN` instructions.

go.mod (8 changed lines)

@ -11,10 +11,10 @@ require (
github.com/container-orchestrated-devices/container-device-interface v0.0.0-20210325223243-f99e8b6c10b9
github.com/containernetworking/cni v1.0.1
github.com/containernetworking/plugins v1.0.1
github.com/containers/buildah v1.23.1
github.com/containers/buildah v1.23.1-0.20220112160421-d744ebc4b1d5
github.com/containers/common v0.46.1-0.20220112112017-31e8cc4aeeab
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.17.1-0.20220106205022-73f80d60f0e1
github.com/containers/image/v5 v5.18.0
github.com/containers/ocicrypt v1.1.2
github.com/containers/psgo v1.7.1
github.com/containers/storage v1.37.1-0.20211213220314-73a749e4fec5
@ -46,7 +46,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.17.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
github.com/opencontainers/image-spec v1.0.3-0.20211215212317-ea0209f50ae1
github.com/opencontainers/runc v1.0.3
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.1-0.20211020193359-09d837bf40a7
@ -64,7 +64,7 @@ require (
github.com/vbauerster/mpb/v6 v6.0.4
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d
golang.org/x/text v0.3.7

go.sum (368 changed lines; diff suppressed because it is too large)

@ -96,6 +96,8 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions,
if options.Runtime == "" {
options.Runtime = r.GetOCIRuntimePath()
}
// share the network interface between podman and buildah
options.NetworkInterface = r.network
id, ref, err := imagebuildah.BuildDockerfiles(ctx, r.store, options, dockerfiles...)
// Write event for build completion
r.newImageBuildCompleteEvent(id)
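For callers outside libpod, the same wiring looks roughly like this, a minimal sketch assuming the caller already holds a configured `nettypes.ContainerNetwork` (the helper name `buildWithSharedNetwork` is hypothetical):

```go
// Sketch: threading a shared libnetwork interface into a Buildah build,
// the way Podman now passes its own r.network.
package example

import (
	"context"

	buildahDefine "github.com/containers/buildah/define"
	"github.com/containers/buildah/imagebuildah"
	nettypes "github.com/containers/common/libnetwork/types"
	"github.com/containers/storage"
)

func buildWithSharedNetwork(ctx context.Context, store storage.Store, net nettypes.ContainerNetwork, containerfile string) (string, error) {
	options := buildahDefine.BuildOptions{
		// Reuse the caller's network stack (CNI or netavark) instead of
		// letting Buildah construct its own from CNI config paths.
		NetworkInterface: net,
	}
	id, _, err := imagebuildah.BuildDockerfiles(ctx, store, options, containerfile)
	return id, err
}
```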


@ -16,7 +16,6 @@ import (
"github.com/containers/buildah"
buildahDefine "github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v3/libpod"
"github.com/containers/podman/v3/pkg/api/handlers/utils"
@ -491,11 +490,6 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
defer reporter.Close()
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
rtc, err := runtime.GetConfig()
if err != nil {
utils.Error(w, "Something went wrong.", http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
return
}
buildOptions := buildahDefine.BuildOptions{
AddCapabilities: addCaps,
AdditionalTags: additionalTags,
@ -522,8 +516,6 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Ulimit: ulimits,
Secrets: secrets,
},
CNIConfigDir: rtc.Network.CNIPluginDirs[0],
CNIPluginPath: util.DefaultCNIPluginPath,
Compression: compression,
ConfigureNetwork: parseNetworkConfigurationPolicy(query.ConfigureNetwork),
ContextDirectory: contextDirectory,


@ -6,7 +6,7 @@ env:
#### Global variables used for all tasks
####
# Name of the ultimate destination branch for this CI run, PR or post-merge.
DEST_BRANCH: "release-1.23"
DEST_BRANCH: "main"
GOPATH: "/var/tmp/go"
GOSRC: "${GOPATH}/src/github.com/containers/buildah"
# Overrides default location (/tmp/cirrus) for repo clone
@ -25,16 +25,14 @@ env:
####
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
FEDORA_NAME: "fedora-34"
PRIOR_FEDORA_NAME: "fedora-33"
UBUNTU_NAME: "ubuntu-2104"
PRIOR_UBUNTU_NAME: "ubuntu-2010"
FEDORA_NAME: "fedora-35"
PRIOR_FEDORA_NAME: "fedora-34"
UBUNTU_NAME: "ubuntu-2110"
IMAGE_SUFFIX: "c6248193773010944"
IMAGE_SUFFIX: "c6226133906620416"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
PRIOR_UBUNTU_CACHE_IMAGE_NAME: "prior-ubuntu-${IMAGE_SUFFIX}"
IN_PODMAN_IMAGE: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
@ -76,7 +74,6 @@ meta_task:
${FEDORA_CACHE_IMAGE_NAME}
${PRIOR_FEDORA_CACHE_IMAGE_NAME}
${UBUNTU_CACHE_IMAGE_NAME}
${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
BUILDID: "${CIRRUS_BUILD_ID}"
REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
GCPJSON: ENCRYPTED[d3614d6f5cc0e66be89d4252b3365fd84f14eee0259d4eb47e25fc0bc2842c7937f5ee8c882b7e547b4c5ec4b6733b14]
@ -200,62 +197,13 @@ cross_build_task:
path: ./bin/*
static_build_task:
name: "Static Build"
alias: static_build
only_if: *not_docs
depends_on:
- unit
gce_instance:
image_name: "${FEDORA_CACHE_IMAGE_NAME}"
cpu: 8
memory: 12
disk: 200
env:
NIX_FQIN: "docker.io/nixos/nix:latest"
init_script: |
set -ex
setenforce 0
growpart /dev/sda 1 || true
resize2fs /dev/sda1 || true
yum -y install podman
nix_cache:
folder: '.cache'
fingerprint_script: cat nix/*
build_script: |
set -ex
mkdir -p .cache
mv .cache /nix
if [[ -z $(ls -A /nix) ]]; then
podman run --rm --privileged -i -v /:/mnt \
$NIX_FQIN \
cp -rfT /nix /mnt/nix
fi
podman run --rm --privileged -i -v /nix:/nix \
-v ${PWD}:${PWD} -w ${PWD} \
$NIX_FQIN \
nix --print-build-logs --option cores 8 \
--option max-jobs 8 build --file nix/
binaries_artifacts:
path: "result/bin/buildah"
save_cache_script: |
mv /nix .cache
chown -Rf $(whoami) .cache
integration_task:
name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
alias: integration
only_if: *not_docs
depends_on:
- unit
- smoke
- vendor
matrix:
# VFS
@ -271,10 +219,6 @@ integration_task:
DISTRO_NV: "${UBUNTU_NAME}"
IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
- env:
DISTRO_NV: "${PRIOR_UBUNTU_NAME}"
IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'vfs'
# OVERLAY
- env:
DISTRO_NV: "${FEDORA_NAME}"
@ -288,10 +232,6 @@ integration_task:
DISTRO_NV: "${UBUNTU_NAME}"
IMAGE_NAME: "${UBUNTU_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
- env:
DISTRO_NV: "${PRIOR_UBUNTU_NAME}"
IMAGE_NAME: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"
STORAGE_DRIVER: 'overlay'
gce_instance:
image_name: "$IMAGE_NAME"
@ -320,7 +260,8 @@ in_podman_task:
alias: in_podman
only_if: *not_docs
depends_on:
- unit
- smoke
- vendor
env:
# This is key, cause the scripts to re-execute themselves inside a container.
@ -356,7 +297,6 @@ success_task:
- cross_build
- integration
- in_podman
- static_build
container:
image: "quay.io/libpod/alpine:latest"


@ -1,4 +1,5 @@
docs/buildah*.1
docs/*.5
/bin
/buildah
/imgtype


@ -2,11 +2,6 @@
# Changelog
## v1.23.1 (2021-09-27)
Vendor containers/common v0.44.2
post-1.23 branch fixups
## v1.23.0 (2021-09-13)
Vendor in containers/common v0.44.0


@ -12,6 +12,8 @@ BUILDFLAGS := -tags "$(BUILDTAGS)"
BUILDAH := buildah
GO := go
GO_LDFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-ldflags"; fi)
GO_GCFLAGS := $(shell if $(GO) version|grep -q gccgo; then echo "-gccgoflags"; else echo "-gcflags"; fi)
GO110 := 1.10
GOVERSION := $(findstring $(GO110),$(shell go version))
# test for go module support
@ -22,6 +24,7 @@ else
export GO_BUILD=$(GO) build
export GO_TEST=$(GO) test
endif
RACEFLAGS := $(shell $(GO_TEST) -race ./pkg/dummy > /dev/null 2>&1 && echo -race)
GIT_COMMIT ?= $(if $(shell git rev-parse --short HEAD),$(shell git rev-parse --short HEAD),$(error "git failed"))
SOURCE_DATE_EPOCH ?= $(if $(shell date +%s),$(shell date +%s),$(error "date failed"))
@ -33,8 +36,8 @@ RUNC_COMMIT := v1.0.0-rc8
LIBSECCOMP_COMMIT := release-2.3
EXTRA_LDFLAGS ?=
BUILDAH_LDFLAGS := -ldflags '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go docker/*.go manifests/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)'
SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go manifests/*.go pkg/blobcache/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go
LINTFLAGS ?=
@ -65,14 +68,15 @@ static:
cp -rfp ./result/bin/* ./bin/
bin/buildah: $(SOURCES) cmd/buildah/*.go
$(GO_BUILD) $(BUILDAH_LDFLAGS) -gcflags "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
$(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah
.PHONY: buildah
buildah: bin/buildah
LINUX_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^linux/)))
DARWIN_CROSS_TARGETS = $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^darwin/)))
WINDOWS_CROSS_TARGETS = $(addsuffix .exe,$(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep ^windows/))))
ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list)))
LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS))
DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS))
WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS)))
.PHONY: cross
cross: $(LINUX_CROSS_TARGETS) $(DARWIN_CROSS_TARGETS) $(WINDOWS_CROSS_TARGETS)
@ -164,14 +168,14 @@ test-integration: install.tools
cd tests; ./test_runner.sh
tests/testreport/testreport: tests/testreport/testreport.go
$(GO_BUILD) -ldflags "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go
$(GO_BUILD) $(GO_LDFLAGS) "-linkmode external -extldflags -static" -tags "$(STORAGETAGS) $(SECURITYTAGS)" -o tests/testreport/testreport ./tests/testreport/testreport.go
.PHONY: test-unit
test-unit: tests/testreport/testreport
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) $(shell $(GO) list ./... | grep -v vendor | grep -v tests | grep -v cmd) -timeout 45m
tmp=$(shell mktemp -d) ; \
mkdir -p $$tmp/root $$tmp/runroot; \
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover -race ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
$(GO_TEST) -v -tags "$(STORAGETAGS) $(SECURITYTAGS)" -cover $(RACEFLAGS) ./cmd/buildah -args --root $$tmp/root --runroot $$tmp/runroot --storage-driver vfs --signature-policy $(shell pwd)/tests/policy.json --registries-conf $(shell pwd)/tests/registries.conf
vendor-in-container:
podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src docker.io/library/golang:1.16 make vendor


@ -2,6 +2,7 @@ approvers:
- TomSweeneyRedHat
- ashley-cui
- cevich
- flouthoc
- giuseppe
- lsm5
- nalind


@ -77,7 +77,9 @@ From [`./examples/lighttpd.sh`](examples/lighttpd.sh):
```bash
$ cat > lighttpd.sh <<"EOF"
#!/usr/bin/env bash -x
#!/usr/bin/env bash
set -x
ctr1=$(buildah from "${1:-fedora}")
@ -103,27 +105,27 @@ $ sudo ./lighttpd.sh
## Commands
| Command | Description |
| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. |
| [buildah-build(1)](/docs/buildah-build.md) | Build an image using instructions from Containerfiles or Dockerfiles. |
| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. |
| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. |
| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information |
| [buildah-add(1)](/docs/buildah-add.1.md) | Add the contents of a file, URL, or a directory to the container. |
| [buildah-build(1)](/docs/buildah-build.1.md) | Build an image using instructions from Containerfiles or Dockerfiles. |
| [buildah-commit(1)](/docs/buildah-commit.1.md) | Create an image from a working container. |
| [buildah-config(1)](/docs/buildah-config.1.md) | Update image configuration settings. |
| [buildah-containers(1)](/docs/buildah-containers.1.md) | List the working containers and their base images. |
| [buildah-copy(1)](/docs/buildah-copy.1.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| [buildah-from(1)](/docs/buildah-from.1.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| [buildah-images(1)](/docs/buildah-images.1.md) | List images in local storage. |
| [buildah-info(1)](/docs/buildah-info.1.md) | Display Buildah system information. |
| [buildah-inspect(1)](/docs/buildah-inspect.1.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.1.md) | Mount the working container's root filesystem. |
| [buildah-pull(1)](/docs/buildah-pull.1.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.1.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.1.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.1.md) | Removes one or more working containers. |
| [buildah-rmi(1)](/docs/buildah-rmi.1.md) | Removes one or more images. |
| [buildah-run(1)](/docs/buildah-run.1.md) | Run a command inside of the container. |
| [buildah-tag(1)](/docs/buildah-tag.1.md) | Add an additional name to a local image. |
| [buildah-umount(1)](/docs/buildah-umount.1.md) | Unmount a working container's root file system. |
| [buildah-unshare(1)](/docs/buildah-unshare.1.md) | Launch a command in a user namespace with modified ID mappings. |
| [buildah-version(1)](/docs/buildah-version.1.md) | Display the Buildah Version Information |
**Future goals include:**
* more CI tests


@ -47,8 +47,10 @@ type AddAndCopyOptions struct {
// If the sources include directory trees, Hasher will be passed
// tar-format archives of the directory trees.
Hasher io.Writer
// Excludes is the contents of the .dockerignore file.
// Excludes is the contents of the .containerignore file.
Excludes []string
// IgnoreFile is the path to the .containerignore file.
IgnoreFile string
// ContextDir is the base directory for content being copied and
// Excludes patterns.
ContextDir string
@ -564,7 +566,11 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
}
if itemsCopied == 0 {
return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out)", localSourceStat.Glob, len(localSourceStat.Globbed))
excludesFile := ""
if options.IgnoreFile != "" {
excludesFile = " using " + options.IgnoreFile
}
return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered out%s)", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile)
}
}
return nil
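A hedged sketch of how a caller might fill the new `IgnoreFile` field alongside `Excludes` (the destination, patterns, and helper name are illustrative, not taken from this commit):

```go
package example

import (
	"path/filepath"

	"github.com/containers/buildah"
)

// addSources copies ./src into the working container while applying an
// ignore file. The exclude patterns here are placeholders; buildah and
// podman normally parse them out of the ignore file itself.
func addSources(builder *buildah.Builder, contextDir string) error {
	opts := buildah.AddAndCopyOptions{
		ContextDir: contextDir,
		// Patterns from the .containerignore (or .dockerignore) file.
		Excludes: []string{"*.log", "tmp/"},
		// Path of the ignore file; it is echoed in the
		// "no items matching glob … (N filtered out using <file>)" error.
		IgnoreFile: filepath.Join(contextDir, ".containerignore"),
	}
	return builder.Add("/app", false, opts, "./src")
}
```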


@ -13,6 +13,7 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
@ -154,6 +155,10 @@ type Builder struct {
// CNIConfigDir is the location of CNI configuration files, if the files in
// the default configuration directory shouldn't be used.
CNIConfigDir string
// NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
NetworkInterface nettypes.ContainerNetwork `json:"-"`
// ID mapping options to use when running processes in the container with non-host user namespaces.
IDMappingOptions define.IDMappingOptions
// Capabilities is a list of capabilities to use when running commands in the container.
@ -257,6 +262,8 @@ type BuilderOptions struct {
// or "scratch" to indicate that the container should not be based on
// an image.
FromImage string
// ContainerSuffix is the suffix to add for generated container names
ContainerSuffix string
// Container is a desired name for the build container.
Container string
// PullPolicy decides whether or not we should pull the image that
@ -271,6 +278,8 @@ type BuilderOptions struct {
// to store copies of layer blobs that we pull down, if any. It should
// already exist.
BlobDirectory string
// Logger is the logrus logger to write log messages with
Logger *logrus.Logger `json:"-"`
// Mount signals to NewBuilder() that the container should be mounted
// immediately.
Mount bool
@ -307,6 +316,10 @@ type BuilderOptions struct {
// CNIConfigDir is the location of CNI configuration files, if the files in
// the default configuration directory shouldn't be used.
CNIConfigDir string
// NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
NetworkInterface nettypes.ContainerNetwork `json:"-"`
// ID mapping options to use if we're setting up our own user namespace.
IDMappingOptions *define.IDMappingOptions
// Capabilities is a list of capabilities to use when
@ -327,6 +340,10 @@ type BuilderOptions struct {
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
// ProcessLabel is the SELinux process label associated with the container
ProcessLabel string
// MountLabel is the SELinux mount label associated with the container
MountLabel string
}
// ImportOptions are used to initialize a Builder from an existing container
@ -396,6 +413,12 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
if b.Type != containerType {
return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
}
netInt, err := getNetworkInterface(store, b.CNIConfigDir, b.CNIPluginPath)
if err != nil {
return nil, err
}
b.NetworkInterface = netInt
b.store = store
b.fixupConfig(nil)
b.setupLogger()


@ -1,7 +1,3 @@
- Changelog for v1.23.1 (2021-09-27)
* Vendor containers/common v0.44.2
* post-1.23 branch fixups
- Changelog for v1.23.0 (2021-09-13)
* Vendor in containers/common v0.44.0
* build(deps): bump github.com/containers/storage from 1.35.0 to 1.36.0


@ -238,7 +238,7 @@ func runUsingChrootMain() {
// Set the kernel's lock to "unlocked".
locked := 0
if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(ptyMasterFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 {
logrus.Errorf("error locking PTY descriptor: %v", err)
logrus.Errorf("error unlocking PTY descriptor: %v", err)
os.Exit(1)
}
// Get a handle for the other end.
@ -1191,21 +1191,33 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
requestFlags := bindFlags
expectedFlags := uintptr(0)
if util.StringInSlice("nodev", m.Options) {
requestFlags |= unix.MS_NODEV
expectedFlags |= unix.ST_NODEV
}
if util.StringInSlice("noexec", m.Options) {
requestFlags |= unix.MS_NOEXEC
expectedFlags |= unix.ST_NOEXEC
}
if util.StringInSlice("nosuid", m.Options) {
requestFlags |= unix.MS_NOSUID
expectedFlags |= unix.ST_NOSUID
}
if util.StringInSlice("ro", m.Options) {
requestFlags |= unix.MS_RDONLY
expectedFlags |= unix.ST_RDONLY
for _, option := range m.Options {
switch option {
case "nodev":
requestFlags |= unix.MS_NODEV
expectedFlags |= unix.ST_NODEV
case "dev":
requestFlags &= ^uintptr(unix.MS_NODEV)
expectedFlags &= ^uintptr(unix.ST_NODEV)
case "noexec":
requestFlags |= unix.MS_NOEXEC
expectedFlags |= unix.ST_NOEXEC
case "exec":
requestFlags &= ^uintptr(unix.MS_NOEXEC)
expectedFlags &= ^uintptr(unix.ST_NOEXEC)
case "nosuid":
requestFlags |= unix.MS_NOSUID
expectedFlags |= unix.ST_NOSUID
case "suid":
requestFlags &= ^uintptr(unix.MS_NOSUID)
expectedFlags &= ^uintptr(unix.ST_NOSUID)
case "ro":
requestFlags |= unix.MS_RDONLY
expectedFlags |= unix.ST_RDONLY
case "rw":
requestFlags &= ^uintptr(unix.MS_RDONLY)
expectedFlags &= ^uintptr(unix.ST_RDONLY)
}
}
switch m.Type {
case "bind":


@ -3,6 +3,9 @@
package chroot
import (
"io/ioutil"
"github.com/containers/common/pkg/seccomp"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
libseccomp "github.com/seccomp/libseccomp-golang"
@ -171,3 +174,27 @@ func setSeccomp(spec *specs.Spec) error {
}
return nil
}
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
switch seccompProfilePath {
case "unconfined":
spec.Linux.Seccomp = nil
case "":
seccompConfig, err := seccomp.GetDefaultProfile(spec)
if err != nil {
return errors.Wrapf(err, "loading default seccomp profile failed")
}
spec.Linux.Seccomp = seccompConfig
default:
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
if err != nil {
return errors.Wrapf(err, "opening seccomp profile (%s) failed", seccompProfilePath)
}
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
if err != nil {
return errors.Wrapf(err, "loading seccomp profile (%s) failed", seccompProfilePath)
}
spec.Linux.Seccomp = seccompConfig
}
return nil
}


@ -13,3 +13,11 @@ func setSeccomp(spec *specs.Spec) error {
}
return nil
}
func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
if spec.Linux != nil {
// runtime-tools may have supplied us with a default filter
spec.Linux.Seccomp = nil
}
return nil
}


@ -101,6 +101,8 @@ type CommitOptions struct {
// integers in the slice represent 0-indexed layer indices, with support for negative
// indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
OciEncryptLayers *[]int
// UnsetEnvs is a list of environments to not add to final image.
UnsetEnvs []string
}
var (


@ -8,9 +8,11 @@ import (
"strings"
"time"
"github.com/containerd/containerd/platforms"
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/stringid"
@ -28,18 +30,24 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I
return errors.Wrapf(err, "error getting manifest MIME type for %q", transports.ImageName(img.Reference()))
}
if wantedManifestMIMEType != actualManifestMIMEType {
layerInfos := img.LayerInfos()
for i := range layerInfos { // force the "compression" to gzip, which is supported by all of the formats we care about
layerInfos[i].CompressionOperation = types.Compress
layerInfos[i].CompressionAlgorithm = &compression.Gzip
}
updatedImg, err := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
LayerInfos: layerInfos,
})
if err != nil {
return errors.Wrapf(err, "resetting recorded compression for %q", transports.ImageName(img.Reference()))
}
secondUpdatedImg, err := updatedImg.UpdatedImage(ctx, types.ManifestUpdateOptions{
ManifestMIMEType: wantedManifestMIMEType,
InformationOnly: types.ManifestUpdateInformation{ // Strictly speaking, every value in here is invalid. But…
Destination: nil, // Destination is technically required, but actually necessary only for conversion _to_ v2s1. Leave it nil, we will crash if that ever changes.
LayerInfos: nil, // LayerInfos is necessary for size information in v2s2/OCI manifests, but the code can work with nil, and we are not reading the converted manifest at all.
LayerDiffIDs: nil, // LayerDiffIDs are actually embedded in the converted manifest, but the code can work with nil, and the values are not needed until pushing the finished image, at which time containerImageRef.NewImageSource builds the values from scratch.
},
})
if err != nil {
return errors.Wrapf(err, "error converting image %q from %q to %q", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType)
}
img = updatedImg
img = secondUpdatedImg
}
config, err := img.ConfigBlob(ctx)
if err != nil {
@ -126,6 +134,10 @@ func (b *Builder) fixupConfig(sys *types.SystemContext) {
} else {
b.SetArchitecture(runtime.GOARCH)
}
// in case the arch string we started with was shorthand for a known arch+variant pair, normalize it
ps := platforms.Normalize(ociv1.Platform{OS: b.OS(), Architecture: b.Architecture(), Variant: b.Variant()})
b.SetArchitecture(ps.Architecture)
b.SetVariant(ps.Variant)
}
if b.Format == define.Dockerv2ImageManifest && b.Hostname() == "" {
b.SetHostname(stringid.TruncateID(stringid.GenerateRandomID()))
@ -205,6 +217,21 @@ func (b *Builder) SetArchitecture(arch string) {
b.Docker.Architecture = arch
}
// Variant returns a name of the architecture variant on which the container,
// or a container built using an image built from this container, is intended
// to be run.
func (b *Builder) Variant() string {
return b.OCIv1.Variant
}
// SetVariant sets the name of the architecture variant on which the container,
// or a container built using an image built from this container, is intended
// to be run.
func (b *Builder) SetVariant(variant string) {
b.Docker.Variant = variant
b.OCIv1.Variant = variant
}
// Maintainer returns contact information for the person who built the image.
func (b *Builder) Maintainer() string {
return b.OCIv1.Author
@ -247,7 +274,7 @@ func (b *Builder) ClearOnBuild() {
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetOnBuild(onBuild string) {
if onBuild != "" && b.Format != define.Dockerv2ImageManifest {
logrus.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild)
b.Logger.Warnf("ONBUILD is not supported for OCI image format, %s will be ignored. Must use `docker` format", onBuild)
}
b.Docker.Config.OnBuild = append(b.Docker.Config.OnBuild, onBuild)
}
@ -279,7 +306,7 @@ func (b *Builder) Shell() []string {
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetShell(shell []string) {
if len(shell) > 0 && b.Format != define.Dockerv2ImageManifest {
logrus.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell)
b.Logger.Warnf("SHELL is not supported for OCI image format, %s will be ignored. Must use `docker` format", shell)
}
b.Docker.Config.Shell = copyStringSlice(shell)
@ -516,7 +543,7 @@ func (b *Builder) Domainname() string {
// discarded when writing images using OCIv1 formats.
func (b *Builder) SetDomainname(name string) {
if name != "" && b.Format != define.Dockerv2ImageManifest {
logrus.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name)
b.Logger.Warnf("DOMAINNAME is not supported for OCI image format, domainname %s will be ignored. Must use `docker` format", name)
}
b.Docker.Config.Domainname = name
}
@ -593,7 +620,7 @@ func (b *Builder) SetHealthcheck(config *docker.HealthConfig) {
b.Docker.Config.Healthcheck = nil
if config != nil {
if b.Format != define.Dockerv2ImageManifest {
logrus.Warnf("Healthcheck is not supported for OCI image format and will be ignored. Must use `docker` format")
b.Logger.Warnf("HEALTHCHECK is not supported for OCI image format and will be ignored. Must use `docker` format")
}
b.Docker.Config.Healthcheck = &docker.HealthConfig{
Test: copyStringSlice(config.Test),


@ -1660,7 +1660,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
// only check the length if there wasn't an error, which we'll
// check along with errors for other types of entries
if err == nil && written != hdr.Size {
return errors.Errorf("copier: put: error creating %q: incorrect length (%d != %d)", path, written, hdr.Size)
return errors.Errorf("copier: put: error creating regular file %q: incorrect length (%d != %d)", path, written, hdr.Size)
}
case tar.TypeLink:
var linkTarget string
@ -1681,7 +1681,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
break
}
}
if err = os.Remove(path); err == nil {
if err = os.RemoveAll(path); err == nil {
err = os.Link(linkTarget, path)
}
}
@ -1696,7 +1696,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
break
}
}
if err = os.Remove(path); err == nil {
if err = os.RemoveAll(path); err == nil {
err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path))
}
}
@ -1711,7 +1711,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
break
}
}
if err = os.Remove(path); err == nil {
if err = os.RemoveAll(path); err == nil {
err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor)))
}
}
@ -1726,14 +1726,14 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
break
}
}
if err = os.Remove(path); err == nil {
if err = os.RemoveAll(path); err == nil {
err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor)))
}
}
case tar.TypeDir:
if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) {
var st os.FileInfo
if st, err = os.Stat(path); err == nil && !st.IsDir() {
if st, err = os.Lstat(path); err == nil && !st.IsDir() {
// it's not a directory, so remove it and mkdir
if err = os.Remove(path); err == nil {
err = os.Mkdir(path, 0700)
@ -1758,7 +1758,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
break
}
}
if err = os.Remove(path); err == nil {
if err = os.RemoveAll(path); err == nil {
err = mkfifo(path, 0600)
}
}


@ -4,6 +4,7 @@ import (
"io"
"time"
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage/pkg/archive"
@ -70,7 +71,9 @@ type CommonBuildOptions struct {
Ulimit []string
// Volumes to bind mount into the container
Volumes []string
// Secrets are the available secrets to use in a build
// Secrets are the available secrets to use in a build. Each item in the
// slice takes the form "id=foo,src=bar", where both "id" and "src" are
// required, in that order, and "bar" is the name of a file.
Secrets []string
// SSHSources is the available ssh agent connections to forward in the build
SSHSources []string
@ -78,6 +81,8 @@ type CommonBuildOptions struct {
// BuildOptions can be used to alter how an image is built.
type BuildOptions struct {
// ContainerSuffix is the name to suffix containers with
ContainerSuffix string
// ContextDirectory is the default source location for COPY and ADD
// commands.
ContextDirectory string
@ -157,6 +162,10 @@ type BuildOptions struct {
// CNIConfigDir is the location of CNI configuration files, if the files in
// the default configuration directory shouldn't be used.
CNIConfigDir string
// NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
NetworkInterface nettypes.ContainerNetwork `json:"-"`
// ID mapping options to use if we're setting up our own user namespace
// when handling RUN instructions.
IDMappingOptions *IDMappingOptions
@ -227,6 +236,8 @@ type BuildOptions struct {
RusageLogFile string
// Excludes is a list of excludes to be used instead of the .dockerignore file.
Excludes []string
// IgnoreFile is a name of the .containerignore file
IgnoreFile string
// From is the image name to use to replace the value specified in the first
// FROM instruction in the Containerfile
From string
@ -234,4 +245,10 @@ type BuildOptions struct {
// to build the image for. If this slice has items in it, the OS and
// Architecture fields above are ignored.
Platforms []struct{ OS, Arch, Variant string }
// AllPlatforms tells the builder to set the list of target platforms
// to match the set of platforms for which all of the build's base
// images are available. If this field is set, Platforms is ignored.
AllPlatforms bool
// UnsetEnvs is a list of environments to not add to final image.
UnsetEnvs []string
}
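Taken together, the new `BuildOptions` fields correspond to the CLI additions elsewhere in this commit (`--all-platforms`, `--unsetenv`); a minimal sketch with illustrative values:

```go
package example

import (
	buildahDefine "github.com/containers/buildah/define"
)

// newBuildOptions shows the fields added in this change; the values are
// purely illustrative.
func newBuildOptions(contextDir string) buildahDefine.BuildOptions {
	return buildahDefine.BuildOptions{
		ContextDirectory: contextDir,
		// Build for every platform the base images have in common
		// (corresponds to --all-platforms); Platforms is ignored when set.
		AllPlatforms: true,
		// Drop environment variables from the final image
		// (corresponds to --unsetenv).
		UnsetEnvs: []string{"HTTP_PROXY"},
		// Name of the ignore file whose patterns populate Excludes.
		IgnoreFile: ".containerignore",
		// Suffix used when generating names for build containers.
		ContainerSuffix: "build",
	}
}
```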


@ -29,15 +29,11 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
Version = "1.23.1"
Version = "1.24.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
DefaultCNIPluginPath = "/usr/libexec/cni:/opt/cni/bin"
// DefaultCNIConfigDir is the default location of CNI configuration files.
DefaultCNIConfigDir = "/etc/cni/net.d"
// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
// suitable for specifying as a value of the PreferredManifestType
// member of a CommitOptions structure. It is also the default.
@ -93,6 +89,13 @@ type IDMappingOptions struct {
GIDMap []specs.LinuxIDMapping
}
// Secret is a secret source that can be used in a RUN
type Secret struct {
ID string
Source string
SourceType string
}
// TempDirForURL checks if the passed-in string looks like a URL or -. If it is,
// TempDirForURL creates a temporary directory, arranges for its contents to be
// the contents of that URL, and returns the temporary directory's path, along
@ -117,12 +120,12 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
return "", "", errors.Wrapf(err, "error parsing url %q", url)
}
if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") {
err = cloneToDirectory(url, name)
combinedOutput, err := cloneToDirectory(url, name)
if err != nil {
if err2 := os.RemoveAll(name); err2 != nil {
logrus.Debugf("error removing temporary directory %q: %v", name, err2)
}
return "", "", err
return "", "", errors.Wrapf(err, "cloning %q to %q:\n%s", url, name, string(combinedOutput))
}
return name, "", nil
}
@ -160,7 +163,7 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
return "", "", errors.Errorf("unreachable code reached")
}
func cloneToDirectory(url, dir string) error {
func cloneToDirectory(url, dir string) ([]byte, error) {
gitBranch := strings.Split(url, "#")
var cmd *exec.Cmd
if len(gitBranch) < 2 {
@ -170,7 +173,7 @@ func cloneToDirectory(url, dir string) error {
logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir)
cmd = exec.Command("git", "clone", "--recurse-submodules", "-b", gitBranch[1], gitBranch[0], dir)
}
return cmd.Run()
return cmd.CombinedOutput()
}
func downloadToDirectory(url, dir string) error {


@ -151,6 +151,8 @@ type V1Image struct {
Config *Config `json:"config,omitempty"`
// Architecture is the hardware that the image is built and runs on
Architecture string `json:"architecture,omitempty"`
// Variant is a variant of the CPU that the image is built and runs on
Variant string `json:"variant,omitempty"`
// OS is the operating system used to build and run the image
OS string `json:"os,omitempty"`
// Size is the total size of the image including all layers it is composed of


@ -3,43 +3,46 @@ module github.com/containers/buildah
go 1.13
require (
github.com/containerd/containerd v1.5.5
github.com/containernetworking/cni v0.8.1
github.com/containers/common v0.44.2
github.com/containers/image/v5 v5.16.0
github.com/containerd/containerd v1.5.9
github.com/containernetworking/cni v1.0.1
github.com/containers/common v0.46.1-0.20220110165509-08c2c97e5e25
github.com/containers/image/v5 v5.18.0
github.com/containers/ocicrypt v1.1.2
github.com/containers/storage v1.36.0
github.com/containers/storage v1.37.1-0.20211122164443-82b8f06bfc08
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.12+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
github.com/fsouza/go-dockerclient v1.7.4
github.com/fsouza/go-dockerclient v1.7.7
github.com/ghodss/yaml v1.0.0
github.com/hashicorp/go-multierror v1.1.1
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/mattn/go-shellwords v1.0.12
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.16.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.17.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
github.com/opencontainers/runc v1.0.2
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
github.com/opencontainers/runc v1.0.3
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.8.5
github.com/opencontainers/selinux v1.10.0
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656
github.com/pkg/errors v0.9.1
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.2.1
github.com/spf13/cobra v1.3.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55
golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
k8s.io/klog v1.0.0 // indirect
)
replace github.com/sirupsen/logrus => github.com/sirupsen/logrus v1.4.2
replace github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.2-0.20211123152302-43a7dee1ec31

File diff suppressed because it is too large.


@ -239,6 +239,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
Versioned: specs.Versioned{
SchemaVersion: 2,
},
MediaType: v1.MediaTypeImageManifest,
Config: v1.Descriptor{
MediaType: v1.MediaTypeImageConfig,
},
@ -752,6 +753,10 @@ func (b *Builder) makeImageRef(options CommitOptions) (types.ImageReference, err
if manifestType == "" {
manifestType = define.OCIv1ImageManifest
}
for _, u := range options.UnsetEnvs {
b.UnsetEnv(u)
}
oconfig, err := json.Marshal(&b.OCIv1)
if err != nil {
return nil, errors.Wrapf(err, "error encoding OCI-format image configuration %#v", b.OCIv1)
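`UnsetEnvs` is applied here by calling `b.UnsetEnv` for each entry just before the image configuration is encoded; a hedged usage sketch (the variable names and helper are illustrative):

```go
package example

import (
	"context"

	"github.com/containers/buildah"
	"github.com/containers/image/v5/types"
)

// commitWithoutProxies commits a working container while dropping proxy
// variables from the final image config; each entry in UnsetEnvs is passed
// to b.UnsetEnv() before the config is serialized.
func commitWithoutProxies(ctx context.Context, b *buildah.Builder, dest types.ImageReference) (string, error) {
	id, _, _, err := b.Commit(ctx, dest, buildah.CommitOptions{
		UnsetEnvs: []string{"HTTP_PROXY", "HTTPS_PROXY"},
	})
	return id, err
}
```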


@ -10,15 +10,19 @@ import (
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/containerd/containerd/platforms"
"github.com/containers/buildah/define"
"github.com/containers/buildah/util"
"github.com/containers/common/libimage"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/shortnames"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
@ -58,6 +62,10 @@ type BuildOptions = define.BuildOptions
// returns the ID of the built image, and if a name was assigned to it, a
// canonical reference for that image.
func BuildDockerfiles(ctx context.Context, store storage.Store, options define.BuildOptions, paths ...string) (id string, ref reference.Canonical, err error) {
if options.CommonBuildOpts == nil {
options.CommonBuildOpts = &define.CommonBuildOptions{}
}
if len(paths) == 0 {
return "", nil, errors.Errorf("error building: no dockerfiles specified")
}
@ -168,7 +176,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
files = append(files, b.Bytes())
}
if options.Jobs != nil && *options.Jobs != 0 {
if options.JobSemaphore == nil && options.Jobs != nil && *options.Jobs != 0 {
options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs))
}
@ -193,21 +201,37 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
})
}
if options.AllPlatforms {
options.Platforms, err = platformsForBaseImages(ctx, logger, paths, files, options.From, options.Args, options.SystemContext)
if err != nil {
return "", nil, err
}
}
systemContext := options.SystemContext
for _, platform := range options.Platforms {
platformContext := *systemContext
platformContext.OSChoice = platform.OS
platformContext.ArchitectureChoice = platform.Arch
platformContext.VariantChoice = platform.Variant
platformSpec := platforms.Normalize(v1.Platform{
OS: platform.OS,
Architecture: platform.Arch,
Variant: platform.Variant,
})
// platforms.Normalize converts an empty os value to GOOS
// so we have to check the original value here to not overwrite the default for no reason
if platform.OS != "" {
platformContext.OSChoice = platformSpec.OS
}
if platform.Arch != "" {
platformContext.ArchitectureChoice = platformSpec.Architecture
platformContext.VariantChoice = platformSpec.Variant
}
platformOptions := options
platformOptions.SystemContext = &platformContext
platformOptions.OS = platformContext.OSChoice
platformOptions.Architecture = platformContext.ArchitectureChoice
logPrefix := ""
if len(options.Platforms) > 1 {
logPrefix = "[" + platform.OS + "/" + platform.Arch
if platform.Variant != "" {
logPrefix += "/" + platform.Variant
}
logPrefix += "] "
logPrefix = "[" + platforms.Format(platformSpec) + "] "
}
builds.Go(func() error {
thisID, thisRef, err := buildDockerfilesOnce(ctx, store, logger, logPrefix, platformOptions, paths, files)
@ -217,12 +241,8 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
id, ref = thisID, thisRef
instancesLock.Lock()
instances = append(instances, instance{
ID: thisID,
Platform: v1.Platform{
OS: platformContext.OSChoice,
Architecture: platformContext.ArchitectureChoice,
Variant: platformContext.VariantChoice,
},
ID: thisID,
Platform: platformSpec,
})
instancesLock.Unlock()
return nil
@ -318,6 +338,35 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
// If any label modifications were requested, append them to the main
// node so they become regular build steps. Docker applies label
// changes as the last step; treating them as ordinary steps here
// makes it easier to reuse cached layers when nothing else changed.
if len(options.Labels) > 0 {
for _, labelSpec := range options.Labels {
label := strings.SplitN(labelSpec, "=", 2)
labelLine := ""
key := label[0]
value := ""
if len(label) > 1 {
value = label[1]
}
// check from only empty key since docker supports empty value
if key != "" {
labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value)
additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine))
if err != nil {
return "", nil, errors.Wrapf(err, "error while adding additional LABEL steps")
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
}
}
exec, err := newExecutor(logger, logPrefix, store, options, mainNode)
if err != nil {
return "", nil, errors.Wrapf(err, "error creating build executor")
@ -400,3 +449,194 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
}
return &stdoutBuffer, nil
}
// platformsForBaseImages resolves the names of base images from the
// dockerfiles, and if they are all valid references to manifest lists, returns
// the list of platforms that are supported by all of the base images.
func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfilepaths []string, dockerfiles [][]byte, from string, args map[string]string, systemContext *types.SystemContext) ([]struct{ OS, Arch, Variant string }, error) {
baseImages, err := baseImages(dockerfilepaths, dockerfiles, from, args)
if err != nil {
return nil, errors.Wrapf(err, "determining list of base images")
}
logrus.Debugf("unresolved base images: %v", baseImages)
if len(baseImages) == 0 {
return nil, errors.Wrapf(err, "build uses no non-scratch base images")
}
targetPlatforms := make(map[string]struct{})
var platformList []struct{ OS, Arch, Variant string }
for baseImageIndex, baseImage := range baseImages {
resolved, err := shortnames.Resolve(systemContext, baseImage)
if err != nil {
return nil, errors.Wrapf(err, "resolving image name %q", baseImage)
}
var manifestBytes []byte
var manifestType string
for _, candidate := range resolved.PullCandidates {
ref, err := docker.NewReference(candidate.Value)
if err != nil {
logrus.Debugf("parsing image reference %q: %v", candidate.Value.String(), err)
continue
}
src, err := ref.NewImageSource(ctx, systemContext)
if err != nil {
logrus.Debugf("preparing to read image manifest for %q: %v", baseImage, err)
continue
}
candidateBytes, candidateType, err := src.GetManifest(ctx, nil)
_ = src.Close()
if err != nil {
logrus.Debugf("reading image manifest for %q: %v", baseImage, err)
continue
}
if !manifest.MIMETypeIsMultiImage(candidateType) {
logrus.Debugf("base image %q is not a reference to a manifest list: %v", baseImage, err)
continue
}
if err := candidate.Record(); err != nil {
logrus.Debugf("error recording name %q for base image %q: %v", candidate.Value.String(), baseImage, err)
continue
}
baseImage = candidate.Value.String()
manifestBytes, manifestType = candidateBytes, candidateType
break
}
if len(manifestBytes) == 0 {
if len(resolved.PullCandidates) > 0 {
return nil, errors.Errorf("base image name %q didn't resolve to a manifest list", baseImage)
}
return nil, errors.Errorf("base image name %q didn't resolve to anything", baseImage)
}
if manifestType != v1.MediaTypeImageIndex {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
return nil, errors.Wrapf(err, "parsing manifest list from base image %q", baseImage)
}
list, err = list.ConvertToMIMEType(v1.MediaTypeImageIndex)
if err != nil {
return nil, errors.Wrapf(err, "converting manifest list from base image %q to v2s2 list", baseImage)
}
manifestBytes, err = list.Serialize()
if err != nil {
return nil, errors.Wrapf(err, "encoding converted v2s2 manifest list for base image %q", baseImage)
}
}
index, err := manifest.OCI1IndexFromManifest(manifestBytes)
if err != nil {
return nil, errors.Wrapf(err, "decoding manifest list for base image %q", baseImage)
}
if baseImageIndex == 0 {
// populate the list with the first image's normalized platforms
for _, instance := range index.Manifests {
if instance.Platform == nil {
continue
}
platform := platforms.Normalize(*instance.Platform)
targetPlatforms[platforms.Format(platform)] = struct{}{}
logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform))
}
} else {
// prune the list of any normalized platforms this base image doesn't support
imagePlatforms := make(map[string]struct{})
for _, instance := range index.Manifests {
if instance.Platform == nil {
continue
}
platform := platforms.Normalize(*instance.Platform)
imagePlatforms[platforms.Format(platform)] = struct{}{}
logger.Debugf("image %q supports %q", baseImage, platforms.Format(platform))
}
var removed []string
for platform := range targetPlatforms {
if _, present := imagePlatforms[platform]; !present {
removed = append(removed, platform)
logger.Debugf("image %q does not support %q", baseImage, platform)
}
}
for _, remove := range removed {
delete(targetPlatforms, remove)
}
}
if baseImageIndex == len(baseImages)-1 && len(targetPlatforms) > 0 {
// extract the list
for platform := range targetPlatforms {
platform, err := platforms.Parse(platform)
if err != nil {
return nil, errors.Wrapf(err, "parsing platform double/triple %q", platform)
}
platformList = append(platformList, struct{ OS, Arch, Variant string }{
OS: platform.OS,
Arch: platform.Architecture,
Variant: platform.Variant,
})
logger.Debugf("base images all support %q", platform)
}
}
}
if len(platformList) == 0 {
return nil, errors.New("base images have no platforms in common")
}
return platformList, nil
}
// baseImages parses the dockerfilecontents, possibly replacing the first
// stage's base image with FROM, and returns the list of base images as
// provided. Each entry in the dockerfilenames slice corresponds to a slice in
// dockerfilecontents.
func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string) ([]string, error) {
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
if err != nil {
return nil, errors.Wrapf(err, "error parsing main Dockerfile: %s", dockerfilenames[0])
}
for i, d := range dockerfilecontents[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
if err != nil {
return nil, errors.Wrapf(err, "error parsing additional Dockerfile %s", dockerfilenames[i])
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
b := imagebuilder.NewBuilder(args)
defaultContainerConfig, err := config.Default()
if err != nil {
return nil, errors.Wrapf(err, "failed to get container config")
}
b.Env = defaultContainerConfig.GetDefaultEnv()
stages, err := imagebuilder.NewStages(mainNode, b)
if err != nil {
return nil, errors.Wrap(err, "error reading multiple stages")
}
var baseImages []string
nicknames := make(map[string]bool)
for stageIndex, stage := range stages {
node := stage.Node // first line
for node != nil { // each line
for _, child := range node.Children { // tokens on this line, though we only care about the first
switch strings.ToUpper(child.Value) { // first token - instruction
case "FROM":
if child.Next != nil { // second token on this line
// If we have a fromOverride, replace the value of
// image name for the first FROM in the Containerfile.
if from != "" {
child.Next.Value = from
from = ""
}
base := child.Next.Value
if base != "scratch" && !nicknames[base] {
// TODO: this didn't undergo variable and arg
// expansion, so if the AS clause in another
// FROM instruction uses argument values,
// we might not record the right value here.
baseImages = append(baseImages, base)
}
}
}
}
node = node.Next // next line
}
if stage.Name != strconv.Itoa(stageIndex) {
nicknames[stage.Name] = true
}
}
return baseImages, nil
}


@ -18,6 +18,7 @@ import (
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/buildah/util"
"github.com/containers/common/libimage"
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
@ -57,6 +58,7 @@ var builtinAllowedBuildArgs = map[string]bool{
// interface. It coordinates the entire build by using one or more
// StageExecutors to handle each stage of the build.
type Executor struct {
containerSuffix string
logger *logrus.Logger
stages map[string]*StageExecutor
store storage.Store
@ -84,47 +86,53 @@ type Executor struct {
configureNetwork define.NetworkConfigurationPolicy
cniPluginPath string
cniConfigDir string
idmappingOptions *define.IDMappingOptions
commonBuildOptions *define.CommonBuildOptions
defaultMountsFilePath string
iidfile string
squash bool
labels []string
annotations []string
layers bool
useCache bool
removeIntermediateCtrs bool
forceRmIntermediateCtrs bool
imageMap map[string]string // Used to map images that we create to handle the AS construct.
containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
baseMap map[string]bool // Holds the names of every base image, as given.
rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
blobDirectory string
excludes []string
unusedArgs map[string]struct{}
capabilities []string
devices define.ContainerDevices
signBy string
architecture string
timestamp *time.Time
os string
maxPullPushRetries int
retryPullPushDelay time.Duration
ociDecryptConfig *encconfig.DecryptConfig
lastError error
terminatedStage map[string]error
stagesLock sync.Mutex
stagesSemaphore *semaphore.Weighted
jobs int
logRusage bool
rusageLogFile io.Writer
imageInfoLock sync.Mutex
imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs
fromOverride string
manifest string
secrets map[string]string
sshsources map[string]*sshagent.Source
logPrefix string
// NetworkInterface is the libnetwork network interface used to setup CNI or netavark networks.
networkInterface nettypes.ContainerNetwork
idmappingOptions *define.IDMappingOptions
commonBuildOptions *define.CommonBuildOptions
defaultMountsFilePath string
iidfile string
squash bool
labels []string
annotations []string
layers bool
useCache bool
removeIntermediateCtrs bool
forceRmIntermediateCtrs bool
imageMap map[string]string // Used to map images that we create to handle the AS construct.
containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
baseMap map[string]bool // Holds the names of every base image, as given.
rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
blobDirectory string
excludes []string
ignoreFile string
unusedArgs map[string]struct{}
capabilities []string
devices define.ContainerDevices
signBy string
architecture string
timestamp *time.Time
os string
maxPullPushRetries int
retryPullPushDelay time.Duration
ociDecryptConfig *encconfig.DecryptConfig
lastError error
terminatedStage map[string]error
stagesLock sync.Mutex
stagesSemaphore *semaphore.Weighted
jobs int
logRusage bool
rusageLogFile io.Writer
imageInfoLock sync.Mutex
imageInfoCache map[string]imageTypeAndHistoryAndDiffIDs
fromOverride string
manifest string
secrets map[string]define.Secret
sshsources map[string]*sshagent.Source
logPrefix string
unsetEnvs []string
processLabel string // Shares processLabel of first stage container with containers of other stages in same build
mountLabel string // Shares mountLabel of first stage container with containers of other stages in same build
}
type imageTypeAndHistoryAndDiffIDs struct {
@ -143,7 +151,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
excludes := options.Excludes
if len(excludes) == 0 {
excludes, err = imagebuilder.ParseDockerignore(options.ContextDirectory)
excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile)
if err != nil {
return nil, err
}
@ -203,11 +211,13 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
}
exec := Executor{
containerSuffix: options.ContainerSuffix,
logger: logger,
stages: make(map[string]*StageExecutor),
store: store,
contextDir: options.ContextDirectory,
excludes: excludes,
ignoreFile: options.IgnoreFile,
pullPolicy: options.PullPolicy,
registry: options.Registry,
ignoreUnrecognizedInstructions: options.IgnoreUnrecognizedInstructions,
@ -231,6 +241,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
configureNetwork: options.ConfigureNetwork,
cniPluginPath: options.CNIPluginPath,
cniConfigDir: options.CNIConfigDir,
networkInterface: options.NetworkInterface,
idmappingOptions: options.IDMappingOptions,
commonBuildOptions: options.CommonBuildOpts,
defaultMountsFilePath: options.DefaultMountsFilePath,
@ -268,6 +279,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
secrets: secrets,
sshsources: sshsources,
logPrefix: logPrefix,
unsetEnvs: options.UnsetEnvs,
}
if exec.err == nil {
exec.err = os.Stderr
@ -353,7 +365,7 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e
func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebuilder.Stages) (bool, error) {
found := false
for _, otherStage := range stages {
if otherStage.Name == name || fmt.Sprintf("%d", otherStage.Position) == name {
if otherStage.Name == name || strconv.Itoa(otherStage.Position) == name {
found = true
break
}
@ -521,9 +533,9 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
lastErr = err
}
}
cleanupStages = nil
b.stagesLock.Unlock()
cleanupStages = nil
// Clean up any builders that we used to get data from images.
for _, builder := range b.containerMap {
if err := builder.Delete(); err != nil {
@ -628,7 +640,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
Error error
}
ch := make(chan Result)
ch := make(chan Result, len(stages))
if b.stagesSemaphore == nil {
jobs := int64(b.jobs)
@ -645,19 +657,43 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
wg.Add(len(stages))
go func() {
cancel := false
for stageIndex := range stages {
index := stageIndex
// Acquire the semaphore before creating the goroutine so we are sure they
// run in the specified order.
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
cancel = true
b.lastError = err
return
ch <- Result{
Index: index,
Error: err,
}
wg.Done()
continue
}
b.stagesLock.Lock()
cleanupStages := cleanupStages
b.stagesLock.Unlock()
go func() {
defer b.stagesSemaphore.Release(1)
defer wg.Done()
if cancel || cleanupStages == nil {
var err error
if stages[index].Name != strconv.Itoa(index) {
err = errors.Errorf("not building stage %d: build canceled", index)
} else {
err = errors.Errorf("not building stage %d (%s): build canceled", index, stages[index].Name)
}
ch <- Result{
Index: index,
Error: err,
}
return
}
stageID, stageRef, stageErr := b.buildStage(ctx, cleanupStages, stages, index)
if stageErr != nil {
cancel = true
ch <- Result{
Index: index,
Error: stageErr,
@ -684,7 +720,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
b.stagesLock.Lock()
b.terminatedStage[stage.Name] = r.Error
b.terminatedStage[fmt.Sprintf("%d", stage.Position)] = r.Error
b.terminatedStage[strconv.Itoa(stage.Position)] = r.Error
if r.Error != nil {
b.stagesLock.Unlock()

View File

@ -15,6 +15,7 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/buildah/define"
buildahdocker "github.com/containers/buildah/docker"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/pkg/rusage"
"github.com/containers/buildah/util"
cp "github.com/containers/image/v5/copy"
@ -401,6 +402,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
PreserveOwnership: preserveOwnership,
ContextDir: contextDir,
Excludes: copyExcludes,
IgnoreFile: s.executor.ignoreFile,
IDMappingOptions: idMappingOptions,
StripSetuidBit: stripSetuid,
StripSetgidBit: stripSetgid,
@ -412,10 +414,67 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
return nil
}
// Returns a map of StageName/ImageName:internal.StageMountDetails for RunOpts if any --mount with from is provided.
// The stage can automatically clean up these mounts when it is removed.
// Check whether RUN contains `--mount` with `from`. If yes, pre-mount the images or stages from the executor for Run.
// Stages mounted here will be used by Run().
func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) {
stageMountPoints := make(map[string]internal.StageMountDetails)
for _, flag := range mountList {
if strings.Contains(flag, "from") {
arr := strings.SplitN(flag, ",", 2)
if len(arr) < 2 {
return nil, errors.Errorf("Invalid --mount command: %s", flag)
}
tokens := strings.Split(arr[1], ",")
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "from":
if len(kv) == 1 {
return nil, errors.Errorf("unable to resolve argument for `from=`: bad argument")
}
if kv[1] == "" {
return nil, errors.Errorf("unable to resolve argument for `from=`: from points to an empty value")
}
from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments())
if fromErr != nil {
return nil, errors.Wrapf(fromErr, "unable to resolve argument %q", kv[1])
}
// If the source's name corresponds to the
// result of an earlier stage, wait for that
// stage to finish being built.
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
return nil, err
}
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
stageMountPoints[from] = internal.StageMountDetails{IsStage: true, MountPoint: otherStage.mountPoint}
break
} else {
mountPoint, err := s.getImageRootfs(s.ctx, from)
if err != nil {
return nil, errors.Errorf("%s from=%s: no stage or image found with that name", flag, from)
}
stageMountPoints[from] = internal.StageMountDetails{IsStage: false, MountPoint: mountPoint}
break
}
default:
continue
}
}
}
}
return stageMountPoints, nil
}
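For reference, a minimal standalone sketch (hypothetical, simplified from the loop above) of how the from= value is extracted from a single --mount flag such as RUN --mount=type=bind,from=builder,target=/out make install:

package main

import (
	"fmt"
	"strings"
)

// fromOfMount extracts the "from" value of one --mount flag the way
// runStageMountPoints does: skip the leading type= token, then scan
// the remaining comma-separated key=value pairs.
func fromOfMount(flag string) string {
	arr := strings.SplitN(flag, ",", 2)
	if len(arr) < 2 {
		return ""
	}
	for _, val := range strings.Split(arr[1], ",") {
		kv := strings.SplitN(val, "=", 2)
		if kv[0] == "from" && len(kv) == 2 {
			return kv[1]
		}
	}
	return ""
}

func main() {
	// e.g. RUN --mount=type=bind,from=builder,target=/out make install
	fmt.Println(fromOfMount("type=bind,from=builder,target=/out")) // "builder"
}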
// Run executes a RUN instruction using the stage's current working container
// as a root directory.
func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
logrus.Debugf("RUN %#v, %#v", run, config)
stageMountPoints, err := s.runStageMountPoints(run.Mounts)
if err != nil {
return err
}
if s.builder == nil {
return errors.Errorf("no build container available")
}
@ -439,6 +498,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
User: config.User,
WorkingDir: config.WorkingDir,
Entrypoint: config.Entrypoint,
ContextDir: s.executor.contextDir,
Cmd: config.Cmd,
Stdin: stdin,
Stdout: s.executor.out,
@ -449,6 +509,8 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
Secrets: s.executor.secrets,
SSHSources: s.executor.sshsources,
RunMounts: run.Mounts,
StageMountPoints: stageMountPoints,
SystemContext: s.executor.systemContext,
}
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
@ -537,6 +599,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
Args: ib.Args,
FromImage: from,
PullPolicy: pullPolicy,
ContainerSuffix: s.executor.containerSuffix,
Registry: s.executor.registry,
BlobDirectory: s.executor.blobDirectory,
SignaturePolicyPath: s.executor.signaturePolicyPath,
@ -547,6 +610,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
ConfigureNetwork: s.executor.configureNetwork,
CNIPluginPath: s.executor.cniPluginPath,
CNIConfigDir: s.executor.cniConfigDir,
NetworkInterface: s.executor.networkInterface,
IDMappingOptions: s.executor.idmappingOptions,
CommonBuildOpts: s.executor.commonBuildOptions,
DefaultMountsFilePath: s.executor.defaultMountsFilePath,
@ -556,6 +620,9 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
MaxPullRetries: s.executor.maxPullPushRetries,
PullRetryDelay: s.executor.retryPullPushDelay,
OciDecryptConfig: s.executor.ociDecryptConfig,
Logger: s.executor.logger,
ProcessLabel: s.executor.processLabel,
MountLabel: s.executor.mountLabel,
}
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
@ -563,6 +630,16 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
return nil, errors.Wrapf(err, "error creating build container")
}
// If the executor's ProcessLabel and MountLabel are empty, this is the first stage.
// Make sure we share the first stage's ProcessLabel and MountLabel with all subsequent stages.
// Doing this ensures that one stage in the same build can mount another stage even if `selinux`
// is enabled.
if s.executor.mountLabel == "" && s.executor.processLabel == "" {
s.executor.mountLabel = builder.MountLabel
s.executor.processLabel = builder.ProcessLabel
}
if initializeIBConfig {
volumes := map[string]struct{}{}
for _, v := range builder.Volumes() {
@ -673,8 +750,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
checkForLayers := s.executor.layers && s.executor.useCache
moreStages := s.index < len(s.stages)-1
lastStage := !moreStages
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)])
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)])
// If the base image's name corresponds to the result of an earlier
// stage, make sure that stage has finished building an image, and
@ -971,7 +1048,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
}
if cacheID != "" && !(s.executor.squash && lastInstruction) {
// We want to save history for the other layers during a squashed build.
// The toggle flag lets the executor treat the other instructions and layers
// as a regular build and only perform the squashing at the end.
squashToggle := false
// Note: If the build has squash, we must try to reuse as many layers as possible if a cache is found.
// So only perform the commit if it is the lastInstruction of the lastStage.
if cacheID != "" {
logCacheHit(cacheID)
// A suitable cached image was found, so we can just
// reuse it. If we need to add a name to the resulting
@ -985,6 +1068,13 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
}
} else {
if s.executor.squash {
// We want to save history for the other layers during a squashed build.
// The squashToggle flag lets the executor treat the other instructions and layers
// as a regular build and only perform the squashing at the end.
s.executor.squash = false
squashToggle = true
}
// We're not going to find any more cache hits, so we
// can stop looking for them.
checkForLayers = false
@ -996,6 +1086,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
}
// Perform the final squash for this build, as we are on the
// last instruction of the last stage.
if (s.executor.squash || squashToggle) && lastInstruction && lastStage {
s.executor.squash = true
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
if err != nil {
return "", nil, errors.Wrapf(err, "error committing final squash step %+v", *step)
}
}
logImageID(imgID)
// Update our working container to be based off of the cached
@ -1110,10 +1211,10 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
}
switch strings.ToUpper(node.Value) {
case "ARG":
buildArgs := s.getBuildArgs()
buildArgs := s.getBuildArgsKey()
return "/bin/sh -c #(nop) ARG " + buildArgs
case "RUN":
buildArgs := s.getBuildArgs()
buildArgs := s.getBuildArgsResolvedForRun()
if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
}
@ -1131,10 +1232,47 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri
// getBuildArgs returns a string of the build-args specified during the build process
// it excludes any build-args that were not used in the build process
func (s *StageExecutor) getBuildArgs() string {
buildArgs := s.stage.Builder.Arguments()
sort.Strings(buildArgs)
return strings.Join(buildArgs, " ")
// Values for args are overridden by the values specified using ENV.
// Reason: values from ENV always override values specified for an arg.
func (s *StageExecutor) getBuildArgsResolvedForRun() string {
var envs []string
configuredEnvs := make(map[string]string)
dockerConfig := s.stage.Builder.Config()
for _, env := range dockerConfig.Env {
splitv := strings.SplitN(env, "=", 2)
if len(splitv) == 2 {
configuredEnvs[splitv[0]] = splitv[1]
}
}
for key, value := range s.stage.Builder.Args {
if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
// If the value was set in the image, it is given higher priority,
// so embed that value into the build history.
_, inImage := configuredEnvs[key]
if inImage {
envs = append(envs, fmt.Sprintf("%s=%s", key, configuredEnvs[key]))
} else {
envs = append(envs, fmt.Sprintf("%s=%s", key, value))
}
}
}
sort.Strings(envs)
return strings.Join(envs, " ")
}
// getBuildArgsKey returns the set of arg keys which were specified during the build process.
// The following function is used exclusively by the build history.
func (s *StageExecutor) getBuildArgsKey() string {
var envs []string
for key := range s.stage.Builder.Args {
if _, ok := s.stage.Builder.AllowedArgs[key]; ok {
envs = append(envs, key)
}
}
sort.Strings(envs)
return strings.Join(envs, " ")
}
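A standalone sketch (hypothetical, simplified from the two methods above) of the precedence rule they encode: an ENV value wins over a build arg of the same name when the history string for a RUN step is assembled.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// resolveArgsForHistory mimics getBuildArgsResolvedForRun: declared args are
// reported with their ENV value when the Containerfile also sets an ENV of the
// same name, otherwise with the build-arg value.
func resolveArgsForHistory(args map[string]string, allowed map[string]bool, env []string) string {
	configuredEnvs := make(map[string]string)
	for _, e := range env {
		if kv := strings.SplitN(e, "=", 2); len(kv) == 2 {
			configuredEnvs[kv[0]] = kv[1]
		}
	}
	var out []string
	for key, value := range args {
		if !allowed[key] {
			continue
		}
		if imageValue, inImage := configuredEnvs[key]; inImage {
			out = append(out, fmt.Sprintf("%s=%s", key, imageValue))
		} else {
			out = append(out, fmt.Sprintf("%s=%s", key, value))
		}
	}
	sort.Strings(out)
	return strings.Join(out, " ")
}

func main() {
	args := map[string]string{"VERSION": "1.0", "DEBUG": "0"}  // --build-arg values
	allowed := map[string]bool{"VERSION": true, "DEBUG": true} // ARG declarations
	env := []string{"VERSION=2.0"}                             // ENV VERSION=2.0 in the Containerfile
	fmt.Println(resolveArgsForHistory(args, allowed, env))     // DEBUG=0 VERSION=2.0
}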
// tagExistingImage adds names to an image already in the store
@ -1364,6 +1502,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
RetryDelay: s.executor.retryPullPushDelay,
HistoryTimestamp: s.executor.timestamp,
Manifest: s.executor.manifest,
UnsetEnvs: s.executor.unsetEnvs,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {

View File

@ -83,6 +83,11 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
return nil, err
}
netInt, err := getNetworkInterface(store, "", "")
if err != nil {
return nil, err
}
builder := &Builder{
store: store,
Type: containerType,
@ -100,6 +105,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
UIDMap: uidmap,
GIDMap: gidmap,
},
NetworkInterface: netInt,
}
if err := builder.initConfig(ctx, image, systemContext); err != nil {

View File

@ -11,10 +11,12 @@ import (
"strings"
"time"
"github.com/containerd/containerd/platforms"
"github.com/containers/buildah/util"
"github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -43,8 +45,10 @@ func Info(store storage.Store) ([]InfoData, error) {
func hostInfo() map[string]interface{} {
info := map[string]interface{}{}
info["os"] = runtime.GOOS
info["arch"] = runtime.GOARCH
ps := platforms.Normalize(v1.Platform{OS: runtime.GOOS, Architecture: runtime.GOARCH})
info["os"] = ps.OS
info["arch"] = ps.Architecture
info["variant"] = ps.Variant
info["cpus"] = runtime.NumCPU()
info["rootless"] = unshare.IsRootless()

View File

@ -0,0 +1,408 @@
package parse
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/containers/buildah/internal"
internalUtil "github.com/containers/buildah/internal/util"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
const (
// TypeBind is the type for mounting host dir
TypeBind = "bind"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs = "tmpfs"
// TypeCache is the type for mounting a common persistent cache from host
TypeCache = "cache"
// mount=type=cache must create a persistent directory on the host so it is available for all consecutive builds.
// The lifecycle of this directory is inherited from how the host machine treats its temporary directory.
BuildahCacheDir = "buildah-cache"
)
var (
errBadMntOption = errors.New("invalid mount option")
errBadOptionArg = errors.New("must provide an argument for option")
errBadVolDest = errors.New("must set volume destination")
errBadVolSrc = errors.New("must set volume source")
)
// GetBindMount parses a single bind mount entry from the --mount flag.
// Returns specifiedMount and a string containing the name of the image that we mounted; otherwise it is empty.
// The caller is expected to unmount any mounted images.
func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, string, error) {
newMount := specs.Mount{
Type: TypeBind,
}
mountReadability := false
setDest := false
bindNonRecursive := false
fromImage := ""
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
bindNonRecursive = true
case "ro", "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
mountReadability = true
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
mountReadability = true
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
mountReadability = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
newMount.Options = append(newMount.Options, kv[0])
case "from":
if len(kv) == 1 {
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
}
fromImage = kv[1]
case "bind-propagation":
if len(kv) == 1 {
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Source = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, "", errors.Wrapf(errBadOptionArg, kv[0])
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, "", err
}
newMount.Destination = kv[1]
setDest = true
case "consistency":
// Option for OS X only, has no meaning on other platforms
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, "", errors.Wrapf(errBadMntOption, kv[0])
}
}
// default mount readability is always readonly
if !mountReadability {
newMount.Options = append(newMount.Options, "ro")
}
// The following variable ensures that we return the image name only if we performed an additional mount.
isImageMounted := false
if fromImage != "" {
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromImage]; ok {
mountPoint = val.MountPoint
}
}
// If the mountPoint of the image was not found in additionalMountPoints,
// or the map was nil, try mounting the image.
if mountPoint == "" {
image, err := internalUtil.LookupImage(ctx, store, fromImage)
if err != nil {
return newMount, "", err
}
mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel)
if err != nil {
return newMount, "", err
}
isImageMounted = true
}
contextDir = mountPoint
}
// buildkit parity: default bind option must be `rbind`
// unless specified
if !bindNonRecursive {
newMount.Options = append(newMount.Options, "rbind")
}
if !setDest {
return newMount, fromImage, errBadVolDest
}
// buildkit parity: support absolute path for sources from current build context
if contextDir != "" {
// path should be /contextDir/specified path
newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source))
} else {
// Looks like it is coming from `build run --mount=type=bind`; allow using an absolute path
// and error out if no source is set.
if newMount.Source == "" {
return newMount, "", errBadVolSrc
}
if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil {
return newMount, "", err
}
}
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, fromImage, err
}
newMount.Options = opts
if !isImageMounted {
// We don't want any cleanups if the image was not mounted explicitly,
// so don't return anything.
fromImage = ""
}
return newMount, fromImage, nil
}
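A hypothetical test sketch for the common case, assuming no from= image so that neither the system context nor the store is consulted and both can be nil; being in an internal package, it only compiles inside the buildah module:

package parse

import (
	"testing"
)

func TestGetBindMountSketch(t *testing.T) {
	args := []string{"src=config", "target=/etc/myapp", "ro"}
	mount, image, err := GetBindMount(nil, args, "/home/user/project", nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	if image != "" {
		t.Fatalf("expected no image mount, got %q", image)
	}
	// The relative source is resolved under the build context directory.
	if mount.Source != "/home/user/project/config" || mount.Destination != "/etc/myapp" {
		t.Fatalf("unexpected mount: %+v", mount)
	}
}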
// GetCacheMount parses a single cache mount entry from the --mount flag.
func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails) (specs.Mount, error) {
var err error
var mode uint64
var (
setDest bool
setShared bool
setReadOnly bool
)
fromStage := ""
newMount := specs.Mount{
Type: TypeBind,
}
// If id is set, a new subdirectory named `id` will be created under <tempdir>/buildah-cache.
id := ""
// buildkit parity: cache directory mode defaults to 0755
mode = 0o755
// buildkit parity: cache directory uid defaults to 0 if not specified
uid := 0
// buildkit parity: cache directory gid defaults to 0 if not specified
gid := 0
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
case "rw", "readwrite":
newMount.Options = append(newMount.Options, "rw")
case "readonly", "ro":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
setReadOnly = true
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U":
newMount.Options = append(newMount.Options, kv[0])
setShared = true
case "bind-propagation":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Options = append(newMount.Options, kv[1])
case "id":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
id = kv[1]
case "from":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
fromStage = kv[1]
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
}
newMount.Destination = kv[1]
setDest = true
case "src", "source":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Source = kv[1]
case "mode":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
mode, err = strconv.ParseUint(kv[1], 8, 32)
if err != nil {
return newMount, errors.Wrapf(err, "Unable to parse cache mode")
}
case "uid":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
uid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, errors.Wrapf(err, "Unable to parse cache uid")
}
case "gid":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
gid, err = strconv.Atoi(kv[1])
if err != nil {
return newMount, errors.Wrapf(err, "Unable to parse cache gid")
}
default:
return newMount, errors.Wrapf(errBadMntOption, kv[0])
}
}
if !setDest {
return newMount, errBadVolDest
}
if fromStage != "" {
// do not create cache on host
// instead use read-only mounted stage as cache
mountPoint := ""
if additionalMountPoints != nil {
if val, ok := additionalMountPoints[fromStage]; ok {
if val.IsStage {
mountPoint = val.MountPoint
}
}
}
// Cache does not support using an image, so if no stage was found,
// return with an error.
if mountPoint == "" {
return newMount, fmt.Errorf("no stage found with name %s", fromStage)
}
// path should be /contextDir/specified path
newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
} else {
// We need to create the cache on the host if no stage is being used,
// since the type is cache and the cache can be reused by consecutive builds.
// Create a common cache directory, which persists on the host within the temp directory's lifecycle,
// and add a subdirectory if one was specified.
// cache parent directory
cacheParent := filepath.Join(getTempDir(), BuildahCacheDir)
// create cache on host if not present
err = os.MkdirAll(cacheParent, os.FileMode(0755))
if err != nil {
return newMount, errors.Wrapf(err, "Unable to create build cache directory")
}
if id != "" {
newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
} else {
newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
}
idPair := idtools.IDPair{
UID: uid,
GID: gid,
}
// buildkit parity: change uid and gid if specified, otherwise keep `0`.
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
if err != nil {
return newMount, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
}
}
// buildkit parity: default sharing should be shared
// unless specified
if !setShared {
newMount.Options = append(newMount.Options, "shared")
}
// buildkit parity: the cache must be writable unless `ro` or `readonly` is configured explicitly.
if !setReadOnly {
newMount.Options = append(newMount.Options, "rw")
}
newMount.Options = append(newMount.Options, "bind")
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, err
}
newMount.Options = opts
return newMount, nil
}
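A hypothetical test sketch of the host-side cache layout, assuming the uid/gid are overridden to the current user so the chown succeeds without privileges:

package parse

import (
	"fmt"
	"os"
	"path/filepath"
	"testing"
)

func TestGetCacheMountSketch(t *testing.T) {
	t.Setenv("TMPDIR", t.TempDir()) // keep the host-side cache inside the test's temp dir
	args := []string{
		"id=gocache",
		"target=/root/.cache/go-build",
		fmt.Sprintf("uid=%d", os.Getuid()), // assumption: chown to the current user so no privileges are needed
		fmt.Sprintf("gid=%d", os.Getgid()),
	}
	mount, err := GetCacheMount(args, nil, "", nil)
	if err != nil {
		t.Fatal(err)
	}
	want := filepath.Join(getTempDir(), BuildahCacheDir, "gocache")
	if mount.Source != want || mount.Destination != "/root/.cache/go-build" {
		t.Fatalf("unexpected mount: %+v", mount)
	}
}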
// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
func GetTmpfsMount(args []string) (specs.Mount, error) {
newMount := specs.Mount{
Type: TypeTmpfs,
Source: TypeTmpfs,
}
setDest := false
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
case "tmpcopyup":
// The path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
newMount.Options = append(newMount.Options, kv[0])
case "tmpfs-mode":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
case "tmpfs-size":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
case "src", "source":
return newMount, errors.Errorf("source is not supported with tmpfs mounts")
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, errors.Wrapf(errBadOptionArg, kv[0])
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
}
newMount.Destination = kv[1]
setDest = true
default:
return newMount, errors.Wrapf(errBadMntOption, kv[0])
}
}
if !setDest {
return newMount, errBadVolDest
}
return newMount, nil
}
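A matching hypothetical sketch for tmpfs mounts, showing how tmpfs-mode and tmpfs-size are translated into the kernel's mode= and size= options:

package parse

import (
	"reflect"
	"testing"
)

func TestGetTmpfsMountSketch(t *testing.T) {
	mount, err := GetTmpfsMount([]string{"target=/dev/shm", "tmpfs-mode=1777", "tmpfs-size=64m"})
	if err != nil {
		t.Fatal(err)
	}
	if mount.Type != TypeTmpfs || mount.Destination != "/dev/shm" {
		t.Fatalf("unexpected mount: %+v", mount)
	}
	if !reflect.DeepEqual(mount.Options, []string{"mode=1777", "size=64m"}) {
		t.Fatalf("unexpected options: %v", mount.Options)
	}
}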
/* This is internal function and could be changed at any time */
/* for external usage please refer to buildah/pkg/parse.GetTempDir() */
func getTempDir() string {
if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
return tmpdir
}
return "/var/tmp"
}

11
vendor/github.com/containers/buildah/internal/types.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package internal
// Types in internal packages are subject to change between releases; avoid using these outside of buildah.
// StageMountDetails holds the Stage/Image mountpoint returned by StageExecutor.
// StageExecutor has the ability to mount stages/images in the current context and
// automatically clean them up.
type StageMountDetails struct {
IsStage bool // tells if mountpoint returned from stage executor is stage or image
MountPoint string // mountpoint of stage/image
}

View File

@ -0,0 +1,24 @@
package util
import (
"github.com/containers/common/libimage"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
)
// LookupImage returns the *Image corresponding to the given image name or ID.
func LookupImage(ctx *types.SystemContext, store storage.Store, image string) (*libimage.Image, error) {
systemContext := ctx
if systemContext == nil {
systemContext = &types.SystemContext{}
}
runtime, err := libimage.RuntimeFromStore(store, &libimage.RuntimeOptions{SystemContext: systemContext})
if err != nil {
return nil, err
}
localImage, _, err := runtime.LookupImage(image, nil)
if err != nil {
return nil, err
}
return localImage, nil
}

View File

@ -113,6 +113,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
options.FromImage = ""
}
if options.NetworkInterface == nil {
// create the network interface
// Note: It is important to do this before we pull any images/create containers.
// The default backend detection logic needs an empty store to correctly detect
// that we can use netavark; if the store is not empty it will use CNI so as not to break existing installs.
options.NetworkInterface, err = getNetworkInterface(store, options.CNIConfigDir, options.CNIPluginPath)
if err != nil {
return nil, err
}
}
systemContext := getSystemContext(store, options.SystemContext, options.SignaturePolicyPath)
if options.FromImage != "" && options.FromImage != "scratch" {
@ -197,6 +208,9 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
}
name := "working-container"
if options.ContainerSuffix != "" {
name = options.ContainerSuffix
}
if options.Container != "" {
name = options.Container
} else {
@ -216,9 +230,20 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
conflict := 100
for {
var flags map[string]interface{}
// check if we have predefined ProcessLabel and MountLabel
// this could be true if this is another stage in a build
if options.ProcessLabel != "" && options.MountLabel != "" {
flags = map[string]interface{}{
"ProcessLabel": options.ProcessLabel,
"MountLabel": options.MountLabel,
}
}
coptions := storage.ContainerOptions{
LabelOpts: options.CommonBuildOpts.LabelOpts,
IDMappingOptions: newContainerIDMappingOptions(options.IDMappingOptions),
Flags: flags,
Volatile: true,
}
container, err = store.CreateContainer("", []string{tmpName}, imageID, "", "", &coptions)
@ -283,13 +308,15 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
UIDMap: uidmap,
GIDMap: gidmap,
},
Capabilities: copyStringSlice(options.Capabilities),
CommonBuildOpts: options.CommonBuildOpts,
TopLayer: topLayer,
Args: options.Args,
Format: options.Format,
TempVolumes: map[string]bool{},
Devices: options.Devices,
Capabilities: copyStringSlice(options.Capabilities),
CommonBuildOpts: options.CommonBuildOpts,
TopLayer: topLayer,
Args: options.Args,
Format: options.Format,
TempVolumes: map[string]bool{},
Devices: options.Devices,
Logger: options.Logger,
NetworkInterface: options.NetworkInterface,
}
if options.Mount {

View File

@ -38,6 +38,7 @@ type UserNSResults struct {
// NameSpaceResults represents the results for Namespace flags
type NameSpaceResults struct {
Cgroup string
IPC string
Network string
CNIConfigDir string
@ -48,6 +49,7 @@ type NameSpaceResults struct {
// BudResults represents the results for Build flags
type BudResults struct {
AllPlatforms bool
Annotation []string
Authfile string
BuildArg []string
@ -86,6 +88,7 @@ type BudResults struct {
Jobs int
LogRusage bool
RusageLogFile string
UnsetEnvs []string
}
// FromAndBugResults represents the results for common flags
@ -141,10 +144,13 @@ func GetUserNSFlagsCompletions() commonComp.FlagCompletions {
// GetNameSpaceFlags returns the common flags for a namespace menu
func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.Cgroup, "cgroupns", "", "'private', or 'host'")
fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'")
fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'")
fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", define.DefaultCNIConfigDir, "`directory` of CNI configuration files")
fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", define.DefaultCNIPluginPath, "`path` of CNI network plugins")
fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", "", "`directory` of CNI configuration files")
_ = fs.MarkHidden("cni-config-dir")
fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", "", "`path` of CNI network plugins")
_ = fs.MarkHidden("cni-plugin-path")
fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "private, `path` of PID namespace to join, or 'host'")
fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "private, :`path` of UTS namespace to join, or 'host'")
return fs
@ -153,10 +159,9 @@ func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
// GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags
func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["cgroupns"] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion["cni-config-dir"] = commonComp.AutocompleteDefault
flagCompletion["cni-plugin-path"] = commonComp.AutocompleteDefault
flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag
return flagCompletion
@ -175,6 +180,7 @@ func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
// GetBudFlags returns common build flags
func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.BoolVar(&flags.AllPlatforms, "all-platforms", false, "attempt to build for all base image platforms")
fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.")
@ -229,6 +235,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
fs.String("variant", "", "override the `variant` of the specified image")
fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "Unset environment variable from final image")
return fs
}
@ -261,6 +268,7 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["target"] = commonComp.AutocompleteNone
flagCompletion["timestamp"] = commonComp.AutocompleteNone
flagCompletion["variant"] = commonComp.AutocompleteNone
flagCompletion["unsetenv"] = commonComp.AutocompleteNone
return flagCompletion
}

View File

@ -88,7 +88,7 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
if err := os.Mkdir(lowerTwo, 0755); err != nil {
return mount, err
}
overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", source, lowerTwo)
overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
} else {
// Read-write overlay mounts want a lower, upper and a work layer.
workDir := filepath.Join(contentDir, "work")
@ -105,8 +105,7 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
return mount, err
}
}
overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir)
overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
}
if unshare.IsRootless() {
@ -155,6 +154,11 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
return mount, nil
}
// Convert ":" to "\:", the path which will be overlay mounted need to be escaped
func escapeColon(source string) string {
return strings.ReplaceAll(source, ":", "\\:")
}
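A standalone sketch with hypothetical paths of why the escaping matters: overlayfs treats ":" in lowerdir as a separator between layers, so a colon inside a source path must be escaped before it lands in the options string.

package main

import (
	"fmt"
	"strings"
)

func main() {
	source := "/var/lib/containers/storage/overlay/abc:123/merged" // hypothetical layer path containing a colon
	escaped := strings.ReplaceAll(source, ":", "\\:")              // same transformation as escapeColon
	fmt.Printf("lowerdir=%s:%s,private\n", escaped, "/tmp/lower-two")
	// lowerdir=/var/lib/containers/storage/overlay/abc\:123/merged:/tmp/lower-two,private
}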
// RemoveTemp removes temporary mountpoint and all content from its parent
// directory
func RemoveTemp(contentDir string) error {

View File

@ -9,22 +9,26 @@ import (
"net"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"unicode"
"github.com/containerd/containerd/platforms"
"github.com/containers/buildah/define"
internalParse "github.com/containers/buildah/internal/parse"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
units "github.com/docker/go-units"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/term"
)
@ -37,17 +41,24 @@ const (
TypeBind = "bind"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs = "tmpfs"
// TypeCache is the type for mounting a common persistent cache from host
TypeCache = "cache"
// mount=type=cache must create a persistent directory on the host so it is available for all consecutive builds.
// The lifecycle of this directory is inherited from how the host machine treats its temporary directory.
BuildahCacheDir = "buildah-cache"
)
var (
errBadMntOption = errors.Errorf("invalid mount option")
errDuplicateDest = errors.Errorf("duplicate mount destination")
optionArgError = errors.Errorf("must provide an argument for option")
noDestError = errors.Errorf("must set volume destination")
)
// CommonBuildOptions parses the build options from the bud cli
func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag)
}
// CommonBuildOptionsFromFlagSet parses the build options from the bud cli
func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*define.CommonBuildOptions, error) {
var (
memoryLimit int64
memorySwap int64
@ -55,7 +66,7 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
err error
)
memVal, _ := c.Flags().GetString("memory")
memVal, _ := flags.GetString("memory")
if memVal != "" {
memoryLimit, err = units.RAMInBytes(memVal)
if err != nil {
@ -63,15 +74,19 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
}
}
memSwapValue, _ := c.Flags().GetString("memory-swap")
memSwapValue, _ := flags.GetString("memory-swap")
if memSwapValue != "" {
memorySwap, err = units.RAMInBytes(memSwapValue)
if err != nil {
return nil, errors.Wrapf(err, "invalid value for memory-swap")
if memSwapValue == "-1" {
memorySwap = -1
} else {
memorySwap, err = units.RAMInBytes(memSwapValue)
if err != nil {
return nil, errors.Wrapf(err, "invalid value for memory-swap")
}
}
}
addHost, _ := c.Flags().GetStringSlice("add-host")
addHost, _ := flags.GetStringSlice("add-host")
if len(addHost) > 0 {
for _, host := range addHost {
if err := validateExtraHost(host); err != nil {
@ -82,8 +97,8 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
noDNS = false
dnsServers := []string{}
if c.Flag("dns").Changed {
dnsServers, _ = c.Flags().GetStringSlice("dns")
if flags.Changed("dns") {
dnsServers, _ = flags.GetStringSlice("dns")
for _, server := range dnsServers {
if strings.ToLower(server) == "none" {
noDNS = true
@ -95,62 +110,62 @@ func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) {
}
dnsSearch := []string{}
if c.Flag("dns-search").Changed {
dnsSearch, _ = c.Flags().GetStringSlice("dns-search")
if flags.Changed("dns-search") {
dnsSearch, _ = flags.GetStringSlice("dns-search")
if noDNS && len(dnsSearch) > 0 {
return nil, errors.Errorf("invalid --dns-search, --dns-search may not be used with --dns=none")
}
}
dnsOptions := []string{}
if c.Flag("dns-option").Changed {
dnsOptions, _ = c.Flags().GetStringSlice("dns-option")
if flags.Changed("dns-option") {
dnsOptions, _ = flags.GetStringSlice("dns-option")
if noDNS && len(dnsOptions) > 0 {
return nil, errors.Errorf("invalid --dns-option, --dns-option may not be used with --dns=none")
}
}
if _, err := units.FromHumanSize(c.Flag("shm-size").Value.String()); err != nil {
if _, err := units.FromHumanSize(findFlagFunc("shm-size").Value.String()); err != nil {
return nil, errors.Wrapf(err, "invalid --shm-size")
}
volumes, _ := c.Flags().GetStringArray("volume")
volumes, _ := flags.GetStringArray("volume")
if err := Volumes(volumes); err != nil {
return nil, err
}
cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
cpuQuota, _ := c.Flags().GetInt64("cpu-quota")
cpuShares, _ := c.Flags().GetUint64("cpu-shares")
httpProxy, _ := c.Flags().GetBool("http-proxy")
cpuPeriod, _ := flags.GetUint64("cpu-period")
cpuQuota, _ := flags.GetInt64("cpu-quota")
cpuShares, _ := flags.GetUint64("cpu-shares")
httpProxy, _ := flags.GetBool("http-proxy")
ulimit := []string{}
if c.Flag("ulimit").Changed {
ulimit, _ = c.Flags().GetStringSlice("ulimit")
if flags.Changed("ulimit") {
ulimit, _ = flags.GetStringSlice("ulimit")
}
secrets, _ := c.Flags().GetStringArray("secret")
sshsources, _ := c.Flags().GetStringArray("ssh")
secrets, _ := flags.GetStringArray("secret")
sshsources, _ := flags.GetStringArray("ssh")
commonOpts := &define.CommonBuildOptions{
AddHost: addHost,
CPUPeriod: cpuPeriod,
CPUQuota: cpuQuota,
CPUSetCPUs: c.Flag("cpuset-cpus").Value.String(),
CPUSetMems: c.Flag("cpuset-mems").Value.String(),
CPUSetCPUs: findFlagFunc("cpuset-cpus").Value.String(),
CPUSetMems: findFlagFunc("cpuset-mems").Value.String(),
CPUShares: cpuShares,
CgroupParent: c.Flag("cgroup-parent").Value.String(),
CgroupParent: findFlagFunc("cgroup-parent").Value.String(),
DNSOptions: dnsOptions,
DNSSearch: dnsSearch,
DNSServers: dnsServers,
HTTPProxy: httpProxy,
Memory: memoryLimit,
MemorySwap: memorySwap,
ShmSize: c.Flag("shm-size").Value.String(),
ShmSize: findFlagFunc("shm-size").Value.String(),
Ulimit: ulimit,
Volumes: volumes,
Secrets: secrets,
SSHSources: sshsources,
}
securityOpts, _ := c.Flags().GetStringArray("security-opt")
securityOpts, _ := flags.GetStringArray("security-opt")
if err := parseSecurityOpts(securityOpts, commonOpts); err != nil {
return nil, err
}
@ -199,10 +214,39 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti
return nil
}
// SplitStringWithColonEscape splits a string into a slice on colons. A backslash-escaped colon (i.e. "\:") is not regarded as a separator.
func SplitStringWithColonEscape(str string) []string {
result := make([]string, 0, 3)
sb := &strings.Builder{}
for idx, r := range str {
if r == ':' {
// the colon is backslash-escaped
if idx-1 > 0 && str[idx-1] == '\\' {
sb.WriteRune(r)
} else {
// os.Stat will fail if path contains escaped colon
result = append(result, revertEscapedColon(sb.String()))
sb.Reset()
}
} else {
sb.WriteRune(r)
}
}
if sb.Len() > 0 {
result = append(result, revertEscapedColon(sb.String()))
}
return result
}
// Convert "\:" to ":"
func revertEscapedColon(source string) string {
return strings.ReplaceAll(source, "\\:", ":")
}
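A quick usage sketch, with hypothetical paths, of the escape-aware splitting used by Volume below:

package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/parse"
)

func main() {
	// The escaped colons stay inside the host path; the unescaped ones split the volume spec.
	parts := parse.SplitStringWithColonEscape(`/mnt/path\:with\:colons:/data:ro`)
	fmt.Println(parts) // [/mnt/path:with:colons /data ro]
}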
// Volume parses the input of --volume
func Volume(volume string) (specs.Mount, error) {
mount := specs.Mount{}
arr := strings.SplitN(volume, ":", 3)
arr := SplitStringWithColonEscape(volume)
if len(arr) < 2 {
return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
}
@ -257,18 +301,18 @@ func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) {
}
// GetVolumes gets the volumes from --volume and --mount
func GetVolumes(volumes []string, mounts []string) ([]specs.Mount, error) {
unifiedMounts, err := getMounts(mounts)
func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string) ([]specs.Mount, []string, error) {
unifiedMounts, mountedImages, err := getMounts(ctx, store, mounts, contextDir)
if err != nil {
return nil, err
return nil, mountedImages, err
}
volumeMounts, err := getVolumeMounts(volumes)
if err != nil {
return nil, err
return nil, mountedImages, err
}
for dest, mount := range volumeMounts {
if _, ok := unifiedMounts[dest]; ok {
return nil, errors.Wrapf(errDuplicateDest, dest)
return nil, mountedImages, errors.Wrapf(errDuplicateDest, dest)
}
unifiedMounts[dest] = mount
}
@ -277,15 +321,16 @@ func GetVolumes(volumes []string, mounts []string) ([]specs.Mount, error) {
for _, mount := range unifiedMounts {
finalMounts = append(finalMounts, mount)
}
return finalMounts, nil
return finalMounts, mountedImages, nil
}
// getMounts takes user-provided input from the --mount flag and creates OCI
// spec mounts.
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
func getMounts(mounts []string) (map[string]specs.Mount, error) {
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, error) {
finalMounts := make(map[string]specs.Mount)
mountedImages := make([]string, 0)
errInvalidSyntax := errors.Errorf("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
@ -295,163 +340,51 @@ func getMounts(mounts []string) (map[string]specs.Mount, error) {
for _, mount := range mounts {
arr := strings.SplitN(mount, ",", 2)
if len(arr) < 2 {
return nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
return nil, mountedImages, errors.Wrapf(errInvalidSyntax, "%q", mount)
}
kv := strings.Split(arr[0], "=")
// TODO: type is not explicitly required in Docker.
// If not specified, it defaults to "volume".
if len(kv) != 2 || kv[0] != "type" {
return nil, errors.Wrapf(errInvalidSyntax, "%q", mount)
return nil, mountedImages, errors.Wrapf(errInvalidSyntax, "%q", mount)
}
tokens := strings.Split(arr[1], ",")
switch kv[1] {
case TypeBind:
mount, err := GetBindMount(tokens)
mount, image, err := internalParse.GetBindMount(ctx, tokens, contextDir, store, "", nil)
if err != nil {
return nil, err
return nil, mountedImages, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, errors.Wrapf(errDuplicateDest, mount.Destination)
return nil, mountedImages, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
mountedImages = append(mountedImages, image)
case TypeCache:
mount, err := internalParse.GetCacheMount(tokens, store, "", nil)
if err != nil {
return nil, mountedImages, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, mountedImages, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
case TypeTmpfs:
mount, err := GetTmpfsMount(tokens)
mount, err := internalParse.GetTmpfsMount(tokens)
if err != nil {
return nil, err
return nil, mountedImages, err
}
if _, ok := finalMounts[mount.Destination]; ok {
return nil, errors.Wrapf(errDuplicateDest, mount.Destination)
return nil, mountedImages, errors.Wrapf(errDuplicateDest, mount.Destination)
}
finalMounts[mount.Destination] = mount
default:
return nil, errors.Errorf("invalid filesystem type %q", kv[1])
return nil, mountedImages, errors.Errorf("invalid filesystem type %q", kv[1])
}
}
return finalMounts, nil
}
// GetBindMount parses a single bind mount entry from the --mount flag.
func GetBindMount(args []string) (specs.Mount, error) {
newMount := specs.Mount{
Type: TypeBind,
}
setSource := false
setDest := false
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
case "ro", "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
newMount.Options = append(newMount.Options, kv[0])
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z":
newMount.Options = append(newMount.Options, kv[0])
case "bind-propagation":
if len(kv) == 1 {
return newMount, errors.Wrapf(optionArgError, kv[0])
}
newMount.Options = append(newMount.Options, kv[1])
case "src", "source":
if len(kv) == 1 {
return newMount, errors.Wrapf(optionArgError, kv[0])
}
if err := parse.ValidateVolumeHostDir(kv[1]); err != nil {
return newMount, err
}
newMount.Source = kv[1]
setSource = true
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, errors.Wrapf(optionArgError, kv[0])
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
}
newMount.Destination = kv[1]
setDest = true
case "consistency":
// Option for OS X only, has no meaning on other platforms
// and can thus be safely ignored.
// See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts
default:
return newMount, errors.Wrapf(errBadMntOption, kv[0])
}
}
if !setDest {
return newMount, noDestError
}
if !setSource {
newMount.Source = newMount.Destination
}
opts, err := parse.ValidateVolumeOpts(newMount.Options)
if err != nil {
return newMount, err
}
newMount.Options = opts
return newMount, nil
}
// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
func GetTmpfsMount(args []string) (specs.Mount, error) {
newMount := specs.Mount{
Type: TypeTmpfs,
Source: TypeTmpfs,
}
setDest := false
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
case "readonly":
// Alias for "ro"
newMount.Options = append(newMount.Options, "ro")
case "tmpfs-mode":
if len(kv) == 1 {
return newMount, errors.Wrapf(optionArgError, kv[0])
}
newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1]))
case "tmpfs-size":
if len(kv) == 1 {
return newMount, errors.Wrapf(optionArgError, kv[0])
}
newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1]))
case "src", "source":
return newMount, errors.Errorf("source is not supported with tmpfs mounts")
case "target", "dst", "destination":
if len(kv) == 1 {
return newMount, errors.Wrapf(optionArgError, kv[0])
}
if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil {
return newMount, err
}
newMount.Destination = kv[1]
setDest = true
default:
return newMount, errors.Wrapf(errBadMntOption, kv[0])
}
}
if !setDest {
return newMount, noDestError
}
return newMount, nil
return finalMounts, mountedImages, nil
}
// ValidateVolumeHostDir validates a volume mount's source directory
@ -669,7 +602,7 @@ const platformSep = "/"
// DefaultPlatform returns the standard platform for the current system
func DefaultPlatform() string {
return runtime.GOOS + platformSep + runtime.GOARCH
return platforms.DefaultString()
}
// Platform separates the platform string into os, arch and variant,
@ -829,13 +762,6 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio
}
usernsOptions = define.NamespaceOptions{usernsOption}
usernetwork := c.Flags().Lookup("network")
if usernetwork != nil && !usernetwork.Changed {
usernsOptions = append(usernsOptions, define.NamespaceOption{
Name: string(specs.NetworkNamespace),
Host: usernsOption.Host,
})
}
// If the user requested that we use the host namespace, but also that
// we use mappings, that's not going to work.
if (len(uidmap) != 0 || len(gidmap) != 0) && usernsOption.Host {
@ -877,23 +803,32 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) {
// NamespaceOptions parses the build options for all namespaces except for user namespace.
func NamespaceOptions(c *cobra.Command) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) {
return NamespaceOptionsFromFlagSet(c.Flags(), c.Flag)
}
// NamespaceOptionsFromFlagSet parses the build options for all namespaces except for user namespace.
func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (namespaceOptions define.NamespaceOptions, networkPolicy define.NetworkConfigurationPolicy, err error) {
options := make(define.NamespaceOptions, 0, 7)
policy := define.NetworkDefault
for _, what := range []string{string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
if c.Flags().Lookup(what) != nil && c.Flag(what).Changed {
how := c.Flag(what).Value.String()
for _, what := range []string{"cgroupns", string(specs.IPCNamespace), "network", string(specs.PIDNamespace), string(specs.UTSNamespace)} {
if flags.Lookup(what) != nil && findFlagFunc(what).Changed {
how := findFlagFunc(what).Value.String()
switch what {
case "network":
what = string(specs.NetworkNamespace)
case "cgroupns":
what = string(specs.CgroupNamespace)
}
switch how {
case "", "container", "private":
logrus.Debugf("setting %q namespace to %q", what, "")
policy = define.NetworkEnabled
options.AddOrReplace(define.NamespaceOption{
Name: what,
})
case "host":
logrus.Debugf("setting %q namespace to host", what)
policy = define.NetworkEnabled
options.AddOrReplace(define.NamespaceOption{
Name: what,
Host: true,
@ -1034,35 +969,60 @@ func GetTempDir() string {
}
// Secrets parses the --secret flag
func Secrets(secrets []string) (map[string]string, error) {
parsed := make(map[string]string)
invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar")
func Secrets(secrets []string) (map[string]define.Secret, error) {
invalidSyntax := errors.Errorf("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV,type=file|env]")
parsed := make(map[string]define.Secret)
for _, secret := range secrets {
split := strings.Split(secret, ",")
if len(split) > 2 {
return nil, invalidSyntax
}
if len(split) == 2 {
id := strings.Split(split[0], "=")
src := strings.Split(split[1], "=")
if len(split) == 2 && strings.ToLower(id[0]) == "id" && strings.ToLower(src[0]) == "src" {
fullPath, err := filepath.Abs(src[1])
if err != nil {
return nil, err
tokens := strings.Split(secret, ",")
var id, src, typ string
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
case "id":
id = kv[1]
case "src":
src = kv[1]
case "env":
src = kv[1]
typ = "env"
case "type":
if kv[1] != "file" && kv[1] != "env" {
return nil, errors.New("invalid secret type, must be file or env")
}
_, err = os.Stat(fullPath)
if err == nil {
parsed[id[1]] = fullPath
}
if err != nil {
return nil, errors.Wrap(err, "could not parse secrets")
}
} else {
return nil, invalidSyntax
typ = kv[1]
}
} else {
}
if id == "" {
return nil, invalidSyntax
}
if src == "" {
src = id
}
if typ == "" {
if _, ok := os.LookupEnv(id); ok {
typ = "env"
} else {
typ = "file"
}
}
if typ == "file" {
fullPath, err := filepath.Abs(src)
if err != nil {
return nil, errors.Wrap(err, "could not parse secrets")
}
_, err = os.Stat(fullPath)
if err != nil {
return nil, errors.Wrap(err, "could not parse secrets")
}
src = fullPath
}
newSecret := define.Secret{
Source: src,
SourceType: typ,
}
parsed[id] = newSecret
}
return parsed, nil
}
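A minimal, self-contained sketch (not the buildah implementation itself) of the parsing rules above: id is required, src defaults to the id, and the type defaults to "env" when an environment variable named after the id exists, otherwise "file". The flag value used here is hypothetical.

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	flag := "id=mytoken,src=./token.txt" // hypothetical --secret value
	var id, src, typ string
	for _, tok := range strings.Split(flag, ",") {
		kv := strings.SplitN(tok, "=", 2)
		if len(kv) != 2 {
			continue
		}
		switch kv[0] {
		case "id":
			id = kv[1]
		case "src":
			src = kv[1]
		case "env":
			src, typ = kv[1], "env"
		case "type":
			typ = kv[1]
		}
	}
	if src == "" {
		src = id // src defaults to the id
	}
	if typ == "" {
		if _, ok := os.LookupEnv(id); ok {
			typ = "env" // an existing env var named id implies an env secret
		} else {
			typ = "file"
		}
	}
	fmt.Printf("id=%s source=%s type=%s\n", id, src, typ)
}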
@ -1085,3 +1045,20 @@ func SSH(sshSources []string) (map[string]*sshagent.Source, error) {
}
return parsed, nil
}
func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) {
if path != "" {
excludes, err := imagebuilder.ParseIgnore(path)
return excludes, path, err
}
path = filepath.Join(contextDir, ".containerignore")
excludes, err := imagebuilder.ParseIgnore(path)
if os.IsNotExist(err) {
path = filepath.Join(contextDir, ".dockerignore")
excludes, err = imagebuilder.ParseIgnore(path)
}
if os.IsNotExist(err) {
return excludes, "", nil
}
return excludes, path, err
}
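A hedged usage sketch for the helper above, assuming it is exported from buildah's pkg/parse as shown in this diff: with an empty explicit path it prefers .containerignore in the context directory and falls back to .dockerignore, and a missing ignore file is not an error. The context path is illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/containers/buildah/pkg/parse"
)

func main() {
	// Empty path: look for .containerignore, then .dockerignore, in the context dir.
	excludes, ignoreFile, err := parse.ContainerIgnoreFile("/path/to/context", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("using %q with %d exclude patterns\n", ignoreFile, len(excludes))
}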

View File

@ -111,8 +111,9 @@ func (a *AgentServer) Serve(processLabel string) (string, error) {
a.wg.Done()
}()
// the only way to get agent.ServeAgent to return is to close the connection it's serving on
// TODO: ideally we should use some sort of forwarding mechanism for output instead of manually closing the connection.
go func() {
time.Sleep(500 * time.Millisecond)
time.Sleep(2000 * time.Millisecond)
c.Close()
}()
}

View File

@ -10,6 +10,7 @@ import (
"github.com/containers/common/libimage"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
@ -25,6 +26,7 @@ type PushOptions struct {
// Compression specifies the type of compression which is applied to
// layer blobs. The default is to not use compression, but
// archive.Gzip is recommended.
// OBSOLETE: Use CompressionFormat instead.
Compression archive.Compression
// SignaturePolicyPath specifies an override location for the signature
// policy which should be used for verifying the new image as it is
@ -71,6 +73,11 @@ type PushOptions struct {
// integers in the slice represent 0-indexed layer indices, with support for negative
// indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
OciEncryptLayers *[]int
// CompressionFormat is the format to use for the compression of the blobs
CompressionFormat *compression.Algorithm
// CompressionLevel specifies what compression level is used
CompressionLevel *int
}
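A hedged sketch of the new fields: requesting zstd-compressed layer blobs at an explicit level when pushing. compression.Zstd comes from the containers/image compression package imported above; the remaining PushOptions fields and the actual Push call are omitted.

package main

import (
	"github.com/containers/buildah"
	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	level := 3
	_ = buildah.PushOptions{
		CompressionFormat: &compression.Zstd, // compress layer blobs with zstd
		CompressionLevel:  &level,            // explicit compression level
	}
}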
// Push copies the contents of the image to a new location.
@ -84,6 +91,8 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
libimageOptions.RetryDelay = &options.RetryDelay
libimageOptions.OciEncryptConfig = options.OciEncryptConfig
libimageOptions.OciEncryptLayers = options.OciEncryptLayers
libimageOptions.CompressionFormat = options.CompressionFormat
libimageOptions.CompressionLevel = options.CompressionLevel
libimageOptions.PolicyAllowStorage = true
if options.Quiet {

View File

@ -5,7 +5,9 @@ import (
"io"
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/pkg/sshagent"
"github.com/containers/image/v5/types"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
@ -93,6 +95,8 @@ type RunOptions struct {
User string
// WorkingDir is an override for the working directory.
WorkingDir string
// ContextDir is used as the root directory for the source location for mounts that are of type "bind".
ContextDir string
// Shell is default shell to run in a container.
Shell string
// Cmd is an override for the configured default command.
@ -139,18 +143,30 @@ type RunOptions struct {
// Devices are the additional devices to add to the containers
Devices define.ContainerDevices
// Secrets are the available secrets to use in a RUN
Secrets map[string]string
Secrets map[string]define.Secret
// SSHSources is the available ssh agents to use in a RUN
SSHSources map[string]*sshagent.Source `json:"-"`
// RunMounts are mounts for this run. RunMounts for this run
// will not show up in subsequent runs.
RunMounts []string
// Map of stages and container mountpoint if any from stage executor
StageMountPoints map[string]internal.StageMountDetails
// External image mounts to be cleaned up.
// Buildah run --mount can mount an image before RUN calls; RUN can clean
// them up as well.
ExternalImageMounts []string
// System context of current build
SystemContext *types.SystemContext
}
// RunMountArtifacts are the artifacts created when using a run mount.
type runMountArtifacts struct {
// RunMountTargets are the run mount targets inside the container
RunMountTargets []string
// TmpFiles are artifacts that need to be removed outside the container
TmpFiles []string
// Any external images which were mounted inside the container
MountedImages []string
// Agents are the ssh agents started
Agents []*sshagent.AgentServer
// SSHAuthSock is the path to the ssh auth sock inside the container

File diff suppressed because it is too large Load Diff

View File

@ -4,6 +4,8 @@ package buildah
import (
"github.com/containers/buildah/define"
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/storage"
"github.com/pkg/errors"
)
@ -22,3 +24,8 @@ func (b *Builder) Run(command []string, options RunOptions) error {
func DefaultNamespaceOptions() (NamespaceOptions, error) {
return NamespaceOptions{}, errors.New("function not supported on non-linux systems")
}
// getNetworkInterface creates the network interface
func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
return nil, errors.New("function not supported on non-linux systems")
}

View File

@ -3,6 +3,8 @@
package buildah
import (
nettypes "github.com/containers/common/libnetwork/types"
"github.com/containers/storage"
"github.com/pkg/errors"
)
@ -18,3 +20,8 @@ func (b *Builder) Run(command []string, options RunOptions) error {
func DefaultNamespaceOptions() (NamespaceOptions, error) {
return NamespaceOptions{}, errors.New("function not supported on non-linux systems")
}
// getNetworkInterface creates the network interface
func getNetworkInterface(store storage.Store, cniConfDir, cniPluginPath string) (nettypes.ContainerNetwork, error) {
return nil, errors.New("function not supported on non-linux systems")
}

View File

@ -3,8 +3,12 @@
package buildah
import (
"fmt"
"github.com/opencontainers/runtime-tools/generate"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
func selinuxGetEnabled() bool {
@ -17,3 +21,21 @@ func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
g.SetLinuxMountLabel(mountLabel)
}
}
func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error {
if !selinuxGetEnabled() || processLabel == "" || mountLabel == "" {
// SELinux is completely disabled, or we're not doing anything at all with labeling
return nil
}
pipeContext, err := selinux.ComputeCreateContext(processLabel, mountLabel, "fifo_file")
if err != nil {
return errors.Wrapf(err, "computing file creation context for pipes")
}
for i := range stdioPipe {
pipeFdName := fmt.Sprintf("/proc/self/fd/%d", stdioPipe[i][0])
if err := label.Relabel(pipeFdName, pipeContext, false); err != nil {
return errors.Wrapf(err, "setting file label on %q", pipeFdName)
}
}
return nil
}

View File

@ -12,3 +12,7 @@ func selinuxGetEnabled() bool {
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
}
func runLabelStdioPipes(stdioPipe [][]int, processLabel, mountLabel string) error {
return nil
}

View File

@ -7,10 +7,6 @@ import (
const (
// DefaultRuntime if containers.conf fails.
DefaultRuntime = define.DefaultRuntime
// DefaultCNIPluginPath is the default location of CNI plugin helpers.
DefaultCNIPluginPath = define.DefaultCNIPluginPath
// DefaultCNIConfigDir is the default location of CNI configuration files.
DefaultCNIConfigDir = define.DefaultCNIConfigDir
)
var (

View File

@ -3,6 +3,7 @@ package util
import (
"fmt"
"io"
"net"
"net/url"
"os"
"path/filepath"
@ -486,3 +487,20 @@ func VerifyTagName(imageSpec string) (types.ImageReference, error) {
}
return ref, nil
}
// LocalIP returns the non-loopback local IP address of the host
func LocalIP() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
// check the address type and, if it is not a loopback address, return it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
return ""
}
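A short usage sketch, assuming the helper above lands in buildah's util package (import path github.com/containers/buildah/util, an assumption based on the surrounding file): it returns an empty string when no non-loopback IPv4 address is found.

package main

import (
	"fmt"

	"github.com/containers/buildah/util"
)

func main() {
	if ip := util.LocalIP(); ip != "" {
		fmt.Println("host IP:", ip)
	} else {
		fmt.Println("no non-loopback IPv4 address found")
	}
}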

View File

@ -1,222 +0,0 @@
package defaultnet
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"regexp"
"text/template"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// TODO: A smarter implementation would make sure cni-podman0 was unused before
// making the default, and adjust if necessary
const networkTemplate = `{
"cniVersion": "0.4.0",
"name": "{{{{.Name}}}}",
"plugins": [
{
"type": "bridge",
"bridge": "cni-podman0",
"isGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"routes": [{ "dst": "0.0.0.0/0" }],
"ranges": [
[
{
"subnet": "{{{{.Subnet}}}}",
"gateway": "{{{{.Gateway}}}}"
}
]
]
}
},
{{{{- if (eq .Machine true) }}}}
{
"type": "podman-machine",
"capabilities": {
"portMappings": true
}
},
{{{{- end}}}}
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
},
{
"type": "firewall"
},
{
"type": "tuning"
}
]
}
`
var (
// Borrowed from Podman, modified to remove dashes and periods.
nameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_]*$")
)
// Used to pass info into the template engine
type networkInfo struct {
Name string
Subnet string
Gateway string
Machine bool
}
// The most trivial definition of a CNI network possible for our use here.
// We need the name, and nothing else.
type network struct {
Name string `json:"name"`
}
// Create makes the CNI default network, if necessary.
// Accepts the name and subnet of the network to create (a standard template
// will be used, with these values plugged in), the configuration directory
// where CNI configs are stored (to verify if a named configuration already
// exists), an exists directory (where a sentinel file will be stored, to ensure
// the network is only made once), and an isMachine bool (to determine whether
// the machine block will be added to the config).
// Create first checks if a default network has already been created via the
// presence of a sentinel file. If it does exist, it returns immediately without
// error.
// It next checks if a CNI network with the given name already exists. In that
// case, it creates the sentinel file and returns without error.
// If neither of these are true, the default network is created.
func Create(name, subnet, configDir, existsDir string, isMachine bool) error {
// TODO: Should probably regex name to make sure it's valid.
if name == "" || subnet == "" || configDir == "" || existsDir == "" {
return errors.Errorf("must provide values for all arguments to MakeDefaultNetwork")
}
if !nameRegex.MatchString(name) {
return errors.Errorf("invalid default network name %s - letters, numbers, and underscores only", name)
}
sentinelFile := filepath.Join(existsDir, "defaultCNINetExists")
// Check if sentinel file exists, return immediately if it does.
if _, err := os.Stat(sentinelFile); err == nil {
return nil
}
// Create the sentinel file if it doesn't exist, so subsequent checks
// don't need to go further.
file, err := os.Create(sentinelFile)
if err != nil {
return err
}
file.Close()
// We may need to make the config dir.
if err := os.MkdirAll(configDir, 0755); err != nil && !os.IsExist(err) {
return errors.Wrapf(err, "error creating CNI configuration directory")
}
// Check all networks in the CNI conflist.
files, err := ioutil.ReadDir(configDir)
if err != nil {
return errors.Wrapf(err, "error reading CNI configuration directory")
}
if len(files) > 0 {
configPaths := make([]string, 0, len(files))
for _, path := range files {
if !path.IsDir() && filepath.Ext(path.Name()) == ".conflist" {
configPaths = append(configPaths, filepath.Join(configDir, path.Name()))
}
}
for _, config := range configPaths {
configName, err := getConfigName(config)
if err != nil {
logrus.Errorf("Error reading CNI configuration file: %v", err)
continue
}
if configName == name {
return nil
}
}
}
// We need to make the config.
// Get subnet and gateway.
_, ipNet, err := net.ParseCIDR(subnet)
if err != nil {
return errors.Wrapf(err, "default network subnet %s is invalid", subnet)
}
ones, bits := ipNet.Mask.Size()
if ones == bits {
return errors.Wrapf(err, "default network subnet %s is to small", subnet)
}
gateway := make(net.IP, len(ipNet.IP))
// copy the subnet ip to the gateway so we can modify it
copy(gateway, ipNet.IP)
// the default gateway should be the first ip in the subnet
gateway[len(gateway)-1]++
netInfo := new(networkInfo)
netInfo.Name = name
netInfo.Gateway = gateway.String()
netInfo.Subnet = ipNet.String()
netInfo.Machine = isMachine
templ, err := template.New("network_template").Delims("{{{{", "}}}}").Parse(networkTemplate)
if err != nil {
return errors.Wrapf(err, "error compiling template for default network")
}
var output bytes.Buffer
if err := templ.Execute(&output, netInfo); err != nil {
return errors.Wrapf(err, "error executing template for default network")
}
// Next, we need to place the config on disk.
// Loop through possible indexes, with a limit of 100 attempts.
created := false
for i := 87; i < 187; i++ {
configFile, err := os.OpenFile(filepath.Join(configDir, fmt.Sprintf("%d-%s.conflist", i, name)), os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
if err != nil {
logrus.Infof("Attempt to create default CNI network config file failed: %v", err)
continue
}
defer configFile.Close()
created = true
// Success - file is open. Write our buffer to it.
if _, err := configFile.Write(output.Bytes()); err != nil {
return errors.Wrapf(err, "error writing default CNI config to file")
}
break
}
if !created {
return errors.Errorf("no available default network configuration file was found")
}
return nil
}
// Get the name of the configuration contained in a given conflist file. Accepts
// the full path of a .conflist CNI configuration.
func getConfigName(file string) (string, error) {
contents, err := ioutil.ReadFile(file)
if err != nil {
return "", err
}
config := new(network)
if err := json.Unmarshal(contents, config); err != nil {
return "", errors.Wrapf(err, "error decoding CNI configuration %s", filepath.Base(file))
}
return config.Name, nil
}

View File

@ -9,7 +9,11 @@ import (
"io/ioutil"
"strings"
"golang.org/x/crypto/openpgp"
// This code is used only to parse the data in an explicitly-untrusted
// code path, where cryptography is not relevant. For now, continue to
// use this frozen deprecated implementation. When mechanism_openpgp.go
// migrates to another implementation, this should migrate as well.
"golang.org/x/crypto/openpgp" //nolint:staticcheck
)
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.

View File

@ -14,7 +14,13 @@ import (
"time"
"github.com/containers/storage/pkg/homedir"
"golang.org/x/crypto/openpgp"
// This is fallback code; the primary recommendation is to use the gpgme mechanism
// implementation, which is out-of-process and more appropriate for handling long-term private key material
// than any Go implementation.
// For this verify-only fallback, we haven't reviewed any of the
// existing alternatives to choose; so, for now, continue to
// use this frozen deprecated implementation.
"golang.org/x/crypto/openpgp" //nolint:staticcheck
)
// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.

View File

@ -6,12 +6,12 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 17
VersionMinor = 18
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
VersionDev = ""
)
// Version is the specification version that the package types support.

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
// +build !windows
package docker

View File

@ -3,9 +3,9 @@ module github.com/fsouza/go-dockerclient
go 1.16
require (
github.com/Microsoft/go-winio v0.5.0
github.com/containerd/containerd v1.5.5 // indirect
github.com/docker/docker v20.10.8+incompatible
github.com/Microsoft/go-winio v0.5.1
github.com/containerd/containerd v1.5.9 // indirect
github.com/docker/docker v20.10.12+incompatible
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0
github.com/google/go-cmp v0.5.6
@ -13,5 +13,6 @@ require (
github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/runc v1.0.3 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
)

View File

@ -46,8 +46,8 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@ -55,8 +55,8 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.18 h1:cYnKADiM1869gvBpos3YCteeT6sZLB48lB5dmMMs8Tg=
github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@ -87,6 +87,7 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@ -131,13 +132,13 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.5 h1:q1gxsZsGZ8ddVe98yO6pR21b5xQSMiR61lD0W96pgQo=
github.com/containerd/containerd v1.5.5/go.mod h1:oSTh0QpT1w6jYcGmbiSbxv9OSQYaa88mPyWIuU79zyo=
github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4=
github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@ -171,6 +172,7 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@ -224,8 +226,8 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U=
github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
@ -372,6 +374,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@ -460,15 +463,17 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@ -880,8 +885,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -21,7 +21,7 @@ import "github.com/opencontainers/image-spec/specs-go"
type Index struct {
specs.Versioned
// MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json`
// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json`
MediaType string `json:"mediaType,omitempty"`
// Manifests references platform specific manifests.

View File

@ -20,7 +20,7 @@ import "github.com/opencontainers/image-spec/specs-go"
type Manifest struct {
specs.Versioned
// MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
MediaType string `json:"mediaType,omitempty"`
// Config references a configuration object for a container, by digest.

View File

@ -15,7 +15,7 @@
// used with a fixed key in order to generate one-time keys from an nonce.
// However, in this package AES isn't used and the one-time key is specified
// directly.
package poly1305 // import "golang.org/x/crypto/poly1305"
package poly1305
import "crypto/subtle"

View File

@ -18,7 +18,7 @@
// value. These limbs are, for the most part, zero extended and
// placed into 64-bit vector register elements. Each vector
// register is 128-bits wide and so holds 2 of these elements.
// Using 26-bit limbs allows us plenty of headroom to accomodate
// Using 26-bit limbs allows us plenty of headroom to accommodate
// accumulations before and after multiplication without
// overflowing either 32-bits (before multiplication) or 64-bits
// (after multiplication).

View File

@ -14,7 +14,7 @@ import (
"time"
)
// These constants from [PROTOCOL.certkeys] represent the algorithm names
// These constants from [PROTOCOL.certkeys] represent the key algorithm names
// for certificate types supported by this package.
const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
@ -27,6 +27,14 @@ const (
CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com"
)
// These constants from [PROTOCOL.certkeys] represent additional signature
// algorithm names for certificate types supported by this package.
const (
CertSigAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertSigAlgoRSASHA2256v01 = "rsa-sha2-256-cert-v01@openssh.com"
CertSigAlgoRSASHA2512v01 = "rsa-sha2-512-cert-v01@openssh.com"
)
// Certificate types distinguish between host and user
// certificates. The values can be set in the CertType field of
// Certificate.
@ -423,6 +431,12 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
}
c.SignatureKey = authority.PublicKey()
if v, ok := authority.(AlgorithmSigner); ok {
if v.PublicKey().Type() == KeyAlgoRSA {
authority = &rsaSigner{v, SigAlgoRSASHA2512}
}
}
sig, err := authority.Sign(rand, c.bytesForSigning())
if err != nil {
return err
@ -431,8 +445,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
return nil
}
// certAlgoNames includes a mapping from signature algorithms to the
// corresponding certificate signature algorithm. When a key type (such
// as ED25519) is associated with only one algorithm, the KeyAlgo
// constant is used instead of the SigAlgo.
var certAlgoNames = map[string]string{
KeyAlgoRSA: CertAlgoRSAv01,
SigAlgoRSA: CertSigAlgoRSAv01,
SigAlgoRSASHA2256: CertSigAlgoRSASHA2256v01,
SigAlgoRSASHA2512: CertSigAlgoRSASHA2512v01,
KeyAlgoDSA: CertAlgoDSAv01,
KeyAlgoECDSA256: CertAlgoECDSA256v01,
KeyAlgoECDSA384: CertAlgoECDSA384v01,

View File

@ -18,7 +18,7 @@ import (
"io/ioutil"
"golang.org/x/crypto/chacha20"
"golang.org/x/crypto/poly1305"
"golang.org/x/crypto/internal/poly1305"
)
const (
@ -394,6 +394,10 @@ func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error)
}
c.incIV()
if len(plain) == 0 {
return nil, errors.New("ssh: empty packet")
}
padding := plain[0]
if padding < 4 {
// padding is a byte, so it automatically satisfies
@ -710,6 +714,10 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([
plain := c.buf[4:contentEnd]
s.XORKeyStream(plain, plain)
if len(plain) == 0 {
return nil, errors.New("ssh: empty packet")
}
padding := plain[0]
if padding < 4 {
// padding is a byte, so it automatically satisfies

View File

@ -115,12 +115,25 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e
// verifyHostKeySignature verifies the host key obtained in the key
// exchange.
func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error {
sig, rest, ok := parseSignatureBody(result.Signature)
if len(rest) > 0 || !ok {
return errors.New("ssh: signature parse error")
}
// For keys, underlyingAlgo is exactly algo. For certificates,
// we have to look up the underlying key algorithm that SSH
// uses to evaluate signatures.
underlyingAlgo := algo
for sigAlgo, certAlgo := range certAlgoNames {
if certAlgo == algo {
underlyingAlgo = sigAlgo
}
}
if sig.Format != underlyingAlgo {
return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, underlyingAlgo)
}
return hostKey.Verify(result.H, sig)
}

View File

@ -69,11 +69,13 @@ var preferredKexAlgos = []string{
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
// of authenticating servers) in preference order.
var supportedHostKeyAlgos = []string{
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01,
CertSigAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSA, KeyAlgoDSA,
SigAlgoRSASHA2512, SigAlgoRSASHA2256,
SigAlgoRSA, KeyAlgoDSA,
KeyAlgoED25519,
}
@ -90,16 +92,20 @@ var supportedCompressions = []string{compressionNone}
// hashFuncs keeps the mapping of supported algorithms to their respective
// hashes needed for signature verification.
var hashFuncs = map[string]crypto.Hash{
KeyAlgoRSA: crypto.SHA1,
KeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
CertAlgoRSAv01: crypto.SHA1,
CertAlgoDSAv01: crypto.SHA1,
CertAlgoECDSA256v01: crypto.SHA256,
CertAlgoECDSA384v01: crypto.SHA384,
CertAlgoECDSA521v01: crypto.SHA512,
SigAlgoRSA: crypto.SHA1,
SigAlgoRSASHA2256: crypto.SHA256,
SigAlgoRSASHA2512: crypto.SHA512,
KeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
CertSigAlgoRSAv01: crypto.SHA1,
CertSigAlgoRSASHA2256v01: crypto.SHA256,
CertSigAlgoRSASHA2512v01: crypto.SHA512,
CertAlgoDSAv01: crypto.SHA1,
CertAlgoECDSA256v01: crypto.SHA256,
CertAlgoECDSA384v01: crypto.SHA384,
CertAlgoECDSA521v01: crypto.SHA512,
}
// unexpectedMessageError results when the SSH message that we received didn't

View File

@ -457,8 +457,15 @@ func (t *handshakeTransport) sendKexInit() error {
if len(t.hostKeys) > 0 {
for _, k := range t.hostKeys {
msg.ServerHostKeyAlgos = append(
msg.ServerHostKeyAlgos, k.PublicKey().Type())
algo := k.PublicKey().Type()
switch algo {
case KeyAlgoRSA:
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{SigAlgoRSASHA2512, SigAlgoRSASHA2256, SigAlgoRSA}...)
case CertAlgoRSAv01:
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, CertSigAlgoRSAv01}...)
default:
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
}
}
} else {
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
@ -614,8 +621,22 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
var hostKey Signer
for _, k := range t.hostKeys {
if algs.hostKey == k.PublicKey().Type() {
kt := k.PublicKey().Type()
if kt == algs.hostKey {
hostKey = k
} else if signer, ok := k.(AlgorithmSigner); ok {
// Some signature algorithms don't show up as key types
// so we have to manually check for a compatible host key.
switch kt {
case KeyAlgoRSA:
if algs.hostKey == SigAlgoRSASHA2256 || algs.hostKey == SigAlgoRSASHA2512 {
hostKey = &rsaSigner{signer, algs.hostKey}
}
case CertAlgoRSAv01:
if algs.hostKey == CertSigAlgoRSASHA2256v01 || algs.hostKey == CertSigAlgoRSASHA2512v01 {
hostKey = &rsaSigner{signer, certToPrivAlgo(algs.hostKey)}
}
}
}
}
@ -634,7 +655,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *
return nil, err
}
if err := verifyHostKeySignature(hostKey, result); err != nil {
if err := verifyHostKeySignature(hostKey, algs.hostKey, result); err != nil {
return nil, err
}

View File

@ -939,6 +939,15 @@ func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) {
return &dsaPrivateKey{key}, nil
}
type rsaSigner struct {
AlgorithmSigner
defaultAlgorithm string
}
func (s *rsaSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
return s.AlgorithmSigner.SignWithAlgorithm(rand, data, s.defaultAlgorithm)
}
type wrappedSigner struct {
signer crypto.Signer
pubKey PublicKey

View File

@ -284,7 +284,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
func isAcceptableAlgo(algo string) bool {
switch algo {
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
case SigAlgoRSA, SigAlgoRSASHA2256, SigAlgoRSASHA2512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
return true
}

View File

@ -1213,6 +1213,9 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
return err
}
cc.addStreamLocked(cs) // assigns stream ID
if isConnectionCloseRequest(req) {
cc.doNotReuse = true
}
cc.mu.Unlock()
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
@ -2313,7 +2316,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs}
if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
if cs.requestedGzip && asciiEqualFold(res.Header.Get("Content-Encoding"), "gzip") {
res.Header.Del("Content-Encoding")
res.Header.Del("Content-Length")
res.ContentLength = -1
@ -2452,7 +2455,10 @@ func (b transportResponseBody) Close() error {
select {
case <-cs.donec:
case <-cs.ctx.Done():
return cs.ctx.Err()
// See golang/go#49366: The net/http package can cancel the
// request context after the response body is fully read.
// Don't treat this as an error.
return nil
case <-cs.reqCancel:
return errRequestCanceled
}

22
vendor/modules.txt vendored
View File

@ -87,7 +87,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.0.1
## explicit
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/buildah v1.23.1
# github.com/containers/buildah v1.23.1-0.20220112160421-d744ebc4b1d5
## explicit
github.com/containers/buildah
github.com/containers/buildah/bind
@ -96,6 +96,9 @@ github.com/containers/buildah/copier
github.com/containers/buildah/define
github.com/containers/buildah/docker
github.com/containers/buildah/imagebuildah
github.com/containers/buildah/internal
github.com/containers/buildah/internal/parse
github.com/containers/buildah/internal/util
github.com/containers/buildah/pkg/blobcache
github.com/containers/buildah/pkg/chrootuser
github.com/containers/buildah/pkg/cli
@ -125,7 +128,6 @@ github.com/containers/common/pkg/cgroupv2
github.com/containers/common/pkg/chown
github.com/containers/common/pkg/completion
github.com/containers/common/pkg/config
github.com/containers/common/pkg/defaultnet
github.com/containers/common/pkg/download
github.com/containers/common/pkg/filters
github.com/containers/common/pkg/flag
@ -150,7 +152,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.17.1-0.20220106205022-73f80d60f0e1
# github.com/containers/image/v5 v5.18.0
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -273,7 +275,7 @@ github.com/containers/storage/pkg/tarlog
github.com/containers/storage/pkg/truncindex
github.com/containers/storage/pkg/unshare
github.com/containers/storage/types
# github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
# github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a
github.com/coreos/go-systemd/activation
# github.com/coreos/go-systemd/v22 v22.3.2
## explicit
@ -377,7 +379,7 @@ github.com/dtylman/scp
# github.com/fsnotify/fsnotify v1.5.1
## explicit
github.com/fsnotify/fsnotify
# github.com/fsouza/go-dockerclient v1.7.4
# github.com/fsouza/go-dockerclient v1.7.7
github.com/fsouza/go-dockerclient
# github.com/ghodss/yaml v1.0.0
## explicit
@ -553,7 +555,7 @@ github.com/onsi/gomega/types
# github.com/opencontainers/go-digest v1.0.0
## explicit
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
# github.com/opencontainers/image-spec v1.0.3-0.20211215212317-ea0209f50ae1
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
@ -703,7 +705,7 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
# golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
## explicit
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@ -712,6 +714,7 @@ golang.org/x/crypto/curve25519
golang.org/x/crypto/curve25519/internal/field
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/openpgp
golang.org/x/crypto/openpgp/armor
@ -720,13 +723,12 @@ golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/poly1305
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20211105192438-b53810dc28af
# golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
golang.org/x/net/context
golang.org/x/net/html
golang.org/x/net/html/atom
@ -776,7 +778,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/tools v0.1.5
# golang.org/x/tools v0.1.7
golang.org/x/tools/go/ast/inspector
# google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
google.golang.org/genproto/googleapis/rpc/status