Vendor in containers/buildah 1.16.1
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
6 vendor/github.com/containers/buildah/.cirrus.yml (generated, vendored)
@@ -6,7 +6,7 @@ env:
     #### Global variables used for all tasks
     ####
     # Name of the ultimate destination branch for this CI run, PR or post-merge.
-    DEST_BRANCH: "master"
+    DEST_BRANCH: "release-1.16"
     GOPATH: "/var/tmp/go"
     GOSRC: "${GOPATH}/src/github.com/containers/buildah"
     # Overrides default location (/tmp/cirrus) for repo clone
@@ -295,11 +295,11 @@ gce_instance:

     build_script: |
         set -ex
        mkdir -p /nix
-        mkdir -p .cache
-        mount --bind .cache /nix
+        mv .cache /nix
         if [[ -z $(ls -A /nix) ]]; then podman run --rm --privileged -ti -v /:/mnt nixos/nix cp -rfT /nix /mnt/nix; fi
         podman run --rm --privileged -ti -v /nix:/nix -v ${PWD}:${PWD} -w ${PWD} nixos/nix nix --print-build-logs --option cores 8 --option max-jobs 8 build --file nix/
+        mv /nix .cache
         chown -Rf $(whoami) .cache

     binaries_artifacts:
112 vendor/github.com/containers/buildah/CHANGELOG.md (generated, vendored)
@@ -2,6 +2,118 @@

# Changelog

## v1.16.1 (2020-09-10)
    copier.Get(): hard link targets shouldn't be relative paths

## v1.16.0 (2020-09-03)
    fix build on 32bit arches
    containerImageRef.NewImageSource(): don't always force timestamps
    Add fuse module warning to image readme
    Heed our retry delay option values when retrying commit/pull/push
    Switch to containers/common for seccomp
    Use --timestamp rather then --omit-timestamp
    docs: remove outdated notice
    docs: remove outdated notice
    build-using-dockerfile: add a hidden --log-rusage flag
    build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
    Discard ReportWriter if user sets options.Quiet
    build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
    Fix ownership of content copied using COPY --from
    newTarDigester: zero out timestamps in tar headers
    Update nix pin with `make nixpkgs`
    bud.bats: correct .dockerignore integration tests
    Use pipes for copying
    run: include stdout in error message
    run: use the correct error for errors.Wrapf
    copier: un-export internal types
    copier: add Mkdir()
    in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
    docs/buildah-commit.md: tweak some wording, add a --rm example
    imagebuildah: don’t blank out destination names when COPYing
    Replace retry functions with common/pkg/retry
    StageExecutor.historyMatches: compare timestamps using .Equal
    Update vendor of containers/common
    Fix errors found in coverity scan
    Change namespace handling flags to better match podman commands
    conformance testing: ignore buildah.BuilderIdentityAnnotation labels
    Vendor in containers/storage v1.23.0
    Add buildah.IsContainer interface
    Avoid feeding run_buildah to pipe
    fix(buildahimage): add xz dependency in buildah image
    Bump github.com/containers/common from 0.15.2 to 0.18.0
    Howto for rootless image building from OpenShift
    Add --omit-timestamp flag to buildah bud
    Update nix pin with `make nixpkgs`
    Shutdown storage on failures
    Handle COPY --from when an argument is used
    Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
    Cirrus: Use newly built VM images
    Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
    Enhance the .dockerignore man pages
    conformance: add a test for COPY from subdirectory
    fix bug manifest inspct
    Add documentation for .dockerignore
    Add BuilderIdentityAnnotation to identify buildah version
    DOC: Add quay.io/containers/buildah image to README.md
    Update buildahimages readme
    fix spelling mistake in "info" command result display
    Don't bind /etc/host and /etc/resolv.conf if network is not present
    blobcache: avoid an unnecessary NewImage()
    Build static binary with `buildGoModule`
    copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
    tarFilterer: handle multiple archives
    Fix a race we hit during conformance tests
    Rework conformance testing
    Update 02-registries-repositories.md
    test-unit: invoke cmd/buildah tests with --flags
    parse: fix a type mismatch in a test
    Fix compilation of tests/testreport/testreport
    build.sh: log the version of Go that we're using
    test-unit: increase the test timeout to 40/45 minutes
    Add the "copier" package
    Fix & add notes regarding problematic language in codebase
    Add dependency on github.com/stretchr/testify/require
    CompositeDigester: add the ability to filter tar streams
    BATS tests: make more robust
    vendor golang.org/x/text@v0.3.3
    Switch golang 1.12 to golang 1.13
    imagebuildah: wait for stages that might not have even started yet
    chroot, run: not fail on bind mounts from /sys
    chroot: do not use setgroups if it is blocked
    Set engine env from containers.conf
    imagebuildah: return the right stage's image as the "final" image
    Fix a help string
    Deduplicate environment variables
    switch containers/libpod to containers/podman
    Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
    Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
    Mask out /sys/dev to prevent information leak
    linux: skip errors from the runtime kill
    Mask over the /sys/fs/selinux in mask branch
    Add VFS additional image store to container
    tests: add auth tests
    Allow "readonly" as alias to "ro" in mount options
    Ignore OS X specific consistency mount option
    Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
    Bump github.com/containers/common from 0.14.0 to 0.15.2
    Rootless Buildah should default to IsolationOCIRootless
    imagebuildah: fix inheriting multi-stage builds
    Make imagebuildah.BuildOptions.Architecture/OS optional
    Make imagebuildah.BuildOptions.Jobs optional
    Resolve a possible race in imagebuildah.Executor.startStage()
    Switch scripts to use containers.conf
    Bump openshift/imagebuilder to v1.1.6
    Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
    buildah, bud: support --jobs=N for parallel execution
    executor: refactor build code inside new function
    Add bud regression tests
    Cirrus: Fix missing htpasswd in registry img
    docs: clarify the 'triples' format
    CHANGELOG.md: Fix markdown formatting
    Add nix derivation for static builds
    Bump to v1.16.0-dev
    version centos7 for compatible

## v1.15.0 (2020-06-17)
    Bump github.com/containers/common from 0.12.0 to 0.13.1
    Bump github.com/containers/storage from 1.20.1 to 1.20.2
657 vendor/github.com/containers/buildah/add.go (generated, vendored)
@@ -1,21 +1,25 @@
 package buildah

 import (
+	"archive/tar"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
 	"path"
 	"path/filepath"
 	"strings"
+	"sync"
 	"syscall"
 	"time"

+	"github.com/containers/buildah/copier"
 	"github.com/containers/buildah/pkg/chrootuser"
 	"github.com/containers/buildah/util"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/fileutils"
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/hashicorp/go-multierror"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -25,17 +29,22 @@ import (
 type AddAndCopyOptions struct {
 	// Chown is a spec for the user who should be given ownership over the
 	// newly-added content, potentially overriding permissions which would
-	// otherwise match those of local files and directories being copied.
+	// otherwise be set to 0:0.
 	Chown string
+	// PreserveOwnership, if Chown is not set, tells us to avoid setting
+	// ownership of copied items to 0:0, instead using whatever ownership
+	// information is already set.  Not meaningful for remote sources.
+	PreserveOwnership bool
 	// All of the data being copied will pass through Hasher, if set.
 	// If the sources are URLs or files, their contents will be passed to
 	// Hasher.
 	// If the sources include directory trees, Hasher will be passed
 	// tar-format archives of the directory trees.
 	Hasher io.Writer
-	// Excludes is the contents of the .dockerignore file
+	// Excludes is the contents of the .dockerignore file.
 	Excludes []string
-	// ContextDir is the base directory for Excludes for content being copied
+	// ContextDir is the base directory for content being copied and
+	// Excludes patterns.
 	ContextDir string
 	// ID mapping options to use when contents to be copied are part of
 	// another container, and need ownerships to be mapped from the host to
@@ -44,74 +53,93 @@ type AddAndCopyOptions struct {
 	// DryRun indicates that the content should be digested, but not actually
 	// copied into the container.
 	DryRun bool
+	// Clear the setuid bit on items being copied.  Has no effect on
+	// archives being extracted, where the bit is always preserved.
+	StripSetuidBit bool
+	// Clear the setgid bit on items being copied.  Has no effect on
+	// archives being extracted, where the bit is always preserved.
+	StripSetgidBit bool
+	// Clear the sticky bit on items being copied.  Has no effect on
+	// archives being extracted, where the bit is always preserved.
+	StripStickyBit bool
 }

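Editor's note: for orientation, here is a hedged sketch of how a caller might drive these options; the builder value, destination path, and source names are illustrative and not taken from this diff.

package example

import (
	"crypto/sha256"

	"github.com/containers/buildah"
)

// addAppConfig is illustrative: copy "config/" from the build context into
// the container, hashing everything that gets copied along the way.
func addAppConfig(b *buildah.Builder) error {
	options := buildah.AddAndCopyOptions{
		Chown:      "1000:1000",          // owner for the copied items
		ContextDir: "/src/app",           // base dir for sources and Excludes
		Excludes:   []string{"**/*.tmp"}, // .dockerignore-style patterns
		Hasher:     sha256.New(),         // receives all copied content
	}
	// extract=false: archive sources are copied as plain files.
	return b.Add("/opt/app/", false, options, "config/")
}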
-// addURL copies the contents of the source URL to the destination.  This is
-// its own function so that deferred closes happen after we're done pulling
-// down each item of potentially many.
-func (b *Builder) addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer, dryRun bool) error {
-	resp, err := http.Get(srcurl)
+// sourceIsRemote returns true if "source" is a remote location.
+func sourceIsRemote(source string) bool {
+	return strings.HasPrefix(source, "http://") || strings.HasPrefix(source, "https://")
+}
+
+// getURL writes a tar archive containing the named content
+func getURL(src, mountpoint, renameTarget string, writer io.Writer) error {
+	url, err := url.Parse(src)
 	if err != nil {
-		return errors.Wrapf(err, "error getting %q", srcurl)
+		return errors.Wrapf(err, "error parsing URL %q", url)
 	}
-	defer resp.Body.Close()
-
-	thisHasher := hasher
-	if thisHasher != nil && b.ContentDigester.Hash() != nil {
-		thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
+	response, err := http.Get(src)
	if err != nil {
+		return errors.Wrapf(err, "error parsing URL %q", url)
 	}
-	if thisHasher == nil {
-		thisHasher = b.ContentDigester.Hash()
+	defer response.Body.Close()
+	// Figure out what to name the new content.
+	name := renameTarget
+	if name == "" {
+		name = path.Base(url.Path)
 	}
-	thisWriter := thisHasher
-
-	if !dryRun {
-		logrus.Debugf("saving %q to %q", srcurl, destination)
-		f, err := os.Create(destination)
+	// If there's a date on the content, use it.  If not, use the Unix epoch
+	// for compatibility.
+	date := time.Unix(0, 0).UTC()
+	lastModified := response.Header.Get("Last-Modified")
+	if lastModified != "" {
+		d, err := time.Parse(time.RFC1123, lastModified)
 		if err != nil {
-			return errors.Wrapf(err, "error creating %q", destination)
+			return errors.Wrapf(err, "error parsing last-modified time %q", lastModified)
 		}
+		date = d
 	}
+	// Figure out the size of the content.
+	size := response.ContentLength
+	responseBody := response.Body
+	if size < 0 {
+		// Create a temporary file and copy the content to it, so that
+		// we can figure out how much content there is.
+		f, err := ioutil.TempFile(mountpoint, "download")
		if err != nil {
+			return errors.Wrapf(err, "error creating temporary file to hold %q", src)
		}
+		defer os.Remove(f.Name())
+		defer f.Close()
-		if err = f.Chown(owner.UID, owner.GID); err != nil {
-			return errors.Wrapf(err, "error setting owner of %q to %d:%d", destination, owner.UID, owner.GID)
+		size, err = io.Copy(f, response.Body)
		if err != nil {
+			return errors.Wrapf(err, "error writing %q to temporary file %q", src, f.Name())
 		}
-		if last := resp.Header.Get("Last-Modified"); last != "" {
-			if mtime, err2 := time.Parse(time.RFC1123, last); err2 != nil {
-				logrus.Debugf("error parsing Last-Modified time %q: %v", last, err2)
-			} else {
-				defer func() {
-					if err3 := os.Chtimes(destination, time.Now(), mtime); err3 != nil {
-						logrus.Debugf("error setting mtime on %q to Last-Modified time %q: %v", destination, last, err3)
-					}
-				}()
-			}
-		}
+		_, err = f.Seek(0, io.SeekStart)
		if err != nil {
+			return errors.Wrapf(err, "error setting up to read %q from temporary file %q", src, f.Name())
 		}
-		defer func() {
-			if err2 := f.Chmod(0600); err2 != nil {
-				logrus.Debugf("error setting permissions on %q: %v", destination, err2)
-			}
-		}()
-		thisWriter = io.MultiWriter(f, thisWriter)
+		responseBody = f
 	}
-
-	n, err := io.Copy(thisWriter, resp.Body)
+	// Write the output archive.  Set permissions for compatibility.
+	tw := tar.NewWriter(writer)
+	defer tw.Close()
+	hdr := tar.Header{
+		Typeflag: tar.TypeReg,
+		Name:     name,
+		Size:     size,
+		Mode:     0600,
+		ModTime:  date,
 	}
+	err = tw.WriteHeader(&hdr)
	if err != nil {
-		return errors.Wrapf(err, "error reading contents for %q from %q", destination, srcurl)
+		return errors.Wrapf(err, "error writing header")
 	}
-	if resp.ContentLength >= 0 && n != resp.ContentLength {
-		return errors.Errorf("error reading contents for %q from %q: wrong length (%d != %d)", destination, srcurl, n, resp.ContentLength)
-	}
-	return nil
+	_, err = io.Copy(tw, responseBody)
+	return errors.Wrapf(err, "error writing content from %q to tar stream", src)
 }

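Editor's note: the core trick in getURL above is that tar headers need the entry size up front, while HTTP responses may omit Content-Length. A standalone sketch of that pattern, with hypothetical package and function names:

package urltar

import (
	"archive/tar"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"time"
)

// writeURLAsTar mirrors the pattern used by getURL: when the server sends
// no Content-Length, spool the download to a temporary file to measure it
// before emitting the tar header.
func writeURLAsTar(src, name string, w io.Writer) error {
	resp, err := http.Get(src)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	size := resp.ContentLength
	body := io.Reader(resp.Body)
	if size < 0 { // length unknown: copy to a temp file to find it
		f, err := ioutil.TempFile("", "download")
		if err != nil {
			return err
		}
		defer os.Remove(f.Name())
		defer f.Close()
		if size, err = io.Copy(f, resp.Body); err != nil {
			return err
		}
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			return err
		}
		body = f
	}

	tw := tar.NewWriter(w)
	defer tw.Close()
	hdr := tar.Header{
		Typeflag: tar.TypeReg,
		Name:     name,
		Size:     size,
		Mode:     0600,
		ModTime:  time.Unix(0, 0).UTC(), // epoch fallback, as in getURL
	}
	if err := tw.WriteHeader(&hdr); err != nil {
		return err
	}
	_, err = io.Copy(tw, body)
	return err
}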
 // Add copies the contents of the specified sources into the container's root
 // filesystem, optionally extracting contents of local files that look like
 // non-empty archives.
-func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
-	excludes, err := dockerIgnoreMatcher(options.Excludes, options.ContextDir)
-	if err != nil {
-		return err
-	}
+func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error {
 	mountPoint, err := b.Mount(b.MountLabel)
 	if err != nil {
 		return err
@@ -121,65 +149,311 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
 			logrus.Errorf("error unmounting container: %v", err2)
 		}
 	}()

+	contextDir := options.ContextDir
+	if contextDir == "" {
+		contextDir = string(os.PathSeparator)
+	}
+
+	// Figure out what sorts of sources we have.
+	var localSources, remoteSources []string
+	for _, src := range sources {
+		if sourceIsRemote(src) {
+			remoteSources = append(remoteSources, src)
+			continue
+		}
+		localSources = append(localSources, src)
+	}
+
+	// Check how many items our local source specs matched.  Each spec
+	// should have matched at least one item, otherwise we consider it an
+	// error.
+	var localSourceStats []*copier.StatsForGlob
+	if len(localSources) > 0 {
+		statOptions := copier.StatOptions{
+			CheckForArchives: extract,
+		}
+		localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
+		if err != nil {
+			return errors.Wrapf(err, "error checking on sources %v under %q", localSources, contextDir)
+		}
+	}
+	numLocalSourceItems := 0
+	for _, localSourceStat := range localSourceStats {
+		if localSourceStat.Error != "" {
+			errorText := localSourceStat.Error
+			rel, err := filepath.Rel(contextDir, localSourceStat.Glob)
+			if err != nil {
+				errorText = fmt.Sprintf("%v; %s", err, errorText)
+			}
+			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+				errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
+			}
+			return errors.Errorf("error checking on source %v under %q: %v", localSourceStat.Glob, contextDir, errorText)
+		}
+		if len(localSourceStat.Globbed) == 0 {
+			return errors.Wrapf(syscall.ENOENT, "error checking on source %v under %q: no glob matches", localSourceStat.Glob, contextDir)
+		}
+		numLocalSourceItems += len(localSourceStat.Globbed)
+	}
+	if numLocalSourceItems+len(remoteSources) == 0 {
+		return errors.Wrapf(syscall.ENOENT, "no sources %v found", sources)
+	}
+
 	// Find out which user (and group) the destination should belong to.
-	user, _, err := b.user(mountPoint, options.Chown)
-	if err != nil {
-		return err
-	}
+	var chownDirs, chownFiles *idtools.IDPair
+	var chmodDirs, chmodFiles *os.FileMode
+	var user specs.User
+	if options.Chown != "" {
+		user, _, err = b.user(mountPoint, options.Chown)
+		if err != nil {
+			return errors.Wrapf(err, "error looking up UID/GID for %q", options.Chown)
+		}
+	}
-	containerOwner := idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
-	hostUID, hostGID, err := util.GetHostIDs(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap, user.UID, user.GID)
-	if err != nil {
-		return err
-	}
+	chownDirs = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+	chownFiles = &idtools.IDPair{UID: int(user.UID), GID: int(user.GID)}
+	if options.Chown == "" && options.PreserveOwnership {
+		chownDirs = nil
+		chownFiles = nil
+	}
-	hostOwner := idtools.IDPair{UID: int(hostUID), GID: int(hostGID)}
-	dest := mountPoint
-	if !options.DryRun {
-		// Resolve the destination if it was specified as a relative path.
-		if destination != "" && filepath.IsAbs(destination) {
-			dir := filepath.Dir(destination)
-			if dir != "." && dir != "/" {
-				if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, dir), 0755, hostOwner); err != nil {
-					return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, dir))
-				}
-			}

+	// If we have a single source archive to extract, or more than one
+	// source item, or the destination has a path separator at the end of
+	// it, and it's not a remote URL, the destination needs to be a
+	// directory.
+	if destination == "" || !filepath.IsAbs(destination) {
+		tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
+		if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
+			destination = tmpDestination + string(os.PathSeparator)
+		} else {
+			destination = tmpDestination
+		}
+	}
+	destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator))
+	destCanBeFile := false
+	if len(sources) == 1 {
+		if len(remoteSources) == 1 {
+			destCanBeFile = sourceIsRemote(sources[0])
+		}
+		if len(localSources) == 1 {
+			item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]]
+			if item.IsDir || (item.IsArchive && extract) {
+				destMustBeDirectory = true
+			}
+			if item.IsRegular {
+				destCanBeFile = true
+			}
+		}
+	}
+
+	// We care if the destination either doesn't exist, or exists and is a
+	// file.  If the source can be a single file, for those cases we treat
+	// the destination as a file rather than as a directory tree.
+	renameTarget := ""
+	extractDirectory := filepath.Join(mountPoint, destination)
+	statOptions := copier.StatOptions{
+		CheckForArchives: extract,
+	}
+	destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
+	if err != nil {
+		return errors.Wrapf(err, "error checking on destination %v", extractDirectory)
+	}
+	if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
+		// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
+		renameTarget = filepath.Base(extractDirectory)
+		extractDirectory = filepath.Dir(extractDirectory)
+	}
+	if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
+		if destMustBeDirectory {
+			return errors.Errorf("destination %v already exists but is not a directory", destination)
+		}
+		// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
+		renameTarget = filepath.Base(extractDirectory)
+		extractDirectory = filepath.Dir(extractDirectory)
+	}
+
+	pm, err := fileutils.NewPatternMatcher(options.Excludes)
+	if err != nil {
+		return errors.Wrapf(err, "error processing excludes list %v", options.Excludes)
+	}
+
+	// Copy each source in turn.
+	var srcUIDMap, srcGIDMap []idtools.IDMap
+	if options.IDMappingOptions != nil {
+		srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
+	}
+	destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
+
+	for _, src := range sources {
+		var multiErr *multierror.Error
+		var getErr, closeErr, renameErr, putErr error
+		var wg sync.WaitGroup
+		if sourceIsRemote(src) {
+			pipeReader, pipeWriter := io.Pipe()
+			wg.Add(1)
+			go func() {
+				getErr = getURL(src, mountPoint, renameTarget, pipeWriter)
+				pipeWriter.Close()
+				wg.Done()
+			}()
+			wg.Add(1)
+			go func() {
+				b.ContentDigester.Start("")
+				hashCloser := b.ContentDigester.Hash()
+				hasher := io.Writer(hashCloser)
+				if options.Hasher != nil {
+					hasher = io.MultiWriter(hasher, options.Hasher)
+				}
+				if options.DryRun {
+					_, putErr = io.Copy(hasher, pipeReader)
+				} else {
+					putOptions := copier.PutOptions{
+						UIDMap:     destUIDMap,
+						GIDMap:     destGIDMap,
+						ChownDirs:  chownDirs,
+						ChmodDirs:  chmodDirs,
+						ChownFiles: chownFiles,
+						ChmodFiles: chmodFiles,
+					}
+					putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
+				}
+				hashCloser.Close()
+				pipeReader.Close()
+				wg.Done()
+			}()
+			wg.Wait()
+			if getErr != nil {
+				getErr = errors.Wrapf(getErr, "error reading %q", src)
+			}
+			if putErr != nil {
+				putErr = errors.Wrapf(putErr, "error storing %q", src)
+			}
+			multiErr = multierror.Append(getErr, putErr)
+			if multiErr != nil && multiErr.ErrorOrNil() != nil {
+				if len(multiErr.Errors) > 1 {
+					return multiErr.ErrorOrNil()
+				}
+				return multiErr.Errors[0]
+			}
+			continue
+		}
+
+		// Dig out the result of running glob+stat on this source spec.
+		var localSourceStat *copier.StatsForGlob
+		for _, st := range localSourceStats {
+			if st.Glob == src {
+				localSourceStat = st
+				break
+			}
+		}
+		if localSourceStat == nil {
+			return errors.Errorf("internal error: should have statted %s, but we didn't?", src)
+		}
+
+		// Iterate through every item that matched the glob.
+		itemsCopied := 0
+		for _, glob := range localSourceStat.Globbed {
+			rel, err := filepath.Rel(contextDir, glob)
+			if err != nil {
+				return errors.Wrapf(err, "error computing path of %q", glob)
+			}
+			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+				return errors.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
+			}
+			// Check for dockerignore-style exclusion of this item.
+			if rel != "." {
+				matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
+				if err != nil {
+					return errors.Wrapf(err, "error checking if %q(%q) is excluded", glob, rel)
+				}
+				if matches {
+					continue
+				}
+			}
-			dest = filepath.Join(dest, destination)
-		} else {
-			if err = idtools.MkdirAllAndChownNew(filepath.Join(dest, b.WorkDir()), 0755, hostOwner); err != nil {
-				return errors.Wrapf(err, "error creating directory %q", filepath.Join(dest, b.WorkDir()))
-			}
+			st := localSourceStat.Results[glob]
+			pipeReader, pipeWriter := io.Pipe()
+			wg.Add(1)
+			go func() {
+				renamedItems := 0
+				writer := io.WriteCloser(pipeWriter)
+				if renameTarget != "" {
+					writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
+						hdr.Name = renameTarget
+						renamedItems++
+						return false, false, nil
+					})
+				}
+				getOptions := copier.GetOptions{
+					UIDMap:         srcUIDMap,
+					GIDMap:         srcGIDMap,
+					Excludes:       options.Excludes,
+					ExpandArchives: extract,
+					StripSetuidBit: options.StripSetuidBit,
+					StripSetgidBit: options.StripSetgidBit,
+					StripStickyBit: options.StripStickyBit,
+				}
+				getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
+				closeErr = writer.Close()
+				if renameTarget != "" && renamedItems > 1 {
+					renameErr = errors.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
+				}
+				wg.Done()
+			}()
+			wg.Add(1)
+			go func() {
+				if st.IsDir {
+					b.ContentDigester.Start("dir")
+				} else {
+					b.ContentDigester.Start("file")
+				}
+				hashCloser := b.ContentDigester.Hash()
+				hasher := io.Writer(hashCloser)
+				if options.Hasher != nil {
+					hasher = io.MultiWriter(hasher, options.Hasher)
+				}
+				if options.DryRun {
+					_, putErr = io.Copy(hasher, pipeReader)
+				} else {
+					putOptions := copier.PutOptions{
+						UIDMap:     destUIDMap,
+						GIDMap:     destGIDMap,
+						ChownDirs:  chownDirs,
+						ChmodDirs:  chmodDirs,
+						ChownFiles: chownFiles,
+						ChmodFiles: chmodFiles,
+					}
+					putErr = copier.Put(mountPoint, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
+				}
+				hashCloser.Close()
+				pipeReader.Close()
+				wg.Done()
+			}()
+			wg.Wait()
+			if getErr != nil {
+				getErr = errors.Wrapf(getErr, "error reading %q", src)
+			}
-			dest = filepath.Join(dest, b.WorkDir(), destination)
-		}
-		// If the destination was explicitly marked as a directory by ending it
-		// with a '/', create it so that we can be sure that it's a directory,
-		// and any files we're copying will be placed in the directory.
-		if len(destination) > 0 && destination[len(destination)-1] == os.PathSeparator {
-			if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
-				return errors.Wrapf(err, "error creating directory %q", dest)
-			}
-		}
+			if closeErr != nil {
+				closeErr = errors.Wrapf(closeErr, "error closing %q", src)
+			}
+			if renameErr != nil {
+				renameErr = errors.Wrapf(renameErr, "error renaming %q", src)
+			}
+			if putErr != nil {
+				putErr = errors.Wrapf(putErr, "error storing %q", src)
+			}
+			multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
+			if multiErr != nil && multiErr.ErrorOrNil() != nil {
+				if len(multiErr.Errors) > 1 {
+					return multiErr.ErrorOrNil()
+				}
+				return multiErr.Errors[0]
+			}
+			itemsCopied++
+		}
-		// Make sure the destination's parent directory is usable.
-		if destpfi, err2 := os.Stat(filepath.Dir(dest)); err2 == nil && !destpfi.IsDir() {
-			return errors.Errorf("%q already exists, but is not a subdirectory)", filepath.Dir(dest))
-		}
+		if itemsCopied == 0 {
+			return errors.Wrapf(syscall.ENOENT, "no items matching glob %q copied (%d filtered)", localSourceStat.Glob, len(localSourceStat.Globbed))
+		}
+	}
-	// Now look at the destination itself.
-	destfi, err := os.Stat(dest)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return errors.Wrapf(err, "couldn't determine what %q is", dest)
-		}
-		destfi = nil
-	}
-	if len(source) > 1 && (destfi == nil || !destfi.IsDir()) {
-		return errors.Errorf("destination %q is not a directory", dest)
-	}
-	copyFileWithTar := b.copyFileWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
-	copyWithTar := b.copyWithTar(options.IDMappingOptions, &containerOwner, options.Hasher, options.DryRun)
-	untarPath := b.untarPath(nil, options.Hasher, options.DryRun)
-	err = b.addHelper(excludes, extract, dest, destfi, hostOwner, options, copyFileWithTar, copyWithTar, untarPath, source...)
-	if err != nil {
-		return err
-	}
 	return nil
 }

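Editor's note: the copy loop above leans on one pattern throughout: a producer goroutine streams a tar archive into an io.Pipe while a consumer digests and stores it, with a TeeReader splitting the stream. A self-contained toy version follows; the payload and the discard sink are illustrative stand-ins for copier.Get/getURL and copier.Put.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"sync"
)

func main() {
	pipeReader, pipeWriter := io.Pipe()
	hasher := sha256.New()

	var wg sync.WaitGroup
	var getErr, putErr error
	wg.Add(2)
	go func() { // producer: would be copier.Get or getURL in buildah
		defer wg.Done()
		_, getErr = io.Copy(pipeWriter, strings.NewReader("archive bytes"))
		pipeWriter.Close() // unblocks the consumer when done
	}()
	go func() { // consumer: would be copier.Put in buildah
		defer wg.Done()
		// TeeReader feeds the digester as the consumer drains the pipe.
		_, putErr = io.Copy(ioutil.Discard, io.TeeReader(pipeReader, hasher))
		pipeReader.Close()
	}()
	wg.Wait()
	fmt.Printf("get=%v put=%v digest=%x\n", getErr, putErr, hasher.Sum(nil))
}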
@@ -208,180 +482,3 @@ func (b *Builder) user(mountPoint string, userspec string) (specs.User, string,
 	}
 	return u, homeDir, err
 }

-// dockerIgnoreMatcher returns a matcher based on the contents of the .dockerignore file under contextDir
-func dockerIgnoreMatcher(lines []string, contextDir string) (*fileutils.PatternMatcher, error) {
-	// if there's no context dir, there's no .dockerignore file to consult
-	if contextDir == "" {
-		return nil, nil
-	}
-	// If there's no .dockerignore file, then we don't have to add a
-	// pattern to tell copy logic to ignore it later.
-	var patterns []string
-	if _, err := os.Stat(filepath.Join(contextDir, ".dockerignore")); err == nil || !os.IsNotExist(err) {
-		patterns = []string{".dockerignore"}
-	}
-	for _, ignoreSpec := range lines {
-		ignoreSpec = strings.TrimSpace(ignoreSpec)
-		// ignore comments passed back from .dockerignore
-		if ignoreSpec == "" || ignoreSpec[0] == '#' {
-			continue
-		}
-		// if the spec starts with '!' it means the pattern
-		// should be included. make a note so that we can move
-		// it to the front of the updated pattern, and insert
-		// the context dir's path in between
-		includeFlag := ""
-		if strings.HasPrefix(ignoreSpec, "!") {
-			includeFlag = "!"
-			ignoreSpec = ignoreSpec[1:]
-		}
-		if ignoreSpec == "" {
-			continue
-		}
-		patterns = append(patterns, includeFlag+filepath.Join(contextDir, ignoreSpec))
-	}
-	// if there are no patterns, save time by not constructing the object
-	if len(patterns) == 0 {
-		return nil, nil
-	}
-	// return a matcher object
-	matcher, err := fileutils.NewPatternMatcher(patterns)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error creating file matcher using patterns %v", patterns)
-	}
-	return matcher, nil
-}
-
-func (b *Builder) addHelper(excludes *fileutils.PatternMatcher, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
-	for n, src := range source {
-		if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
-			b.ContentDigester.Start("")
-			// We assume that source is a file, and we're copying
-			// it to the destination.  If the destination is
-			// already a directory, create a file inside of it.
-			// Otherwise, the destination is the file to which
-			// we'll save the contents.
-			url, err := url.Parse(src)
-			if err != nil {
-				return errors.Wrapf(err, "error parsing URL %q", src)
-			}
-			d := dest
-			if destfi != nil && destfi.IsDir() {
-				d = filepath.Join(dest, path.Base(url.Path))
-			}
-			if err = b.addURL(d, src, hostOwner, options.Hasher, options.DryRun); err != nil {
-				return err
-			}
-			continue
-		}
-
-		glob, err := filepath.Glob(src)
-		if err != nil {
-			return errors.Wrapf(err, "invalid glob %q", src)
-		}
-		if len(glob) == 0 {
-			return errors.Wrapf(syscall.ENOENT, "no files found matching %q", src)
-		}
-
-		for _, gsrc := range glob {
-			esrc, err := filepath.EvalSymlinks(gsrc)
-			if err != nil {
-				return errors.Wrapf(err, "error evaluating symlinks %q", gsrc)
-			}
-			srcfi, err := os.Stat(esrc)
-			if err != nil {
-				return errors.Wrapf(err, "error reading %q", esrc)
-			}
-			if srcfi.IsDir() {
-				b.ContentDigester.Start("dir")
-				// The source is a directory, so copy the contents of
-				// the source directory into the target directory.  Try
-				// to create it first, so that if there's a problem,
-				// we'll discover why that won't work.
-				if !options.DryRun {
-					if err = idtools.MkdirAllAndChownNew(dest, 0755, hostOwner); err != nil {
-						return errors.Wrapf(err, "error creating directory %q", dest)
-					}
-				}
-				logrus.Debugf("copying[%d] %q to %q", n, esrc+string(os.PathSeparator)+"*", dest+string(os.PathSeparator)+"*")
-
-				// Copy the whole directory because we do not exclude anything
-				if excludes == nil {
-					if err = copyWithTar(esrc, dest); err != nil {
-						return errors.Wrapf(err, "error copying %q to %q", esrc, dest)
-					}
-					continue
-				}
-				err := filepath.Walk(esrc, func(path string, info os.FileInfo, err error) error {
-					if err != nil {
-						return err
-					}
-
-					res, err := excludes.MatchesResult(path)
-					if err != nil {
-						return errors.Wrapf(err, "error checking if %s is an excluded path", path)
-					}
-					// The latest match result has the highest priority,
-					// which means that we only skip the filepath if
-					// the last result matched.
-					if res.IsMatched() {
-						return nil
-					}
-
-					// combine the source's basename with the dest directory
-					fpath, err := filepath.Rel(esrc, path)
-					if err != nil {
-						return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc)
-					}
-					if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
-						return errors.Wrapf(err, "error copying %q to %q", path, dest)
-					}
-					return nil
-				})
-				if err != nil {
-					return err
-				}
-				continue
-			}
-
-			// This source is a file
-			// Check if the path matches the .dockerignore
-			if excludes != nil {
-				res, err := excludes.MatchesResult(esrc)
-				if err != nil {
-					return errors.Wrapf(err, "error checking if %s is an excluded path", esrc)
-				}
-				// Skip the file if the pattern matches
-				if res.IsMatched() {
-					continue
-				}
-			}
-
-			b.ContentDigester.Start("file")
-
-			if !extract || !archive.IsArchivePath(esrc) {
-				// This source is a file, and either it's not an
-				// archive, or we don't care whether or not it's an
-				// archive.
-				d := dest
-				if destfi != nil && destfi.IsDir() {
-					d = filepath.Join(dest, filepath.Base(gsrc))
-				}
-				// Copy the file, preserving attributes.
-				logrus.Debugf("copying[%d] %q to %q", n, esrc, d)
-				if err = copyFileWithTar(esrc, d); err != nil {
-					return errors.Wrapf(err, "error copying %q to %q", esrc, d)
-				}
-				continue
-			}
-
-			// We're extracting an archive into the destination directory.
-			logrus.Debugf("extracting contents[%d] of %q into %q", n, esrc, dest)
-			if err = untarPath(esrc, dest); err != nil {
-				return errors.Wrapf(err, "error extracting %q into %q", esrc, dest)
-			}
-		}
-	}
-	return nil
-}
2 vendor/github.com/containers/buildah/buildah.go (generated, vendored)
@@ -28,7 +28,7 @@ const (
 	Package = "buildah"
 	// Version for the Package.  Bump version in contrib/rpm/buildah.spec
 	// too.
-	Version = "1.16.0-dev"
+	Version = "1.16.1"
 	// The value we use to identify what type of information, currently a
 	// serialized Builder structure, we are using as per-container state.
 	// This should only be changed when we make incompatible changes to
113 vendor/github.com/containers/buildah/changelog.txt (generated, vendored)
@@ -1,3 +1,116 @@
- Changelog for v1.16.1 (2020-09-10)
  * copier.Get(): hard link targets shouldn't be relative paths

- Changelog for v1.16.0 (2020-09-03)
  * fix build on 32bit arches
  * containerImageRef.NewImageSource(): don't always force timestamps
  * Add fuse module warning to image readme
  * Heed our retry delay option values when retrying commit/pull/push
  * Switch to containers/common for seccomp
  * Use --timestamp rather then --omit-timestamp
  * docs: remove outdated notice
  * docs: remove outdated notice
  * build-using-dockerfile: add a hidden --log-rusage flag
  * build(deps): bump github.com/containers/image/v5 from 5.5.1 to 5.5.2
  * Discard ReportWriter if user sets options.Quiet
  * build(deps): bump github.com/containers/common from 0.19.0 to 0.20.3
  * Fix ownership of content copied using COPY --from
  * newTarDigester: zero out timestamps in tar headers
  * Update nix pin with `make nixpkgs`
  * bud.bats: correct .dockerignore integration tests
  * Use pipes for copying
  * run: include stdout in error message
  * run: use the correct error for errors.Wrapf
  * copier: un-export internal types
  * copier: add Mkdir()
  * in_podman: don't get tripped up by $CIRRUS_CHANGE_TITLE
  * docs/buildah-commit.md: tweak some wording, add a --rm example
  * imagebuildah: don’t blank out destination names when COPYing
  * Replace retry functions with common/pkg/retry
  * StageExecutor.historyMatches: compare timestamps using .Equal
  * Update vendor of containers/common
  * Fix errors found in coverity scan
  * Change namespace handling flags to better match podman commands
  * conformance testing: ignore buildah.BuilderIdentityAnnotation labels
  * Vendor in containers/storage v1.23.0
  * Add buildah.IsContainer interface
  * Avoid feeding run_buildah to pipe
  * fix(buildahimage): add xz dependency in buildah image
  * Bump github.com/containers/common from 0.15.2 to 0.18.0
  * Howto for rootless image building from OpenShift
  * Add --omit-timestamp flag to buildah bud
  * Update nix pin with `make nixpkgs`
  * Shutdown storage on failures
  * Handle COPY --from when an argument is used
  * Bump github.com/seccomp/containers-golang from 0.5.0 to 0.6.0
  * Cirrus: Use newly built VM images
  * Bump github.com/opencontainers/runc from 1.0.0-rc91 to 1.0.0-rc92
  * Enhance the .dockerignore man pages
  * conformance: add a test for COPY from subdirectory
  * fix bug manifest inspct
  * Add documentation for .dockerignore
  * Add BuilderIdentityAnnotation to identify buildah version
  * DOC: Add quay.io/containers/buildah image to README.md
  * Update buildahimages readme
  * fix spelling mistake in "info" command result display
  * Don't bind /etc/host and /etc/resolv.conf if network is not present
  * blobcache: avoid an unnecessary NewImage()
  * Build static binary with `buildGoModule`
  * copier: split StripSetidBits into StripSetuidBit/StripSetgidBit/StripStickyBit
  * tarFilterer: handle multiple archives
  * Fix a race we hit during conformance tests
  * Rework conformance testing
  * Update 02-registries-repositories.md
  * test-unit: invoke cmd/buildah tests with --flags
  * parse: fix a type mismatch in a test
  * Fix compilation of tests/testreport/testreport
  * build.sh: log the version of Go that we're using
  * test-unit: increase the test timeout to 40/45 minutes
  * Add the "copier" package
  * Fix & add notes regarding problematic language in codebase
  * Add dependency on github.com/stretchr/testify/require
  * CompositeDigester: add the ability to filter tar streams
  * BATS tests: make more robust
  * vendor golang.org/x/text@v0.3.3
  * Switch golang 1.12 to golang 1.13
  * imagebuildah: wait for stages that might not have even started yet
  * chroot, run: not fail on bind mounts from /sys
  * chroot: do not use setgroups if it is blocked
  * Set engine env from containers.conf
  * imagebuildah: return the right stage's image as the "final" image
  * Fix a help string
  * Deduplicate environment variables
  * switch containers/libpod to containers/podman
  * Bump github.com/containers/ocicrypt from 1.0.2 to 1.0.3
  * Bump github.com/opencontainers/selinux from 1.5.2 to 1.6.0
  * Mask out /sys/dev to prevent information leak
  * linux: skip errors from the runtime kill
  * Mask over the /sys/fs/selinux in mask branch
  * Add VFS additional image store to container
  * tests: add auth tests
  * Allow "readonly" as alias to "ro" in mount options
  * Ignore OS X specific consistency mount option
  * Bump github.com/onsi/ginkgo from 1.13.0 to 1.14.0
  * Bump github.com/containers/common from 0.14.0 to 0.15.2
  * Rootless Buildah should default to IsolationOCIRootless
  * imagebuildah: fix inheriting multi-stage builds
  * Make imagebuildah.BuildOptions.Architecture/OS optional
  * Make imagebuildah.BuildOptions.Jobs optional
  * Resolve a possible race in imagebuildah.Executor.startStage()
  * Switch scripts to use containers.conf
  * Bump openshift/imagebuilder to v1.1.6
  * Bump go.etcd.io/bbolt from 1.3.4 to 1.3.5
  * buildah, bud: support --jobs=N for parallel execution
  * executor: refactor build code inside new function
  * Add bud regression tests
  * Cirrus: Fix missing htpasswd in registry img
  * docs: clarify the 'triples' format
  * CHANGELOG.md: Fix markdown formatting
  * Add nix derivation for static builds
  * Bump to v1.16.0-dev
  * add version centos7 for compatible

- Changelog for v1.15.0 (2020-06-17)
  * Bump github.com/containers/common from 0.12.0 to 0.13.1
  * Bump github.com/containers/storage from 1.20.1 to 1.20.2
12 vendor/github.com/containers/buildah/commit.go (generated, vendored)
@@ -79,6 +79,7 @@ type CommitOptions struct {
 	EmptyLayer bool
 	// OmitTimestamp forces epoch 0 as created timestamp to allow for
 	// deterministic, content-addressable builds.
+	// Deprecated use HistoryTimestamp instead.
 	OmitTimestamp bool
 	// SignBy is the fingerprint of a GPG key to use for signing the image.
 	SignBy string
@@ -231,6 +232,13 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
 	// want to compute here because we'll have to do it again when
 	// cp.Image() instantiates a source image, and we don't want to do the
 	// work twice.
+	if options.OmitTimestamp {
+		if options.HistoryTimestamp != nil {
+			return imgID, nil, "", errors.Errorf("OmitTimestamp and HistoryTimestamp can not be used together")
+		}
+		timestamp := time.Unix(0, 0).UTC()
+		options.HistoryTimestamp = &timestamp
+	}
 	nameToRemove := ""
 	if dest == nil {
 		nameToRemove = stringid.GenerateRandomID() + "-tmp"
@@ -344,7 +352,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
 	}

 	var manifestBytes []byte
-	if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, "push", getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
+	if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
 		return imgID, nil, "", errors.Wrapf(err, "error copying layers and metadata for container %q", b.ContainerID)
 	}
 	// If we've got more names to attach, and we know how to do that for
@@ -476,7 +484,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
 		systemContext.DirForceCompress = true
 	}
 	var manifestBytes []byte
-	if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, "push", getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
+	if manifestBytes, err = retryCopyImage(ctx, policyContext, dest, maybeCachedSrc, dest, getCopyOptions(options.Store, options.ReportWriter, nil, systemContext, options.ManifestType, options.RemoveSignatures, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
 		return nil, "", errors.Wrapf(err, "error copying layers and metadata from %q to %q", transports.ImageName(maybeCachedSrc), transports.ImageName(dest))
 	}
 	if options.ReportWriter != nil {
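Editor's note: since OmitTimestamp is now just a shim over HistoryTimestamp, a caller wanting reproducible commits would set HistoryTimestamp directly. A hedged fragment, assuming an existing builder and destination reference:

package example

import (
	"context"
	"time"

	"github.com/containers/buildah"
	"github.com/containers/image/v5/types"
)

// commitReproducibly pins history timestamps to the epoch, the replacement
// for the deprecated OmitTimestamp flag.
func commitReproducibly(ctx context.Context, b *buildah.Builder, dest types.ImageReference) (string, error) {
	timestamp := time.Unix(0, 0).UTC()
	imgID, _, _, err := b.Commit(ctx, dest, buildah.CommitOptions{
		HistoryTimestamp: &timestamp,
	})
	return imgID, err
}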
79 vendor/github.com/containers/buildah/common.go (generated, vendored)
@@ -3,13 +3,11 @@ package buildah
 import (
 	"context"
 	"io"
-	"net"
-	"net/url"
 	"os"
 	"path/filepath"
-	"syscall"
 	"time"

+	"github.com/containers/common/pkg/retry"
 	cp "github.com/containers/image/v5/copy"
 	"github.com/containers/image/v5/docker"
 	"github.com/containers/image/v5/signature"
@@ -17,11 +15,6 @@ import (
 	encconfig "github.com/containers/ocicrypt/config"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/unshare"
-	"github.com/docker/distribution/registry/api/errcode"
-	errcodev2 "github.com/docker/distribution/registry/api/v2"
-	multierror "github.com/hashicorp/go-multierror"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )

 const (
@@ -76,64 +69,22 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
 	return sc
 }

-func isRetryable(err error) bool {
-	err = errors.Cause(err)
-	type unwrapper interface {
-		Unwrap() error
-	}
-	if unwrapper, ok := err.(unwrapper); ok {
-		err = unwrapper.Unwrap()
-		return isRetryable(err)
-	}
-	if registryError, ok := err.(errcode.Error); ok {
-		switch registryError.Code {
-		case errcode.ErrorCodeUnauthorized, errcodev2.ErrorCodeNameUnknown, errcodev2.ErrorCodeManifestUnknown:
-			return false
-		}
-		return true
-	}
-	if op, ok := err.(*net.OpError); ok {
-		return isRetryable(op.Err)
-	}
-	if url, ok := err.(*url.Error); ok {
-		return isRetryable(url.Err)
-	}
-	if errno, ok := err.(syscall.Errno); ok {
-		if errno == syscall.ECONNREFUSED {
-			return false
-		}
-	}
-	if errs, ok := err.(errcode.Errors); ok {
-		// if this error is a group of errors, process them all in turn
-		for i := range errs {
-			if !isRetryable(errs[i]) {
-				return false
-			}
-		}
-	}
-	if errs, ok := err.(*multierror.Error); ok {
-		// if this error is a group of errors, process them all in turn
-		for i := range errs.Errors {
-			if !isRetryable(errs.Errors[i]) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, action string, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
-	manifestBytes, err := cp.Image(ctx, policyContext, dest, src, copyOptions)
-	for retries := 0; err != nil && isRetryable(err) && registry != nil && registry.Transport().Name() == docker.Transport.Name() && retries < maxRetries; retries++ {
-		if retryDelay == 0 {
-			retryDelay = 5 * time.Second
-		}
-		logrus.Infof("Warning: %s failed, retrying in %s ... (%d/%d)", action, retryDelay, retries+1, maxRetries)
-		time.Sleep(retryDelay)
+func retryCopyImage(ctx context.Context, policyContext *signature.PolicyContext, dest, src, registry types.ImageReference, copyOptions *cp.Options, maxRetries int, retryDelay time.Duration) ([]byte, error) {
+	var (
+		manifestBytes []byte
+		err           error
+		lastErr       error
+	)
+	err = retry.RetryIfNecessary(ctx, func() error {
 		manifestBytes, err = cp.Image(ctx, policyContext, dest, src, copyOptions)
-		if err == nil {
-			break
+		if registry != nil && registry.Transport().Name() != docker.Transport.Name() {
+			lastErr = err
+			return nil
 		}
+		return err
+	}, &retry.RetryOptions{MaxRetry: maxRetries, Delay: retryDelay})
+	if lastErr != nil {
+		err = lastErr
 	}
 	return manifestBytes, err
 }

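Editor's note: the rewritten retryCopyImage delegates its loop to containers/common's retry helper. A minimal sketch of that helper's calling convention; the failing operation body is a stand-in, and whether a given error is actually retried depends on the package's own retryability classification:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/containers/common/pkg/retry"
)

func main() {
	attempts := 0
	err := retry.RetryIfNecessary(context.Background(), func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // assumed retryable for illustration
		}
		return nil // success stops the retry loop
	}, &retry.RetryOptions{MaxRetry: 3, Delay: 2 * time.Second})
	fmt.Println(attempts, err)
}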
1526 vendor/github.com/containers/buildah/copier/copier.go (generated, vendored, new file)
File diff suppressed because it is too large.
79 vendor/github.com/containers/buildah/copier/syscall_unix.go (generated, vendored, new file)
@@ -0,0 +1,79 @@
// +build !windows

package copier

import (
	"fmt"
	"os"
	"time"

	"golang.org/x/sys/unix"
)

var canChroot = true

func chroot(root string) (bool, error) {
	if canChroot {
		if err := os.Chdir(root); err != nil {
			return false, fmt.Errorf("error changing to intended-new-root directory %q: %v", root, err)
		}
		if err := unix.Chroot(root); err != nil {
			return false, fmt.Errorf("error chrooting to directory %q: %v", root, err)
		}
		if err := os.Chdir(string(os.PathSeparator)); err != nil {
			return false, fmt.Errorf("error changing to just-became-root directory %q: %v", root, err)
		}
		return true, nil
	}
	return false, nil
}

func chrMode(mode os.FileMode) uint32 {
	return uint32(unix.S_IFCHR | mode)
}

func blkMode(mode os.FileMode) uint32 {
	return uint32(unix.S_IFBLK | mode)
}

func mkdev(major, minor uint32) uint64 {
	return unix.Mkdev(major, minor)
}

func mkfifo(path string, mode uint32) error {
	return unix.Mkfifo(path, mode)
}

func mknod(path string, mode uint32, dev int) error {
	return unix.Mknod(path, mode, dev)
}

func chmod(path string, mode os.FileMode) error {
	return os.Chmod(path, mode)
}

func chown(path string, uid, gid int) error {
	return os.Chown(path, uid, gid)
}

func lchown(path string, uid, gid int) error {
	return os.Lchown(path, uid, gid)
}

func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
	if atime.IsZero() || mtime.IsZero() {
		now := time.Now()
		if atime.IsZero() {
			atime = now
		}
		if mtime.IsZero() {
			mtime = now
		}
	}
	return unix.Lutimes(path, []unix.Timeval{unix.NsecToTimeval(atime.UnixNano()), unix.NsecToTimeval(mtime.UnixNano())})
}

const (
	testModeMask           = int64(os.ModePerm)
	testIgnoreSymlinkDates = false
)
83 vendor/github.com/containers/buildah/copier/syscall_windows.go (generated, vendored, new file)
@@ -0,0 +1,83 @@
// +build windows

package copier

import (
	"errors"
	"os"
	"syscall"
	"time"

	"golang.org/x/sys/windows"
)

var canChroot = false

func chroot(path string) (bool, error) {
	return false, nil
}

func chrMode(mode os.FileMode) uint32 {
	return windows.S_IFCHR | uint32(mode)
}

func blkMode(mode os.FileMode) uint32 {
	return windows.S_IFBLK | uint32(mode)
}

func mkdev(major, minor uint32) uint64 {
	return 0
}

func mkfifo(path string, mode uint32) error {
	return syscall.ENOSYS
}

func mknod(path string, mode uint32, dev int) error {
	return syscall.ENOSYS
}

func chmod(path string, mode os.FileMode) error {
	err := os.Chmod(path, mode)
	if err != nil && errors.Is(err, syscall.EWINDOWS) {
		return nil
	}
	return err
}

func chown(path string, uid, gid int) error {
	err := os.Chown(path, uid, gid)
	if err != nil && errors.Is(err, syscall.EWINDOWS) {
		return nil
	}
	return err
}

func lchown(path string, uid, gid int) error {
	err := os.Lchown(path, uid, gid)
	if err != nil && errors.Is(err, syscall.EWINDOWS) {
		return nil
	}
	return err
}

func lutimes(isSymlink bool, path string, atime, mtime time.Time) error {
	if isSymlink {
		return nil
	}
	if atime.IsZero() || mtime.IsZero() {
		now := time.Now()
		if atime.IsZero() {
			atime = now
		}
		if mtime.IsZero() {
			mtime = now
		}
	}
	return windows.UtimesNano(path, []windows.Timespec{windows.NsecToTimespec(atime.UnixNano()), windows.NsecToTimespec(mtime.UnixNano())})
}

const (
	testModeMask           = int64(0600)
	testIgnoreSymlinkDates = true
)
11 vendor/github.com/containers/buildah/copier/unwrap_112.go (generated, vendored, new file)
@@ -0,0 +1,11 @@
// +build !go113

package copier

import (
	"github.com/pkg/errors"
)

func unwrapError(err error) error {
	return errors.Cause(err)
}
18 vendor/github.com/containers/buildah/copier/unwrap_113.go (generated, vendored, new file)
@@ -0,0 +1,18 @@
// +build go113

package copier

import (
    stderror "errors"

    "github.com/pkg/errors"
)

func unwrapError(err error) error {
    e := errors.Cause(err)
    for e != nil {
        err = e
        e = stderror.Unwrap(err)
    }
    return err
}
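A minimal usage sketch of what unwrapError buys callers such as the xattr code below, assuming Go 1.13 or later: it alternates between pkg/errors causes and standard-library %w unwrapping until the root error is reached, so sentinel comparisons like err == syscall.ERANGE work again. Everything here is illustrative and not part of the vendored code:

package main

import (
    stderrors "errors"
    "fmt"
    "syscall"

    "github.com/pkg/errors"
)

// unwrapError reproduces the loop from unwrap_113.go above.
func unwrapError(err error) error {
    e := errors.Cause(err)
    for e != nil {
        err = e
        e = stderrors.Unwrap(err)
    }
    return err
}

func main() {
    wrapped := errors.Wrap(fmt.Errorf("listing xattrs: %w", syscall.ERANGE), "copier")
    fmt.Println(unwrapError(wrapped) == syscall.ERANGE) // true
}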
92
vendor/github.com/containers/buildah/copier/xattrs.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
// +build linux netbsd freebsd darwin

package copier

import (
    "path/filepath"
    "strings"
    "syscall"

    "github.com/pkg/errors"
    "golang.org/x/sys/unix"
)

const (
    xattrsSupported = true
)

var (
    relevantAttributes = []string{"security.capability", "security.ima", "user.*"} // the attributes that we preserve - we discard others
)

// isRelevantXattr checks if "attribute" matches one of the attribute patterns
// listed in the "relevantAttributes" list.
func isRelevantXattr(attribute string) bool {
    for _, relevant := range relevantAttributes {
        matched, err := filepath.Match(relevant, attribute)
        if err != nil || !matched {
            continue
        }
        return true
    }
    return false
}

// Lgetxattrs returns a map of the relevant extended attributes set on the given file.
func Lgetxattrs(path string) (map[string]string, error) {
    maxSize := 64 * 1024 * 1024
    listSize := 64 * 1024
    var list []byte
    for listSize < maxSize {
        list = make([]byte, listSize)
        size, err := unix.Llistxattr(path, list)
        if err != nil {
            if unwrapError(err) == syscall.ERANGE {
                listSize *= 2
                continue
            }
            return nil, errors.Wrapf(err, "error listing extended attributes of %q", path)
        }
        list = list[:size]
        break
    }
    if listSize >= maxSize {
        return nil, errors.Errorf("unable to read list of attributes for %q: size would have been too big", path)
    }
    m := make(map[string]string)
    for _, attribute := range strings.Split(string(list), string('\000')) {
        if isRelevantXattr(attribute) {
            attributeSize := 64 * 1024
            var attributeValue []byte
            for attributeSize < maxSize {
                attributeValue = make([]byte, attributeSize)
                size, err := unix.Lgetxattr(path, attribute, attributeValue)
                if err != nil {
                    if unwrapError(err) == syscall.ERANGE {
                        attributeSize *= 2
                        continue
                    }
                    return nil, errors.Wrapf(err, "error getting value of extended attribute %q on %q", attribute, path)
                }
                m[attribute] = string(attributeValue[:size])
                break
            }
            if attributeSize >= maxSize {
                return nil, errors.Errorf("unable to read attribute %q of %q: size would have been too big", attribute, path)
            }
        }
    }
    return m, nil
}

// Lsetxattrs sets the relevant members of the specified extended attributes on the given file.
func Lsetxattrs(path string, xattrs map[string]string) error {
    for attribute, value := range xattrs {
        if isRelevantXattr(attribute) {
            if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
                return errors.Wrapf(err, "error setting value of extended attribute %q on %q", attribute, path)
            }
        }
    }
    return nil
}
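A small usage sketch for the two exported helpers above, assuming a Linux host; the paths are placeholders. Copying only the attributes that Lgetxattrs returns means nothing outside the relevantAttributes patterns (security.capability, security.ima, user.*) is propagated:

package main

import (
    "log"

    "github.com/containers/buildah/copier"
)

func main() {
    attrs, err := copier.Lgetxattrs("/srv/src/file")
    if err != nil {
        log.Fatal(err)
    }
    // Only the relevant attributes come back, so this cannot reintroduce
    // anything the copier deliberately discards.
    if err := copier.Lsetxattrs("/srv/dst/file", attrs); err != nil {
        log.Fatal(err)
    }
}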
15
vendor/github.com/containers/buildah/copier/xattrs_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// +build !linux,!netbsd,!freebsd,!darwin

package copier

const (
    xattrsSupported = false
)

func Lgetxattrs(path string) (map[string]string, error) {
    return nil, nil
}

func Lsetxattrs(path string, xattrs map[string]string) error {
    return nil
}
15
vendor/github.com/containers/buildah/digester.go
generated
vendored
@@ -6,6 +6,7 @@ import (
    "hash"
    "io"
    "sync"
    "time"

    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
@@ -82,6 +83,10 @@ func (t *tarFilterer) Close() error {
// newTarFilterer passes one or more tar archives through to an io.WriteCloser
// as a single archive, potentially calling filter to modify headers and
// contents as it goes.
//
// Note: if "filter" indicates that a given item should be skipped, there is no
// guarantee that there will not be a subsequent item of type TypeLink, which
// is a hard link, which points to the skipped item as the link target.
func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader)) io.WriteCloser {
    pipeReader, pipeWriter := io.Pipe()
    tarWriter := tar.NewWriter(writeCloser)
@@ -153,12 +158,20 @@ type tarDigester struct {
    tarFilterer io.WriteCloser
}

func modifyTarHeaderForDigesting(hdr *tar.Header) (skip, replaceContents bool, replacementContents io.Reader) {
    zeroTime := time.Time{}
    hdr.ModTime = zeroTime
    hdr.AccessTime = zeroTime
    hdr.ChangeTime = zeroTime
    return false, false, nil
}

func newTarDigester(contentType string) digester {
    nested := newSimpleDigester(contentType)
    digester := &tarDigester{
        isOpen:      true,
        nested:      nested,
        tarFilterer: nested,
        tarFilterer: newTarFilterer(nested, modifyTarHeaderForDigesting),
    }
    return digester
}
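Why modifyTarHeaderForDigesting matters, as a standalone sketch using only the standard library and not the vendored code: two tar streams with identical contents but different mtimes hash differently unless the headers are normalized first. The helper digestOf is illustrative:

package main

import (
    "archive/tar"
    "bytes"
    "crypto/sha256"
    "fmt"
    "time"
)

func digestOf(mtime time.Time, normalize bool) [32]byte {
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    hdr := &tar.Header{Name: "hello.txt", Typeflag: tar.TypeReg, Size: 5, Mode: 0o644, ModTime: mtime}
    if normalize {
        hdr.ModTime = time.Time{} // what the digesting filter above does
    }
    if err := tw.WriteHeader(hdr); err != nil {
        panic(err)
    }
    if _, err := tw.Write([]byte("hello")); err != nil {
        panic(err)
    }
    if err := tw.Close(); err != nil {
        panic(err)
    }
    return sha256.Sum256(buf.Bytes())
}

func main() {
    a := digestOf(time.Unix(1000, 0), false)
    b := digestOf(time.Unix(2000, 0), false)
    fmt.Println(a == b) // false: timestamps leak into the digest
    a = digestOf(time.Unix(1000, 0), true)
    b = digestOf(time.Unix(2000, 0), true)
    fmt.Println(a == b) // true: normalized headers digest identically
}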
10
vendor/github.com/containers/buildah/go.mod
generated
vendored
@@ -4,11 +4,10 @@ go 1.12

require (
    github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
    github.com/containers/common v0.19.0
    github.com/containers/image/v5 v5.5.1
    github.com/containers/common v0.21.0
    github.com/containers/image/v5 v5.5.2
    github.com/containers/ocicrypt v1.0.3
    github.com/containers/storage v1.23.0
    github.com/cyphar/filepath-securejoin v0.2.2
    github.com/containers/storage v1.23.3
    github.com/docker/distribution v2.7.1+incompatible
    github.com/docker/go-units v0.4.0
    github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
@@ -27,8 +26,7 @@ require (
    github.com/opencontainers/selinux v1.6.0
    github.com/openshift/imagebuilder v1.1.6
    github.com/pkg/errors v0.9.1
    github.com/seccomp/containers-golang v0.6.0
    github.com/seccomp/libseccomp-golang v0.9.1
    github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
    github.com/sirupsen/logrus v1.6.0
    github.com/spf13/cobra v0.0.7
    github.com/spf13/pflag v1.0.5
21
vendor/github.com/containers/buildah/go.sum
generated
vendored
@@ -52,10 +52,10 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784 h1:rqUVLD8I859xRgUx/WMC3v7QAFqbLKZbs+0kqYboRJc=
github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containers/common v0.19.0 h1:nya/Fh51kiyV0cAO31ejoNwvRAeYreymsO820yjfc3Y=
github.com/containers/common v0.19.0/go.mod h1:+NUHV8V5Kmo260ja9Dxtr8ialrDnK4RNzyeEbSgmLac=
github.com/containers/image/v5 v5.5.1 h1:h1FCOXH6Ux9/p/E4rndsQOC4yAdRU0msRTfLVeQ7FDQ=
github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/common v0.21.0 h1:v2U9MrGw0vMgefQf0/uJYBsSnengxLbSORYqhCVEBs0=
github.com/containers/common v0.21.0/go.mod h1:8w8SVwc+P2p1MOnRMbSKNWXt1Iwd2bKFu2LLZx55DTM=
github.com/containers/image/v5 v5.5.2 h1:fv7FArz0zUnjH0W0l8t90CqWFlFcQrPP6Pug+9dUtVI=
github.com/containers/image/v5 v5.5.2/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
@@ -64,8 +64,8 @@ github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6Gz
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.20.2 h1:tw/uKRPDnmVrluIzer3dawTFG/bTJLP8IEUyHFhltYk=
github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc=
github.com/containers/storage v1.23.0 h1:gYyNkBiihC2FvGiHOjOjpnfojYwgxpLVooTUlmD6pxs=
github.com/containers/storage v1.23.0/go.mod h1:I1EIAA7B4OwWRSA0b4yq2AW1wjvvfcY0zLWQuwTa4zw=
github.com/containers/storage v1.23.3 h1:6ZeQi+xKBXrbUXSSZvSs8HuKoNCPfRkXR4f+8TkiMsI=
github.com/containers/storage v1.23.3/go.mod h1:0azTMiuBhArp/VUmH1o4DJAGaaH+qLtEu17pJ/iKJCg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -185,8 +185,8 @@ github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqy
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.11 h1:K9z59aO18Aywg2b/WSgBaUX99mHy2BES18Cr5lBKZHk=
github.com/klauspost/compress v1.10.11/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
@@ -304,10 +304,10 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/seccomp/containers-golang v0.6.0 h1:VWPMMIDr8pAtNjCX0WvLEEK9EQi5lAm4HtJbDtAtFvQ=
github.com/seccomp/containers-golang v0.6.0/go.mod h1:Dd9mONHvW4YdbSzdm23yf2CFw0iqvqLhO0mEFvPIvm4=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
@@ -435,7 +435,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
65
vendor/github.com/containers/buildah/image.go
generated
vendored
@@ -13,6 +13,7 @@ import (
    "strings"
    "time"

    "github.com/containers/buildah/copier"
    "github.com/containers/buildah/docker"
    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/image"
@@ -21,6 +22,7 @@ import (
    "github.com/containers/image/v5/types"
    "github.com/containers/storage"
    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/idtools"
    "github.com/containers/storage/pkg/ioutils"
    digest "github.com/opencontainers/go-digest"
    specs "github.com/opencontainers/image-spec/specs-go"
@@ -50,7 +52,7 @@ type containerImageRef struct {
    layerID string
    oconfig []byte
    dconfig []byte
    created time.Time
    created *time.Time
    createdBy string
    historyComment string
    annotations map[string]string
@@ -58,7 +60,7 @@ type containerImageRef struct {
    exporting bool
    squash bool
    emptyLayer bool
    tarPath func(path string) (io.ReadCloser, error)
    idMappingOptions *IDMappingOptions
    parent string
    blobDirectory string
    preEmptyLayers []v1.History
@@ -142,16 +144,25 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om

// Extract the container's whole filesystem as if it were a single layer.
func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
    var uidMap, gidMap []idtools.IDMap
    mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
    if err != nil {
        return nil, errors.Wrapf(err, "error mounting container %q", i.containerID)
    }
    rc, err := i.tarPath(mountPoint)
    if err != nil {
        return nil, errors.Wrapf(err, "error extracting rootfs from container %q", i.containerID)
    }
    return ioutils.NewReadCloserWrapper(rc, func() error {
        if err = rc.Close(); err != nil {
    pipeReader, pipeWriter := io.Pipe()
    go func() {
        if i.idMappingOptions != nil {
            uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
        }
        copierOptions := copier.GetOptions{
            UIDMap: uidMap,
            GIDMap: gidMap,
        }
        err = copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
        pipeWriter.Close()
    }()
    return ioutils.NewReadCloserWrapper(pipeReader, func() error {
        if err = pipeReader.Close(); err != nil {
            err = errors.Wrapf(err, "error closing tar archive of container %q", i.containerID)
        }
        if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
@@ -167,7 +178,10 @@ func (i *containerImageRef) extractRootfs() (io.ReadCloser, error) {
// Build fresh copies of the container configuration structures so that we can edit them
// without making unintended changes to the original Builder.
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
    created := i.created
    created := time.Now().UTC()
    if i.created != nil {
        created = *i.created
    }

    // Build an empty image, and then decode over it.
    oimage := v1.Image{}
@@ -285,7 +299,6 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
    if err != nil {
        return nil, err
    }
    omitTimestamp := i.created.Equal(time.Unix(0, 0))

    // Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
    for _, layerID := range layers {
@@ -375,9 +388,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
            return nil, errors.Wrapf(err, "error compressing %s", what)
        }
        writer := io.MultiWriter(writeCloser, srcHasher.Hash())
        // Zero out timestamps in the layer, if we're doing that for
        // Use specified timestamps in the layer, if we're doing that for
        // history entries.
        if omitTimestamp {
        if i.created != nil {
            nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
            writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
                // Changing a zeroed field to a non-zero field
@@ -388,13 +401,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
                // changing the length) of the header that we
                // write.
                if !hdr.ModTime.IsZero() {
                    hdr.ModTime = i.created
                    hdr.ModTime = *i.created
                }
                if !hdr.AccessTime.IsZero() {
                    hdr.AccessTime = i.created
                    hdr.AccessTime = *i.created
                }
                if !hdr.ChangeTime.IsZero() {
                    hdr.ChangeTime = i.created
                    hdr.ChangeTime = *i.created
                }
                return false, false, nil
            })
@@ -414,7 +427,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
        } else {
            size = counter.Count
        }
        logrus.Debugf("%s size is %d bytes", what, size)
        logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
        // Rename the layer so that we can more easily find it by digest later.
        finalBlobName := filepath.Join(path, destHasher.Digest().String())
        if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
@@ -469,8 +482,12 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
        }
    }
    appendHistory(i.preEmptyLayers)
    created := time.Now().UTC()
    if i.created != nil {
        created = (*i.created).UTC()
    }
    onews := v1.History{
        Created: &i.created,
        Created: &created,
        CreatedBy: i.createdBy,
        Author: oimage.Author,
        Comment: i.historyComment,
@@ -478,7 +495,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
    }
    oimage.History = append(oimage.History, onews)
    dnews := docker.V2S2History{
        Created: i.created,
        Created: created,
        CreatedBy: i.createdBy,
        Author: dimage.Author,
        Comment: i.historyComment,
@@ -693,9 +710,10 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
    if err != nil {
        return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker)
    }
    created := time.Now().UTC()
    var created *time.Time
    if options.HistoryTimestamp != nil {
        created = options.HistoryTimestamp.UTC()
        historyTimestampUTC := options.HistoryTimestamp.UTC()
        created = &historyTimestampUTC
    }
    createdBy := b.CreatedBy()
    if createdBy == "" {
@@ -705,10 +723,6 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
        }
    }

    if options.OmitTimestamp {
        created = time.Unix(0, 0).UTC()
    }

    parent := ""
    if b.FromImageID != "" {
        parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
@@ -735,12 +749,11 @@ func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.Ima
        exporting: exporting,
        squash: options.Squash,
        emptyLayer: options.EmptyLayer && !options.Squash,
        tarPath: b.tarPath(&b.IDMappingOptions),
        idMappingOptions: &b.IDMappingOptions,
        parent: parent,
        blobDirectory: options.BlobDirectory,
        preEmptyLayers: b.PrependedEmptyLayers,
        postEmptyLayers: b.AppendedEmptyLayers,
    }

    return ref, nil
}
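Condensing the timestamp policy that the image.go changes above implement, as an illustrative sketch rather than the vendored code (the helper name effectiveCreated is not from the source): a nil created pointer means "now", while a non-nil pointer pins every history entry, and via the tar filterer the layer contents, to that instant.

package main

import (
    "fmt"
    "time"
)

func effectiveCreated(created *time.Time) time.Time {
    if created != nil {
        return (*created).UTC()
    }
    return time.Now().UTC()
}

func main() {
    fmt.Println(effectiveCreated(nil)) // wall clock: not reproducible
    pinned := time.Unix(0, 0).UTC()
    fmt.Println(effectiveCreated(&pinned)) // 1970-01-01: reproducible builds
}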
8
vendor/github.com/containers/buildah/imagebuildah/build.go
generated
vendored
@@ -168,9 +168,9 @@ type BuildOptions struct {
    SignBy string
    // Architecture specifies the target architecture of the image to be built.
    Architecture string
    // OmitTimestamp forces epoch 0 as created timestamp to allow for
    // deterministic, content-addressable builds.
    OmitTimestamp bool
    // Timestamp sets the created timestamp to the specified time, allowing
    // for deterministic, content-addressable builds.
    Timestamp *time.Time
    // OS specifies the operating system of the image to be built.
    OS string
    // MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
@@ -183,6 +183,8 @@ type BuildOptions struct {
    OciDecryptConfig *encconfig.DecryptConfig
    // Jobs is the number of stages to run in parallel. If not specified it defaults to 1.
    Jobs *int
    // LogRusage logs resource usage for each step.
    LogRusage bool
}

// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
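A hedged usage sketch for the new Timestamp field, which replaces the removed OmitTimestamp boolean: callers that previously asked for epoch 0 now pass it explicitly. Only the field shown is taken from the diff; every other BuildOptions member is left at its zero value here for brevity.

package main

import (
    "time"

    "github.com/containers/buildah/imagebuildah"
)

func main() {
    epoch := time.Unix(0, 0).UTC()
    options := imagebuildah.BuildOptions{
        // Pinning the created time makes the build deterministic and
        // content-addressable, as the field's comment describes.
        Timestamp: &epoch,
    }
    _ = options
}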
26
vendor/github.com/containers/buildah/imagebuildah/executor.go
generated
vendored
@@ -24,6 +24,7 @@ import (
    encconfig "github.com/containers/ocicrypt/config"
    "github.com/containers/storage"
    "github.com/containers/storage/pkg/archive"
    digest "github.com/opencontainers/go-digest"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/opencontainers/runc/libcontainer/configs"
    "github.com/openshift/imagebuilder"
@@ -100,7 +101,7 @@ type Executor struct {
    devices []configs.Device
    signBy string
    architecture string
    omitTimestamp bool
    timestamp *time.Time
    os string
    maxPullPushRetries int
    retryPullPushDelay time.Duration
@@ -110,6 +111,7 @@ type Executor struct {
    stagesLock sync.Mutex
    stagesSemaphore *semaphore.Weighted
    jobs int
    logRusage bool
}

// NewExecutor creates a new instance of the imagebuilder.Executor interface.
@@ -152,6 +154,11 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
        jobs = *options.Jobs
    }

    writer := options.ReportWriter
    if options.Quiet {
        writer = ioutil.Discard
    }

    exec := Executor{
        stages: make(map[string]*StageExecutor),
        store: store,
@@ -174,7 +181,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
        in: options.In,
        out: options.Out,
        err: options.Err,
        reportWriter: options.ReportWriter,
        reportWriter: writer,
        isolation: options.Isolation,
        namespaceOptions: options.NamespaceOptions,
        configureNetwork: options.ConfigureNetwork,
@@ -201,13 +208,14 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
        devices: devices,
        signBy: options.SignBy,
        architecture: options.Architecture,
        omitTimestamp: options.OmitTimestamp,
        timestamp: options.Timestamp,
        os: options.OS,
        maxPullPushRetries: options.MaxPullPushRetries,
        retryPullPushDelay: options.PullPushRetryDelay,
        ociDecryptConfig: options.OciDecryptConfig,
        terminatedStage: make(map[string]struct{}),
        jobs: jobs,
        logRusage: options.LogRusage,
    }
    if exec.err == nil {
        exec.err = os.Stderr
@@ -328,22 +336,22 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
    }
}

// getImageHistory returns the history of imageID.
func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.History, error) {
// getImageHistoryAndDiffIDs returns the history and diff IDs list of imageID.
func (b *Executor) getImageHistoryAndDiffIDs(ctx context.Context, imageID string) ([]v1.History, []digest.Digest, error) {
    imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
    if err != nil {
        return nil, errors.Wrapf(err, "error getting image reference %q", imageID)
        return nil, nil, errors.Wrapf(err, "error getting image reference %q", imageID)
    }
    ref, err := imageRef.NewImage(ctx, nil)
    if err != nil {
        return nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
        return nil, nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
    }
    defer ref.Close()
    oci, err := ref.OCIConfig(ctx)
    if err != nil {
        return nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
        return nil, nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
    }
    return oci.History, nil
    return oci.History, oci.RootFS.DiffIDs, nil
}

func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) {
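The Quiet handling added above amounts to a one-time writer swap rather than per-call-site checks; a small self-contained sketch of the same design choice (the names here are illustrative, not from the source). Routing output to a sink once keeps every later logging call unconditional.

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
)

func reportWriter(quiet bool) io.Writer {
    if quiet {
        // Progress output is sent to a sink instead of being
        // suppressed at every print site.
        return ioutil.Discard
    }
    return os.Stdout
}

func main() {
    fmt.Fprintln(reportWriter(false), "STEP 1: FROM alpine")
    fmt.Fprintln(reportWriter(true), "this line goes nowhere")
}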
550
vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
generated
vendored
@ -12,8 +12,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containers/buildah"
|
||||
"github.com/containers/buildah/copier"
|
||||
buildahdocker "github.com/containers/buildah/docker"
|
||||
"github.com/containers/buildah/pkg/chrootuser"
|
||||
"github.com/containers/buildah/pkg/rusage"
|
||||
"github.com/containers/buildah/util"
|
||||
cp "github.com/containers/image/v5/copy"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
@ -23,8 +24,8 @@ import (
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
securejoin "github.com/cyphar/filepath-securejoin"
|
||||
docker "github.com/fsouza/go-dockerclient"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/openshift/imagebuilder"
|
||||
"github.com/openshift/imagebuilder/dockerfile/parser"
|
||||
@ -55,7 +56,6 @@ type StageExecutor struct {
|
||||
volumeCache map[string]string
|
||||
volumeCacheInfo map[string]os.FileInfo
|
||||
mountPoint string
|
||||
copyFrom string // Used to keep track of the --from flag from COPY and ADD
|
||||
output string
|
||||
containerIDs []string
|
||||
stage *imagebuilder.Stage
|
||||
@ -258,166 +258,11 @@ func (s *StageExecutor) volumeCacheRestore() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// digestSpecifiedContent digests any content that this next instruction would add to
|
||||
// the image, returning the digester if there is any, or nil otherwise. We
|
||||
// don't care about the details of where in the filesystem the content actually
|
||||
// goes, because we're not actually going to add it here, so this is less
|
||||
// involved than Copy().
|
||||
func (s *StageExecutor) digestSpecifiedContent(ctx context.Context, node *parser.Node, argValues []string, envValues []string) (string, error) {
|
||||
// No instruction: done.
|
||||
if node == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Not adding content: done.
|
||||
switch strings.ToUpper(node.Value) {
|
||||
default:
|
||||
return "", nil
|
||||
case "ADD", "COPY":
|
||||
}
|
||||
|
||||
// Pull out everything except the first node (the instruction) and the
|
||||
// last node (the destination).
|
||||
var srcs []string
|
||||
destination := node
|
||||
for destination.Next != nil {
|
||||
destination = destination.Next
|
||||
if destination.Next != nil {
|
||||
srcs = append(srcs, destination.Value)
|
||||
}
|
||||
}
|
||||
|
||||
var sources []string
|
||||
var idMappingOptions *buildah.IDMappingOptions
|
||||
contextDir := s.executor.contextDir
|
||||
for _, flag := range node.Flags {
|
||||
if strings.HasPrefix(flag, "--from=") {
|
||||
// Flag says to read the content from another
|
||||
// container. Update the ID mappings and
|
||||
// all-content-comes-from-below-this-directory value.
|
||||
from := strings.TrimPrefix(flag, "--from=")
|
||||
|
||||
// If from has an argument within it, resolve it to its
|
||||
// value. Otherwise just return the value found.
|
||||
var fromErr error
|
||||
from, fromErr = imagebuilder.ProcessWord(from, s.stage.Builder.Arguments())
|
||||
if fromErr != nil {
|
||||
return "", errors.Wrapf(fromErr, "unable to resolve argument %q", from)
|
||||
}
|
||||
if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
|
||||
return "", err
|
||||
}
|
||||
if other, ok := s.executor.stages[from]; ok && other.index < s.index {
|
||||
contextDir = other.mountPoint
|
||||
idMappingOptions = &other.builder.IDMappingOptions
|
||||
} else if builder, ok := s.executor.containerMap[from]; ok {
|
||||
contextDir = builder.MountPoint
|
||||
idMappingOptions = &builder.IDMappingOptions
|
||||
} else {
|
||||
return "", errors.Errorf("the stage %q has not been built", from)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
varValues := append(argValues, envValues...)
|
||||
for _, src := range srcs {
|
||||
// If src has an argument within it, resolve it to its
|
||||
// value. Otherwise just return the value found.
|
||||
name, err := imagebuilder.ProcessWord(src, varValues)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "unable to resolve source %q", src)
|
||||
}
|
||||
src = name
|
||||
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
|
||||
// Source is a URL. TODO: cache this content
|
||||
// somewhere, so that we can avoid pulling it down
|
||||
// again if we end up needing to drop it into the
|
||||
// filesystem.
|
||||
sources = append(sources, src)
|
||||
} else {
|
||||
// Source is not a URL, so it's a location relative to
|
||||
// the all-content-comes-from-below-this-directory
|
||||
// directory. Also raise an error if the src escapes
|
||||
// the context directory.
|
||||
contextSrc, err := securejoin.SecureJoin(contextDir, src)
|
||||
if err == nil && strings.HasPrefix(src, "../") {
|
||||
err = errors.New("escaping context directory error")
|
||||
}
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
|
||||
}
|
||||
sources = append(sources, contextSrc)
|
||||
}
|
||||
}
|
||||
// If the all-content-comes-from-below-this-directory is the build
|
||||
// context, read its .dockerignore.
|
||||
var excludes []string
|
||||
if contextDir == s.executor.contextDir {
|
||||
var err error
|
||||
if excludes, err = imagebuilder.ParseDockerignore(contextDir); err != nil {
|
||||
return "", errors.Wrapf(err, "error parsing .dockerignore in %s", contextDir)
|
||||
}
|
||||
}
|
||||
// Restart the digester and have it do a dry-run copy to compute the
|
||||
// digest information.
|
||||
options := buildah.AddAndCopyOptions{
|
||||
Excludes: excludes,
|
||||
ContextDir: contextDir,
|
||||
IDMappingOptions: idMappingOptions,
|
||||
DryRun: true,
|
||||
}
|
||||
s.builder.ContentDigester.Restart()
|
||||
download := strings.ToUpper(node.Value) == "ADD"
|
||||
|
||||
// If destination.Value has an argument within it, resolve it to its
|
||||
// value. Otherwise just return the value found.
|
||||
destValue, destErr := imagebuilder.ProcessWord(destination.Value, varValues)
|
||||
if destErr != nil {
|
||||
return "", errors.Wrapf(destErr, "unable to resolve destination %q", destination.Value)
|
||||
}
|
||||
err := s.builder.Add(destValue, download, options, sources...)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error dry-running %q", node.Original)
|
||||
}
|
||||
// Return the formatted version of the digester's result.
|
||||
contentDigest := ""
|
||||
prefix, digest := s.builder.ContentDigester.Digest()
|
||||
if prefix != "" {
|
||||
prefix += ":"
|
||||
}
|
||||
if digest.Validate() == nil {
|
||||
contentDigest = prefix + digest.Encoded()
|
||||
}
|
||||
return contentDigest, nil
|
||||
}
|
||||
|
||||
// Copy copies data into the working tree. The "Download" field is how
|
||||
// imagebuilder tells us the instruction was "ADD" and not "COPY".
|
||||
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
|
||||
s.builder.ContentDigester.Restart()
|
||||
for _, copy := range copies {
|
||||
// Check the file and see if part of it is a symlink.
|
||||
// Convert it to the target if so. To be ultrasafe
|
||||
// do the same for the mountpoint.
|
||||
hadFinalPathSeparator := len(copy.Dest) > 0 && copy.Dest[len(copy.Dest)-1] == os.PathSeparator
|
||||
secureMountPoint, err := securejoin.SecureJoin("", s.mountPoint)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
|
||||
}
|
||||
finalPath, err := securejoin.SecureJoin(secureMountPoint, copy.Dest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
|
||||
}
|
||||
if !strings.HasPrefix(finalPath, secureMountPoint) {
|
||||
return errors.Wrapf(err, "error resolving copy destination %s", copy.Dest)
|
||||
}
|
||||
copy.Dest = strings.TrimPrefix(finalPath, secureMountPoint)
|
||||
if len(copy.Dest) == 0 || copy.Dest[len(copy.Dest)-1] != os.PathSeparator {
|
||||
if hadFinalPathSeparator {
|
||||
copy.Dest += string(os.PathSeparator)
|
||||
}
|
||||
}
|
||||
|
||||
if copy.Download {
|
||||
logrus.Debugf("ADD %#v, %#v", excludes, copy)
|
||||
} else {
|
||||
@ -432,12 +277,21 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
||||
// all-content-comes-from-below-this-directory value.
|
||||
var idMappingOptions *buildah.IDMappingOptions
|
||||
var copyExcludes []string
|
||||
stripSetuid := false
|
||||
stripSetgid := false
|
||||
preserveOwnership := false
|
||||
contextDir := s.executor.contextDir
|
||||
if len(copy.From) > 0 {
|
||||
if isStage, err := s.executor.waitForStage(s.ctx, copy.From, s.stages[:s.index]); isStage && err != nil {
|
||||
// If from has an argument within it, resolve it to its
|
||||
// value. Otherwise just return the value found.
|
||||
from, fromErr := imagebuilder.ProcessWord(copy.From, s.stage.Builder.Arguments())
|
||||
if fromErr != nil {
|
||||
return errors.Wrapf(fromErr, "unable to resolve argument %q", copy.From)
|
||||
}
|
||||
if isStage, err := s.executor.waitForStage(s.ctx, from, s.stages[:s.index]); isStage && err != nil {
|
||||
return err
|
||||
}
|
||||
if other, ok := s.executor.stages[copy.From]; ok && other.index < s.index {
|
||||
if other, ok := s.executor.stages[from]; ok && other.index < s.index {
|
||||
contextDir = other.mountPoint
|
||||
idMappingOptions = &other.builder.IDMappingOptions
|
||||
} else if builder, ok := s.executor.containerMap[copy.From]; ok {
|
||||
@ -446,9 +300,12 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
||||
} else {
|
||||
return errors.Errorf("the stage %q has not been built", copy.From)
|
||||
}
|
||||
preserveOwnership = true
|
||||
copyExcludes = excludes
|
||||
} else {
|
||||
copyExcludes = append(s.executor.excludes, excludes...)
|
||||
stripSetuid = true // did this change between 18.06 and 19.03?
|
||||
stripSetgid = true // did this change between 18.06 and 19.03?
|
||||
}
|
||||
for _, src := range copy.Src {
|
||||
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
|
||||
@ -460,53 +317,20 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
|
||||
return errors.Errorf("source can't be a URL for COPY")
|
||||
}
|
||||
} else {
|
||||
// Treat the source, which is not a URL, as a
|
||||
// location relative to the
|
||||
// all-content-comes-from-below-this-directory
|
||||
// directory. Also raise an error if the src
|
||||
// escapes the context directory.
|
||||
srcSecure, err := securejoin.SecureJoin(contextDir, src)
|
||||
if err == nil && strings.HasPrefix(src, "../") {
|
||||
err = errors.New("escaping context directory error")
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "forbidden path for %q, it is outside of the build context %q", src, contextDir)
|
||||
}
|
||||
if hadFinalPathSeparator {
|
||||
// If destination is a folder, we need to take extra care to
|
||||
// ensure that files are copied with correct names (since
|
||||
// resolving a symlink may result in a different name).
|
||||
_, srcName := filepath.Split(src)
|
||||
_, srcNameSecure := filepath.Split(srcSecure)
|
||||
if srcName != srcNameSecure {
|
||||
options := buildah.AddAndCopyOptions{
|
||||
Chown: copy.Chown,
|
||||
ContextDir: contextDir,
|
||||
Excludes: copyExcludes,
|
||||
IDMappingOptions: idMappingOptions,
|
||||
}
|
||||
// If we've a tar file, it will create a directory using the name of the tar
|
||||
// file if we don't blank it out.
|
||||
if strings.HasSuffix(srcName, ".tar") || strings.HasSuffix(srcName, ".gz") {
|
||||
srcName = ""
|
||||
}
|
||||
if err := s.builder.Add(filepath.Join(copy.Dest, srcName), copy.Download, options, srcSecure); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
sources = append(sources, srcSecure)
|
||||
sources = append(sources, filepath.Join(contextDir, src))
|
||||
}
|
||||
}
|
||||
options := buildah.AddAndCopyOptions{
|
||||
Chown: copy.Chown,
|
||||
ContextDir: contextDir,
|
||||
Excludes: copyExcludes,
|
||||
IDMappingOptions: idMappingOptions,
|
||||
Chown: copy.Chown,
|
||||
PreserveOwnership: preserveOwnership,
|
||||
ContextDir: contextDir,
|
||||
Excludes: copyExcludes,
|
||||
IDMappingOptions: idMappingOptions,
|
||||
StripSetuidBit: stripSetuid,
|
||||
StripSetgidBit: stripSetgid,
|
||||
}
|
||||
if err := s.builder.Add(copy.Dest, copy.Download, options, sources...); err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "error adding sources %v", sources)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -767,6 +591,7 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, image string) (mount
|
||||
|
||||
// Execute runs each of the steps in the stage's parsed tree, in turn.
|
||||
func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, ref reference.Canonical, err error) {
|
||||
var resourceUsage rusage.Rusage
|
||||
stage := s.stage
|
||||
ib := stage.Builder
|
||||
checkForLayers := s.executor.layers && s.executor.useCache
|
||||
@ -789,6 +614,30 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
}
|
||||
s.executor.stagesLock.Unlock()
|
||||
|
||||
// Set things up so that we can log resource usage as we go.
|
||||
logRusage := func() {
|
||||
if rusage.Supported() {
|
||||
usage, err := rusage.Get()
|
||||
if err != nil {
|
||||
fmt.Fprintf(s.executor.out, "error gathering resource usage information: %v\n", err)
|
||||
return
|
||||
}
|
||||
if !s.executor.quiet && s.executor.logRusage {
|
||||
fmt.Fprintf(s.executor.out, "%s\n", rusage.FormatDiff(usage.Subtract(resourceUsage)))
|
||||
}
|
||||
resourceUsage = usage
|
||||
}
|
||||
}
|
||||
|
||||
// Start counting resource usage before we potentially pull a base image.
|
||||
if rusage.Supported() {
|
||||
if resourceUsage, err = rusage.Get(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
// Log the final incremental resource usage counter before we return.
|
||||
defer logRusage()
|
||||
}
|
||||
|
||||
// Create the (first) working container for this stage. Reinitializing
|
||||
// the imagebuilder configuration may alter the list of steps we have,
|
||||
// so take a snapshot of them *after* that.
|
||||
@ -824,7 +673,6 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
imgID = imgID[0:11]
|
||||
}
|
||||
if s.executor.iidfile == "" {
|
||||
|
||||
fmt.Fprintf(s.executor.out, "--> %s\n", imgID)
|
||||
}
|
||||
}
|
||||
@ -859,6 +707,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
}
|
||||
|
||||
for i, node := range children {
|
||||
logRusage()
|
||||
moreInstructions := i < len(children)-1
|
||||
lastInstruction := !moreInstructions
|
||||
// Resolve any arguments in this instruction.
|
||||
@ -871,11 +720,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
s.executor.log("%s", step.Original)
|
||||
}
|
||||
|
||||
// Check if there's a --from if the step command is COPY or
|
||||
// ADD. Set copyFrom to point to either the context directory
|
||||
// or the root of the container from the specified stage.
|
||||
// Check if there's a --from if the step command is COPY.
|
||||
// Also check the chown flag for validity.
|
||||
s.copyFrom = s.executor.contextDir
|
||||
for _, flag := range step.Flags {
|
||||
command := strings.ToUpper(step.Command)
|
||||
// chown and from flags should have an '=' sign, '--chown=' or '--from='
|
||||
@ -886,31 +732,27 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
return "", nil, errors.Errorf("ADD only supports the --chown=<uid:gid> flag")
|
||||
}
|
||||
if strings.Contains(flag, "--from") && command == "COPY" {
|
||||
var mountPoint string
|
||||
arr := strings.Split(flag, "=")
|
||||
if len(arr) != 2 {
|
||||
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
|
||||
}
|
||||
// If arr[1] has an argument within it, resolve it to its
|
||||
// value. Otherwise just return the value found.
|
||||
from, fromErr := imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
|
||||
if fromErr != nil {
|
||||
return "", nil, errors.Wrapf(fromErr, "unable to resolve argument %q", arr[1])
|
||||
}
|
||||
// If the source's name corresponds to the
|
||||
// result of an earlier stage, wait for that
|
||||
// stage to finish being built.
|
||||
|
||||
// If arr[1] has an argument within it, resolve it to its
|
||||
// value. Otherwise just return the value found.
|
||||
var arr1Err error
|
||||
arr[1], arr1Err = imagebuilder.ProcessWord(arr[1], s.stage.Builder.Arguments())
|
||||
if arr1Err != nil {
|
||||
return "", nil, errors.Wrapf(arr1Err, "unable to resolve argument %q", arr[1])
|
||||
}
|
||||
if isStage, err := s.executor.waitForStage(ctx, arr[1], s.stages[:s.index]); isStage && err != nil {
|
||||
if isStage, err := s.executor.waitForStage(ctx, from, s.stages[:s.index]); isStage && err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
|
||||
mountPoint = otherStage.mountPoint
|
||||
} else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
|
||||
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
|
||||
if otherStage, ok := s.executor.stages[from]; ok && otherStage.index < s.index {
|
||||
break
|
||||
} else if _, err = s.getImageRootfs(ctx, from); err != nil {
|
||||
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, from)
|
||||
}
|
||||
s.copyFrom = mountPoint
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -933,9 +775,14 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
|
||||
}
|
||||
// In case we added content, retrieve its digest.
|
||||
addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
|
||||
addedContentSummary := addedContentType
|
||||
if addedContentDigest != "" {
|
||||
if addedContentSummary != "" {
|
||||
addedContentSummary = addedContentSummary + ":"
|
||||
}
|
||||
addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
|
||||
logrus.Debugf("added content %s", addedContentSummary)
|
||||
}
|
||||
if moreInstructions {
|
||||
// There are still more instructions to process
|
||||
@ -943,16 +790,17 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
// instruction in the history that we'll write
|
||||
// for the image when we eventually commit it.
|
||||
now := time.Now()
|
||||
s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentDigest), "", "")
|
||||
s.builder.AddPrependedEmptyLayer(&now, s.getCreatedBy(node, addedContentSummary), "", "")
|
||||
continue
|
||||
} else {
|
||||
// This is the last instruction for this stage,
|
||||
// so we should commit this container to create
|
||||
// an image, but only if it's the last one, or
|
||||
// if it's used as the basis for a later stage.
|
||||
// an image, but only if it's the last stage,
|
||||
// or if it's used as the basis for a later
|
||||
// stage.
|
||||
if lastStage || imageIsUsedLater {
|
||||
logCommit(s.output, i)
|
||||
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), false, s.output)
|
||||
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
|
||||
}
|
||||
@ -966,10 +814,11 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
|
||||
// We're in a multi-layered build.
|
||||
var (
|
||||
commitName string
|
||||
cacheID string
|
||||
err error
|
||||
rebase bool
|
||||
commitName string
|
||||
cacheID string
|
||||
err error
|
||||
rebase bool
|
||||
addedContentSummary string
|
||||
)
|
||||
|
||||
// If we have to commit for this instruction, only assign the
|
||||
@ -978,46 +827,47 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
commitName = s.output
|
||||
}
|
||||
|
||||
// If we're using the cache, and we've managed to stick with
|
||||
// cached images so far, look for one that matches what we
|
||||
// expect to produce for this instruction.
|
||||
if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
|
||||
addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
cacheID, err = s.intermediateImageExists(ctx, node, addedContentDigest)
|
||||
// Check if there's already an image based on our parent that
|
||||
// has the same change that we're about to make, so far as we
|
||||
// can tell.
|
||||
if checkForLayers {
|
||||
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
|
||||
}
|
||||
if cacheID != "" {
|
||||
// Note the cache hit.
|
||||
logCacheHit(cacheID)
|
||||
} else {
|
||||
// We're not going to find any more cache hits.
|
||||
checkForLayers = false
|
||||
}
|
||||
}
|
||||
|
||||
if cacheID != "" {
|
||||
// A suitable cached image was found, so just reuse it.
|
||||
// If we need to name the resulting image because it's
|
||||
// the last step in this stage, add the name to the
|
||||
// image.
|
||||
imgID = cacheID
|
||||
if commitName != "" {
|
||||
logCommit(commitName, i)
|
||||
if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
logImageID(imgID)
|
||||
// If we didn't find a cache entry, or we need to add content
|
||||
// to find the digest of the content to check for a cached
|
||||
// image, run the step so that we can check if the result
|
||||
// matches a cache.
|
||||
if cacheID == "" {
|
||||
// Process the instruction directly.
|
||||
if err = ib.Run(step, s, noRunsRemaining); err != nil {
|
||||
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
|
||||
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
|
||||
}
|
||||
// Update our working container to be based off of the
|
||||
// cached image, if we might need to use it as a basis
|
||||
// for the next instruction, or if we need the root
|
||||
// filesystem to match the image contents for the sake
|
||||
// of a later stage that wants to copy content from it.
|
||||
rebase = moreInstructions || rootfsIsUsedLater
|
||||
|
||||
// In case we added content, retrieve its digest.
|
||||
addedContentType, addedContentDigest := s.builder.ContentDigester.Digest()
|
||||
addedContentSummary = addedContentType
|
||||
if addedContentDigest != "" {
|
||||
if addedContentSummary != "" {
|
||||
addedContentSummary = addedContentSummary + ":"
|
||||
}
|
||||
addedContentSummary = addedContentSummary + addedContentDigest.Encoded()
|
||||
logrus.Debugf("added content %s", addedContentSummary)
|
||||
}
|
||||
|
||||
// Check if there's already an image based on our parent that
|
||||
// has the same change that we just made.
|
||||
if checkForLayers {
|
||||
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If the instruction would affect our configuration,
|
||||
// process the configuration change so that, if we fall
|
||||
// off the cache path, the filesystem changes from the
|
||||
@ -1031,34 +881,41 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
|
||||
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cacheID != "" && !(s.executor.squash && lastInstruction) {
|
||||
logCacheHit(cacheID)
|
||||
// A suitable cached image was found, so we can just
|
||||
// reuse it. If we need to add a name to the resulting
|
||||
// image because it's the last step in this stage, add
|
||||
// the name to the image.
|
||||
imgID = cacheID
|
||||
if commitName != "" {
|
||||
logCommit(commitName, i)
|
||||
if imgID, ref, err = s.tagExistingImage(ctx, cacheID, commitName); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If we didn't find a cached image that we could just reuse,
|
||||
// process the instruction directly.
|
||||
err := ib.Run(step, s, noRunsRemaining)
|
||||
if err != nil {
|
||||
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
|
||||
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
|
||||
}
|
||||
// In case we added content, retrieve its digest.
|
||||
addedContentDigest, err := s.digestSpecifiedContent(ctx, node, ib.Arguments(), ib.Config().Env)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
// Create a new image, maybe with a new layer.
|
||||
// We're not going to find any more cache hits, so we
|
||||
// can stop looking for them.
|
||||
checkForLayers = false
|
||||
// Create a new image, maybe with a new layer, with the
|
||||
// name for this stage if it's the last instruction.
|
||||
logCommit(s.output, i)
|
||||
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentDigest), !s.stepRequiresLayer(step), commitName)
|
||||
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName)
|
||||
if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
}
logImageID(imgID)
// We only need to build a new container rootfs
// using this image if we plan on making
// further changes to it. Subsequent stages
// that just want to use the rootfs as a source
// for COPY or ADD will be content with what we
// already have.
rebase = moreInstructions
}
logImageID(imgID)

// Update our working container to be based off of the cached
// image, if we might need to use it as a basis for the next
// instruction, or if we need the root filesystem to match the
// image contents for the sake of a later stage that wants to
// copy content from it.
rebase = moreInstructions || rootfsIsUsedLater

if rebase {
// Since we either committed the working container or
@ -1105,29 +962,58 @@ func historyEntriesEqual(base, derived v1.History) bool {
return true
}

// historyMatches returns true if a candidate history matches the history of our
// base image (if we have one), plus the current instruction.
// historyAndDiffIDsMatch returns true if a candidate history matches the
// history of our base image (if we have one), plus the current instruction,
// and if the list of diff IDs for the images do for the part of the history
// that we're comparing.
// Used to verify whether a cache of the intermediate image exists and whether
// to run the build again.
func (s *StageExecutor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History, addedContentDigest string) bool {
if len(baseHistory) >= len(history) {
return false
}
if len(history)-len(baseHistory) != 1 {
func (s *StageExecutor) historyAndDiffIDsMatch(baseHistory []v1.History, baseDiffIDs []digest.Digest, child *parser.Node, history []v1.History, diffIDs []digest.Digest, addedContentSummary string, buildAddsLayer bool) bool {
// our history should be as long as the base's, plus one entry for what
// we're doing
if len(history) != len(baseHistory)+1 {
return false
}
// check that each entry in the base history corresponds to an entry in
// our history, and count how many of them add a layer diff
expectedDiffIDs := 0
for i := range baseHistory {
if !historyEntriesEqual(baseHistory[i], history[i]) {
return false
}
if !baseHistory[i].EmptyLayer {
expectedDiffIDs++
}
}
return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentDigest)
if len(baseDiffIDs) != expectedDiffIDs {
return false
}
if buildAddsLayer {
// we're adding a layer, so we should have exactly one more
// layer than the base image
if len(diffIDs) != expectedDiffIDs+1 {
return false
}
} else {
// we're not adding a layer, so we should have exactly the same
// layers as the base image
if len(diffIDs) != expectedDiffIDs {
return false
}
}
// compare the diffs for the layers that we should have in common
for i := range baseDiffIDs {
if diffIDs[i] != baseDiffIDs[i] {
return false
}
}
return history[len(baseHistory)].CreatedBy == s.getCreatedBy(child, addedContentSummary)
}

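The layer accounting above is the heart of the new cache check: every base-history entry that is not EmptyLayer contributes one diff ID, and the step being built contributes one more only when it adds a layer. A minimal runnable sketch of that rule, assuming only the v1.History type from image-spec (the helper name expectedLayerCount and the sample entries are illustrative, not buildah API):

package main

import (
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// expectedLayerCount mirrors the bookkeeping in historyAndDiffIDsMatch:
// each non-EmptyLayer history entry accounts for one layer diff ID, and
// the current step accounts for one more only if it adds a layer.
func expectedLayerCount(baseHistory []v1.History, buildAddsLayer bool) int {
	count := 0
	for _, h := range baseHistory {
		if !h.EmptyLayer {
			count++
		}
	}
	if buildAddsLayer {
		count++
	}
	return count
}

func main() {
	base := []v1.History{
		{CreatedBy: "/bin/sh -c #(nop) ADD file:abc in /", EmptyLayer: false},
		{CreatedBy: "/bin/sh -c #(nop) ENV FOO=bar", EmptyLayer: true}, // config-only, no layer
	}
	fmt.Println(expectedLayerCount(base, true))  // a RUN step: expect 2 diff IDs
	fmt.Println(expectedLayerCount(base, false)) // a LABEL step: expect 1 diff ID
}
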
// getCreatedBy returns the command the image at node will be created by. If
// the passed-in CompositeDigester is not nil, it is assumed to have the digest
// information for the content if the node is ADD or COPY.
func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest string) string {
func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary string) string {
if node == nil {
return "/bin/sh"
}
@ -1143,7 +1029,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentDigest strin
for destination.Next != nil {
destination = destination.Next
}
return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentDigest + " in " + destination.Value + " "
return "/bin/sh -c #(nop) " + strings.ToUpper(node.Value) + " " + addedContentSummary + " in " + destination.Value + " "
default:
return "/bin/sh -c #(nop) " + node.Original
}
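Since getCreatedBy feeds the history comparison above, the exact shape of the string matters: for ADD and COPY it folds the content summary and the destination into the synthetic command, so a cache hit requires the copied content to match, not just the instruction text. A small sketch of the string it produces (the digest summary and destination are made up for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	nodeValue := "copy"                   // the Dockerfile instruction
	addedContentSummary := "file:4f2a..." // hypothetical content digest summary
	destination := "/app"
	createdBy := "/bin/sh -c #(nop) " + strings.ToUpper(nodeValue) + " " + addedContentSummary + " in " + destination + " "
	fmt.Printf("%q\n", createdBy) // "/bin/sh -c #(nop) COPY file:4f2a... in /app "
}
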
@ -1212,40 +1098,54 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st

// intermediateImageExists returns true if an intermediate image of currNode exists in the image store from a previous build.
// It verifies this by checking the parent of the top layer of the image and the history.
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string) (string, error) {
func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *parser.Node, addedContentDigest string, buildAddsLayer bool) (string, error) {
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
var baseHistory []v1.History
var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
baseHistory, err = s.executor.getImageHistory(ctx, s.builder.FromImageID)
baseHistory, baseDiffIDs, err = s.executor.getImageHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
}
}
for _, image := range images {
var imageTopLayer *storage.Layer
var imageParentLayerID string
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
return "", errors.Wrapf(err, "error getting top layer info")
}
// Figure out which layer from this image we should
// compare our container's base layer to.
imageParentLayerID = imageTopLayer.ID
// If we haven't added a layer here, then our base
// layer should be the same as the image's layer. If
// we did add a layer, then our base layer should be the
// same as the parent of the image's layer.
if buildAddsLayer {
imageParentLayerID = imageTopLayer.Parent
}
}
// If the parent of the top layer of an image is equal to the current build image's top layer,
// it means that this image is potentially a cached intermediate image from a previous
// build. Next we double check that the history of this image is equivalent to the previous
// build.
if s.builder.TopLayer != imageParentLayerID {
continue
}
// Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
if imageTopLayer == nil || (s.builder.TopLayer != "" && (imageTopLayer.Parent == s.builder.TopLayer || imageTopLayer.ID == s.builder.TopLayer)) {
history, err := s.executor.getImageHistory(ctx, image.ID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID)
}
// children + currNode is the point of the Dockerfile we are currently at.
if s.historyMatches(baseHistory, currNode, history, addedContentDigest) {
return image.ID, nil
}
history, diffIDs, err := s.executor.getImageHistoryAndDiffIDs(ctx, image.ID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID)
}
// children + currNode is the point of the Dockerfile we are currently at.
if s.historyAndDiffIDsMatch(baseHistory, baseDiffIDs, currNode, history, diffIDs, addedContentDigest, buildAddsLayer) {
return image.ID, nil
}
}
return "", nil
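The parent-layer selection above can be read as a two-case rule. A minimal sketch (the helper name layerToCompare and the layer IDs are hypothetical):

package main

import "fmt"

// layerToCompare picks which layer of a candidate cached image should
// equal our container's top layer: the image's own top layer when the
// step adds no layer, or that layer's parent when the step adds one.
func layerToCompare(topLayerID, topLayerParentID string, buildAddsLayer bool) string {
	if buildAddsLayer {
		return topLayerParentID
	}
	return topLayerID
}

func main() {
	fmt.Println(layerToCompare("layer-b", "layer-a", true))  // layer-a
	fmt.Println(layerToCompare("layer-b", "layer-a", false)) // layer-b
}
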
@ -1355,7 +1255,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
SignBy: s.executor.signBy,
MaxRetries: s.executor.maxPullPushRetries,
RetryDelay: s.executor.retryPullPushDelay,
OmitTimestamp: s.executor.omitTimestamp,
HistoryTimestamp: s.executor.timestamp,
}
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil {
@ -1373,29 +1273,5 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}

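The OmitTimestamp-to-HistoryTimestamp switch mirrors the new --timestamp flag: instead of a boolean that forced epoch 0, the commit options now carry an explicit time. A minimal sketch of how a caller might pin history timestamps for reproducible builds, assuming the CommitOptions field as vendored here:

package main

import (
	"time"

	"github.com/containers/buildah"
)

func main() {
	// Pin every history entry to the Unix epoch, the behavior the old
	// boolean --omit-timestamp used to force.
	epoch := time.Unix(0, 0).UTC()
	options := buildah.CommitOptions{
		HistoryTimestamp: &epoch,
	}
	_ = options // pass to Builder.Commit along with an image reference
}
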
func (s *StageExecutor) EnsureContainerPath(path string) error {
targetPath, err := securejoin.SecureJoin(s.mountPoint, path)
if err != nil {
return errors.Wrapf(err, "error ensuring container path %q", path)
}

_, err = os.Stat(targetPath)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(targetPath, 0755)
if err != nil {
return errors.Wrapf(err, "error creating directory path %q", targetPath)
}
// get the uid and gid so that we can set the correct permissions on the
// working directory
uid, gid, _, err := chrootuser.GetUser(s.mountPoint, s.builder.User())
if err != nil {
return errors.Wrapf(err, "error getting uid and gid for user %q", s.builder.User())
}
if err = os.Chown(targetPath, int(uid), int(gid)); err != nil {
return errors.Wrapf(err, "error setting ownership on %q", targetPath)
}
}
if err != nil {
return errors.Wrapf(err, "error ensuring container path %q", path)
}
return nil
return copier.Mkdir(s.mountPoint, path, copier.MkdirOptions{})
}

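The rewritten EnsureContainerPath delegates path resolution, creation, and ownership to copier.Mkdir, which resolves the path inside the rootfs so a symlink in it cannot escape the mount point. A minimal usage sketch (the mount point path is hypothetical; the zero-value MkdirOptions matches the call above):

package main

import (
	"fmt"

	"github.com/containers/buildah/copier"
)

func main() {
	mountPoint := "/var/tmp/container-root" // hypothetical rootfs mount
	// Create the working directory inside the rootfs, as the stage
	// executor does for WORKDIR.
	if err := copier.Mkdir(mountPoint, "/app/workdir", copier.MkdirOptions{}); err != nil {
		fmt.Println("error:", err)
	}
}
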
9
vendor/github.com/containers/buildah/pkg/cli/common.go
generated
vendored
@ -65,7 +65,7 @@ type BudResults struct {
Logfile string
Loglevel int
NoCache bool
OmitTimestamp bool
Timestamp int64
OS string
Platform string
Pull bool
@ -82,6 +82,7 @@ type BudResults struct {
Target string
TLSVerify bool
Jobs int
LogRusage bool
}

// FromAndBudResults represents the results for common flags
@ -164,7 +165,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)")
fs.BoolVar(&flags.OmitTimestamp, "omit-timestamp", false, "set created timestamp to epoch 0 to allow for deterministic builds")
fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.StringVar(&flags.OS, "os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringVar(&flags.Platform, "platform", parse.DefaultPlatform(), "set the OS/ARCH to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)")
fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present")
@ -181,6 +182,10 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
if err := fs.MarkHidden("log-rusage"); err != nil {
panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
}
return fs
}

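The --log-rusage flag shows the register-then-hide pattern for flags that should work but stay out of --help. A small self-contained sketch with spf13/pflag (the flag set name is arbitrary):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("bud", pflag.ContinueOnError)
	logRusage := fs.Bool("log-rusage", false, "log resource usage at each build step")
	if err := fs.MarkHidden("log-rusage"); err != nil {
		panic(err) // only fails if the flag name doesn't exist
	}
	_ = fs.Parse([]string{"--log-rusage"})
	fmt.Println(*logRusage) // true: hidden flags still parse normally
}
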
48
vendor/github.com/containers/buildah/pkg/rusage/rusage.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
package rusage

import (
"fmt"
"time"

units "github.com/docker/go-units"
)

// Rusage is a subset of a Unix-style resource usage counter for the current
// process and its children. The counters are always 0 on platforms where the
// system call is not available (i.e., systems where getrusage() doesn't
// exist).
type Rusage struct {
Date time.Time
Elapsed time.Duration
Utime, Stime time.Duration
Inblock, Outblock int64
}

// FormatDiff formats the result of rusage.Rusage.Subtract() for logging.
func FormatDiff(diff Rusage) string {
return fmt.Sprintf("%s(system) %s(user) %s(elapsed) %s input %s output", diff.Stime.Round(time.Millisecond), diff.Utime.Round(time.Millisecond), diff.Elapsed.Round(time.Millisecond), units.HumanSize(float64(diff.Inblock*512)), units.HumanSize(float64(diff.Outblock*512)))
}

// Subtract subtracts the items in baseline from r, and returns the difference.
// The Date field is zeroed for easier comparison with the zero value for the
// Rusage type.
func (r Rusage) Subtract(baseline Rusage) Rusage {
return Rusage{
Elapsed: r.Date.Sub(baseline.Date),
Utime: r.Utime - baseline.Utime,
Stime: r.Stime - baseline.Stime,
Inblock: r.Inblock - baseline.Inblock,
Outblock: r.Outblock - baseline.Outblock,
}
}

// Get returns the counters for the current process and its children,
// or an error. The Elapsed field will always be set to zero; it is
// only filled in by Subtract().
func Get() (Rusage, error) {
counters, err := get()
if err != nil {
return Rusage{}, err
}
return counters, nil
}
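Putting the new package together: the intended flow is sample, run work, sample again, then Subtract and FormatDiff. Note that RUSAGE_CHILDREN (used by the Unix get() below) only accumulates children that have been waited for. A minimal sketch:

package main

import (
	"fmt"
	"os/exec"

	"github.com/containers/buildah/pkg/rusage"
)

func main() {
	if !rusage.Supported() {
		return // Windows: get() always fails with ENOTSUP
	}
	before, err := rusage.Get()
	if err != nil {
		panic(err)
	}
	// Run and wait on a child process between the two samples.
	_ = exec.Command("sh", "-c", "true").Run()
	after, err := rusage.Get()
	if err != nil {
		panic(err)
	}
	// Subtract derives Elapsed from the two Date samples and zeroes Date.
	fmt.Println(rusage.FormatDiff(after.Subtract(before)))
}
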
35
vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
// +build !windows

package rusage

import (
"syscall"
"time"

"github.com/pkg/errors"
)

func mkduration(tv syscall.Timeval) time.Duration {
return time.Duration(tv.Sec)*time.Second + time.Duration(tv.Usec)*time.Microsecond
}

func get() (Rusage, error) {
var rusage syscall.Rusage
err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
if err != nil {
return Rusage{}, errors.Wrapf(err, "error getting resource usage")
}
r := Rusage{
Date: time.Now(),
Utime: mkduration(rusage.Utime),
Stime: mkduration(rusage.Stime),
Inblock: int64(rusage.Inblock), // nolint: unconvert
Outblock: int64(rusage.Oublock), // nolint: unconvert
}
return r, nil
}

// Supported returns true if resource usage counters are supported on this OS.
func Supported() bool {
return true
}

18
vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
// +build windows

package rusage

import (
"syscall"

"github.com/pkg/errors"
)

func get() (Rusage, error) {
return Rusage{}, errors.Wrapf(syscall.ENOTSUP, "error getting resource usage")
}

// Supported returns true if resource usage counters are supported on this OS.
func Supported() bool {
return false
}
2
vendor/github.com/containers/buildah/pull.go
generated
vendored
@ -280,7 +280,7 @@ func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageRefer
}()

logrus.Debugf("copying %q to %q", transports.ImageName(srcRef), destName)
if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, "pull", getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
if _, err := retryCopyImage(ctx, policyContext, maybeCachedDestRef, srcRef, srcRef, getCopyOptions(store, options.ReportWriter, sc, nil, "", options.RemoveSignatures, "", nil, nil, options.OciDecryptConfig), options.MaxRetries, options.RetryDelay); err != nil {
logrus.Debugf("error copying src image [%q] to dest image [%q] err: %v", transports.ImageName(srcRef), destName, err)
return nil, err
}

15
vendor/github.com/containers/buildah/run_linux.go
generated
vendored
@ -316,7 +316,7 @@ func addCommonOptsToSpec(commonOpts *CommonBuildOptions, g *generate.Generator)
return nil
}

func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWithTar func(srcPath, dstPath string) error, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtinVolumes []string, rootUID, rootGID int) ([]specs.Mount, error) {
var mounts []specs.Mount
hostOwner := idtools.IDPair{UID: rootUID, GID: rootGID}
// Add temporary copies of the contents of volume locations at the
@ -359,7 +359,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, copyWit
if err = os.Chown(volumePath, int(stat.Sys().(*syscall.Stat_t).Uid), int(stat.Sys().(*syscall.Stat_t).Gid)); err != nil {
return nil, errors.Wrapf(err, "error chowning directory %q for volume %q", volumePath, volume)
}
if err = copyWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !os.IsNotExist(errors.Cause(err)) {
return nil, errors.Wrapf(err, "error populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
}
}
@ -483,8 +483,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st

// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
copyWithTar := b.copyWithTar(nil, nil, nil, false)
builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, copyWithTar, builtinVolumes, int(rootUID), int(rootGID))
builtins, err := runSetupBuiltinVolumes(b.MountLabel, mountPoint, cdir, builtinVolumes, int(rootUID), int(rootGID))
if err != nil {
return err
}
@ -864,12 +863,12 @@ func runUsingRuntime(isolation Isolation, options RunOptions, configureNetwork b
stat := exec.Command(runtime, args...)
stat.Dir = bundlePath
stat.Stderr = os.Stderr
stateOutput, stateErr := stat.Output()
if stateErr != nil {
return 1, errors.Wrapf(stateErr, "error reading container state")
stateOutput, err := stat.Output()
if err != nil {
return 1, errors.Wrapf(err, "error reading container state (got output: %q)", string(stateOutput))
}
if err = json.Unmarshal(stateOutput, &state); err != nil {
return 1, errors.Wrapf(stateErr, "error parsing container state %q", string(stateOutput))
return 1, errors.Wrapf(err, "error parsing container state %q", string(stateOutput))
}
switch state.Status {
case "running":

2
vendor/github.com/containers/buildah/seccomp.go
generated
vendored
@ -5,9 +5,9 @@ package buildah
import (
"io/ioutil"

"github.com/containers/common/pkg/seccomp"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
seccomp "github.com/seccomp/containers-golang"
)

func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {

4
vendor/github.com/containers/buildah/selinux.go
generated
vendored
@ -7,6 +7,10 @@ import (
selinux "github.com/opencontainers/selinux/go-selinux"
)

func selinuxGetEnabled() bool {
return selinux.GetEnabled()
}

func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
if processLabel != "" && selinux.GetEnabled() {
g.SetProcessSelinuxLabel(processLabel)

4
vendor/github.com/containers/buildah/selinux_unsupported.go
generated
vendored
@ -6,5 +6,9 @@ import (
"github.com/opencontainers/runtime-tools/generate"
)

func selinuxGetEnabled() bool {
return false
}

func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
}

287
vendor/github.com/containers/buildah/util.go
generated
vendored
@ -1,26 +1,20 @@
package buildah

import (
"archive/tar"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"

"github.com/containers/buildah/util"
"github.com/containers/buildah/copier"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/reexec"
"github.com/containers/storage/pkg/system"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
rspec "github.com/opencontainers/runtime-spec/specs-go"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -109,245 +103,6 @@ func convertRuntimeIDMaps(UIDMap, GIDMap []rspec.LinuxIDMapping) ([]idtools.IDMa
return uidmap, gidmap
}

// copyFileWithTar returns a function which copies a single file from outside
// of any container, or another container, into our working container, mapping
// read permissions using the passed-in ID maps, writing using the container's
// ID mappings, possibly overridden using the passed-in chownOpts
func (b *Builder) copyFileWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
if tarIDMappingOptions == nil {
tarIDMappingOptions = &IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
}

var hardlinkChecker util.HardlinkChecker
return func(src, dest string) error {
var f *os.File

logrus.Debugf("copyFileWithTar(%s, %s)", src, dest)
fi, err := os.Lstat(src)
if err != nil {
return errors.Wrapf(err, "error reading attributes of %q", src)
}

sysfi, err := system.Lstat(src)
if err != nil {
return errors.Wrapf(err, "error reading attributes of %q", src)
}

hostUID := sysfi.UID()
hostGID := sysfi.GID()
containerUID, containerGID, err := util.GetContainerIDs(tarIDMappingOptions.UIDMap, tarIDMappingOptions.GIDMap, hostUID, hostGID)
if err != nil {
return errors.Wrapf(err, "error mapping owner IDs of %q: %d/%d", src, hostUID, hostGID)
}

hdr, err := tar.FileInfoHeader(fi, filepath.Base(src))
if err != nil {
return errors.Wrapf(err, "error generating tar header for: %q", src)
}
chrootedDest, err := filepath.Rel(b.MountPoint, dest)
if err != nil {
return errors.Wrapf(err, "error generating relative-to-chroot target name for %q", dest)
}
hdr.Name = chrootedDest
hdr.Uid = int(containerUID)
hdr.Gid = int(containerGID)

if fi.Mode().IsRegular() && hdr.Typeflag == tar.TypeReg {
if linkname := hardlinkChecker.Check(fi); linkname != "" {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = linkname
} else {
hardlinkChecker.Add(fi, chrootedDest)
f, err = os.Open(src)
if err != nil {
return errors.Wrapf(err, "error opening %q to copy its contents", src)
}
}
}

if fi.Mode()&os.ModeSymlink == os.ModeSymlink && hdr.Typeflag == tar.TypeSymlink {
hdr.Typeflag = tar.TypeSymlink
linkName, err := os.Readlink(src)
if err != nil {
return errors.Wrapf(err, "error reading destination from symlink %q", src)
}
hdr.Linkname = linkName
}

pipeReader, pipeWriter := io.Pipe()
writer := tar.NewWriter(pipeWriter)
var copyErr error
go func(srcFile *os.File) {
err := writer.WriteHeader(hdr)
if err != nil {
logrus.Debugf("error writing header for %s: %v", srcFile.Name(), err)
copyErr = err
}
if srcFile != nil {
n, err := pools.Copy(writer, srcFile)
if n != hdr.Size {
logrus.Debugf("expected to write %d bytes for %s, wrote %d instead", hdr.Size, srcFile.Name(), n)
}
if err != nil {
logrus.Debugf("error copying contents of %s: %v", fi.Name(), err)
copyErr = err
}
if err = srcFile.Close(); err != nil {
logrus.Debugf("error closing %s: %v", fi.Name(), err)
}
}
if err = writer.Close(); err != nil {
logrus.Debugf("error closing write pipe for %s: %v", hdr.Name, err)
}
pipeWriter.Close()
pipeWriter = nil
}(f)

untar := b.untar(chownOpts, hasher, dryRun)
err = untar(pipeReader, b.MountPoint)
if err == nil {
err = copyErr
}
if pipeWriter != nil {
pipeWriter.Close()
}
return err
}
}

// copyWithTar returns a function which copies a directory tree from outside of
// our container or from another container, into our working container, mapping
// permissions at read-time using the container's ID maps, with ownership at
// write-time possibly overridden using the passed-in chownOpts
func (b *Builder) copyWithTar(tarIDMappingOptions *IDMappingOptions, chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
tar := b.tarPath(tarIDMappingOptions)
return func(src, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
untar := b.untar(chownOpts, thisHasher, dryRun)
rc, err := tar(src)
if err != nil {
return errors.Wrapf(err, "error archiving %q for copy", src)
}
return untar(rc, dest)
}
}

// untarPath returns a function which extracts an archive in a specified
// location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untarPath(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(src, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
if dryRun {
return func(src, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
f, err := os.Open(src)
if err != nil {
return errors.Wrapf(err, "error opening %q", src)
}
defer f.Close()
_, err = io.Copy(thisHasher, f)
return err
}
}
return func(src, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
untarPathAndChown := chrootarchive.UntarPathAndChown(chownOpts, thisHasher, convertedUIDMap, convertedGIDMap)
return untarPathAndChown(src, dest)
}
}

// tarPath returns a function which creates an archive of a specified location,
// which is often somewhere in the container's filesystem, mapping permissions
// using the container's ID maps, or the passed-in maps if specified
func (b *Builder) tarPath(idMappingOptions *IDMappingOptions) func(path string) (io.ReadCloser, error) {
var uidmap, gidmap []idtools.IDMap
if idMappingOptions == nil {
idMappingOptions = &IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
}
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(idMappingOptions.UIDMap, idMappingOptions.GIDMap)
tarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
uidmap = tarMappings.UIDs()
gidmap = tarMappings.GIDs()
options := &archive.TarOptions{
Compression: archive.Uncompressed,
UIDMaps: uidmap,
GIDMaps: gidmap,
}
return func(path string) (io.ReadCloser, error) {
return archive.TarWithOptions(path, options)
}
}

// untar returns a function which extracts an archive stream to a specified
// location in the container's filesystem, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func (b *Builder) untar(chownOpts *idtools.IDPair, hasher io.Writer, dryRun bool) func(tarArchive io.ReadCloser, dest string) error {
convertedUIDMap, convertedGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
untarMappings := idtools.NewIDMappingsFromMaps(convertedUIDMap, convertedGIDMap)
options := &archive.TarOptions{
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: chownOpts,
}
untar := chrootarchive.Untar
if dryRun {
untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
if _, err := io.Copy(ioutil.Discard, tarArchive); err != nil {
return errors.Wrapf(err, "error digesting tar stream")
}
return nil
}
}
originalUntar := untar
untarWithHasher := func(tarArchive io.Reader, dest string, options *archive.TarOptions, untarHasher io.Writer) error {
reader := tarArchive
if untarHasher != nil {
reader = io.TeeReader(tarArchive, untarHasher)
}
return originalUntar(reader, dest, options)
}
return func(tarArchive io.ReadCloser, dest string) error {
thisHasher := hasher
if thisHasher != nil && b.ContentDigester.Hash() != nil {
thisHasher = io.MultiWriter(thisHasher, b.ContentDigester.Hash())
}
if thisHasher == nil {
thisHasher = b.ContentDigester.Hash()
}
err := untarWithHasher(tarArchive, dest, options, thisHasher)
if err2 := tarArchive.Close(); err2 != nil {
if err == nil {
err = err2
}
}
return err
}
}

// isRegistryBlocked checks if the named registry is marked as blocked
func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
reginfo, err := sysregistriesv2.FindRegistry(sc, registry)
@ -389,10 +144,10 @@ func isReferenceBlocked(ref types.ImageReference, sc *types.SystemContext) (bool
return false, nil
}

// ReserveSELinuxLabels reads containers storage and reserves SELinux containers
// fall all existing buildah containers
// ReserveSELinuxLabels reads containers storage and reserves SELinux contexts
// which are already being used by buildah containers.
func ReserveSELinuxLabels(store storage.Store, id string) error {
if selinux.GetEnabled() {
if selinuxGetEnabled() {
containers, err := store.Containers()
if err != nil {
return errors.Wrapf(err, "error getting list of containers")
@ -438,3 +193,35 @@ func IsContainer(id string, store storage.Store) (bool, error) {
}
return true, nil
}

// Copy content from the directory "src" to the directory "dest", ensuring that
// content from outside of "root" (which is a parent of "src" or "src" itself)
// isn't read.
func extractWithTar(root, src, dest string) error {
var getErr, putErr error
var wg sync.WaitGroup

pipeReader, pipeWriter := io.Pipe()

wg.Add(1)
go func() {
getErr = copier.Get(root, src, copier.GetOptions{}, []string{"."}, pipeWriter)
pipeWriter.Close()
wg.Done()
}()
wg.Add(1)
go func() {
putErr = copier.Put(dest, dest, copier.PutOptions{}, pipeReader)
pipeReader.Close()
wg.Done()
}()
wg.Wait()

if getErr != nil {
return errors.Wrapf(getErr, "error reading %q", src)
}
if putErr != nil {
return errors.Wrapf(putErr, "error copying contents of %q to %q", src, dest)
}
return nil
}
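extractWithTar is the replacement for the removed tar-pipe helpers above: copier.Get streams a tarball of src (resolved inside root, so symlinks cannot point outside it) into one end of an io.Pipe, while copier.Put extracts from the other end into dest. Each goroutine closes its own end of the pipe, which is what lets the other side's read or write return instead of blocking forever. A usage-style sketch under hypothetical paths:

package main

import (
	"fmt"
	"io"
	"sync"

	"github.com/containers/buildah/copier"
)

func main() {
	root := "/var/tmp/ctr-root"    // hypothetical container rootfs
	src := "/var/tmp/ctr-root/etc" // directory to read, inside root
	dest := "/var/tmp/volume-copy" // directory to populate
	var getErr, putErr error
	var wg sync.WaitGroup
	pipeReader, pipeWriter := io.Pipe()
	wg.Add(2)
	go func() {
		defer wg.Done()
		getErr = copier.Get(root, src, copier.GetOptions{}, []string{"."}, pipeWriter)
		pipeWriter.Close() // unblocks the reader at EOF
	}()
	go func() {
		defer wg.Done()
		putErr = copier.Put(dest, dest, copier.PutOptions{}, pipeReader)
		pipeReader.Close() // unblocks the writer if extraction stops early
	}()
	wg.Wait()
	fmt.Println(getErr, putErr)
}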