Mirror of https://github.com/containers/podman.git
[NO TESTS NEEDED] Vendor in containers/buildah v1.20.0
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
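The bulk of this vendor bump comes from buildah moving its public build types into the new github.com/containers/buildah/define package: imagebuildah.BuildOptions is now a type alias for define.BuildOptions, and the pull-policy constants are re-exported from define. Because these are aliases rather than new types, existing callers should compile unchanged whichever package they name. A minimal sketch (the context directory and output image name are arbitrary example values):

package main

import (
    "fmt"

    "github.com/containers/buildah/define"
    "github.com/containers/buildah/imagebuildah"
)

func main() {
    // imagebuildah.BuildOptions is now "type BuildOptions = define.BuildOptions",
    // so the two names denote the same type and are interchangeable.
    opts := imagebuildah.BuildOptions{
        ContextDirectory: ".",
        PullPolicy:       define.PullIfMissing, // same value as imagebuildah.PullIfMissing
        Output:           "localhost/example:latest",
    }
    var alias define.BuildOptions = opts // compiles because of the alias
    fmt.Println(alias.Output)
}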
181 changes: vendor/github.com/containers/buildah/imagebuildah/build.go (generated, vendored)
@@ -11,13 +11,10 @@ import (
     "os/exec"
     "path/filepath"
     "strings"
-    "time"

-    "github.com/containers/buildah"
+    "github.com/containers/buildah/define"
     "github.com/containers/common/pkg/config"
     "github.com/containers/image/v5/docker/reference"
     "github.com/containers/image/v5/types"
-    encconfig "github.com/containers/ocicrypt/config"
     "github.com/containers/storage"
     "github.com/containers/storage/pkg/archive"
     specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -28,10 +25,10 @@ import (
 )

 const (
-    PullIfMissing = buildah.PullIfMissing
-    PullAlways = buildah.PullAlways
-    PullIfNewer = buildah.PullIfNewer
-    PullNever = buildah.PullNever
+    PullIfMissing = define.PullIfMissing
+    PullAlways = define.PullAlways
+    PullIfNewer = define.PullIfNewer
+    PullNever = define.PullNever

     Gzip = archive.Gzip
     Bzip2 = archive.Bzip2
@@ -43,161 +40,12 @@ const (
 // Mount is a mountpoint for the build container.
 type Mount specs.Mount

-// BuildOptions can be used to alter how an image is built.
-type BuildOptions struct {
-    // ContextDirectory is the default source location for COPY and ADD
-    // commands.
-    ContextDirectory string
-    // PullPolicy controls whether or not we pull images. It should be one
-    // of PullIfMissing, PullAlways, PullIfNewer, or PullNever.
-    PullPolicy buildah.PullPolicy
-    // Registry is a value which is prepended to the image's name, if it
-    // needs to be pulled and the image name alone can not be resolved to a
-    // reference to a source image. No separator is implicitly added.
-    Registry string
-    // IgnoreUnrecognizedInstructions tells us to just log instructions we
-    // don't recognize, and try to keep going.
-    IgnoreUnrecognizedInstructions bool
-    // Manifest Name to which the image will be added.
-    Manifest string
-    // Quiet tells us whether or not to announce steps as we go through them.
-    Quiet bool
-    // Isolation controls how Run() runs things.
-    Isolation buildah.Isolation
-    // Runtime is the name of the command to run for RUN instructions when
-    // Isolation is either IsolationDefault or IsolationOCI. It should
-    // accept the same arguments and flags that runc does.
-    Runtime string
-    // RuntimeArgs adds global arguments for the runtime.
-    RuntimeArgs []string
-    // TransientMounts is a list of mounts that won't be kept in the image.
-    TransientMounts []string
-    // Compression specifies the type of compression which is applied to
-    // layer blobs. The default is to not use compression, but
-    // archive.Gzip is recommended.
-    Compression archive.Compression
-    // Arguments which can be interpolated into Dockerfiles
-    Args map[string]string
-    // Name of the image to write to.
-    Output string
-    // Additional tags to add to the image that we write, if we know of a
-    // way to add them.
-    AdditionalTags []string
-    // Log is a callback that will print a progress message. If no value
-    // is supplied, the message will be sent to Err (or os.Stderr, if Err
-    // is nil) by default.
-    Log func(format string, args ...interface{})
-    // In is connected to stdin for RUN instructions.
-    In io.Reader
-    // Out is a place where non-error log messages are sent.
-    Out io.Writer
-    // Err is a place where error log messages should be sent.
-    Err io.Writer
-    // SignaturePolicyPath specifies an override location for the signature
-    // policy which should be used for verifying the new image as it is
-    // being written. Except in specific circumstances, no value should be
-    // specified, indicating that the shared, system-wide default policy
-    // should be used.
-    SignaturePolicyPath string
-    // ReportWriter is an io.Writer which will be used to report the
-    // progress of the (possible) pulling of the source image and the
-    // writing of the new image.
-    ReportWriter io.Writer
-    // OutputFormat is the format of the output image's manifest and
-    // configuration data.
-    // Accepted values are buildah.OCIv1ImageManifest and buildah.Dockerv2ImageManifest.
-    OutputFormat string
-    // SystemContext holds parameters used for authentication.
-    SystemContext *types.SystemContext
-    // NamespaceOptions controls how we set up namespaces processes that we
-    // might need when handling RUN instructions.
-    NamespaceOptions []buildah.NamespaceOption
-    // ConfigureNetwork controls whether or not network interfaces and
-    // routing are configured for a new network namespace (i.e., when not
-    // joining another's namespace and not just using the host's
-    // namespace), effectively deciding whether or not the process has a
-    // usable network.
-    ConfigureNetwork buildah.NetworkConfigurationPolicy
-    // CNIPluginPath is the location of CNI plugin helpers, if they should be
-    // run from a location other than the default location.
-    CNIPluginPath string
-    // CNIConfigDir is the location of CNI configuration files, if the files in
-    // the default configuration directory shouldn't be used.
-    CNIConfigDir string
-    // ID mapping options to use if we're setting up our own user namespace
-    // when handling RUN instructions.
-    IDMappingOptions *buildah.IDMappingOptions
-    // AddCapabilities is a list of capabilities to add to the default set when
-    // handling RUN instructions.
-    AddCapabilities []string
-    // DropCapabilities is a list of capabilities to remove from the default set
-    // when handling RUN instructions. If a capability appears in both lists, it
-    // will be dropped.
-    DropCapabilities []string
-    // CommonBuildOpts is *required*.
-    CommonBuildOpts *buildah.CommonBuildOptions
-    // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format
-    DefaultMountsFilePath string
-    // IIDFile tells the builder to write the image ID to the specified file
-    IIDFile string
-    // Squash tells the builder to produce an image with a single layer
-    // instead of with possibly more than one layer.
-    Squash bool
-    // Labels metadata for an image
-    Labels []string
-    // Annotation metadata for an image
-    Annotations []string
-    // OnBuild commands to be run by images based on this image
-    OnBuild []string
-    // Layers tells the builder to create a cache of images for each step in the Dockerfile
-    Layers bool
-    // NoCache tells the builder to build the image from scratch without checking for a cache.
-    // It creates a new set of cached images for the build.
-    NoCache bool
-    // RemoveIntermediateCtrs tells the builder whether to remove intermediate containers used
-    // during the build process. Default is true.
-    RemoveIntermediateCtrs bool
-    // ForceRmIntermediateCtrs tells the builder to remove all intermediate containers even if
-    // the build was unsuccessful.
-    ForceRmIntermediateCtrs bool
-    // BlobDirectory is a directory which we'll use for caching layer blobs.
-    BlobDirectory string
-    // Target the targeted FROM in the Dockerfile to build.
-    Target string
-    // Devices are the additional devices to add to the containers.
-    Devices []string
-    // SignBy is the fingerprint of a GPG key to use for signing images.
-    SignBy string
-    // Architecture specifies the target architecture of the image to be built.
-    Architecture string
-    // Timestamp sets the created timestamp to the specified time, allowing
-    // for deterministic, content-addressable builds.
-    Timestamp *time.Time
-    // OS is the specifies the operating system of the image to be built.
-    OS string
-    // MaxPullPushRetries is the maximum number of attempts we'll make to pull or push any one
-    // image from or to an external registry if the first attempt fails.
-    MaxPullPushRetries int
-    // PullPushRetryDelay is how long to wait before retrying a pull or push attempt.
-    PullPushRetryDelay time.Duration
-    // OciDecryptConfig contains the config that can be used to decrypt an image if it is
-    // encrypted if non-nil. If nil, it does not attempt to decrypt an image.
-    OciDecryptConfig *encconfig.DecryptConfig
-    // Jobs is the number of stages to run in parallel. If not specified it defaults to 1.
-    Jobs *int
-    // LogRusage logs resource usage for each step.
-    LogRusage bool
-    // Excludes is a list of excludes to be used instead of the .dockerignore file.
-    Excludes []string
-    // From is the image name to use to replace the value specified in the first
-    // FROM instruction in the Containerfile
-    From string
-}
+type BuildOptions = define.BuildOptions

 // BuildDockerfiles parses a set of one or more Dockerfiles (which may be
 // URLs), creates a new Executor, and then runs Prepare/Execute/Commit/Delete
 // over the entire set of instructions.
-func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOptions, paths ...string) (string, reference.Canonical, error) {
+func BuildDockerfiles(ctx context.Context, store storage.Store, options define.BuildOptions, paths ...string) (string, reference.Canonical, error) {
     if len(paths) == 0 {
         return "", nil, errors.Errorf("error building: no dockerfiles specified")
     }
@@ -236,12 +84,21 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
         return "", nil, err
     }

+    var contents *os.File
     // If given a directory, add '/Dockerfile' to it.
     if dinfo.Mode().IsDir() {
-        dfile = filepath.Join(dfile, "Dockerfile")
+        for _, file := range []string{"Containerfile", "Dockerfile"} {
+            f := filepath.Join(dfile, file)
+            logrus.Debugf("reading local %q", f)
+            contents, err = os.Open(f)
+            if err == nil {
+                break
+            }
+        }
+    } else {
+        contents, err = os.Open(dfile)
     }
-    logrus.Debugf("reading local Dockerfile %q", dfile)
-    contents, err := os.Open(dfile)
+
     if err != nil {
         return "", nil, err
     }
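One behavioral change in BuildDockerfiles above: when a build context directory is given, buildah now looks for Containerfile first and falls back to Dockerfile, instead of assuming Dockerfile. A standalone sketch of that lookup order (resolveInstructionsFile is an illustrative helper, not buildah API):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// resolveInstructionsFile mirrors the loop added to BuildDockerfiles: try
// Containerfile, then Dockerfile, and keep the first file that opens.
func resolveInstructionsFile(contextDir string) (*os.File, error) {
    var f *os.File
    var err error
    for _, name := range []string{"Containerfile", "Dockerfile"} {
        f, err = os.Open(filepath.Join(contextDir, name))
        if err == nil {
            return f, nil
        }
    }
    return nil, err
}

func main() {
    f, err := resolveInstructionsFile(".")
    if err != nil {
        fmt.Println("no Containerfile or Dockerfile found:", err)
        return
    }
    defer f.Close()
    fmt.Println("using", f.Name())
}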
25 changes: vendor/github.com/containers/buildah/imagebuildah/executor.go (generated, vendored)
@@ -13,6 +13,7 @@ import (
     "time"

     "github.com/containers/buildah"
+    "github.com/containers/buildah/define"
     "github.com/containers/buildah/pkg/parse"
     "github.com/containers/buildah/util"
     "github.com/containers/common/pkg/config"
@@ -56,7 +57,7 @@ type Executor struct {
     stages map[string]*StageExecutor
     store storage.Store
     contextDir string
-    pullPolicy buildah.PullPolicy
+    pullPolicy define.PullPolicy
     registry string
     ignoreUnrecognizedInstructions bool
     quiet bool
@@ -74,13 +75,13 @@ type Executor struct {
     signaturePolicyPath string
     systemContext *types.SystemContext
     reportWriter io.Writer
-    isolation buildah.Isolation
-    namespaceOptions []buildah.NamespaceOption
-    configureNetwork buildah.NetworkConfigurationPolicy
+    isolation define.Isolation
+    namespaceOptions []define.NamespaceOption
+    configureNetwork define.NetworkConfigurationPolicy
     cniPluginPath string
     cniConfigDir string
-    idmappingOptions *buildah.IDMappingOptions
-    commonBuildOptions *buildah.CommonBuildOptions
+    idmappingOptions *define.IDMappingOptions
+    commonBuildOptions *define.CommonBuildOptions
     defaultMountsFilePath string
     iidfile string
     squash bool
@@ -98,7 +99,7 @@ type Executor struct {
     excludes []string
     unusedArgs map[string]struct{}
     capabilities []string
-    devices buildah.ContainerDevices
+    devices define.ContainerDevices
     signBy string
     architecture string
     timestamp *time.Time
@@ -126,7 +127,7 @@ type imageTypeAndHistoryAndDiffIDs struct {
 }

 // NewExecutor creates a new instance of the imagebuilder.Executor interface.
-func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Node) (*Executor, error) {
+func NewExecutor(store storage.Store, options define.BuildOptions, mainNode *parser.Node) (*Executor, error) {
     defaultContainerConfig, err := config.Default()
     if err != nil {
         return nil, errors.Wrapf(err, "failed to get container config")
@@ -144,7 +145,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
         return nil, err
     }

-    devices := buildah.ContainerDevices{}
+    devices := define.ContainerDevices{}
     for _, device := range append(defaultContainerConfig.Containers.Devices, options.Devices...) {
         dev, err := parse.DeviceFromPath(device)
         if err != nil {
@@ -419,7 +420,8 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
     // build and b.forceRmIntermediateCtrs is set, make sure we
     // remove the intermediate/build containers, regardless of
     // whether or not the stage's build fails.
-    if b.forceRmIntermediateCtrs || !b.layers {
+    // Skip cleanup if the stage has no instructions.
+    if b.forceRmIntermediateCtrs || !b.layers && len(stage.Node.Children) > 0 {
         b.stagesLock.Lock()
         cleanupStages[stage.Position] = stageExecutor
         b.stagesLock.Unlock()
@@ -433,7 +435,8 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE
     // The stage succeeded, so remove its build container if we're
     // told to delete successful intermediate/build containers for
     // multi-layered builds.
-    if b.removeIntermediateCtrs {
+    // Skip cleanup if the stage has no instructions.
+    if b.removeIntermediateCtrs && len(stage.Node.Children) > 0 {
         b.stagesLock.Lock()
         cleanupStages[stage.Position] = stageExecutor
         b.stagesLock.Unlock()
71 changes: vendor/github.com/containers/buildah/imagebuildah/stage_executor.go (generated, vendored)
@@ -13,6 +13,7 @@ import (

     "github.com/containers/buildah"
     "github.com/containers/buildah/copier"
+    "github.com/containers/buildah/define"
     buildahdocker "github.com/containers/buildah/docker"
     "github.com/containers/buildah/pkg/rusage"
     "github.com/containers/buildah/util"
@@ -27,6 +28,7 @@ import (
     docker "github.com/fsouza/go-dockerclient"
     digest "github.com/opencontainers/go-digest"
     v1 "github.com/opencontainers/image-spec/specs-go/v1"
+    "github.com/opencontainers/runtime-spec/specs-go"
     "github.com/openshift/imagebuilder"
     "github.com/openshift/imagebuilder/dockerfile/parser"
     "github.com/pkg/errors"
@@ -184,7 +186,7 @@ func (s *StageExecutor) volumeCacheInvalidate(path string) error {

 // Save the contents of each of the executor's list of volumes for which we
 // don't already have a cache file.
-func (s *StageExecutor) volumeCacheSave() error {
+func (s *StageExecutor) volumeCacheSaveVFS() error {
     for cachedPath, cacheFile := range s.volumeCache {
         archivedPath := filepath.Join(s.mountPoint, cachedPath)
         _, err := os.Stat(cacheFile)
@@ -218,7 +220,7 @@ func (s *StageExecutor) volumeCacheSave() error {
 }

 // Restore the contents of each of the executor's list of volumes.
-func (s *StageExecutor) volumeCacheRestore() error {
+func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
     for cachedPath, cacheFile := range s.volumeCache {
         archivedPath := filepath.Join(s.mountPoint, cachedPath)
         logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
@@ -258,6 +260,45 @@ func (s *StageExecutor) volumeCacheRestore() error {
     return nil
 }

+// Save the contents of each of the executor's list of volumes for which we
+// don't already have a cache file.
+func (s *StageExecutor) volumeCacheSaveOverlay() (mounts []specs.Mount, err error) {
+    for cachedPath := range s.volumeCache {
+        volumePath := filepath.Join(s.mountPoint, cachedPath)
+        mount := specs.Mount{
+            Source: volumePath,
+            Destination: cachedPath,
+            Options: []string{"O", "private"},
+        }
+        mounts = append(mounts, mount)
+    }
+    return mounts, nil
+}
+
+// Reset the contents of each of the executor's list of volumes.
+func (s *StageExecutor) volumeCacheRestoreOverlay() error {
+    return nil
+}
+
+// Save the contents of each of the executor's list of volumes for which we
+// don't already have a cache file.
+func (s *StageExecutor) volumeCacheSave() (mounts []specs.Mount, err error) {
+    switch s.executor.store.GraphDriverName() {
+    case "overlay":
+        return s.volumeCacheSaveOverlay()
+    }
+    return nil, s.volumeCacheSaveVFS()
+}
+
+// Reset the contents of each of the executor's list of volumes.
+func (s *StageExecutor) volumeCacheRestore() error {
+    switch s.executor.store.GraphDriverName() {
+    case "overlay":
+        return s.volumeCacheRestoreOverlay()
+    }
+    return s.volumeCacheRestoreVFS()
+}
+
 // Copy copies data into the working tree. The "Download" field is how
 // imagebuilder tells us the instruction was "ADD" and not "COPY".
 func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
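The volumeCacheSaveOverlay/volumeCacheRestoreOverlay pair added above changes how VOLUME contents are handled across RUN steps on the overlay graph driver: instead of tarring each volume up beforehand and untarring it afterwards (the VFS path), every cached volume path is covered by an overlay mount (the "O" option) for the duration of the RUN, so the underlying content is left untouched and the restore step becomes a no-op. A hedged sketch of how such a mount list is assembled; overlayVolumeMounts and the example paths are illustrative, not buildah API:

package main

import (
    "fmt"
    "path/filepath"

    specs "github.com/opencontainers/runtime-spec/specs-go"
)

// overlayVolumeMounts builds one overlay ("O") mount per cached volume path,
// mirroring what volumeCacheSaveOverlay returns for the RUN container.
func overlayVolumeMounts(mountPoint string, volumeCache map[string]string) []specs.Mount {
    var mounts []specs.Mount
    for cachedPath := range volumeCache {
        mounts = append(mounts, specs.Mount{
            Source:      filepath.Join(mountPoint, cachedPath),
            Destination: cachedPath,
            Options:     []string{"O", "private"},
        })
    }
    return mounts
}

func main() {
    // Example values only: a hypothetical merged rootfs and one VOLUME path.
    m := overlayVolumeMounts("/var/lib/containers/storage/overlay/example/merged",
        map[string]string{"/var/lib/data": ""})
    fmt.Printf("%+v\n", m)
}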
@@ -275,7 +316,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
     // The From field says to read the content from another
     // container. Update the ID mappings and
     // all-content-comes-from-below-this-directory value.
-    var idMappingOptions *buildah.IDMappingOptions
+    var idMappingOptions *define.IDMappingOptions
     var copyExcludes []string
     stripSetuid := false
     stripSetgid := false
@@ -321,6 +362,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err
         }
     }
     options := buildah.AddAndCopyOptions{
+        Chmod: copy.Chmod,
         Chown: copy.Chown,
         PreserveOwnership: preserveOwnership,
         ContextDir: contextDir,
@@ -378,16 +420,18 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {

     args := run.Args
     if run.Shell {
-        if len(config.Shell) > 0 && s.builder.Format == buildah.Dockerv2ImageManifest {
+        if len(config.Shell) > 0 && s.builder.Format == define.Dockerv2ImageManifest {
             args = append(config.Shell, args...)
         } else {
             args = append([]string{"/bin/sh", "-c"}, args...)
         }
     }
-    if err := s.volumeCacheSave(); err != nil {
+    mounts, err := s.volumeCacheSave()
+    if err != nil {
         return err
     }
-    err := s.builder.Run(args, options)
+    options.Mounts = append(options.Mounts, mounts...)
+    err = s.builder.Run(args, options)
     if err2 := s.volumeCacheRestore(); err2 != nil {
         if err == nil {
             return err2
@@ -722,15 +766,15 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
         }

         // Check if there's a --from if the step command is COPY.
-        // Also check the chown flag for validity.
+        // Also check the chmod and the chown flags for validity.
         for _, flag := range step.Flags {
             command := strings.ToUpper(step.Command)
-            // chown and from flags should have an '=' sign, '--chown=' or '--from='
-            if command == "COPY" && (flag == "--chown" || flag == "--from") {
-                return "", nil, errors.Errorf("COPY only supports the --chown=<uid:gid> and the --from=<image|stage> flags")
+            // chmod, chown and from flags should have an '=' sign, '--chmod=', '--chown=' or '--from='
+            if command == "COPY" && (flag == "--chmod" || flag == "--chown" || flag == "--from") {
+                return "", nil, errors.Errorf("COPY only supports the --chmod=<permissions> --chown=<uid:gid> and the --from=<image|stage> flags")
             }
-            if command == "ADD" && flag == "--chown" {
-                return "", nil, errors.Errorf("ADD only supports the --chown=<uid:gid> flag")
+            if command == "ADD" && (flag == "--chmod" || flag == "--chown") {
+                return "", nil, errors.Errorf("ADD only supports the --chmod=<permissions> and the --chown=<uid:gid> flags")
             }
             if strings.Contains(flag, "--from") && command == "COPY" {
                 arr := strings.Split(flag, "=")
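With buildah 1.20.0 the stage executor also accepts --chmod on COPY and ADD (previously only --chown was accepted on those instructions), passing it through to AddAndCopyOptions.Chmod as shown in the Copy hunk above. An illustrative Containerfile fragment, embedded in Go only to keep the example self-contained; the file names, mode bits, and uid:gid are arbitrary:

package main

import "fmt"

// containerfile shows the flag syntax that the updated validation in
// stage_executor.go now accepts.
const containerfile = `FROM alpine
COPY --chmod=0755 --chown=1000:1000 entrypoint.sh /usr/local/bin/entrypoint.sh
ADD --chmod=0644 app-config.tar /etc/app/
`

func main() {
    fmt.Print(containerfile)
}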
@@ -1234,7 +1278,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
         s.builder.SetHealthcheck(nil)
     }
     s.builder.ClearLabels()
-    s.builder.SetLabel(buildah.BuilderIdentityAnnotation, buildah.Version)
+
     for k, v := range config.Labels {
         s.builder.SetLabel(k, v)
     }
@@ -1246,6 +1290,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
             s.builder.SetLabel(label[0], "")
         }
     }
+    s.builder.SetLabel(buildah.BuilderIdentityAnnotation, define.Version)
     for _, annotationSpec := range s.executor.annotations {
         annotation := strings.SplitN(annotationSpec, "=", 2)
         if len(annotation) > 1 {
141 changes: vendor/github.com/containers/buildah/imagebuildah/util.go (generated, vendored)
@@ -1,151 +1,10 @@
 package imagebuildah

 import (
-    "bufio"
-    "bytes"
-    "fmt"
-    "io/ioutil"
-    "net/http"
-    "os"
-    "os/exec"
-    "path"
-    "path/filepath"
-    "strings"
-
     "github.com/containers/buildah"
-    "github.com/containers/storage/pkg/chrootarchive"
-    "github.com/containers/storage/pkg/ioutils"
-    "github.com/opencontainers/runtime-spec/specs-go"
-    "github.com/pkg/errors"
-    "github.com/sirupsen/logrus"
 )

-func cloneToDirectory(url, dir string) error {
-    if !strings.HasPrefix(url, "git://") && !strings.HasSuffix(url, ".git") {
-        url = "git://" + url
-    }
-    gitBranch := strings.Split(url, "#")
-    var cmd *exec.Cmd
-    if len(gitBranch) < 2 {
-        logrus.Debugf("cloning %q to %q", url, dir)
-        cmd = exec.Command("git", "clone", url, dir)
-    } else {
-        logrus.Debugf("cloning repo %q and branch %q to %q", gitBranch[0], gitBranch[1], dir)
-        cmd = exec.Command("git", "clone", "-b", gitBranch[1], gitBranch[0], dir)
-    }
-    return cmd.Run()
-}
-
-func downloadToDirectory(url, dir string) error {
-    logrus.Debugf("extracting %q to %q", url, dir)
-    resp, err := http.Get(url)
-    if err != nil {
-        return err
-    }
-    defer resp.Body.Close()
-    if resp.ContentLength == 0 {
-        return errors.Errorf("no contents in %q", url)
-    }
-    if err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {
-        resp1, err := http.Get(url)
-        if err != nil {
-            return err
-        }
-        defer resp1.Body.Close()
-        body, err := ioutil.ReadAll(resp1.Body)
-        if err != nil {
-            return err
-        }
-        dockerfile := filepath.Join(dir, "Dockerfile")
-        // Assume this is a Dockerfile
-        if err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil {
-            return errors.Wrapf(err, "Failed to write %q to %q", url, dockerfile)
-        }
-    }
-    return nil
-}
-
-func stdinToDirectory(dir string) error {
-    logrus.Debugf("extracting stdin to %q", dir)
-    r := bufio.NewReader(os.Stdin)
-    b, err := ioutil.ReadAll(r)
-    if err != nil {
-        return errors.Wrapf(err, "Failed to read from stdin")
-    }
-    reader := bytes.NewReader(b)
-    if err := chrootarchive.Untar(reader, dir, nil); err != nil {
-        dockerfile := filepath.Join(dir, "Dockerfile")
-        // Assume this is a Dockerfile
-        if err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil {
-            return errors.Wrapf(err, "Failed to write bytes to %q", dockerfile)
-        }
-    }
-    return nil
-}
-
-// TempDirForURL checks if the passed-in string looks like a URL or -. If it is,
-// TempDirForURL creates a temporary directory, arranges for its contents to be
-// the contents of that URL, and returns the temporary directory's path, along
-// with the name of a subdirectory which should be used as the build context
-// (which may be empty or "."). Removal of the temporary directory is the
-// responsibility of the caller. If the string doesn't look like a URL,
-// TempDirForURL returns empty strings and a nil error code.
-func TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) {
-    if !strings.HasPrefix(url, "http://") &&
-        !strings.HasPrefix(url, "https://") &&
-        !strings.HasPrefix(url, "git://") &&
-        !strings.HasPrefix(url, "github.com/") &&
-        url != "-" {
-        return "", "", nil
-    }
-    name, err = ioutil.TempDir(dir, prefix)
-    if err != nil {
-        return "", "", errors.Wrapf(err, "error creating temporary directory for %q", url)
-    }
-    if strings.HasPrefix(url, "git://") || strings.HasSuffix(url, ".git") {
-        err = cloneToDirectory(url, name)
-        if err != nil {
-            if err2 := os.RemoveAll(name); err2 != nil {
-                logrus.Debugf("error removing temporary directory %q: %v", name, err2)
-            }
-            return "", "", err
-        }
-        return name, "", nil
-    }
-    if strings.HasPrefix(url, "github.com/") {
-        ghurl := url
-        url = fmt.Sprintf("https://%s/archive/master.tar.gz", ghurl)
-        logrus.Debugf("resolving url %q to %q", ghurl, url)
-        subdir = path.Base(ghurl) + "-master"
-    }
-    if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
-        err = downloadToDirectory(url, name)
-        if err != nil {
-            if err2 := os.RemoveAll(name); err2 != nil {
-                logrus.Debugf("error removing temporary directory %q: %v", name, err2)
-            }
-            return "", subdir, err
-        }
-        return name, subdir, nil
-    }
-    if url == "-" {
-        err = stdinToDirectory(name)
-        if err != nil {
-            if err2 := os.RemoveAll(name); err2 != nil {
-                logrus.Debugf("error removing temporary directory %q: %v", name, err2)
-            }
-            return "", subdir, err
-        }
-        logrus.Debugf("Build context is at %q", name)
-        return name, subdir, nil
-    }
-    logrus.Debugf("don't know how to retrieve %q", url)
-    if err2 := os.Remove(name); err2 != nil {
-        logrus.Debugf("error removing temporary directory %q: %v", name, err2)
-    }
-    return "", "", errors.Errorf("unreachable code reached")
-}

 // InitReexec is a wrapper for buildah.InitReexec(). It should be called at
 // the start of main(), and if it returns true, main() should return
 // immediately.