Vendor in latest buildah code

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
This commit is contained in:
Daniel J Walsh
2019-04-26 14:32:57 -04:00
parent 29c65d0ae4
commit 0a64268752
18 changed files with 388 additions and 211 deletions

View File

@ -94,7 +94,7 @@ k8s.io/apimachinery kubernetes-1.10.13-beta.0 https://github.com/kubernetes/apim
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7 github.com/mrunalp/fileutils 7d4729fb36185a7c1719923406c9d40e54fb93c7
github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199 github.com/varlink/go 3ac79db6fd6aec70924193b090962f92985fe199
github.com/containers/buildah fcc12bdadf6a5fab77e62e1bd12663bb6fbc3eda github.com/containers/buildah v1.8.0
# TODO: Gotty has not been updated since 2012. Can we find a replacement? # TODO: Gotty has not been updated since 2012. Can we find a replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/fsouza/go-dockerclient v1.3.0 github.com/fsouza/go-dockerclient v1.3.0

View File

@ -292,7 +292,7 @@ func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.Fil
break break
} }
// combine the filename with the dest directory // combine the filename with the dest directory
fpath := strings.TrimPrefix(path, options.ContextDir) fpath := strings.TrimPrefix(path, esrc)
if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil { if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
return errors.Wrapf(err, "error copying %q to %q", path, dest) return errors.Wrapf(err, "error copying %q to %q", path, dest)
} }

View File

@ -26,7 +26,7 @@ const (
Package = "buildah" Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec // Version for the Package. Bump version in contrib/rpm/buildah.spec
// too. // too.
Version = "1.8-dev" Version = "1.8.0"
// The value we use to identify what type of information, currently a // The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state. // serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to // This should only be changed when we make incompatible changes to
@ -282,6 +282,8 @@ type CommonBuildOptions struct {
CPUSetCPUs string CPUSetCPUs string
// CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. // CPUSetMems memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
CPUSetMems string CPUSetMems string
// HTTPProxy determines whether *_proxy env vars from the build host are passed into the container.
HTTPProxy bool
// Memory is the upper limit (in bytes) on how much memory running containers can use. // Memory is the upper limit (in bytes) on how much memory running containers can use.
Memory int64 Memory int64
// DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf // DNSSearch is the list of DNS search domains to add to the build container's /etc/resolv.conf

View File

@ -512,7 +512,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
logNamespaceDiagnostics(spec) logNamespaceDiagnostics(spec)
// If we have configured ID mappings, set them here so that they can apply to the child. // If we have configured ID mappings, set them here so that they can apply to the child.
hostUidmap, hostGidmap, err := util.GetHostIDMappings("") hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
if err != nil { if err != nil {
return 1, err return 1, err
} }

View File

@ -13,7 +13,7 @@ import (
// setSelinuxLabel sets the process label for child processes that we'll start. // setSelinuxLabel sets the process label for child processes that we'll start.
func setSelinuxLabel(spec *specs.Spec) error { func setSelinuxLabel(spec *specs.Spec) error {
logrus.Debugf("setting selinux label") logrus.Debugf("setting selinux label")
if spec.Process.SelinuxLabel != "" && selinux.EnforceMode() != selinux.Disabled { if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil { if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel) return errors.Wrapf(err, "error setting process label to %q", spec.Process.SelinuxLabel)
} }

View File

@ -64,12 +64,9 @@ type CommitOptions struct {
// manifest of the new image will reference the blobs rather than // manifest of the new image will reference the blobs rather than
// on-disk layers. // on-disk layers.
BlobDirectory string BlobDirectory string
// EmptyLayer tells the builder to omit the diff for the working
// OnBuild is a list of commands to be run by images based on this image // container.
OnBuild []string EmptyLayer bool
// Parent is the base image that this image was created by.
Parent string
// OmitTimestamp forces epoch 0 as created timestamp to allow for // OmitTimestamp forces epoch 0 as created timestamp to allow for
// deterministic, content-addressable builds. // deterministic, content-addressable builds.
OmitTimestamp bool OmitTimestamp bool
@ -169,7 +166,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
} }
} }
// Build an image reference from which we can copy the finished image. // Build an image reference from which we can copy the finished image.
src, err := b.makeImageRef(options.PreferredManifestType, options.Parent, exportBaseLayers, options.Squash, options.BlobDirectory, options.Compression, options.HistoryTimestamp, options.OmitTimestamp) src, err := b.makeImageRef(options, exportBaseLayers)
if err != nil { if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID) return imgID, nil, "", errors.Wrapf(err, "error computing layer digests and building metadata for container %q", b.ContainerID)
} }

View File

@ -56,6 +56,7 @@ type containerImageRef struct {
preferredManifestType string preferredManifestType string
exporting bool exporting bool
squash bool squash bool
emptyLayer bool
tarPath func(path string) (io.ReadCloser, error) tarPath func(path string) (io.ReadCloser, error)
parent string parent string
blobDirectory string blobDirectory string
@ -184,7 +185,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest,
if err := json.Unmarshal(i.dconfig, &dimage); err != nil { if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
} }
dimage.Parent = docker.ID(digest.FromString(i.parent)) dimage.Parent = docker.ID(i.parent)
// Always replace this value, since we're newer than our base image. // Always replace this value, since we're newer than our base image.
dimage.Created = created dimage.Created = created
// Clear the list of diffIDs, since we always repopulate it. // Clear the list of diffIDs, since we always repopulate it.
@ -290,6 +291,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "unable to locate layer %q", layerID) return nil, errors.Wrapf(err, "unable to locate layer %q", layerID)
} }
// If we're up to the final layer, but we don't want to include
// a diff for it, we're done.
if i.emptyLayer && layerID == i.layerID {
continue
}
// If we're not re-exporting the data, and we're reusing layers individually, reuse // If we're not re-exporting the data, and we're reusing layers individually, reuse
// the blobsum and diff IDs. // the blobsum and diff IDs.
if !i.exporting && !i.squash && layerID != i.layerID { if !i.exporting && !i.squash && layerID != i.layerID {
@ -433,7 +439,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
CreatedBy: i.createdBy, CreatedBy: i.createdBy,
Author: oimage.Author, Author: oimage.Author,
Comment: i.historyComment, Comment: i.historyComment,
EmptyLayer: false, EmptyLayer: i.emptyLayer,
} }
oimage.History = append(oimage.History, onews) oimage.History = append(oimage.History, onews)
dnews := docker.V2S2History{ dnews := docker.V2S2History{
@ -441,11 +447,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
CreatedBy: i.createdBy, CreatedBy: i.createdBy,
Author: dimage.Author, Author: dimage.Author,
Comment: i.historyComment, Comment: i.historyComment,
EmptyLayer: false, EmptyLayer: i.emptyLayer,
} }
dimage.History = append(dimage.History, dnews) dimage.History = append(dimage.History, dnews)
appendHistory(i.postEmptyLayers) appendHistory(i.postEmptyLayers)
dimage.Parent = docker.ID(digest.FromString(i.parent)) dimage.Parent = docker.ID(i.parent)
// Sanity check that we didn't just create a mismatch between non-empty layers in the // Sanity check that we didn't just create a mismatch between non-empty layers in the
// history and the number of diffIDs. // history and the number of diffIDs.
@ -636,7 +642,7 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil return ioutils.NewReadCloserWrapper(layerFile, closer), size, nil
} }
func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squash bool, blobDirectory string, compress archive.Compression, historyTimestamp *time.Time, omitTimestamp bool) (types.ImageReference, error) { func (b *Builder) makeImageRef(options CommitOptions, exporting bool) (types.ImageReference, error) {
var name reference.Named var name reference.Named
container, err := b.store.Container(b.ContainerID) container, err := b.store.Container(b.ContainerID)
if err != nil { if err != nil {
@ -647,6 +653,7 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
name = parsed name = parsed
} }
} }
manifestType := options.PreferredManifestType
if manifestType == "" { if manifestType == "" {
manifestType = OCIv1ImageManifest manifestType = OCIv1ImageManifest
} }
@ -659,8 +666,8 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker) return nil, errors.Wrapf(err, "error encoding docker-format image configuration %#v", b.Docker)
} }
created := time.Now().UTC() created := time.Now().UTC()
if historyTimestamp != nil { if options.HistoryTimestamp != nil {
created = historyTimestamp.UTC() created = options.HistoryTimestamp.UTC()
} }
createdBy := b.CreatedBy() createdBy := b.CreatedBy()
if createdBy == "" { if createdBy == "" {
@ -670,13 +677,21 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
} }
} }
if omitTimestamp { if options.OmitTimestamp {
created = time.Unix(0, 0) created = time.Unix(0, 0)
} }
parent := ""
if b.FromImageID != "" {
parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
if parentDigest.Validate() == nil {
parent = parentDigest.String()
}
}
ref := &containerImageRef{ ref := &containerImageRef{
store: b.store, store: b.store,
compression: compress, compression: options.Compression,
name: name, name: name,
names: container.Names, names: container.Names,
containerID: container.ID, containerID: container.ID,
@ -690,10 +705,11 @@ func (b *Builder) makeImageRef(manifestType, parent string, exporting bool, squa
annotations: b.Annotations(), annotations: b.Annotations(),
preferredManifestType: manifestType, preferredManifestType: manifestType,
exporting: exporting, exporting: exporting,
squash: squash, squash: options.Squash,
emptyLayer: options.EmptyLayer,
tarPath: b.tarPath(), tarPath: b.tarPath(),
parent: parent, parent: parent,
blobDirectory: blobDirectory, blobDirectory: options.BlobDirectory,
preEmptyLayers: b.PrependedEmptyLayers, preEmptyLayers: b.PrependedEmptyLayers,
postEmptyLayers: b.AppendedEmptyLayers, postEmptyLayers: b.AppendedEmptyLayers,
} }

View File

@ -28,7 +28,7 @@ import (
"github.com/containers/storage" "github.com/containers/storage"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
docker "github.com/fsouza/go-dockerclient" docker "github.com/fsouza/go-dockerclient"
"github.com/opencontainers/image-spec/specs-go/v1" v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-spec/specs-go"
"github.com/openshift/imagebuilder" "github.com/openshift/imagebuilder"
"github.com/openshift/imagebuilder/dockerfile/parser" "github.com/openshift/imagebuilder/dockerfile/parser"
@ -215,9 +215,12 @@ type Executor struct {
forceRmIntermediateCtrs bool forceRmIntermediateCtrs bool
imageMap map[string]string // Used to map images that we create to handle the AS construct. imageMap map[string]string // Used to map images that we create to handle the AS construct.
containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers.
baseMap map[string]bool // Holds the names of every base image, as given.
rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction.
blobDirectory string blobDirectory string
excludes []string excludes []string
unusedArgs map[string]struct{} unusedArgs map[string]struct{}
buildArgs map[string]string
} }
// StageExecutor bundles up what we need to know when executing one stage of a // StageExecutor bundles up what we need to know when executing one stage of a
@ -480,6 +483,19 @@ func (s *StageExecutor) volumeCacheRestore() error {
// imagebuilder tells us the instruction was "ADD" and not "COPY". // imagebuilder tells us the instruction was "ADD" and not "COPY".
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error { func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
for _, copy := range copies { for _, copy := range copies {
// If the file exists, check to see if it's a symlink.
// If it is a symlink, convert to its target; otherwise
// the symlink will be overwritten.
fileDest, _ := os.Lstat(filepath.Join(s.mountPoint, copy.Dest))
if fileDest != nil {
if fileDest.Mode()&os.ModeSymlink != 0 {
if symLink, err := resolveSymlink(s.mountPoint, copy.Dest); err == nil {
copy.Dest = symLink
} else {
return errors.Wrapf(err, "error reading symbolic link to %q", copy.Dest)
}
}
}
if copy.Download { if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy) logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else { } else {
@ -590,7 +606,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
// UnrecognizedInstruction is called when we encounter an instruction that the // UnrecognizedInstruction is called when we encounter an instruction that the
// imagebuilder parser didn't understand. // imagebuilder parser didn't understand.
func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", step.Command) errStr := fmt.Sprintf("Build error: Unknown instruction: %q ", strings.ToUpper(step.Command))
err := fmt.Sprintf(errStr+"%#v", step) err := fmt.Sprintf(errStr+"%#v", step)
if s.executor.ignoreUnrecognizedInstructions { if s.executor.ignoreUnrecognizedInstructions {
logrus.Debugf(err) logrus.Debugf(err)
@ -610,7 +626,7 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error {
} }
// NewExecutor creates a new instance of the imagebuilder.Executor interface. // NewExecutor creates a new instance of the imagebuilder.Executor interface.
func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) { func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Node) (*Executor, error) {
excludes, err := imagebuilder.ParseDockerignore(options.ContextDirectory) excludes, err := imagebuilder.ParseDockerignore(options.ContextDirectory)
if err != nil { if err != nil {
return nil, err return nil, err
@ -656,8 +672,11 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs,
imageMap: make(map[string]string), imageMap: make(map[string]string),
containerMap: make(map[string]*buildah.Builder), containerMap: make(map[string]*buildah.Builder),
baseMap: make(map[string]bool),
rootfsMap: make(map[string]bool),
blobDirectory: options.BlobDirectory, blobDirectory: options.BlobDirectory,
unusedArgs: make(map[string]struct{}), unusedArgs: make(map[string]struct{}),
buildArgs: options.Args,
} }
if exec.err == nil { if exec.err == nil {
exec.err = os.Stderr exec.err = os.Stderr
@ -679,6 +698,25 @@ func NewExecutor(store storage.Store, options BuildOptions) (*Executor, error) {
exec.unusedArgs[arg] = struct{}{} exec.unusedArgs[arg] = struct{}{}
} }
} }
for _, line := range mainNode.Children {
node := line
for node != nil { // tokens on this line, though we only care about the first
switch strings.ToUpper(node.Value) { // first token - instruction
case "ARG":
arg := node.Next
if arg != nil {
// We have to be careful here - it's either an argument
// and value, or just an argument, since they can be
// separated by either "=" or whitespace.
list := strings.SplitN(arg.Value, "=", 2)
if _, stillUnused := exec.unusedArgs[list[0]]; stillUnused {
delete(exec.unusedArgs, list[0])
}
}
}
break
}
}
return &exec, nil return &exec, nil
} }
@ -845,9 +883,9 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e
return imageRef, nil return imageRef, nil
} }
// stepRequiresCommit indicates whether or not the step should be followed by // stepRequiresLayer indicates whether or not the step should be followed by
// committing the in-progress container to create an intermediate image. // committing a layer container when creating an intermediate image.
func (*StageExecutor) stepRequiresCommit(step *imagebuilder.Step) bool { func (*StageExecutor) stepRequiresLayer(step *imagebuilder.Step) bool {
switch strings.ToUpper(step.Command) { switch strings.ToUpper(step.Command) {
case "ADD", "COPY", "RUN": case "ADD", "COPY", "RUN":
return true return true
@ -875,6 +913,10 @@ func (s *StageExecutor) getImageRootfs(ctx context.Context, stage imagebuilder.S
func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, base string) (imgID string, ref reference.Canonical, err error) { func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, base string) (imgID string, ref reference.Canonical, err error) {
ib := stage.Builder ib := stage.Builder
checkForLayers := s.executor.layers && s.executor.useCache checkForLayers := s.executor.layers && s.executor.useCache
moreStages := s.index < s.stages-1
lastStage := !moreStages
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)])
// If the base image's name corresponds to the result of an earlier // If the base image's name corresponds to the result of an earlier
// stage, substitute that image's ID for the base image's name here. // stage, substitute that image's ID for the base image's name here.
@ -896,7 +938,8 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// A helper function to only log "COMMIT" as an explicit step if it's // A helper function to only log "COMMIT" as an explicit step if it's
// the very last step of a (possibly multi-stage) build. // the very last step of a (possibly multi-stage) build.
logCommit := func(output string, instruction int) { logCommit := func(output string, instruction int) {
if instruction < len(children)-1 || s.index < s.stages-1 { moreInstructions := instruction < len(children)-1
if moreInstructions || moreStages {
return return
} }
commitMessage := "COMMIT" commitMessage := "COMMIT"
@ -921,7 +964,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// squash the contents of the base image. Whichever is // squash the contents of the base image. Whichever is
// the case, we need to commit() to create a new image. // the case, we need to commit() to create a new image.
logCommit(s.output, -1) logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, ib, getCreatedBy(nil), s.output); err != nil { if imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(nil), false, s.output); err != nil {
return "", nil, errors.Wrapf(err, "error committing base container") return "", nil, errors.Wrapf(err, "error committing base container")
} }
} else { } else {
@ -936,6 +979,8 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
} }
for i, node := range children { for i, node := range children {
moreInstructions := i < len(children)-1
lastInstruction := !moreInstructions
// Resolve any arguments in this instruction. // Resolve any arguments in this instruction.
step := ib.Step() step := ib.Step()
if err := step.Resolve(node); err != nil { if err := step.Resolve(node); err != nil {
@ -946,30 +991,19 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
s.executor.log("%s", step.Original) s.executor.log("%s", step.Original)
} }
// If this instruction declares an argument, remove it from the
// set of arguments that we were passed but which we haven't
// yet seen used by the Dockerfile.
if step.Command == "arg" {
for _, Arg := range step.Args {
list := strings.SplitN(Arg, "=", 2)
if _, stillUnused := s.executor.unusedArgs[list[0]]; stillUnused {
delete(s.executor.unusedArgs, list[0])
}
}
}
// Check if there's a --from if the step command is COPY or // Check if there's a --from if the step command is COPY or
// ADD. Set copyFrom to point to either the context directory // ADD. Set copyFrom to point to either the context directory
// or the root of the container from the specified stage. // or the root of the container from the specified stage.
s.copyFrom = s.executor.contextDir s.copyFrom = s.executor.contextDir
for _, n := range step.Flags { for _, n := range step.Flags {
if strings.Contains(n, "--from") && (step.Command == "copy" || step.Command == "add") { command := strings.ToUpper(step.Command)
if strings.Contains(n, "--from") && (command == "COPY" || command == "ADD") {
var mountPoint string var mountPoint string
arr := strings.Split(n, "=") arr := strings.Split(n, "=")
otherStage, ok := s.executor.stages[arr[1]] otherStage, ok := s.executor.stages[arr[1]]
if !ok { if !ok {
if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil { if mountPoint, err = s.getImageRootfs(ctx, stage, arr[1]); err != nil {
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", step.Command, arr[1]) return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
} }
} else { } else {
mountPoint = otherStage.mountPoint mountPoint = otherStage.mountPoint
@ -984,7 +1018,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// contents of any volumes declared between now and when we // contents of any volumes declared between now and when we
// finish. // finish.
noRunsRemaining := false noRunsRemaining := false
if i < len(children)-1 { if moreInstructions {
noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]}) noRunsRemaining = !ib.RequiresStart(&parser.Node{Children: children[i+1:]})
} }
@ -996,24 +1030,29 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
} }
if i < len(children)-1 { if moreInstructions {
// There are still more instructions to process // There are still more instructions to process
// for this stage. Make a note of the // for this stage. Make a note of the
// instruction in the history that we'll write // instruction in the history that we'll write
// for the image when we eventually commit it. // for the image when we eventually commit it.
now := time.Now() now := time.Now()
s.builder.AddPrependedEmptyLayer(&now, getCreatedBy(node), "", "") s.builder.AddPrependedEmptyLayer(&now, s.executor.getCreatedBy(node), "", "")
continue continue
} else { } else {
// This is the last instruction for this stage, // This is the last instruction for this stage,
// so we should commit this container to create // so we should commit this container to create
// an image. // an image, but only if it's the last one, or
// if it's used as the basis for a later stage.
if lastStage || imageIsUsedLater {
logCommit(s.output, i) logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, ib, getCreatedBy(node), s.output) imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), false, s.output)
if err != nil { if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
} }
logImageID(imgID) logImageID(imgID)
} else {
imgID = ""
}
break break
} }
} }
@ -1028,18 +1067,14 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// If we have to commit for this instruction, only assign the // If we have to commit for this instruction, only assign the
// stage's configured output name to the last layer. // stage's configured output name to the last layer.
if i == len(children)-1 { if lastInstruction {
commitName = s.output commitName = s.output
} }
// If we're using the cache, and we've managed to stick with // If we're using the cache, and we've managed to stick with
// cached images so far, look for one that matches what we // cached images so far, look for one that matches what we
// expect to produce for this instruction. // expect to produce for this instruction.
// Only check at steps where we commit, so that we don't if checkForLayers && !(s.executor.squash && lastInstruction && lastStage) {
// abandon the cache at this step just because we can't find an
// image with a history entry in it that we wouldn't have
// committed.
if checkForLayers && (s.stepRequiresCommit(step) || i == len(children)-1) && !(s.executor.squash && i == len(children)-1 && s.index == s.stages-1) {
cacheID, err = s.layerExists(ctx, node, children[:i]) cacheID, err = s.layerExists(ctx, node, children[:i])
if err != nil { if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build") return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
@ -1059,17 +1094,32 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// the last step in this stage, add the name to the // the last step in this stage, add the name to the
// image. // image.
imgID = cacheID imgID = cacheID
if commitName != "" && (s.stepRequiresCommit(step) || i == len(children)-1) { if commitName != "" {
logCommit(s.output, i) logCommit(commitName, i)
if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil { if imgID, ref, err = s.copyExistingImage(ctx, cacheID, commitName); err != nil {
return "", nil, err return "", nil, err
} }
logImageID(imgID) logImageID(imgID)
} }
// Update our working container to be based off of the // Update our working container to be based off of the
// cached image, in case we need to read content from // cached image, if we might need to use it as a basis
// its root filesystem. // for the next instruction, or if we need the root
rebase = true // filesystem to match the image contents for the sake
// of a later stage that wants to copy content from it.
rebase = moreInstructions || rootfsIsUsedLater
// If the instruction would affect our configuration,
// process the configuration change so that, if we fall
// off the cache path, the filesystem changes from the
// last cache image will be all that we need, since we
// still don't want to restart using the image's
// configuration blob.
if !s.stepRequiresLayer(step) {
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
}
}
} else { } else {
// If we didn't find a cached image that we could just reuse, // If we didn't find a cached image that we could just reuse,
// process the instruction directly. // process the instruction directly.
@ -1078,17 +1128,9 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step)) logrus.Debugf("%v", errors.Wrapf(err, "error building at step %+v", *step))
return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message) return "", nil, errors.Wrapf(err, "error building at STEP \"%s\"", step.Message)
} }
if s.stepRequiresCommit(step) || i == len(children)-1 { // Create a new image, maybe with a new layer.
// Either this is the last instruction, or
// there are more instructions and we need to
// create a layer from this one before
// continuing.
// TODO: only commit for the last instruction
// case if we need to use this stage's image as
// a base image later, or if we're the final
// stage.
logCommit(s.output, i) logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, ib, getCreatedBy(node), commitName) imgID, ref, err = s.commit(ctx, ib, s.executor.getCreatedBy(node), !s.stepRequiresLayer(step), commitName)
if err != nil { if err != nil {
return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step) return "", nil, errors.Wrapf(err, "error committing container for step %+v", *step)
} }
@ -1099,15 +1141,7 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// that just want to use the rootfs as a source // that just want to use the rootfs as a source
// for COPY or ADD will be content with what we // for COPY or ADD will be content with what we
// already have. // already have.
rebase = i < len(children)-1 rebase = moreInstructions
} else {
// There are still more instructions to process
// for this stage, and we don't need to commit
// here. Make a note of the instruction in the
// history for the next commit.
now := time.Now()
s.builder.AddPrependedEmptyLayer(&now, getCreatedBy(node), "", "")
}
} }
if rebase { if rebase {
@ -1122,8 +1156,6 @@ func (s *StageExecutor) Execute(ctx context.Context, stage imagebuilder.Stage, b
// creating a new working container with the // creating a new working container with the
// just-committed or updated cached image as its new // just-committed or updated cached image as its new
// base image. // base image.
// TODO: only create a new container if we know that
// we'll need the updated root filesystem.
if _, err := s.prepare(ctx, stage, imgID, false, true); err != nil { if _, err := s.prepare(ctx, stage, imgID, false, true); err != nil {
return "", nil, errors.Wrap(err, "error preparing container for next step") return "", nil, errors.Wrap(err, "error preparing container for next step")
} }
@ -1195,13 +1227,13 @@ func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node,
// it means that this image is potentially a cached intermediate image from a previous // it means that this image is potentially a cached intermediate image from a previous
// build. Next we double check that the history of this image is equivalent to the previous // build. Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build. // lines in the Dockerfile up till the point we are at in the build.
if layer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] { if layer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] || layer.ID == s.executor.topLayers[len(s.executor.topLayers)-1] {
history, err := s.executor.getImageHistory(ctx, image.ID) history, err := s.executor.getImageHistory(ctx, image.ID)
if err != nil { if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID) return "", errors.Wrapf(err, "error getting history of %q", image.ID)
} }
// children + currNode is the point of the Dockerfile we are currently at. // children + currNode is the point of the Dockerfile we are currently at.
if historyMatches(append(children, currNode), history) { if s.executor.historyMatches(append(children, currNode), history) {
// This checks if the files copied during build have been changed if the node is // This checks if the files copied during build have been changed if the node is
// a COPY or ADD command. // a COPY or ADD command.
filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created) filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created)
@ -1225,21 +1257,26 @@ func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.Hi
} }
ref, err := imageRef.NewImage(ctx, nil) ref, err := imageRef.NewImage(ctx, nil)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "error creating new image from reference") return nil, errors.Wrapf(err, "error creating new image from reference to image %q", imageID)
} }
defer ref.Close()
oci, err := ref.OCIConfig(ctx) oci, err := ref.OCIConfig(ctx)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error getting oci config of image %q", imageID) return nil, errors.Wrapf(err, "error getting possibly-converted OCI config of image %q", imageID)
} }
return oci.History, nil return oci.History, nil
} }
// getCreatedBy returns the command the image at node will be created by. // getCreatedBy returns the command the image at node will be created by.
func getCreatedBy(node *parser.Node) string { func (b *Executor) getCreatedBy(node *parser.Node) string {
if node == nil { if node == nil {
return "/bin/sh" return "/bin/sh"
} }
if node.Value == "run" { if node.Value == "run" {
buildArgs := b.getBuildArgs()
if buildArgs != "" {
return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:]
}
return "/bin/sh -c " + node.Original[4:] return "/bin/sh -c " + node.Original[4:]
} }
return "/bin/sh -c #(nop) " + node.Original return "/bin/sh -c #(nop) " + node.Original
@ -1249,12 +1286,23 @@ func getCreatedBy(node *parser.Node) string {
// in the Dockerfile till the point of build we are at. // in the Dockerfile till the point of build we are at.
// Used to verify whether a cache of the intermediate image exists and whether // Used to verify whether a cache of the intermediate image exists and whether
// to run the build again. // to run the build again.
func historyMatches(children []*parser.Node, history []v1.History) bool { func (b *Executor) historyMatches(children []*parser.Node, history []v1.History) bool {
i := len(history) - 1 i := len(history) - 1
for j := len(children) - 1; j >= 0; j-- { for j := len(children) - 1; j >= 0; j-- {
instruction := children[j].Original instruction := children[j].Original
if children[j].Value == "run" { if children[j].Value == "run" {
instruction = instruction[4:] instruction = instruction[4:]
buildArgs := b.getBuildArgs()
// If a previous image was built with some build-args but the new build process doesn't have any build-args
// specified, so compare the lengths of the old instruction with the current one
// 11 is the length of "/bin/sh -c " that is used to run the run commands
if buildArgs == "" && len(history[i].CreatedBy) > len(instruction)+11 {
return false
}
// There are build-args, so check if anything with the build-args has changed
if buildArgs != "" && !strings.Contains(history[i].CreatedBy, buildArgs) {
return false
}
} }
if !strings.Contains(history[i].CreatedBy, instruction) { if !strings.Contains(history[i].CreatedBy, instruction) {
return false return false
@ -1264,6 +1312,18 @@ func historyMatches(children []*parser.Node, history []v1.History) bool {
return true return true
} }
// getBuildArgs returns a string of the build-args specified during the build process
// it excludes any build-args that were not used in the build process
func (b *Executor) getBuildArgs() string {
var buildArgs []string
for k, v := range b.buildArgs {
if _, ok := b.unusedArgs[k]; !ok {
buildArgs = append(buildArgs, k+"="+v)
}
}
return strings.Join(buildArgs, " ")
}
// getFilesToCopy goes through node to get all the src files that are copied, added or downloaded. // getFilesToCopy goes through node to get all the src files that are copied, added or downloaded.
// It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix. // It is possible for the Dockerfile to have src as hom*, which means all files that have hom as a prefix.
// Another format is hom?.txt, which means all files that have that name format with the ? replaced by another character. // Another format is hom?.txt, which means all files that have that name format with the ? replaced by another character.
@ -1348,7 +1408,7 @@ func urlContentModified(url string, historyTime *time.Time) (bool, error) {
// commit writes the container's contents to an image, using a passed-in tag as // commit writes the container's contents to an image, using a passed-in tag as
// the name if there is one, generating a unique ID-based one otherwise. // the name if there is one, generating a unique ID-based one otherwise.
func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy, output string) (string, reference.Canonical, error) { func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, createdBy string, emptyLayer bool, output string) (string, reference.Canonical, error) {
var imageRef types.ImageReference var imageRef types.ImageReference
if output != "" { if output != "" {
imageRef2, err := s.executor.resolveNameToImageRef(output) imageRef2, err := s.executor.resolveNameToImageRef(output)
@ -1438,8 +1498,8 @@ func (s *StageExecutor) commit(ctx context.Context, ib *imagebuilder.Builder, cr
PreferredManifestType: s.executor.outputFormat, PreferredManifestType: s.executor.outputFormat,
SystemContext: s.executor.systemContext, SystemContext: s.executor.systemContext,
Squash: s.executor.squash, Squash: s.executor.squash,
EmptyLayer: emptyLayer,
BlobDirectory: s.executor.blobDirectory, BlobDirectory: s.executor.blobDirectory,
Parent: s.builder.FromImageID,
} }
imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options)
if err != nil { if err != nil {
@ -1510,6 +1570,46 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
} }
defer cleanup() defer cleanup()
// Build maps of every named base image and every referenced stage root
// filesystem. Individual stages can use them to determine whether or
// not they can skip certain steps near the end of their stages.
for _, stage := range stages {
node := stage.Node // first line
for node != nil { // each line
for _, child := range node.Children { // tokens on this line, though we only care about the first
switch strings.ToUpper(child.Value) { // first token - instruction
case "FROM":
if child.Next != nil { // second token on this line
base := child.Next.Value
if base != "scratch" {
// TODO: this didn't undergo variable and arg
// expansion, so if the AS clause in another
// FROM instruction uses argument values,
// we might not record the right value here.
b.baseMap[base] = true
logrus.Debugf("base: %q", base)
}
}
case "ADD", "COPY":
for _, flag := range child.Flags { // flags for this instruction
if strings.HasPrefix(flag, "--from=") {
// TODO: this didn't undergo variable and
// arg expansion, so if the previous stage
// was named using argument values, we might
// not record the right value here.
rootfs := flag[7:]
b.rootfsMap[rootfs] = true
logrus.Debugf("rootfs: %q", rootfs)
}
}
}
break
}
node = node.Next // next line
}
}
// Run through the build stages, one at a time.
for stageIndex, stage := range stages { for stageIndex, stage := range stages {
var lastErr error var lastErr error
@ -1555,7 +1655,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
// If this is an intermediate stage, make a note of the ID, so // If this is an intermediate stage, make a note of the ID, so
// that we can look it up later. // that we can look it up later.
if stageIndex < len(stages)-1 { if stageIndex < len(stages)-1 && imageID != "" {
b.imageMap[stage.Name] = imageID b.imageMap[stage.Name] = imageID
// We're not populating the cache with intermediate // We're not populating the cache with intermediate
// images, so add this one to the list of images that // images, so add this one to the list of images that
@ -1671,7 +1771,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options BuildOpt
} }
mainNode.Children = append(mainNode.Children, additionalNode.Children...) mainNode.Children = append(mainNode.Children, additionalNode.Children...)
} }
exec, err := NewExecutor(store, options) exec, err := NewExecutor(store, options, mainNode)
if err != nil { if err != nil {
return "", nil, errors.Wrapf(err, "error creating build executor") return "", nil, errors.Wrapf(err, "error creating build executor")
} }

View File

@ -129,20 +129,20 @@ func resolveModifiedTime(rootdir, filename, historyTime string) (bool, error) {
func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) { func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
var timeIsGreater bool var timeIsGreater bool
// the Walk below doesn't work if rootdir and path are equal
if rootdir == path {
return false, nil
}
// Convert historyTime from string to time.Time for comparison // Convert historyTime from string to time.Time for comparison
histTime, err := time.Parse(time.RFC3339Nano, historyTime) histTime, err := time.Parse(time.RFC3339Nano, historyTime)
if err != nil { if err != nil {
return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime) return false, errors.Wrapf(err, "error converting string to time.Time %q", historyTime)
} }
// Since we are chroot in rootdir, we want a relative path, i.e (path - rootdir)
relPath, err := filepath.Rel(rootdir, path)
if err != nil {
return false, errors.Wrapf(err, "error making path %q relative to %q", path, rootdir)
}
// Walk the file tree and check the time stamps. // Walk the file tree and check the time stamps.
// Since we are chroot in rootdir, only want the path of the actual filename, i.e path - rootdir. err = filepath.Walk(relPath, func(path string, info os.FileInfo, err error) error {
// +1 to account for the extra "/" (e.g rootdir=/home/user/mydir, path=/home/user/mydir/myfile.json)
err = filepath.Walk(path[len(rootdir)+1:], func(path string, info os.FileInfo, err error) error {
// If using cached images, it is possible for files that are being copied to come from // If using cached images, it is possible for files that are being copied to come from
// previous build stages. But if using cached images, then the copied file won't exist // previous build stages. But if using cached images, then the copied file won't exist
// since a container won't have been created for the previous build stage and info will be nil. // since a container won't have been created for the previous build stage and info will be nil.
@ -154,6 +154,9 @@ func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
if info.Mode()&os.ModeSymlink == os.ModeSymlink { if info.Mode()&os.ModeSymlink == os.ModeSymlink {
// Evaluate any symlink that occurs to get updated modified information // Evaluate any symlink that occurs to get updated modified information
resolvedPath, err := filepath.EvalSymlinks(path) resolvedPath, err := filepath.EvalSymlinks(path)
if err != nil && os.IsNotExist(err) {
return errors.Wrapf(errDanglingSymlink, "%q", path)
}
if err != nil { if err != nil {
return errors.Wrapf(err, "error evaluating symlink %q", path) return errors.Wrapf(err, "error evaluating symlink %q", path)
} }
@ -169,7 +172,12 @@ func modTimeIsGreater(rootdir, path string, historyTime string) (bool, error) {
} }
return nil return nil
}) })
if err != nil { if err != nil {
// if error is due to dangling symlink, ignore error and return nil
if errors.Cause(err) == errDanglingSymlink {
return false, nil
}
return false, errors.Wrapf(err, "error walking file tree %q", path) return false, errors.Wrapf(err, "error walking file tree %q", path)
} }
return timeIsGreater, err return timeIsGreater, err

View File

@ -0,0 +1,7 @@
package imagebuildah
import "errors"
var (
errDanglingSymlink = errors.New("error evaluating dangling symlink")
)

View File

@ -89,6 +89,7 @@ type FromAndBudResults struct {
DNSSearch []string DNSSearch []string
DNSServers []string DNSServers []string
DNSOptions []string DNSOptions []string
HttpProxy bool
Isolation string Isolation string
Memory string Memory string
MemorySwap string MemorySwap string
@ -182,6 +183,7 @@ func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults,
fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains") fs.StringSliceVar(&flags.DNSSearch, "dns-search", []string{}, "Set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers") fs.StringSliceVar(&flags.DNSServers, "dns", []string{}, "Set custom DNS servers")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options") fs.StringSliceVar(&flags.DNSOptions, "dns-option", []string{}, "Set custom DNS options")
fs.BoolVar(&flags.HttpProxy, "http-proxy", true, "pass thru HTTP Proxy environment variables")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.") fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)") fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap") fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")

View File

@ -86,6 +86,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
cpuPeriod, _ := c.Flags().GetUint64("cpu-period") cpuPeriod, _ := c.Flags().GetUint64("cpu-period")
cpuQuota, _ := c.Flags().GetInt64("cpu-quota") cpuQuota, _ := c.Flags().GetInt64("cpu-quota")
cpuShares, _ := c.Flags().GetUint64("cpu-shared") cpuShares, _ := c.Flags().GetUint64("cpu-shared")
httpProxy, _ := c.Flags().GetBool("http-proxy")
ulimit, _ := c.Flags().GetStringSlice("ulimit") ulimit, _ := c.Flags().GetStringSlice("ulimit")
commonOpts := &buildah.CommonBuildOptions{ commonOpts := &buildah.CommonBuildOptions{
AddHost: addHost, AddHost: addHost,
@ -98,6 +99,7 @@ func CommonBuildOptions(c *cobra.Command) (*buildah.CommonBuildOptions, error) {
DNSSearch: dnsSearch, DNSSearch: dnsSearch,
DNSServers: dnsServers, DNSServers: dnsServers,
DNSOptions: dnsOptions, DNSOptions: dnsOptions,
HTTPProxy: httpProxy,
Memory: memoryLimit, Memory: memoryLimit,
MemorySwap: memorySwap, MemorySwap: memorySwap,
ShmSize: c.Flag("shm-size").Value.String(), ShmSize: c.Flag("shm-size").Value.String(),

View File

@ -3,6 +3,7 @@
package unshare package unshare
import ( import (
"bufio"
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
@ -15,7 +16,7 @@ import (
"sync" "sync"
"syscall" "syscall"
"github.com/containers/buildah/util" "github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec" "github.com/containers/storage/pkg/reexec"
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -157,7 +158,7 @@ func (c *Cmd) Start() error {
} }
if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
uidmap, gidmap, err := util.GetHostIDMappings("") uidmap, gidmap, err := GetHostIDMappings("")
if err != nil { if err != nil {
fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
return errors.Wrapf(err, "error reading ID mappings in parent") return errors.Wrapf(err, "error reading ID mappings in parent")
@ -352,7 +353,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// Read the set of ID mappings that we're allowed to use. Each // Read the set of ID mappings that we're allowed to use. Each
// range in /etc/subuid and /etc/subgid file is a starting host // range in /etc/subuid and /etc/subgid file is a starting host
// ID and a range size. // ID and a range size.
uidmap, gidmap, err = util.GetSubIDMappings(me.Username, me.Username) uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
bailOnError(err, "error reading allowed ID mappings") bailOnError(err, "error reading allowed ID mappings")
if len(uidmap) == 0 { if len(uidmap) == 0 {
logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username) logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
@ -384,7 +385,7 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
return return
} }
// Read the set of ID mappings that we're currently using. // Read the set of ID mappings that we're currently using.
uidmap, gidmap, err = util.GetHostIDMappings("") uidmap, gidmap, err = GetHostIDMappings("")
bailOnError(err, "error reading current ID mappings") bailOnError(err, "error reading current ID mappings")
// Just reuse them. // Just reuse them.
for i := range uidmap { for i := range uidmap {
@ -404,6 +405,16 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
err = os.Setenv(UsernsEnvName, "1") err = os.Setenv(UsernsEnvName, "1")
bailOnError(err, "error setting %s=1 in environment", UsernsEnvName) bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)
// Set the default isolation type to use the "rootless" method.
if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
os.Exit(1)
}
}
}
// Reuse our stdio. // Reuse our stdio.
cmd.Stdin = os.Stdin cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
@ -446,3 +457,89 @@ func ExecRunnable(cmd Runnable) {
} }
os.Exit(0) os.Exit(0)
} }
// getHostIDMappings reads mappings from the named node under /proc.
func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
var mappings []specs.LinuxIDMapping
f, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
fields := strings.Fields(line)
if len(fields) != 3 {
return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
}
cid, err := strconv.ParseUint(fields[0], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
}
hid, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
}
size, err := strconv.ParseUint(fields[2], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
}
mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
}
return mappings, nil
}
// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
if pid == "" {
pid = "self"
}
uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
if err != nil {
return nil, nil, err
}
gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
if err != nil {
return nil, nil, err
}
return uidmap, gidmap, nil
}
// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
mappings, err := idtools.NewIDMappings(user, group)
if err != nil {
return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
}
var uidmap, gidmap []specs.LinuxIDMapping
for _, m := range mappings.UIDs() {
uidmap = append(uidmap, specs.LinuxIDMapping{
ContainerID: uint32(m.ContainerID),
HostID: uint32(m.HostID),
Size: uint32(m.Size),
})
}
for _, m := range mappings.GIDs() {
gidmap = append(gidmap, specs.LinuxIDMapping{
ContainerID: uint32(m.ContainerID),
HostID: uint32(m.HostID),
Size: uint32(m.Size),
})
}
return uidmap, gidmap, nil
}
// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
if err != nil {
return nil, nil, err
}
gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
if err != nil {
return nil, nil, err
}
return uid, gid, nil
}

View File

@ -4,6 +4,9 @@ package unshare
import ( import (
"os" "os"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/runtime-spec/specs-go"
) )
const ( const (
@ -29,3 +32,14 @@ func RootlessEnv() []string {
// MaybeReexecUsingUserNamespace re-exec the process in a new namespace // MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) { func MaybeReexecUsingUserNamespace(evenForRoot bool) {
} }
// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
return nil, nil, nil
}
// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
return nil, nil, nil
}

View File

@ -865,7 +865,7 @@ func setupNamespaces(g *generate.Generator, namespaceOptions NamespaceOptions, i
if err := g.AddOrReplaceLinuxNamespace(specs.UserNamespace, ""); err != nil { if err := g.AddOrReplaceLinuxNamespace(specs.UserNamespace, ""); err != nil {
return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace)) return false, nil, false, errors.Wrapf(err, "error adding new %q namespace for run", string(specs.UserNamespace))
} }
hostUidmap, hostGidmap, err := util.GetHostIDMappings("") hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
if err != nil { if err != nil {
return false, nil, false, err return false, nil, false, err
} }
@ -983,6 +983,24 @@ func (b *Builder) configureUIDGID(g *generate.Generator, mountPoint string, opti
func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) { func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions) {
g.ClearProcessEnv() g.ClearProcessEnv()
if b.CommonBuildOpts.HTTPProxy {
for _, envSpec := range []string{
"http_proxy",
"HTTP_PROXY",
"https_proxy",
"HTTPS_PROXY",
"ftp_proxy",
"FTP_PROXY",
"no_proxy",
"NO_PROXY",
} {
envVal := os.Getenv(envSpec)
if envVal != "" {
g.AddProcessEnv(envSpec, envVal)
}
}
}
for _, envSpec := range append(b.Env(), options.Env...) { for _, envSpec := range append(b.Env(), options.Env...) {
env := strings.SplitN(envSpec, "=", 2) env := strings.SplitN(envSpec, "=", 2)
if len(env) > 1 { if len(env) > 1 {

View File

@ -4,9 +4,12 @@ package buildah
import ( import (
"github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/generate"
selinux "github.com/opencontainers/selinux/go-selinux"
) )
func setupSelinux(g *generate.Generator, processLabel, mountLabel string) { func setupSelinux(g *generate.Generator, processLabel, mountLabel string) {
if processLabel != "" && selinux.GetEnabled() {
g.SetProcessSelinuxLabel(processLabel) g.SetProcessSelinuxLabel(processLabel)
g.SetLinuxMountLabel(mountLabel) g.SetLinuxMountLabel(mountLabel)
}
} }

View File

@ -1,13 +1,11 @@
package util package util
import ( import (
"bufio"
"fmt" "fmt"
"io" "io"
"net/url" "net/url"
"os" "os"
"path" "path"
"strconv"
"strings" "strings"
"syscall" "syscall"
@ -18,7 +16,6 @@ import (
"github.com/containers/image/transports" "github.com/containers/image/transports"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/storage" "github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/errcode"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -299,92 +296,6 @@ func GetHostRootIDs(spec *specs.Spec) (uint32, uint32, error) {
return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0) return GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, 0, 0)
} }
// getHostIDMappings reads mappings from the named node under /proc.
func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
var mappings []specs.LinuxIDMapping
f, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
fields := strings.Fields(line)
if len(fields) != 3 {
return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
}
cid, err := strconv.ParseUint(fields[0], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
}
hid, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
}
size, err := strconv.ParseUint(fields[2], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
}
mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
}
return mappings, nil
}
// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
if pid == "" {
pid = "self"
}
uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
if err != nil {
return nil, nil, err
}
gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
if err != nil {
return nil, nil, err
}
return uidmap, gidmap, nil
}
// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
mappings, err := idtools.NewIDMappings(user, group)
if err != nil {
return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
}
var uidmap, gidmap []specs.LinuxIDMapping
for _, m := range mappings.UIDs() {
uidmap = append(uidmap, specs.LinuxIDMapping{
ContainerID: uint32(m.ContainerID),
HostID: uint32(m.HostID),
Size: uint32(m.Size),
})
}
for _, m := range mappings.GIDs() {
gidmap = append(gidmap, specs.LinuxIDMapping{
ContainerID: uint32(m.ContainerID),
HostID: uint32(m.HostID),
Size: uint32(m.Size),
})
}
return uidmap, gidmap, nil
}
// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
if err != nil {
return nil, nil, err
}
gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
if err != nil {
return nil, nil, err
}
return uid, gid, nil
}
// GetPolicyContext sets up, initializes and returns a new context for the specified policy // GetPolicyContext sets up, initializes and returns a new context for the specified policy
func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) { func GetPolicyContext(ctx *types.SystemContext) (*signature.PolicyContext, error) {
policy, err := signature.DefaultPolicy(ctx) policy, err := signature.DefaultPolicy(ctx)

View File

@ -8,7 +8,7 @@ github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4 github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1 github.com/VividCortex/ewma v1.1.1
github.com/boltdb/bolt v1.3.1 github.com/boltdb/bolt v1.3.1
github.com/containers/storage v1.12.2 github.com/containers/storage v1.12.3
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83 github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1 github.com/docker/docker-credential-helpers v0.6.1