Vendor in new version of Buildah

This also pulls in the latest runc and containers/common.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Daniel J Walsh
2020-07-08 16:05:12 -04:00
parent 5c6002bf9d
commit b020d1ad13
42 changed files with 985 additions and 752 deletions


@@ -177,6 +177,8 @@ type BuildOptions struct {
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
+// Jobs is the number of stages to run in parallel. If not specified it defaults to 1.
+Jobs *int
}
// BuildDockerfiles parses a set of one or more Dockerfiles (which may be
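For callers, the new knob is the Jobs field on BuildOptions. A minimal sketch of opting in, assuming the BuildDockerfiles entry point referenced above; the context directory and job count are invented example values:

    // Allow up to four stages to build concurrently; nil keeps the
    // default of 1, and a pointer to 0 raises the cap to the number
    // of stages in the Dockerfile.
    jobs := 4
    options := imagebuildah.BuildOptions{
        ContextDirectory: "/src/app",
        Jobs:             &jobs,
    }
    imageID, ref, err := imagebuildah.BuildDockerfiles(ctx, store, options, "Dockerfile")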


@@ -9,6 +9,7 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/containers/buildah"
@@ -29,6 +30,7 @@ import (
"github.com/openshift/imagebuilder/dockerfile/parser"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/semaphore"
)
// builtinAllowedBuildArgs is list of built-in allowed build args. Normally we
@@ -102,6 +104,11 @@ type Executor struct {
maxPullPushRetries int
retryPullPushDelay time.Duration
ociDecryptConfig *encconfig.DecryptConfig
+lastError error
+terminatedStage map[string]struct{}
+stagesLock sync.Mutex
+stagesSemaphore *semaphore.Weighted
+jobs int
}
// NewExecutor creates a new instance of the imagebuilder.Executor interface.
@@ -139,7 +146,13 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
transientMounts = append([]Mount{Mount(mount)}, transientMounts...)
}
+jobs := 1
+if options.Jobs != nil {
+jobs = *options.Jobs
+}
exec := Executor{
+stages: make(map[string]*StageExecutor),
store: store,
contextDir: options.ContextDirectory,
excludes: excludes,
@@ -191,6 +204,8 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
maxPullPushRetries: options.MaxPullPushRetries,
retryPullPushDelay: options.PullPushRetryDelay,
ociDecryptConfig: options.OciDecryptConfig,
+terminatedStage: make(map[string]struct{}),
+jobs: jobs,
}
if exec.err == nil {
exec.err = os.Stderr
@@ -236,10 +251,7 @@ func NewExecutor(store storage.Store, options BuildOptions, mainNode *parser.Nod
// startStage creates a new stage executor that will be referenced whenever a
// COPY or ADD statement uses a --from=NAME flag.
-func (b *Executor) startStage(stage *imagebuilder.Stage, stages int, output string) *StageExecutor {
-if b.stages == nil {
-b.stages = make(map[string]*StageExecutor)
-}
+func (b *Executor) startStage(stage *imagebuilder.Stage, stages imagebuilder.Stages, output string) *StageExecutor {
stageExec := &StageExecutor{
executor: b,
index: stage.Position,
@@ -277,6 +289,35 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e
return imageRef, nil
}
+func (b *Executor) waitForStage(ctx context.Context, name string) error {
+stage := b.stages[name]
+if stage == nil {
+return errors.Errorf("unknown stage %q", name)
+}
+for {
+if b.lastError != nil {
+return b.lastError
+}
+if stage.stage == nil {
+return nil
+}
+b.stagesLock.Lock()
+_, terminated := b.terminatedStage[name]
+b.stagesLock.Unlock()
+if terminated {
+return nil
+}
+b.stagesSemaphore.Release(1)
+time.Sleep(time.Millisecond * 10)
+if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
+return errors.Wrapf(err, "error reacquiring job semaphore")
+}
+}
+}
// getImageHistory returns the history of imageID.
func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.History, error) {
imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
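A detail worth noting in waitForStage above: the waiting stage is itself holding one of the limited semaphore slots, so it releases that slot before sleeping and reacquires it afterwards; without this, a stage waiting on a dependency could pin the last slot and deadlock the build. The pattern reduced to a self-contained sketch (names are illustrative, not from the diff):

    package buildexample

    import (
        "context"
        "time"

        "golang.org/x/sync/semaphore"
    )

    // waitFor polls done while temporarily giving up our semaphore slot,
    // so the goroutine we are waiting on can be scheduled.
    func waitFor(ctx context.Context, sem *semaphore.Weighted, done func() bool) error {
        for !done() {
            sem.Release(1) // free our slot for the dependency
            time.Sleep(10 * time.Millisecond)
            if err := sem.Acquire(ctx, 1); err != nil {
                return err // context cancelled while waiting
            }
        }
        return nil
    }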
@@ -295,6 +336,55 @@ func (b *Executor) getImageHistory(ctx context.Context, imageID string) ([]v1.Hi
return oci.History, nil
}
+func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageExecutor, stages imagebuilder.Stages, stageIndex int) (imageID string, ref reference.Canonical, err error) {
+stage := stages[stageIndex]
+ib := stage.Builder
+node := stage.Node
+base, err := ib.From(node)
+// If this is the last stage, then the image that we produce at
+// its end should be given the desired output name.
+output := ""
+if stageIndex == len(stages)-1 {
+output = b.output
+}
+if err != nil {
+logrus.Debugf("Build(node.Children=%#v)", node.Children)
+return "", nil, err
+}
+b.stagesLock.Lock()
+stageExecutor := b.startStage(&stage, stages, output)
+b.stagesLock.Unlock()
+// If this is a single-layer build, or if it's a multi-layered
+// build and b.forceRmIntermediateCtrs is set, make sure we
+// remove the intermediate/build containers, regardless of
+// whether or not the stage's build fails.
+if b.forceRmIntermediateCtrs || !b.layers {
+b.stagesLock.Lock()
+cleanupStages[stage.Position] = stageExecutor
+b.stagesLock.Unlock()
+}
+// Build this stage.
+if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil {
+return "", nil, err
+}
+// The stage succeeded, so remove its build container if we're
+// told to delete successful intermediate/build containers for
+// multi-layered builds.
+if b.removeIntermediateCtrs {
+b.stagesLock.Lock()
+cleanupStages[stage.Position] = stageExecutor
+b.stagesLock.Unlock()
+}
+return imageID, ref, nil
+}
// Build takes care of the details of running Prepare/Execute/Commit/Delete
// over each of the one or more parsed Dockerfiles and stages.
func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
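Since buildStage above runs once per stage goroutine, every write to the shared cleanupStages map is bracketed by stagesLock: Go maps are not safe for concurrent writes, and an unguarded write from two stages would crash the build. The idiom in isolation (placeholder types, not the diff's own):

    package buildexample

    import "sync"

    type stageHandle struct{} // stands in for *StageExecutor

    var (
        mu      sync.Mutex
        cleanup = map[int]*stageHandle{}
    )

    // record is called from multiple stage goroutines; the mutex makes
    // the map write safe.
    func record(pos int, h *stageHandle) {
        mu.Lock()
        defer mu.Unlock()
        cleanup[pos] = h
    }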
@@ -314,12 +404,16 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
// Clean up any containers associated with the final container
// built by a stage, for stages that succeeded, since we no
// longer need their filesystem contents.
+b.stagesLock.Lock()
for _, stage := range cleanupStages {
if err := stage.Delete(); err != nil {
logrus.Debugf("Failed to cleanup stage containers: %v", err)
lastErr = err
}
}
+b.stagesLock.Unlock()
cleanupStages = nil
// Clean up any builders that we used to get data from images.
for _, builder := range b.containerMap {
@@ -404,61 +498,87 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
}
}
// Run through the build stages, one at a time.
-for stageIndex, stage := range stages {
-var lastErr error
+type Result struct {
+Index int
+ImageID string
+Ref reference.Canonical
+Error error
+}
-ib := stage.Builder
-node := stage.Node
-base, err := ib.From(node)
-if err != nil {
-logrus.Debugf("Build(node.Children=%#v)", node.Children)
-return "", nil, err
+ch := make(chan Result)
+jobs := int64(b.jobs)
+if jobs < 0 {
+return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer")
+} else if jobs == 0 {
+jobs = int64(len(stages))
}
+b.stagesSemaphore = semaphore.NewWeighted(jobs)
+var wg sync.WaitGroup
+wg.Add(len(stages))
+go func() {
+for stageIndex := range stages {
+index := stageIndex
+// Acquire the semaphore before creating the goroutine so we are sure they
+// run in the specified order.
+if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
+b.lastError = err
+return
+}
+go func() {
+defer b.stagesSemaphore.Release(1)
+defer wg.Done()
+imageID, ref, err = b.buildStage(ctx, cleanupStages, stages, index)
+if err != nil {
+ch <- Result{
+Index: index,
+Error: err,
+}
+return
+}
+ch <- Result{
+Index: index,
+ImageID: imageID,
+Ref: ref,
+Error: nil,
+}
+}()
+}
+}()
+go func() {
+wg.Wait()
+close(ch)
+}()
-// If this is the last stage, then the image that we produce at
-// its end should be given the desired output name.
-output := ""
-if stageIndex == len(stages)-1 {
-output = b.output
-}
+for r := range ch {
+stage := stages[r.Index]
-stageExecutor := b.startStage(&stage, len(stages), output)
+b.stagesLock.Lock()
+b.terminatedStage[stage.Name] = struct{}{}
+b.stagesLock.Unlock()
-// If this a single-layer build, or if it's a multi-layered
-// build and b.forceRmIntermediateCtrs is set, make sure we
-// remove the intermediate/build containers, regardless of
-// whether or not the stage's build fails.
-if b.forceRmIntermediateCtrs || !b.layers {
-cleanupStages[stage.Position] = stageExecutor
-}
-// Build this stage.
-if imageID, ref, err = stageExecutor.Execute(ctx, base); err != nil {
-lastErr = err
-}
-if lastErr != nil {
-return "", nil, lastErr
-}
-// The stage succeeded, so remove its build container if we're
-// told to delete successful intermediate/build containers for
-// multi-layered builds.
-if b.removeIntermediateCtrs {
-cleanupStages[stage.Position] = stageExecutor
+if r.Error != nil {
+b.lastError = r.Error
+return "", nil, r.Error
}
// If this is an intermediate stage, make a note of the ID, so
// that we can look it up later.
-if stageIndex < len(stages)-1 && imageID != "" {
-b.imageMap[stage.Name] = imageID
+if r.Index < len(stages)-1 && r.ImageID != "" {
+b.imageMap[stage.Name] = r.ImageID
// We're not populating the cache with intermediate
// images, so add this one to the list of images that
// we'll remove later.
if !b.layers {
-cleanupImages = append(cleanupImages, imageID)
+cleanupImages = append(cleanupImages, r.ImageID)
}
imageID = ""
}
+if r.Index == len(stages)-1 {
+imageID = r.ImageID
+}
}
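Two properties of this scheduler are easy to miss: the semaphore is acquired before each goroutine is spawned, so stages still start in Dockerfile order, and the Jobs setting simply sizes the semaphore. For a hypothetical five-stage Dockerfile:

    // options.Jobs == nil -> jobs = 1               (sequential, the default)
    // *options.Jobs == 2  -> jobs = 2               (at most two stages in flight)
    // *options.Jobs == 0  -> jobs = len(stages) = 5 (effectively unlimited)
    // *options.Jobs < 0   -> Build fails: "invalid value for jobs"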


@@ -46,7 +46,7 @@ import (
type StageExecutor struct {
executor *Executor
index int
-stages int
+stages imagebuilder.Stages
name string
builder *buildah.Builder
preserved int
@@ -753,16 +753,23 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
stage := s.stage
ib := stage.Builder
checkForLayers := s.executor.layers && s.executor.useCache
-moreStages := s.index < s.stages-1
+moreStages := s.index < len(s.stages)-1
lastStage := !moreStages
imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[fmt.Sprintf("%d", stage.Position)])
rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[fmt.Sprintf("%d", stage.Position)])
// If the base image's name corresponds to the result of an earlier
-// stage, substitute that image's ID for the base image's name here.
-// If not, then go on assuming that it's just a regular image that's
-// either in local storage, or one that we have to pull from a
-// registry.
+// stage, make sure that stage has finished building an image, and
+// substitute that image's ID for the base image's name here. If not,
+// then go on assuming that it's just a regular image that's either in
+// local storage, or one that we have to pull from a registry.
+for _, previousStage := range s.stages[:s.index] {
+if previousStage.Name == base {
+if err := s.executor.waitForStage(ctx, previousStage.Name); err != nil {
+return "", nil, err
+}
+}
+}
if stageImage, isPreviousStage := s.executor.imageMap[base]; isPreviousStage {
base = stageImage
}
@@ -870,6 +877,9 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
return "", nil, errors.Errorf("%s: invalid --from flag, should be --from=<name|stage>", command)
}
if otherStage, ok := s.executor.stages[arr[1]]; ok && otherStage.index < s.index {
+if err := s.executor.waitForStage(ctx, otherStage.name); err != nil {
+return "", nil, err
+}
mountPoint = otherStage.mountPoint
} else if mountPoint, err = s.getImageRootfs(ctx, arr[1]); err != nil {
return "", nil, errors.Errorf("%s --from=%s: no stage or image found with that name", command, arr[1])
@@ -1230,8 +1240,12 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
}
s.builder.SetHostname(config.Hostname)
s.builder.SetDomainname(config.Domainname)
-s.builder.SetArchitecture(s.executor.architecture)
-s.builder.SetOS(s.executor.os)
+if s.executor.architecture != "" {
+s.builder.SetArchitecture(s.executor.architecture)
+}
+if s.executor.os != "" {
+s.builder.SetOS(s.executor.os)
+}
s.builder.SetUser(config.User)
s.builder.ClearPorts()
for p := range config.ExposedPorts {
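The last hunk makes the platform override conditional: an empty architecture or OS no longer clobbers the values inherited from the base image's config at commit time. A caller that really wants to cross-build still sets both; assuming BuildOptions exposes them under the field names that back buildah's --arch/--os flags, that looks like:

    // Hypothetical cross-build target; leaving either field empty now
    // preserves the base image's platform instead of clearing it.
    options := imagebuildah.BuildOptions{
        OS:           "linux",
        Architecture: "arm64",
    }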