Merge pull request #6634 from baude/v2buildfixes

fix misc remote build issues
This commit is contained in:
OpenShift Merge Robot
2020-06-17 15:36:02 -04:00
committed by GitHub
4 changed files with 127 additions and 46 deletions

View File

@ -49,7 +49,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
query := struct { query := struct {
Dockerfile string `schema:"dockerfile"` Dockerfile string `schema:"dockerfile"`
Tag string `schema:"t"` Tag []string `schema:"t"`
ExtraHosts string `schema:"extrahosts"` ExtraHosts string `schema:"extrahosts"`
Remote string `schema:"remote"` Remote string `schema:"remote"`
Quiet bool `schema:"q"` Quiet bool `schema:"q"`
@ -75,7 +75,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Registry string `schema:"registry"` Registry string `schema:"registry"`
}{ }{
Dockerfile: "Dockerfile", Dockerfile: "Dockerfile",
Tag: "", Tag: []string{},
ExtraHosts: "", ExtraHosts: "",
Remote: "", Remote: "",
Quiet: false, Quiet: false,
@ -107,20 +107,19 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
} }
var ( var (
// Tag is the name with optional tag... output string
name = query.Tag additionalNames []string
tag = "latest"
) )
if strings.Contains(query.Tag, ":") { if len(query.Tag) > 0 {
tokens := strings.SplitN(query.Tag, ":", 2) output = query.Tag[0]
name = tokens[0] }
tag = tokens[1] if len(query.Tag) > 1 {
additionalNames = query.Tag[1:]
} }
if _, found := r.URL.Query()["target"]; found { if _, found := r.URL.Query()["target"]; found {
name = query.Target output = query.Target
} }
var buildArgs = map[string]string{} var buildArgs = map[string]string{}
if _, found := r.URL.Query()["buildargs"]; found { if _, found := r.URL.Query()["buildargs"]; found {
if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil { if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil {
@ -168,8 +167,8 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
TransientMounts: nil, TransientMounts: nil,
Compression: archive.Gzip, Compression: archive.Gzip,
Args: buildArgs, Args: buildArgs,
Output: name, Output: output,
AdditionalTags: []string{tag}, AdditionalTags: additionalNames,
Log: func(format string, args ...interface{}) { Log: func(format string, args ...interface{}) {
buildEvents = append(buildEvents, fmt.Sprintf(format, args...)) buildEvents = append(buildEvents, fmt.Sprintf(format, args...))
}, },

View File

@ -410,7 +410,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// swagger:operation POST /build compat buildImage // swagger:operation POST /build compat buildImage
// --- // ---
// tags: // tags:
// - images // - images (compat)
// summary: Create image // summary: Create image
// description: Build an image from the given Dockerfile(s) // description: Build an image from the given Dockerfile(s)
// parameters: // parameters:
@ -425,7 +425,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// name: t // name: t
// type: string // type: string
// default: latest // default: latest
// description: A name and optional tag to apply to the image in the `name:tag` format. // description: A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default latest value is assumed. You can provide several t parameters.
// - in: query // - in: query
// name: extrahosts // name: extrahosts
// type: string // type: string
@ -1211,7 +1211,7 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
// name: t // name: t
// type: string // type: string
// default: latest // default: latest
// description: A name and optional tag to apply to the image in the `name:tag` format. // description: A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default latest value is assumed. You can provide several t parameters.
// - in: query // - in: query
// name: extrahosts // name: extrahosts
// type: string // type: string

View File

@ -229,6 +229,9 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
if t := options.Output; len(t) > 0 { if t := options.Output; len(t) > 0 {
params.Set("t", t) params.Set("t", t)
} }
for _, tag := range options.AdditionalTags {
params.Add("t", tag)
}
// TODO Remote, Quiet // TODO Remote, Quiet
if options.NoCache { if options.NoCache {
params.Set("nocache", "1") params.Set("nocache", "1")

View File

@ -1,10 +1,13 @@
package tunnel package tunnel
import ( import (
"archive/tar"
"bytes"
"context" "context"
"io"
"io/ioutil" "io/ioutil"
"os" "os"
"path" "path/filepath"
"strings" "strings"
"github.com/containers/common/pkg/config" "github.com/containers/common/pkg/config"
@ -16,6 +19,7 @@ import (
utils2 "github.com/containers/libpod/utils" utils2 "github.com/containers/libpod/utils"
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/archive"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus"
) )
func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) { func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) {
@ -276,14 +280,27 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) {
} }
func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) { func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) {
if len(containerFiles) > 1 { var tarReader io.Reader
return nil, errors.New("something") tarfile, err := archive.Tar(opts.ContextDirectory, 0)
}
tarfile, err := archive.Tar(path.Base(containerFiles[0]), 0)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return images.Build(ir.ClientCxt, containerFiles, opts, tarfile) tarReader = tarfile
cwd, err := os.Getwd()
if err != nil {
return nil, err
}
if cwd != opts.ContextDirectory {
fn := func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error) {
h.Name = filepath.Join(filepath.Base(opts.ContextDirectory), h.Name)
return nil, false, false, nil
}
tarReader, err = transformArchive(tarfile, false, fn)
if err != nil {
return nil, err
}
}
return images.Build(ir.ClientCxt, containerFiles, opts, tarReader)
} }
func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) { func (ir *ImageEngine) Tree(ctx context.Context, nameOrID string, opts entities.ImageTreeOptions) (*entities.ImageTreeReport, error) {
@ -297,3 +314,65 @@ func (ir *ImageEngine) Shutdown(_ context.Context) {
func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) { func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
return nil, errors.New("not implemented yet") return nil, errors.New("not implemented yet")
} }
// Sourced from openshift image builder

// TransformFileFunc is given a chance to transform an arbitrary input file.
// It may return replacement data (with update=true), request the entry be
// dropped (skip=true), or abort the walk with a non-nil err.
type TransformFileFunc func(h *tar.Header, r io.Reader) (data []byte, update bool, skip bool, err error)

// filterArchive transforms the provided input archive to a new archive,
// giving the fn a chance to transform arbitrary files. It returns the
// first error from fn, the tar reader, or the tar writer; on clean EOF it
// closes (flushes) the output writer.
func filterArchive(r io.Reader, w io.Writer, fn TransformFileFunc) error {
	tr := tar.NewReader(r)
	tw := tar.NewWriter(w)

	for {
		h, err := tr.Next()
		if err == io.EOF {
			// Close flushes the tar trailer; its error must be reported.
			return tw.Close()
		}
		if err != nil {
			return err
		}

		name := h.Name
		data, ok, skip, err := fn(h, tr)
		logrus.Debugf("Transform %q -> %q: data=%t ok=%t skip=%t err=%v", name, h.Name, data != nil, ok, skip, err)
		if err != nil {
			return err
		}
		if skip {
			continue
		}

		// body must be reset to the tar reader for EVERY entry. The
		// original declared it once before the loop, so an entry that
		// followed an updated one copied the stale (already drained)
		// bytes.Buffer instead of the entry's real content, producing
		// silently truncated files in the output archive.
		var body io.Reader = tr
		if ok {
			h.Size = int64(len(data))
			body = bytes.NewBuffer(data)
		}
		if err := tw.WriteHeader(h); err != nil {
			return err
		}
		if _, err := io.Copy(tw, body); err != nil {
			return err
		}
	}
}
// transformArchive returns a reader that yields the archive read from r
// after each entry has been passed through fn. When compressed is true the
// input is decompressed first. The rewrite happens in a background
// goroutine; any error (decompression or filtering) is delivered to the
// consumer through the pipe — reads from the returned io.Reader will fail
// with that error. The returned error is always nil.
func transformArchive(r io.Reader, compressed bool, fn TransformFileFunc) (io.Reader, error) {
	pr, pw := io.Pipe()
	go func() {
		if compressed {
			in, err := archive.DecompressStream(r)
			if err != nil {
				// Surface the error to the pipe's reader; nothing
				// more to do in this goroutine.
				pw.CloseWithError(err)
				return
			}
			r = in
		}
		// CloseWithError(nil) behaves like Close, so this handles both
		// success and failure. The original stored the result in a shared
		// variable (cwe) that the caller read without synchronization — a
		// data race — and since the caller usually returned before this
		// goroutine ran, that value was meaningless anyway.
		pw.CloseWithError(filterArchive(r, pw, fn))
	}()
	return pr, nil
}