Merge pull request #4912 from jwhonce/wip/swagger

[CI:DOCS] Update build images
OpenShift Merge Robot committed via GitHub on 2020-01-22 12:53:57 -05:00
5 changed files with 366 additions and 116 deletions

File 1 of 5

@@ -2,6 +2,12 @@ export GO111MODULE=off
 SWAGGER_OUT ?= swagger.yaml

-swagger:
+.PHONY: ${SWAGGER_OUT}
+${SWAGGER_OUT}:
+	# generate doesn't remove file on error
+	rm -f ${SWAGGER_OUT}
 	swagger generate spec -o ${SWAGGER_OUT} -w ./
+	cat tags.yaml >> swagger.yaml
+
+# TODO: when pass validation move it under swagger.
+validate:
+	swagger validate ${SWAGGER_OUT}

File 2 of 5

@@ -36,11 +36,11 @@ func getRuntime(r *http.Request) *libpod.Runtime {
 	return r.Context().Value("runtime").(*libpod.Runtime)
 }

-func getHeader(r *http.Request, k string) string {
-	return r.Header.Get(k)
-}
-
-func hasHeader(r *http.Request, k string) bool {
-	_, found := r.Header[k]
-	return found
-}
+// func getHeader(r *http.Request, k string) string {
+// 	return r.Header.Get(k)
+// }
+//
+// func hasHeader(r *http.Request, k string) bool {
+// 	_, found := r.Header[k]
+// 	return found
+// }
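With getHeader and hasHeader commented out, BuildImage (next file) reads request headers straight off the http.Header map, checking presence and length in one step. A minimal sketch of that pattern; the headerValue helper and server address are illustrative assumptions, and only the X-Registry-Config header name comes from this commit:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// headerValue is an illustrative helper mirroring the direct map access used in
// BuildImage: it returns the first value of a header and whether it was set at all.
func headerValue(r *http.Request, key string) (string, bool) {
	// Direct indexing of r.Header requires the canonical form of the key.
	if hdr, found := r.Header[http.CanonicalHeaderKey(key)]; found && len(hdr) > 0 {
		return hdr[0], true
	}
	return "", false
}

func handler(w http.ResponseWriter, r *http.Request) {
	if v, ok := headerValue(r, "X-Registry-Config"); ok {
		fmt.Fprintf(w, "X-Registry-Config present (%d bytes)\n", len(v))
		return
	}
	fmt.Fprintln(w, "X-Registry-Config not set")
}

func main() {
	http.HandleFunc("/build", handler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}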

File 3 of 5

@@ -1,6 +1,7 @@
 package handlers

 import (
+	"bytes"
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
@@ -9,58 +10,66 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"strconv"
 	"strings"

 	"github.com/containers/buildah"
 	"github.com/containers/buildah/imagebuildah"
 	"github.com/containers/libpod/pkg/api/handlers/utils"
 	"github.com/containers/storage/pkg/archive"
-	log "github.com/sirupsen/logrus"
+	"github.com/gorilla/mux"
 )

 func BuildImage(w http.ResponseWriter, r *http.Request) {
 	authConfigs := map[string]AuthConfig{}
-	if hasHeader(r, "X-Registry-Config") {
-		registryHeader := getHeader(r, "X-Registry-Config")
-		authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(registryHeader))
+	if hdr, found := r.Header["X-Registry-Config"]; found && len(hdr) > 0 {
+		authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(hdr[0]))
 		if json.NewDecoder(authConfigsJSON).Decode(&authConfigs) != nil {
-			utils.BadRequest(w, "X-Registry-Config", registryHeader, json.NewDecoder(authConfigsJSON).Decode(&authConfigs))
+			utils.BadRequest(w, "X-Registry-Config", hdr[0], json.NewDecoder(authConfigsJSON).Decode(&authConfigs))
 			return
 		}
 	}

+	if hdr, found := r.Header["Content-Type"]; found && len(hdr) > 0 {
+		if hdr[0] != "application/x-tar" {
+			utils.BadRequest(w, "Content-Type", hdr[0],
+				fmt.Errorf("Content-Type: %s is not supported. Should be \"application/x-tar\"", hdr[0]))
+		}
+	}
+
 	anchorDir, err := extractTarFile(r, w)
 	if err != nil {
 		utils.InternalServerError(w, err)
 		return
 	}
-	// defer os.RemoveAll(anchorDir)
+	defer os.RemoveAll(anchorDir)

 	query := struct {
-		Dockerfile string `json:"dockerfile"`
-		Tag string `json:"t"`
-		ExtraHosts string `json:"extrahosts"`
-		Remote string `json:"remote"`
-		Quiet bool `json:"q"`
-		NoCache bool `json:"nocache"`
-		CacheFrom string `json:"cachefrom"`
-		Pull string `json:"pull"`
-		Rm bool `json:"rm"`
-		ForceRm bool `json:"forcerm"`
-		Memory int `json:"memory"`
-		MemSwap int `json:"memswap"`
-		CpuShares int `json:"cpushares"`
-		CpuSetCpus string `json:"cpusetcpus"`
-		CpuPeriod int `json:"cpuperiod"`
-		CpuQuota int `json:"cpuquota"`
-		BuildArgs string `json:"buildargs"`
-		ShmSize int `json:"shmsize"`
-		Squash bool `json:"squash"`
-		Labels string `json:"labels"`
-		NetworkMode string `json:"networkmode"`
-		Platform string `json:"platform"`
-		Target string `json:"target"`
-		Outputs string `json:"outputs"`
+		Dockerfile string `schema:"dockerfile"`
+		Tag string `schema:"t"`
+		ExtraHosts string `schema:"extrahosts"`
+		Remote string `schema:"remote"`
+		Quiet bool `schema:"q"`
+		NoCache bool `schema:"nocache"`
+		CacheFrom string `schema:"cachefrom"`
+		Pull bool `schema:"pull"`
+		Rm bool `schema:"rm"`
+		ForceRm bool `schema:"forcerm"`
+		Memory int64 `schema:"memory"`
+		MemSwap int64 `schema:"memswap"`
+		CpuShares uint64 `schema:"cpushares"`
+		CpuSetCpus string `schema:"cpusetcpus"`
+		CpuPeriod uint64 `schema:"cpuperiod"`
+		CpuQuota int64 `schema:"cpuquota"`
+		BuildArgs string `schema:"buildargs"`
+		ShmSize int `schema:"shmsize"`
+		Squash bool `schema:"squash"`
+		Labels string `schema:"labels"`
+		NetworkMode string `schema:"networkmode"`
+		Platform string `schema:"platform"`
+		Target string `schema:"target"`
+		Outputs string `schema:"outputs"`
+		Registry string `schema:"registry"`
 	}{
 		Dockerfile: "Dockerfile",
 		Tag: "",
@@ -69,7 +78,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
 		Quiet: false,
 		NoCache: false,
 		CacheFrom: "",
-		Pull: "",
+		Pull: false,
 		Rm: true,
 		ForceRm: false,
 		Memory: 0,
@@ -86,6 +95,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
 		Platform: "",
 		Target: "",
 		Outputs: "",
+		Registry: "docker.io",
 	}

 	if err := decodeQuery(r, &query); err != nil {
@@ -93,80 +103,121 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	// Tag is the name with optional tag...
-	var name = query.Tag
-	var tag string
+	var (
+		// Tag is the name with optional tag...
+		name = query.Tag
+		tag  = "latest"
+	)
 	if strings.Contains(query.Tag, ":") {
 		tokens := strings.SplitN(query.Tag, ":", 2)
 		name = tokens[0]
 		tag = tokens[1]
 	}
+
+	if t, found := mux.Vars(r)["target"]; found {
+		name = t
+	}

 	var buildArgs = map[string]string{}
-	if found := hasVar(r, "buildargs"); found {
-		if err := json.Unmarshal([]byte(query.BuildArgs), &buildArgs); err != nil {
-			utils.BadRequest(w, "buildargs", query.BuildArgs, err)
+	if a, found := mux.Vars(r)["buildargs"]; found {
+		if err := json.Unmarshal([]byte(a), &buildArgs); err != nil {
+			utils.BadRequest(w, "buildargs", a, err)
 			return
 		}
 	}

 	// convert label formats
 	var labels = []string{}
-	if hasVar(r, "labels") {
+	if l, found := mux.Vars(r)["labels"]; found {
 		var m = map[string]string{}
-		if err := json.Unmarshal([]byte(query.Labels), &m); err != nil {
-			utils.BadRequest(w, "labels", query.Labels, err)
+		if err := json.Unmarshal([]byte(l), &m); err != nil {
+			utils.BadRequest(w, "labels", l, err)
 			return
 		}
 		for k, v := range m {
-			labels = append(labels, fmt.Sprintf("%s=%v", k, v))
+			labels = append(labels, k+"="+v)
 		}
 	}
+
+	pullPolicy := buildah.PullIfMissing
+	if _, found := mux.Vars(r)["pull"]; found {
+		if query.Pull {
+			pullPolicy = buildah.PullAlways
+		}
+	}
+
+	// build events will be recorded here
+	var (
+		buildEvents = []string{}
+		progress    = bytes.Buffer{}
+	)

 	buildOptions := imagebuildah.BuildOptions{
 		ContextDirectory: filepath.Join(anchorDir, "build"),
-		PullPolicy: 0,
-		Registry: "",
-		IgnoreUnrecognizedInstructions: false,
+		PullPolicy: pullPolicy,
+		Registry: query.Registry,
+		IgnoreUnrecognizedInstructions: true,
 		Quiet: query.Quiet,
-		Isolation: 0,
+		Isolation: buildah.IsolationChroot,
 		Runtime: "",
 		RuntimeArgs: nil,
 		TransientMounts: nil,
-		Compression: 0,
+		Compression: archive.Gzip,
 		Args: buildArgs,
 		Output: name,
 		AdditionalTags: []string{tag},
-		Log: nil,
-		In: nil,
-		Out: nil,
-		Err: nil,
-		SignaturePolicyPath: "",
-		ReportWriter: nil,
-		OutputFormat: "",
-		SystemContext: nil,
-		NamespaceOptions: nil,
-		ConfigureNetwork: 0,
-		CNIPluginPath: "",
-		CNIConfigDir: "",
-		IDMappingOptions: nil,
-		AddCapabilities: nil,
-		DropCapabilities: nil,
-		CommonBuildOpts: &buildah.CommonBuildOptions{},
-		DefaultMountsFilePath: "",
-		IIDFile: "",
-		Squash: query.Squash,
-		Labels: labels,
-		Annotations: nil,
-		OnBuild: nil,
-		Layers: false,
-		NoCache: query.NoCache,
-		RemoveIntermediateCtrs: query.Rm,
-		ForceRmIntermediateCtrs: query.ForceRm,
-		BlobDirectory: "",
-		Target: query.Target,
-		Devices: nil,
+		Log: func(format string, args ...interface{}) {
+			buildEvents = append(buildEvents, fmt.Sprintf(format, args...))
+		},
+		In: nil,
+		Out: &progress,
+		Err: &progress,
+		SignaturePolicyPath: "",
+		ReportWriter: &progress,
+		OutputFormat: buildah.Dockerv2ImageManifest,
+		SystemContext: nil,
+		NamespaceOptions: nil,
+		ConfigureNetwork: 0,
+		CNIPluginPath: "",
+		CNIConfigDir: "",
+		IDMappingOptions: nil,
+		AddCapabilities: nil,
+		DropCapabilities: nil,
+		CommonBuildOpts: &buildah.CommonBuildOptions{
+			AddHost: nil,
+			CgroupParent: "",
+			CPUPeriod: query.CpuPeriod,
+			CPUQuota: query.CpuQuota,
+			CPUShares: query.CpuShares,
+			CPUSetCPUs: query.CpuSetCpus,
+			CPUSetMems: "",
+			HTTPProxy: false,
+			Memory: query.Memory,
+			DNSSearch: nil,
+			DNSServers: nil,
+			DNSOptions: nil,
+			MemorySwap: query.MemSwap,
+			LabelOpts: nil,
+			SeccompProfilePath: "",
+			ApparmorProfile: "",
+			ShmSize: strconv.Itoa(query.ShmSize),
+			Ulimit: nil,
+			Volumes: nil,
+		},
+		DefaultMountsFilePath: "",
+		IIDFile: "",
+		Squash: query.Squash,
+		Labels: labels,
+		Annotations: nil,
+		OnBuild: nil,
+		Layers: false,
+		NoCache: query.NoCache,
+		RemoveIntermediateCtrs: query.Rm,
+		ForceRmIntermediateCtrs: query.ForceRm,
+		BlobDirectory: "",
+		Target: query.Target,
+		Devices: nil,
 	}

 	id, _, err := getRuntime(r).Build(r.Context(), buildOptions, query.Dockerfile)
@@ -179,17 +230,13 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
 		struct {
 			Stream string `json:"stream"`
 		}{
-			Stream: fmt.Sprintf("Successfully built %s\n", id),
+			Stream: progress.String() + "\n" +
+				strings.Join(buildEvents, "\n") +
+				fmt.Sprintf("\nSuccessfully built %s\n", id),
 		})
 }

 func extractTarFile(r *http.Request, w http.ResponseWriter) (string, error) {
-	var (
-		// length int64
-		// n      int64
-		copyErr error
-	)
 	// build a home for the request body
 	anchorDir, err := ioutil.TempDir("", "libpod_builder")
 	if err != nil {
@@ -204,26 +251,14 @@ func extractTarFile(r *http.Request, w http.ResponseWriter) (string, error) {
 	}
 	defer tarBall.Close()

-	// if hasHeader(r, "Content-Length") {
-	// 	length, err := strconv.ParseInt(getHeader(r, "Content-Length"), 10, 64)
-	// 	if err != nil {
-	// 		return "", errors.New(fmt.Sprintf("Failed request: unable to parse Content-Length of '%s'", getHeader(r, "Content-Length")))
-	// 	}
-	// 	n, copyErr = io.CopyN(tarBall, r.Body, length+1)
-	// } else {
-	_, copyErr = io.Copy(tarBall, r.Body)
-	// }
+	// Content-Length not used as too many existing API clients didn't honor it
+	_, err = io.Copy(tarBall, r.Body)
 	r.Body.Close()
-	if copyErr != nil {
+	if err != nil {
 		utils.InternalServerError(w,
 			fmt.Errorf("failed Request: Unable to copy tar file from request body %s", r.RequestURI))
 	}
-	log.Debugf("Content-Length: %s", getVar(r, "Content-Length"))
-	// if hasHeader(r, "Content-Length") && n != length {
-	// 	return "", errors.New(fmt.Sprintf("Failed request: Given Content-Length does not match file size %d != %d", n, length))
-	// }

 	_, _ = tarBall.Seek(0, 0)
 	if err := archive.Untar(tarBall, buildDir, &archive.TarOptions{}); err != nil {
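extractTarFile expects the request body to be a tar archive of the build context (Content-Length is deliberately ignored) and unpacks it under a temporary anchor directory before the build runs, so a client has to tar up its context before calling the endpoint. A minimal client-side sketch under that assumption; the ./build directory, the context.tar output name, and the tarDirectory helper are illustrative, not part of this commit:

package main

import (
	"archive/tar"
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// tarDirectory packs the regular files under dir into an in-memory tar archive,
// the format the /build handler expects as its request body.
func tarDirectory(dir string) (*bytes.Buffer, error) {
	buf := &bytes.Buffer{}
	tw := tar.NewWriter(buf)
	walk := func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		// Store paths relative to the context root.
		hdr.Name = rel
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	}
	if err := filepath.Walk(dir, walk); err != nil {
		return nil, err
	}
	return buf, tw.Close()
}

func main() {
	// "./build" is a placeholder directory containing a Dockerfile.
	ctx, err := tarDirectory("./build")
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("context.tar", ctx.Bytes(), 0600); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote context.tar (%d bytes)", ctx.Len())
}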

File 4 of 5

@@ -342,6 +342,210 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
	// $ref: '#/responses/InternalError'
	r.Handle(VersionedPath("/commit"), APIHandler(s.Context, generic.CommitContainer)).Methods(http.MethodPost)
// swagger:operation POST /build images buildImage
// ---
// tags:
// - images
// summary: Create image
// description: Build an image from the given Dockerfile(s)
// parameters:
// - in: query
// name: dockerfile
// type: string
// default: Dockerfile
// description: |
// Path within the build context to the `Dockerfile`.
// This is ignored if remote is specified and points to an external `Dockerfile`.
// - in: query
// name: t
// type: string
// default: latest
// description: A name and optional tag to apply to the image in the `name:tag` format.
// - in: query
// name: extrahosts
// type: string
// default:
// description: |
// TBD Extra hosts to add to /etc/hosts
// (As of version 1.xx)
// - in: query
// name: remote
// type: string
// default:
// description: |
// A Git repository URI or HTTP/HTTPS context URI.
// If the URI points to a single text file, the file's contents are placed
// into a file called Dockerfile and the image is built from that file. If
// the URI points to a tarball, the file is downloaded by the daemon and the
// contents therein used as the context for the build. If the URI points to a
// tarball and the dockerfile parameter is also specified, there must be a file
// with the corresponding path inside the tarball.
// (As of version 1.xx)
// - in: query
// name: q
// type: boolean
// default: false
// description: |
// Suppress verbose build output
// - in: query
// name: nocache
// type: boolean
// default: false
// description: |
// Do not use the cache when building the image
// (As of version 1.xx)
// - in: query
// name: cachefrom
// type: string
// default:
// description: |
// JSON array of images used for build cache resolution
// (As of version 1.xx)
// - in: query
// name: pull
// type: boolean
// default: false
// description: |
// Attempt to pull the image even if an older image exists locally
// (As of version 1.xx)
// - in: query
// name: rm
// type: boolean
// default: true
// description: |
// Remove intermediate containers after a successful build
// (As of version 1.xx)
// - in: query
// name: forcerm
// type: boolean
// default: false
// description: |
// Always remove intermediate containers, even upon failure
// (As of version 1.xx)
// - in: query
// name: memory
// type: integer
// description: |
// Memory is the upper limit (in bytes) on how much memory running containers can use
// (As of version 1.xx)
// - in: query
// name: memswap
// type: integer
// description: |
// MemorySwap limits the amount of memory and swap together
// (As of version 1.xx)
// - in: query
// name: cpushares
// type: integer
// description: |
// CPUShares (relative weight)
// (As of version 1.xx)
// - in: query
// name: cpusetcpus
// type: string
// description: |
// CPUSetCPUs in which to allow execution (0-3, 0,1)
// (As of version 1.xx)
// - in: query
// name: cpuperiod
// type: integer
// description: |
// CPUPeriod limits the CPU CFS (Completely Fair Scheduler) period
// (As of version 1.xx)
// - in: query
// name: cpuquota
// type: integer
// description: |
// CPUQuota limits the CPU CFS (Completely Fair Scheduler) quota
// (As of version 1.xx)
// - in: query
// name: buildargs
// type: string
// default:
// description: |
// JSON map of string pairs denoting build-time variables.
// For example, the build argument `Foo` with the value of `bar` would be encoded in JSON as `{"Foo":"bar"}`.
//
// For example, buildargs={"Foo":"bar"}.
//
// Note(s):
// * This should not be used to pass secrets.
// * The value of buildargs should be URI component encoded before being passed to the API.
//
// (As of version 1.xx)
// - in: query
// name: shmsize
// type: integer
// default: 67108864
// description: |
// ShmSize is the "size" value to use when mounting an shmfs on the container's /dev/shm directory.
// Default is 64MB
// (As of version 1.xx)
// - in: query
// name: squash
// type: boolean
// default: false
// description: |
// Silently ignored.
// Squash the resulting image's layers into a single layer
// (As of version 1.xx)
// - in: query
// name: labels
// type: string
// default:
// description: |
// JSON map of key, value pairs to set as labels on the new image
// (As of version 1.xx)
// - in: query
// name: networkmode
// type: string
// default: bridge
// description: |
// Sets the networking mode for the run commands during build.
// Supported standard values are:
// * `bridge` limited to containers within a single host, port mapping required for external access
// * `host` no isolation between host and containers on this network
// * `none` disable all networking for this container
// * `container:<nameOrID>` share networking with the given container
// All other values are assumed to be a custom network's name
// (As of version 1.xx)
// - in: query
// name: platform
// type: string
// default:
// description: |
// Platform format os[/arch[/variant]]
// (As of version 1.xx)
// - in: query
// name: target
// type: string
// default:
// description: |
// Target build stage
// (As of version 1.xx)
// - in: query
// name: outputs
// type: string
// default:
// description: |
// output configuration TBD
// (As of version 1.xx)
// produces:
// - application/json
// responses:
// 200:
// description: OK (As of version 1.xx)
// schema:
// type: object
// required:
// - stream
// properties:
// stream:
// type: string
// example: |
// (build details...)
// Successfully built 8ba084515c724cbf90d447a63600c0a6
r.Handle(VersionedPath("/build"), APIHandler(s.Context, handlers.BuildImage)).Methods(http.MethodPost)
	/*
		libpod endpoints
	*/
@@ -603,6 +807,5 @@ func (s *APIServer) registerImagesHandlers(r *mux.Router) error {
 	// $ref: '#/responses/InternalError'
 	r.Handle(VersionedPath("/libpod/images/{name:..*}/tag"), APIHandler(s.Context, handlers.TagImage)).Methods(http.MethodPost)
-	r.Handle(VersionedPath("/build"), APIHandler(s.Context, handlers.BuildImage)).Methods(http.MethodPost)

 	return nil
 }
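The swagger operation above spells out how /build is driven: query parameters such as t, dockerfile, buildargs, and labels, an application/x-tar request body, an optional X-Registry-Config header carrying base64 (URL encoding) of a JSON auth map, and a JSON response with a single stream field. The sketch below exercises those pieces from a compat client; the address, API version prefix, image name, credentials, and the context.tar file (for example, produced by the earlier tar sketch) are placeholder assumptions, not values from this commit:

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Build context prepared ahead of time (see the tar sketch above).
	contextTar, err := ioutil.ReadFile("context.tar")
	if err != nil {
		log.Fatal(err)
	}

	// Query parameters mirror the swagger operation; url.Values handles the
	// URI component encoding required for the JSON-valued parameters.
	params := url.Values{}
	params.Set("t", "example.com/myimage:latest")
	params.Set("dockerfile", "Dockerfile")
	params.Set("buildargs", `{"Foo":"bar"}`)
	params.Set("labels", `{"maintainer":"me@example.com"}`)

	// X-Registry-Config is base64 (URL encoding) of a JSON map of registry
	// auth configs, matching the decode at the top of BuildImage.
	authJSON, _ := json.Marshal(map[string]map[string]string{
		"docker.io": {"username": "user", "password": "secret"},
	})
	registryConfig := base64.URLEncoding.EncodeToString(authJSON)

	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8080/v1.24/build?"+params.Encode(),
		bytes.NewReader(contextTar))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/x-tar")
	req.Header.Set("X-Registry-Config", registryConfig)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The handler answers with a single JSON object whose stream field holds
	// the build output and the trailing "Successfully built <id>" line.
	var out struct {
		Stream string `json:"stream"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Print(out.Stream)
}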

File 5 of 5

@@ -1,6 +1,6 @@
-// Package serviceapi Provides a Container compatible interface (EXPERIMENTAL)
+// Package api provides a container compatible interface.
 //
-// This documentation describes the HTTP LibPod interface. It is to be consider
-// only as experimental as this point. The endpoints, parameters, inputs, and
+// This documentation describes the HTTP Libpod interface. It is to be considered
+// experimental at this point. The endpoints, parameters, inputs, and
 // return values can all change.
 //
@@ -25,12 +25,18 @@
 // - text/html
 //
 // tags:
-// - name: "Containers"
-//   description: manage containers
-// - name: "Images"
-//   description: manage images
-// - name: "System"
-//   description: manage system resources
+// - name: containers
+//   description: Actions related to containers
+// - name: images
+//   description: Actions related to images
+// - name: pods
+//   description: Actions related to pods
+// - name: volumes
+//   description: Actions related to volumes
+// - name: containers (compat)
+//   description: Actions related to containers for the compatibility endpoints
+// - name: images (compat)
+//   description: Actions related to images for the compatibility endpoints
 //
 // swagger:meta
 package server