Update vendor of Buildah and imagebuilder
Fixes the testing issues we are hitting.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
vendor/github.com/openshift/imagebuilder/README.md (generated, vendored): 19 lines changed
@@ -1,4 +1,4 @@
-Docker / OCI Image Builder
+OCI Image Builder
 ==========================

 [](https://goreportcard.com/report/github.com/openshift/imagebuilder)
@@ -6,22 +6,22 @@ Docker / OCI Image Builder
 [](https://travis-ci.org/openshift/imagebuilder)
 [](http://webchat.freenode.net/?channels=%23openshift-dev)

-Note: this library is beta and may contain bugs that prevent images from being identical to Docker build. Test your images (and add to our conformance suite)!
+Please test your images (and add to our conformance suite)!

-This library supports using the Dockerfile syntax to build Docker
-compatible images, without invoking Docker build. It is intended to give
-clients more control over how a Docker build is run, including:
+This library supports using the Dockerfile syntax to build OCI & Docker
+compatible images, without invoking a container build command such as `buildah bud` or `docker build`. It is intended to give
+clients more control over how they build container images, including:

 * Instead of building one layer per line, run all instructions in the
   same container
-* Set Docker HostConfig settings like network and memory controls that
-  are not available when running Docker builds
+* Set HostConfig settings like network and memory controls that
+  are not available when running container builds
 * Mount external files into the build that are not persisted as part of
   the final image (i.e. "secrets")
 * If there are no RUN commands in the Dockerfile, the container is created
   and committed, but never started.

-The final image should be 99.9% compatible with regular docker builds,
+The final image should be 99.9% compatible with regular container builds,
 but bugs are always possible.

 Future goals include:
@@ -54,9 +54,6 @@ $ imagebuilder --mount ~/secrets/private.key:/etc/keys/private.key path/to/my/co

 Any processes in the Dockerfile will have access to `/etc/keys/private.key`, but that file will not be part of the committed image.

-Running `--mount` requires Docker 1.10 or newer, as it uses a Docker volume to hold the mounted files and the volume API was not
-available in earlier versions.
-
 You can also customize which Dockerfile is run, or run multiple Dockerfiles in sequence (the FROM is ignored on
 later files):

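The rewritten README describes building images from Dockerfile syntax without invoking `docker build` or `buildah bud`. As a rough, non-authoritative sketch of what driving the library from Go might look like, the snippet below parses a Dockerfile and dumps its instruction tree; it assumes `imagebuilder.ParseDockerfile` takes an `io.Reader` and returns the parsed `*parser.Node` plus an error, a signature that is not shown in this diff.

package main

import (
	"fmt"
	"os"

	"github.com/openshift/imagebuilder"
)

func main() {
	f, err := os.Open("Dockerfile")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Assumed signature: ParseDockerfile(io.Reader) (*parser.Node, error).
	node, err := imagebuilder.ParseDockerfile(f)
	if err != nil {
		panic(err)
	}

	// Dump renders the parsed instructions as s-expressions (see the
	// parser package vendored later in this commit).
	fmt.Println(node.Dump())
}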
vendor/github.com/openshift/imagebuilder/builder.go (generated, vendored): 4 lines changed
@@ -13,8 +13,8 @@ import (

 	docker "github.com/fsouza/go-dockerclient"

-	"github.com/docker/docker/builder/dockerfile/command"
-	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/openshift/imagebuilder/dockerfile/command"
+	"github.com/openshift/imagebuilder/dockerfile/parser"
 )

 // Copy defines a copy operation required on the container.
vendor/github.com/openshift/imagebuilder/dockerfile/NOTICE (generated, vendored, new file): 26 lines added
Source files in this directory and all sub-directories have been
copied from github.com/docker/docker/builder/dockerfile and are
Licensed under the Apache License Version 2.0.

Note that the fork of github.com/docker/docker used commit
b68221c37ee597950364788204546f9c9d0e46a1.

Docker
Copyright 2012-2017 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
vendor/github.com/openshift/imagebuilder/dockerfile/command/command.go (generated, vendored, new file): 46 lines added
// Package command contains the set of Dockerfile commands.
package command

// Define constants for the command strings
const (
	Add         = "add"
	Arg         = "arg"
	Cmd         = "cmd"
	Copy        = "copy"
	Entrypoint  = "entrypoint"
	Env         = "env"
	Expose      = "expose"
	From        = "from"
	Healthcheck = "healthcheck"
	Label       = "label"
	Maintainer  = "maintainer"
	Onbuild     = "onbuild"
	Run         = "run"
	Shell       = "shell"
	StopSignal  = "stopsignal"
	User        = "user"
	Volume      = "volume"
	Workdir     = "workdir"
)

// Commands is list of all Dockerfile commands
var Commands = map[string]struct{}{
	Add:         {},
	Arg:         {},
	Cmd:         {},
	Copy:        {},
	Entrypoint:  {},
	Env:         {},
	Expose:      {},
	From:        {},
	Healthcheck: {},
	Label:       {},
	Maintainer:  {},
	Onbuild:     {},
	Run:         {},
	Shell:       {},
	StopSignal:  {},
	User:        {},
	Volume:      {},
	Workdir:     {},
}
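The `Commands` map above is what the new parser package uses to recognize Dockerfile instructions. A minimal illustrative check against it (my example, not part of the vendored file) could look like this:

package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder/dockerfile/command"
)

// isDockerfileCommand reports whether a token is a Dockerfile instruction
// known to the command package (keys in Commands are lowercase).
func isDockerfileCommand(token string) bool {
	_, ok := command.Commands[strings.ToLower(token)]
	return ok
}

func main() {
	fmt.Println(isDockerfileCommand("RUN"))     // true
	fmt.Println(isDockerfileCommand("INCLUDE")) // false: unknown instructions are ignored by the parser
}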
vendor/github.com/openshift/imagebuilder/dockerfile/parser/line_parsers.go (generated, vendored, new file): 399 lines added
package parser

// line parsers are dispatch calls that parse a single unit of text into a
// Node object which contains the whole statement. Dockerfiles have varied
// (but not usually unique, see ONBUILD for a unique example) parsing rules
// per-command, and these unify the processing in a way that makes it
// manageable.

import (
	"encoding/json"
	"errors"
	"fmt"
	"sort"
	"strings"
	"unicode"
	"unicode/utf8"

	"github.com/openshift/imagebuilder/dockerfile/command"
)

var (
	errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
)

const (
	commandLabel = "LABEL"
)

// ignore the current argument. This will still leave a command parsed, but
// will not incorporate the arguments into the ast.
func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
	return &Node{}, nil, nil
}

// used for onbuild. Could potentially be used for anything that represents a
// statement with sub-statements.
//
// ONBUILD RUN foo bar -> (onbuild (run foo bar))
//
func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	child, err := newNodeFromLine(rest, d)
	if err != nil {
		return nil, nil, err
	}

	return &Node{Children: []*Node{child}}, nil, nil
}

// helper to parse words (i.e space delimited or quoted strings) in a statement.
// The quotes are preserved as part of this function and they are stripped later
// as part of processWords().
func parseWords(rest string, d *Directive) []string {
	const (
		inSpaces = iota // looking for start of a word
		inWord
		inQuote
	)

	words := []string{}
	phase := inSpaces
	word := ""
	quote := '\000'
	blankOK := false
	var ch rune
	var chWidth int

	for pos := 0; pos <= len(rest); pos += chWidth {
		if pos != len(rest) {
			ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
		}

		if phase == inSpaces { // Looking for start of word
			if pos == len(rest) { // end of input
				break
			}
			if unicode.IsSpace(ch) { // skip spaces
				continue
			}
			phase = inWord // found it, fall through
		}
		if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
			if blankOK || len(word) > 0 {
				words = append(words, word)
			}
			break
		}
		if phase == inWord {
			if unicode.IsSpace(ch) {
				phase = inSpaces
				if blankOK || len(word) > 0 {
					words = append(words, word)
				}
				word = ""
				blankOK = false
				continue
			}
			if ch == '\'' || ch == '"' {
				quote = ch
				blankOK = true
				phase = inQuote
			}
			if ch == d.escapeToken {
				if pos+chWidth == len(rest) {
					continue // just skip an escape token at end of line
				}
				// If we're not quoted and we see an escape token, then always just
				// add the escape token plus the char to the word, even if the char
				// is a quote.
				word += string(ch)
				pos += chWidth
				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
			}
			word += string(ch)
			continue
		}
		if phase == inQuote {
			if ch == quote {
				phase = inWord
			}
			// The escape token is special except for ' quotes - can't escape anything for '
			if ch == d.escapeToken && quote != '\'' {
				if pos+chWidth == len(rest) {
					phase = inWord
					continue // just skip the escape token at end
				}
				pos += chWidth
				word += string(ch)
				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
			}
			word += string(ch)
		}
	}

	return words
}

// parse environment like statements. Note that this does *not* handle
// variable interpolation, which will be handled in the evaluator.
func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
	// This is kind of tricky because we need to support the old
	// variant: KEY name value
	// as well as the new one: KEY name=value ...
	// The trigger to know which one is being used will be whether we hit
	// a space or = first. space ==> old, "=" ==> new

	words := parseWords(rest, d)
	if len(words) == 0 {
		return nil, nil
	}

	// Old format (KEY name value)
	if !strings.Contains(words[0], "=") {
		parts := tokenWhitespace.Split(rest, 2)
		if len(parts) < 2 {
			return nil, fmt.Errorf(key + " must have two arguments")
		}
		return newKeyValueNode(parts[0], parts[1]), nil
	}

	var rootNode *Node
	var prevNode *Node
	for _, word := range words {
		if !strings.Contains(word, "=") {
			return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
		}

		parts := strings.SplitN(word, "=", 2)
		node := newKeyValueNode(parts[0], parts[1])
		rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
	}

	return rootNode, nil
}

func newKeyValueNode(key, value string) *Node {
	return &Node{
		Value: key,
		Next:  &Node{Value: value},
	}
}

func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
	if rootNode == nil {
		rootNode = node
	}
	if prevNode != nil {
		prevNode.Next = node
	}

	prevNode = node.Next
	return rootNode, prevNode
}

func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
	node, err := parseNameVal(rest, "ENV", d)
	return node, nil, err
}

func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
	node, err := parseNameVal(rest, commandLabel, d)
	return node, nil, err
}

// NodeFromLabels returns a Node for the injected labels
func NodeFromLabels(labels map[string]string) *Node {
	keys := []string{}
	for key := range labels {
		keys = append(keys, key)
	}
	// Sort the label to have a repeatable order
	sort.Strings(keys)

	labelPairs := []string{}
	var rootNode *Node
	var prevNode *Node
	for _, key := range keys {
		value := labels[key]
		labelPairs = append(labelPairs, fmt.Sprintf("%q='%s'", key, value))
		// Value must be single quoted to prevent env variable expansion
		// See https://github.com/docker/docker/issues/26027
		node := newKeyValueNode(key, "'"+value+"'")
		rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
	}

	return &Node{
		Value:    command.Label,
		Original: commandLabel + " " + strings.Join(labelPairs, " "),
		Next:     rootNode,
	}
}

// parses a statement containing one or more keyword definition(s) and/or
// value assignments, like `name1 name2= name3="" name4=value`.
// Note that this is a stricter format than the old format of assignment,
// allowed by parseNameVal(), in a way that this only allows assignment of the
// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above.
// In addition, a keyword definition alone is of the form `keyword` like `name1`
// above. And the assignments `name2=` and `name3=""` are equivalent and
// assign an empty value to the respective keywords.
func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
	words := parseWords(rest, d)
	if len(words) == 0 {
		return nil, nil, nil
	}

	var (
		rootnode *Node
		prevNode *Node
	)
	for i, word := range words {
		node := &Node{}
		node.Value = word
		if i == 0 {
			rootnode = node
		} else {
			prevNode.Next = node
		}
		prevNode = node
	}

	return rootnode, nil, nil
}

// parses a whitespace-delimited set of arguments. The result is effectively a
// linked list of string arguments.
func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	node := &Node{}
	rootnode := node
	prevnode := node
	for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
		prevnode = node
		node.Value = str
		node.Next = &Node{}
		node = node.Next
	}

	// XXX to get around regexp.Split *always* providing an empty string at the
	// end due to how our loop is constructed, nil out the last node in the
	// chain.
	prevnode.Next = nil

	return rootnode, nil, nil
}

// parseString just wraps the string in quotes and returns a working node.
func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}
	n := &Node{}
	n.Value = rest
	return n, nil, nil
}

// parseJSON converts JSON arrays to an AST.
func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
	rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
	if !strings.HasPrefix(rest, "[") {
		return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
	}

	var myJSON []interface{}
	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
		return nil, nil, err
	}

	var top, prev *Node
	for _, str := range myJSON {
		s, ok := str.(string)
		if !ok {
			return nil, nil, errDockerfileNotStringArray
		}

		node := &Node{Value: s}
		if prev == nil {
			top = node
		} else {
			prev.Next = node
		}
		prev = node
	}

	return top, map[string]bool{"json": true}, nil
}

// parseMaybeJSON determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, quotes the result and returns a single
// node.
func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	node, attrs, err := parseJSON(rest, d)

	if err == nil {
		return node, attrs, nil
	}
	if err == errDockerfileNotStringArray {
		return nil, nil, err
	}

	node = &Node{}
	node.Value = rest
	return node, nil, nil
}

// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, attempts to parse it as a whitespace
// delimited string.
func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
	node, attrs, err := parseJSON(rest, d)

	if err == nil {
		return node, attrs, nil
	}
	if err == errDockerfileNotStringArray {
		return nil, nil, err
	}

	return parseStringsWhitespaceDelimited(rest, d)
}

// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
	// Find end of first argument
	var sep int
	for ; sep < len(rest); sep++ {
		if unicode.IsSpace(rune(rest[sep])) {
			break
		}
	}
	next := sep
	for ; next < len(rest); next++ {
		if !unicode.IsSpace(rune(rest[next])) {
			break
		}
	}

	if sep == 0 {
		return nil, nil, nil
	}

	typ := rest[:sep]
	cmd, attrs, err := parseMaybeJSON(rest[next:], d)
	if err != nil {
		return nil, nil, err
	}

	return &Node{Value: typ, Next: cmd}, attrs, err
}
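To see the two ENV/LABEL forms that `parseNameVal` above distinguishes (the old `KEY name value` variant and the newer `KEY name=value ...` variant), here is a small sketch using only the exported `Parse` and `Dump` from the vendored parser package; the Dockerfile strings are made-up examples.

package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder/dockerfile/parser"
)

func main() {
	// Old form: a single key with everything after it as the value.
	// New form: any number of key=value pairs on one line.
	for _, df := range []string{
		"ENV myName John Doe",
		"ENV myName=\"John Doe\" myDog=Rex",
	} {
		res, err := parser.Parse(strings.NewReader(df))
		if err != nil {
			panic(err)
		}
		// Each child of the root node is one instruction; Dump renders it
		// as the s-expression format described in parser.go.
		fmt.Println(res.AST.Children[0].Dump())
	}
}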
vendor/github.com/openshift/imagebuilder/dockerfile/parser/parser.go (generated, vendored, new file): 355 lines added
// Package parser implements a parser and parse tree dumper for Dockerfiles.
package parser

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"unicode"

	"github.com/openshift/imagebuilder/dockerfile/command"
	"github.com/docker/docker/pkg/system"
	"github.com/pkg/errors"
)

// Node is a structure used to represent a parse tree.
//
// In the node there are three fields, Value, Next, and Children. Value is the
// current token's string value. Next is always the next non-child token, and
// children contains all the children. Here's an example:
//
// (value next (child child-next child-next-next) next-next)
//
// This data structure is frankly pretty lousy for handling complex languages,
// but lucky for us the Dockerfile isn't very complicated. This structure
// works a little more effectively than a "proper" parse tree for our needs.
//
type Node struct {
	Value      string          // actual content
	Next       *Node           // the next item in the current sexp
	Children   []*Node         // the children of this sexp
	Attributes map[string]bool // special attributes for this node
	Original   string          // original line used before parsing
	Flags      []string        // only top Node should have this set
	StartLine  int             // the line in the original dockerfile where the node begins
	endLine    int             // the line in the original dockerfile where the node ends
}

// Dump dumps the AST defined by `node` as a list of sexps.
// Returns a string suitable for printing.
func (node *Node) Dump() string {
	str := ""
	str += node.Value

	if len(node.Flags) > 0 {
		str += fmt.Sprintf(" %q", node.Flags)
	}

	for _, n := range node.Children {
		str += "(" + n.Dump() + ")\n"
	}

	for n := node.Next; n != nil; n = n.Next {
		if len(n.Children) > 0 {
			str += " " + n.Dump()
		} else {
			str += " " + strconv.Quote(n.Value)
		}
	}

	return strings.TrimSpace(str)
}

func (node *Node) lines(start, end int) {
	node.StartLine = start
	node.endLine = end
}

// AddChild adds a new child node, and updates line information
func (node *Node) AddChild(child *Node, startLine, endLine int) {
	child.lines(startLine, endLine)
	if node.StartLine < 0 {
		node.StartLine = startLine
	}
	node.endLine = endLine
	node.Children = append(node.Children, child)
}

var (
	dispatch             map[string]func(string, *Directive) (*Node, map[string]bool, error)
	tokenWhitespace      = regexp.MustCompile(`[\t\v\f\r ]+`)
	tokenEscapeCommand   = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
	tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
	tokenComment         = regexp.MustCompile(`^#.*$`)
)

// DefaultEscapeToken is the default escape token
const DefaultEscapeToken = '\\'

// defaultPlatformToken is the platform assumed for the build if not explicitly provided
var defaultPlatformToken = runtime.GOOS

// Directive is the structure used during a build run to hold the state of
// parsing directives.
type Directive struct {
	escapeToken           rune           // Current escape token
	platformToken         string         // Current platform token
	lineContinuationRegex *regexp.Regexp // Current line continuation regex
	processingComplete    bool           // Whether we are done looking for directives
	escapeSeen            bool           // Whether the escape directive has been seen
	platformSeen          bool           // Whether the platform directive has been seen
}

// setEscapeToken sets the default token for escaping characters in a Dockerfile.
func (d *Directive) setEscapeToken(s string) error {
	if s != "`" && s != "\\" {
		return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
	}
	d.escapeToken = rune(s[0])
	d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
	return nil
}

// setPlatformToken sets the default platform for pulling images in a Dockerfile.
func (d *Directive) setPlatformToken(s string) error {
	s = strings.ToLower(s)
	valid := []string{runtime.GOOS}
	if system.LCOWSupported() {
		valid = append(valid, "linux")
	}
	for _, item := range valid {
		if s == item {
			d.platformToken = s
			return nil
		}
	}
	return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
}

// possibleParserDirective looks for one or more parser directives '# escapeToken=<char>' and
// '# platform=<string>'. Parser directives must precede any builder instruction
// or other comments, and cannot be repeated.
func (d *Directive) possibleParserDirective(line string) error {
	if d.processingComplete {
		return nil
	}

	tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
	if len(tecMatch) != 0 {
		for i, n := range tokenEscapeCommand.SubexpNames() {
			if n == "escapechar" {
				if d.escapeSeen == true {
					return errors.New("only one escape parser directive can be used")
				}
				d.escapeSeen = true
				return d.setEscapeToken(tecMatch[i])
			}
		}
	}

	// TODO @jhowardmsft LCOW Support: Eventually this check can be removed,
	// but only recognise a platform token if running in LCOW mode.
	if system.LCOWSupported() {
		tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
		if len(tpcMatch) != 0 {
			for i, n := range tokenPlatformCommand.SubexpNames() {
				if n == "platform" {
					if d.platformSeen == true {
						return errors.New("only one platform parser directive can be used")
					}
					d.platformSeen = true
					return d.setPlatformToken(tpcMatch[i])
				}
			}
		}
	}

	d.processingComplete = true
	return nil
}

// NewDefaultDirective returns a new Directive with the default escapeToken token
func NewDefaultDirective() *Directive {
	directive := Directive{}
	directive.setEscapeToken(string(DefaultEscapeToken))
	directive.setPlatformToken(defaultPlatformToken)
	return &directive
}

func init() {
	// Dispatch Table. see line_parsers.go for the parse functions.
	// The command is parsed and mapped to the line parser. The line parser
	// receives the arguments but not the command, and returns an AST after
	// reformulating the arguments according to the rules in the parser
	// functions. Errors are propagated up by Parse() and the resulting AST can
	// be incorporated directly into the existing AST as a next.
	dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
		command.Add:         parseMaybeJSONToList,
		command.Arg:         parseNameOrNameVal,
		command.Cmd:         parseMaybeJSON,
		command.Copy:        parseMaybeJSONToList,
		command.Entrypoint:  parseMaybeJSON,
		command.Env:         parseEnv,
		command.Expose:      parseStringsWhitespaceDelimited,
		command.From:        parseStringsWhitespaceDelimited,
		command.Healthcheck: parseHealthConfig,
		command.Label:       parseLabel,
		command.Maintainer:  parseString,
		command.Onbuild:     parseSubCommand,
		command.Run:         parseMaybeJSON,
		command.Shell:       parseMaybeJSON,
		command.StopSignal:  parseString,
		command.User:        parseString,
		command.Volume:      parseMaybeJSONToList,
		command.Workdir:     parseString,
	}
}

// newNodeFromLine splits the line into parts, and dispatches to a function
// based on the command and command arguments. A Node is created from the
// result of the dispatch.
func newNodeFromLine(line string, directive *Directive) (*Node, error) {
	cmd, flags, args, err := splitCommand(line)
	if err != nil {
		return nil, err
	}

	fn := dispatch[cmd]
	// Ignore invalid Dockerfile instructions
	if fn == nil {
		fn = parseIgnore
	}
	next, attrs, err := fn(args, directive)
	if err != nil {
		return nil, err
	}

	return &Node{
		Value:      cmd,
		Original:   line,
		Flags:      flags,
		Next:       next,
		Attributes: attrs,
	}, nil
}

// Result is the result of parsing a Dockerfile
type Result struct {
	AST         *Node
	EscapeToken rune
	Platform    string
	Warnings    []string
}

// PrintWarnings to the writer
func (r *Result) PrintWarnings(out io.Writer) {
	if len(r.Warnings) == 0 {
		return
	}
	fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n")
}

// Parse reads lines from a Reader, parses the lines into an AST and returns
// the AST and escape token
func Parse(rwc io.Reader) (*Result, error) {
	d := NewDefaultDirective()
	currentLine := 0
	root := &Node{StartLine: -1}
	scanner := bufio.NewScanner(rwc)
	warnings := []string{}

	var err error
	for scanner.Scan() {
		bytesRead := scanner.Bytes()
		if currentLine == 0 {
			// First line, strip the byte-order-marker if present
			bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
		}
		bytesRead, err = processLine(d, bytesRead, true)
		if err != nil {
			return nil, err
		}
		currentLine++

		startLine := currentLine
		line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
		if isEndOfLine && line == "" {
			continue
		}

		var hasEmptyContinuationLine bool
		for !isEndOfLine && scanner.Scan() {
			bytesRead, err := processLine(d, scanner.Bytes(), false)
			if err != nil {
				return nil, err
			}
			currentLine++

			if isEmptyContinuationLine(bytesRead) {
				hasEmptyContinuationLine = true
				continue
			}

			continuationLine := string(bytesRead)
			continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
			line += continuationLine
		}

		if hasEmptyContinuationLine {
			warning := "[WARNING]: Empty continuation line found in:\n " + line
			warnings = append(warnings, warning)
		}

		child, err := newNodeFromLine(line, d)
		if err != nil {
			return nil, err
		}
		root.AddChild(child, startLine, currentLine)
	}

	if len(warnings) > 0 {
		warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
	}
	return &Result{
		AST:         root,
		Warnings:    warnings,
		EscapeToken: d.escapeToken,
		Platform:    d.platformToken,
	}, nil
}

func trimComments(src []byte) []byte {
	return tokenComment.ReplaceAll(src, []byte{})
}

func trimWhitespace(src []byte) []byte {
	return bytes.TrimLeftFunc(src, unicode.IsSpace)
}

func isEmptyContinuationLine(line []byte) bool {
	return len(trimComments(trimWhitespace(line))) == 0
}

var utf8bom = []byte{0xEF, 0xBB, 0xBF}

func trimContinuationCharacter(line string, d *Directive) (string, bool) {
	if d.lineContinuationRegex.MatchString(line) {
		line = d.lineContinuationRegex.ReplaceAllString(line, "")
		return line, false
	}
	return line, true
}

// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
// to preserve whitespace on continuation lines. Why is that done?
func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
	if stripLeftWhitespace {
		token = trimWhitespace(token)
	}
	return trimComments(token), d.possibleParserDirective(string(token))
}
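As an illustration of the directive handling and line continuation logic above, the following sketch (my own example, using only the exported API shown in this file) parses a Dockerfile that switches the escape token to a backtick and then inspects the returned Result:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/openshift/imagebuilder/dockerfile/parser"
)

func main() {
	dockerfile := "# escape=`\n" +
		"FROM busybox\n" +
		"RUN echo hello `\n" +
		"    world\n"

	res, err := parser.Parse(strings.NewReader(dockerfile))
	if err != nil {
		panic(err)
	}

	// The escape parser directive is reflected in the Result, and the
	// backtick continuation above is folded into a single RUN node.
	fmt.Printf("escape token: %c\n", res.EscapeToken)
	fmt.Println(res.AST.Dump())
	res.PrintWarnings(os.Stderr)
}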
vendor/github.com/openshift/imagebuilder/dockerfile/parser/split_command.go (generated, vendored, new file): 118 lines added
package parser

import (
	"strings"
	"unicode"
)

// splitCommand takes a single line of text and parses out the cmd and args,
// which are used for dispatching to more exact parsing functions.
func splitCommand(line string) (string, []string, string, error) {
	var args string
	var flags []string

	// Make sure we get the same results irrespective of leading/trailing spaces
	cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
	cmd := strings.ToLower(cmdline[0])

	if len(cmdline) == 2 {
		var err error
		args, flags, err = extractBuilderFlags(cmdline[1])
		if err != nil {
			return "", nil, "", err
		}
	}

	return cmd, flags, strings.TrimSpace(args), nil
}

func extractBuilderFlags(line string) (string, []string, error) {
	// Parses the BuilderFlags and returns the remaining part of the line

	const (
		inSpaces = iota // looking for start of a word
		inWord
		inQuote
	)

	words := []string{}
	phase := inSpaces
	word := ""
	quote := '\000'
	blankOK := false
	var ch rune

	for pos := 0; pos <= len(line); pos++ {
		if pos != len(line) {
			ch = rune(line[pos])
		}

		if phase == inSpaces { // Looking for start of word
			if pos == len(line) { // end of input
				break
			}
			if unicode.IsSpace(ch) { // skip spaces
				continue
			}

			// Only keep going if the next word starts with --
			if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
				return line[pos:], words, nil
			}

			phase = inWord // found something with "--", fall through
		}
		if (phase == inWord || phase == inQuote) && (pos == len(line)) {
			if word != "--" && (blankOK || len(word) > 0) {
				words = append(words, word)
			}
			break
		}
		if phase == inWord {
			if unicode.IsSpace(ch) {
				phase = inSpaces
				if word == "--" {
					return line[pos:], words, nil
				}
				if blankOK || len(word) > 0 {
					words = append(words, word)
				}
				word = ""
				blankOK = false
				continue
			}
			if ch == '\'' || ch == '"' {
				quote = ch
				blankOK = true
				phase = inQuote
				continue
			}
			if ch == '\\' {
				if pos+1 == len(line) {
					continue // just skip \ at end
				}
				pos++
				ch = rune(line[pos])
			}
			word += string(ch)
			continue
		}
		if phase == inQuote {
			if ch == quote {
				phase = inWord
				continue
			}
			if ch == '\\' {
				if pos+1 == len(line) {
					phase = inWord
					continue // just skip \ at end
				}
				pos++
				ch = rune(line[pos])
			}
			word += string(ch)
		}
	}

	return "", words, nil
}
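`splitCommand` and `extractBuilderFlags` above are what populate `Node.Flags` with leading `--` options. A short sketch, again my own and using only the exported parser API, shows where a flag such as `--from` ends up:

package main

import (
	"fmt"
	"strings"

	"github.com/openshift/imagebuilder/dockerfile/parser"
)

func main() {
	res, err := parser.Parse(strings.NewReader("COPY --from=builder /bin/app /usr/local/bin/app\n"))
	if err != nil {
		panic(err)
	}

	node := res.AST.Children[0]
	// extractBuilderFlags peels off the leading --from=builder, so it shows
	// up in Flags rather than in the argument list.
	fmt.Println(node.Value) // copy
	fmt.Println(node.Flags) // [--from=builder]
	for n := node.Next; n != nil; n = n.Next {
		fmt.Println(n.Value) // /bin/app, then /usr/local/bin/app
	}
}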
vendor/github.com/openshift/imagebuilder/evaluator.go (generated, vendored): 4 lines changed
@@ -5,8 +5,8 @@ import (
 	"io"
 	"strings"

-	"github.com/docker/docker/builder/dockerfile/command"
-	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/openshift/imagebuilder/dockerfile/command"
+	"github.com/openshift/imagebuilder/dockerfile/parser"
 )

 // ParseDockerfile parses the provided stream as a canonical Dockerfile
vendor/github.com/openshift/imagebuilder/vendor.conf (generated, vendored, new file): 21 lines added
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/docker/docker b68221c37ee597950364788204546f9c9d0e46a1
github.com/docker/go-connections 97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e
github.com/docker/go-units 2fb04c6466a548a03cb009c5569ee1ab1e35398e
github.com/fsouza/go-dockerclient openshift-4.0 https://github.com/openshift/go-dockerclient.git
github.com/gogo/protobuf c5a62797aee0054613cc578653a16c6237fef080
github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
github.com/golang/protobuf v1.3.0
github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e
github.com/Microsoft/go-winio 1a8911d1ed007260465c3bfbbc785ac6915a0bb8
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/opencontainers/go-digest ac19fd6e7483ff933754af248d80be865e543d22
github.com/opencontainers/image-spec 243ea084a44451d27322fed02b682d99e2af3ba9
github.com/opencontainers/runc 923a8f8a9a07aceada5fc48c4d37e905d9b019b5
github.com/pkg/errors 27936f6d90f9c8e1145f11ed52ffffbfdb9e0af7
github.com/sirupsen/logrus d7b6bf5e4d26448fd977d07d745a2a66097ddecb
golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83
golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f
golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba